/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
 *
 */
#include <map>

#include "aco_ir.h"
#include "aco_builder.h"
#include "util/u_math.h"
#include "sid.h"
#include "vulkan/radv_shader.h"


namespace aco {
struct lower_context {
   Program *program;
   std::vector<aco_ptr<Instruction>> instructions;
};
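
/* Maps a ReduceOp to the 32-bit VALU opcode performing one reduction step.
 * aco_opcode::num_opcodes is returned as a sentinel for the 64-bit integer
 * operations that have no single hardware instruction; those are expanded by
 * emit_int64_op()/emit_int64_dpp_op() below. */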
aco_opcode get_reduce_opcode(chip_class chip, ReduceOp op) {
   switch (op) {
   case iadd32: return chip >= GFX9 ? aco_opcode::v_add_u32 : aco_opcode::v_add_co_u32;
   case imul32: return aco_opcode::v_mul_lo_u32;
   case fadd32: return aco_opcode::v_add_f32;
   case fmul32: return aco_opcode::v_mul_f32;
   case imax32: return aco_opcode::v_max_i32;
   case imin32: return aco_opcode::v_min_i32;
   case umin32: return aco_opcode::v_min_u32;
   case umax32: return aco_opcode::v_max_u32;
   case fmin32: return aco_opcode::v_min_f32;
   case fmax32: return aco_opcode::v_max_f32;
   case iand32: return aco_opcode::v_and_b32;
   case ixor32: return aco_opcode::v_xor_b32;
   case ior32: return aco_opcode::v_or_b32;
   case iadd64: return aco_opcode::num_opcodes;
   case imul64: return aco_opcode::num_opcodes;
   case fadd64: return aco_opcode::v_add_f64;
   case fmul64: return aco_opcode::v_mul_f64;
   case imin64: return aco_opcode::num_opcodes;
   case imax64: return aco_opcode::num_opcodes;
   case umin64: return aco_opcode::num_opcodes;
   case umax64: return aco_opcode::num_opcodes;
   case fmin64: return aco_opcode::v_min_f64;
   case fmax64: return aco_opcode::v_max_f64;
   case iand64: return aco_opcode::num_opcodes;
   case ior64: return aco_opcode::num_opcodes;
   case ixor64: return aco_opcode::num_opcodes;
   default: return aco_opcode::num_opcodes;
   }
}
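
/* Emits a 32-bit VGPR add and, if the selected encoding produces a carry-out,
 * pins that second definition to vcc so a following v_addc_co_u32 can read it. */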
void emit_vadd32(Builder& bld, Definition def, Operand src0, Operand src1)
{
   Instruction *instr = bld.vadd32(def, src0, src1, false, Operand(s2), true);
   if (instr->definitions.size() >= 2)
      instr->definitions[1].setFixed(vcc);
}
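
/* Performs one 64-bit reduction step where the first source is read through
 * DPP lane-sharing. No 64-bit VALU instruction accepts DPP, so each operation
 * is decomposed into 32-bit halves (add with carry, compare + cndmask, or
 * long multiplication). */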
void emit_int64_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
                       PhysReg vtmp_reg, ReduceOp op,
                       unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl,
                       Operand *identity=NULL)
{
   Builder bld(ctx->program, &ctx->instructions);
   Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)};
   Definition vtmp_def[] = {Definition(vtmp_reg, v1), Definition(PhysReg{vtmp_reg+1}, v1)};
   Operand src0[] = {Operand(src0_reg, v1), Operand(PhysReg{src0_reg+1}, v1)};
   Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)};
   Operand src1_64 = Operand(src1_reg, v2);
   Operand vtmp_op[] = {Operand(vtmp_reg, v1), Operand(PhysReg{vtmp_reg+1}, v1)};
   Operand vtmp_op64 = Operand(vtmp_reg, v2);

   if (op == iadd64) {
      if (ctx->program->chip_class >= GFX10) {
         /* GFX10 has no VOP2 carry-out add with DPP, so move the
          * DPP-selected source into vtmp first and use the VOP3 add. */
         if (identity)
            bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
         bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                      dpp_ctrl, row_mask, bank_mask, bound_ctrl);
         bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(s2, vcc), vtmp_op[0], src1[0]);
      } else {
         bld.vop2_dpp(aco_opcode::v_add_co_u32, dst[0], bld.def(s2, vcc), src0[0], src1[0],
                      dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      }
      bld.vop2_dpp(aco_opcode::v_addc_co_u32, dst[1], bld.def(s2, vcc), src0[1], src1[1], Operand(vcc, s2),
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   } else if (op == iand64) {
      bld.vop2_dpp(aco_opcode::v_and_b32, dst[0], src0[0], src1[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop2_dpp(aco_opcode::v_and_b32, dst[1], src0[1], src1[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   } else if (op == ior64) {
      bld.vop2_dpp(aco_opcode::v_or_b32, dst[0], src0[0], src1[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop2_dpp(aco_opcode::v_or_b32, dst[1], src0[1], src1[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   } else if (op == ixor64) {
      bld.vop2_dpp(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop2_dpp(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
      aco_opcode cmp = aco_opcode::num_opcodes;
      switch (op) {
      case umin64:
         cmp = aco_opcode::v_cmp_gt_u64;
         break;
      case umax64:
         cmp = aco_opcode::v_cmp_lt_u64;
         break;
      case imin64:
         cmp = aco_opcode::v_cmp_gt_i64;
         break;
      case imax64:
         cmp = aco_opcode::v_cmp_lt_i64;
         break;
      default:
         break;
      }

      if (identity) {
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[1], identity[1]);
      }
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[1], src0[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);

      bld.vopc(cmp, bld.def(s2, vcc), vtmp_op64, src1_64);
      bld.vop2(aco_opcode::v_cndmask_b32, dst[0], vtmp_op[0], src1[0], Operand(vcc, s2));
      bld.vop2(aco_opcode::v_cndmask_b32, dst[1], vtmp_op[1], src1[1], Operand(vcc, s2));
   } else if (op == imul64) {
      /* t4 = dpp(x_hi)
       * t1 = umul_lo(t4, y_lo)
       * t3 = dpp(x_lo)
       * t0 = umul_lo(t3, y_hi)
       * t2 = iadd(t0, t1)
       * t5 = umul_hi(t3, y_lo)
       * res_hi = iadd(t2, t5)
       * res_lo = umul_lo(t3, y_lo)
       * Requires that res_hi != src0[0] and res_hi != src1[0]
       * and that vtmp[0] != res_hi.
       */
      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[1]);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[1],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[1], vtmp_op[0], src1[0]);
      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[0], vtmp_op[0], src1[1]);
      emit_vadd32(bld, vtmp_def[1], vtmp_op[0], vtmp_op[1]);
      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop3(aco_opcode::v_mul_hi_u32, vtmp_def[0], vtmp_op[0], src1[0]);
      emit_vadd32(bld, dst[1], vtmp_op[1], vtmp_op[0]);
      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
      bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], vtmp_op[0], src1[0]);
   }
}
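
/* Like emit_int64_dpp_op(), but without DPP lane-sharing: the plain 64-bit
 * step used when both sources are already in place (e.g. after ds_swizzle,
 * v_permlanex16 or v_readlane shuffles). */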
void emit_int64_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg, PhysReg vtmp, ReduceOp op)
{
   Builder bld(ctx->program, &ctx->instructions);
   Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)};
   RegClass src0_rc = src0_reg.reg >= 256 ? v1 : s1;
   Operand src0[] = {Operand(src0_reg, src0_rc), Operand(PhysReg{src0_reg+1}, src0_rc)};
   Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)};
   Operand src0_64 = Operand(src0_reg, src0_reg.reg >= 256 ? v2 : s2);
   Operand src1_64 = Operand(src1_reg, v2);

   if (src0_rc == s1 &&
       (op == imul64 || op == umin64 || op == umax64 || op == imin64 || op == imax64)) {
      /* these operations don't accept SGPR sources, so copy src0 to vtmp */
      assert(vtmp.reg != 0);
      bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), src0[0]);
      bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);
      src0_reg = vtmp;
      src0[0] = Operand(vtmp, v1);
      src0[1] = Operand(PhysReg{vtmp+1}, v1);
      src0_64 = Operand(vtmp, v2);
   } else if (src0_rc == s1 && op == iadd64) {
      assert(vtmp.reg != 0);
      bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);
      src0[1] = Operand(PhysReg{vtmp+1}, v1);
   }

   if (op == iadd64) {
      if (ctx->program->chip_class >= GFX10) {
         bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(s2, vcc), src0[0], src1[0]);
      } else {
         bld.vop2(aco_opcode::v_add_co_u32, dst[0], bld.def(s2, vcc), src0[0], src1[0]);
      }
      bld.vop2(aco_opcode::v_addc_co_u32, dst[1], bld.def(s2, vcc), src0[1], src1[1], Operand(vcc, s2));
   } else if (op == iand64) {
      bld.vop2(aco_opcode::v_and_b32, dst[0], src0[0], src1[0]);
      bld.vop2(aco_opcode::v_and_b32, dst[1], src0[1], src1[1]);
   } else if (op == ior64) {
      bld.vop2(aco_opcode::v_or_b32, dst[0], src0[0], src1[0]);
      bld.vop2(aco_opcode::v_or_b32, dst[1], src0[1], src1[1]);
   } else if (op == ixor64) {
      bld.vop2(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0]);
      bld.vop2(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1]);
   } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
      aco_opcode cmp = aco_opcode::num_opcodes;
      switch (op) {
      case umin64:
         cmp = aco_opcode::v_cmp_gt_u64;
         break;
      case umax64:
         cmp = aco_opcode::v_cmp_lt_u64;
         break;
      case imin64:
         cmp = aco_opcode::v_cmp_gt_i64;
         break;
      case imax64:
         cmp = aco_opcode::v_cmp_lt_i64;
         break;
      default:
         break;
      }

      bld.vopc(cmp, bld.def(s2, vcc), src0_64, src1_64);
      bld.vop2(aco_opcode::v_cndmask_b32, dst[0], src0[0], src1[0], Operand(vcc, s2));
      bld.vop2(aco_opcode::v_cndmask_b32, dst[1], src0[1], src1[1], Operand(vcc, s2));
   } else if (op == imul64) {
      if (src1_reg == dst_reg) {
         /* it's fine if src0==dst but not if src1==dst */
         std::swap(src0_reg, src1_reg);
         std::swap(src0[0], src1[0]);
         std::swap(src0[1], src1[1]);
         std::swap(src0_64, src1_64);
      }
      assert(!(src0_reg == src1_reg));
      /* t1 = umul_lo(x_hi, y_lo)
       * t0 = umul_lo(x_lo, y_hi)
       * t2 = iadd(t0, t1)
       * t5 = umul_hi(x_lo, y_lo)
       * res_hi = iadd(t2, t5)
       * res_lo = umul_lo(x_lo, y_lo)
       * assumes that it's ok to modify x_hi/y_hi, since we might not have vtmp
       */
      Definition tmp0_def(PhysReg{src0_reg+1}, v1);
      Definition tmp1_def(PhysReg{src1_reg+1}, v1);
      Operand tmp0_op = src0[1];
      Operand tmp1_op = src1[1];
      bld.vop3(aco_opcode::v_mul_lo_u32, tmp0_def, src0[1], src1[0]);
      bld.vop3(aco_opcode::v_mul_lo_u32, tmp1_def, src0[0], src1[1]);
      emit_vadd32(bld, tmp0_def, tmp1_op, tmp0_op);
      bld.vop3(aco_opcode::v_mul_hi_u32, tmp1_def, src0[0], src1[0]);
      emit_vadd32(bld, dst[1], tmp0_op, tmp1_op);
      bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], src0[0], src1[0]);
   }
}
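
/* Emits a single reduction step combining the DPP-read src0 into src1.
 * VOP2-encodable 32-bit operations take the DPP modifier directly; VOP3-only
 * and 64-bit operations first copy the DPP-selected lanes into vtmp. */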
void emit_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
                 PhysReg vtmp, ReduceOp op, unsigned size,
                 unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl,
                 Operand *identity=NULL) /* for VOP3 with sparse writes */
{
   Builder bld(ctx->program, &ctx->instructions);
   RegClass rc = RegClass(RegType::vgpr, size);
   Definition dst(dst_reg, rc);
   Operand src0(src0_reg, rc);
   Operand src1(src1_reg, rc);

   aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
   bool vop3 = op == imul32 || size == 2;

   if (!vop3) {
      if (opcode == aco_opcode::v_add_co_u32)
         bld.vop2_dpp(opcode, dst, bld.def(s2, vcc), src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      else
         bld.vop2_dpp(opcode, dst, src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      return;
   }

   if (opcode == aco_opcode::num_opcodes) {
      emit_int64_dpp_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op,
                        dpp_ctrl, row_mask, bank_mask, bound_ctrl, identity);
      return;
   }

   /* VOP3 can't take DPP, so move the DPP-selected source into vtmp first */
   if (identity)
      bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), identity[0]);
   if (identity && size >= 2)
      bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), identity[1]);

   for (unsigned i = 0; i < size; i++)
      bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{src0_reg+i}, v1),
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);

   bld.vop3(opcode, dst, Operand(vtmp, rc), src1);
}
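
/* Emits a reduction step without DPP, e.g. after the sources were already
 * shuffled into place with ds_swizzle, v_permlanex16 or v_readlane. */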
void emit_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
             PhysReg vtmp, ReduceOp op, unsigned size)
{
   Builder bld(ctx->program, &ctx->instructions);
   RegClass rc = RegClass(RegType::vgpr, size);
   Definition dst(dst_reg, rc);
   Operand src0(src0_reg, RegClass(src0_reg.reg >= 256 ? RegType::vgpr : RegType::sgpr, size));
   Operand src1(src1_reg, rc);

   aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
   bool vop3 = op == imul32 || size == 2;

   if (opcode == aco_opcode::num_opcodes) {
      emit_int64_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op);
      return;
   }

   if (vop3) {
      bld.vop3(opcode, dst, src0, src1);
   } else if (opcode == aco_opcode::v_add_co_u32) {
      bld.vop2(opcode, dst, bld.def(s2, vcc), src0, src1);
   } else {
      bld.vop2(opcode, dst, src0, src1);
   }
}
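
/* Copies size dwords from src0 to dst, applying the given DPP controls to
 * every component. */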
void emit_dpp_mov(lower_context *ctx, PhysReg dst, PhysReg src0, unsigned size,
                  unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl)
{
   Builder bld(ctx->program, &ctx->instructions);
   for (unsigned i = 0; i < size; i++) {
      bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{dst+i}, v1), Operand(PhysReg{src0+i}, v1),
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   }
}
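
/* Returns dword idx of the identity element for the given reduction, i.e. the
 * value with which inactive lanes are filled so they don't affect the result
 * (0 for iadd, 1 for imul, +/-infinity for fmin/fmax, and so on). */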
uint32_t get_reduction_identity(ReduceOp op, unsigned idx)
{
   switch (op) {
   case iadd32:
   case iadd64:
   case fadd32:
   case fadd64:
   case ior32:
   case ior64:
   case ixor32:
   case ixor64:
   case umax32:
   case umax64:
      return 0;
   case imul32:
   case imul64:
      return idx ? 0 : 1;
   case fmul32:
      return 0x3f800000u; /* 1.0 */
   case fmul64:
      return idx ? 0x3ff00000u : 0u; /* 1.0 */
   case imin32:
      return INT32_MAX;
   case imin64:
      return idx ? 0x7fffffffu : 0xffffffffu;
   case imax32:
      return INT32_MIN;
   case imax64:
      return idx ? 0x80000000u : 0;
   case umin32:
   case umin64:
   case iand32:
   case iand64:
      return 0xffffffffu;
   case fmin32:
      return 0x7f800000u; /* infinity */
   case fmin64:
      return idx ? 0x7ff00000u : 0u; /* infinity */
   case fmax32:
      return 0xff800000u; /* negative infinity */
   case fmax64:
      return idx ? 0xfff00000u : 0u; /* negative infinity */
   default:
      unreachable("Invalid reduction operation");
   }
   return 0;
}
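
/* Lowers p_reduce/p_inclusive_scan/p_exclusive_scan: the source is copied to
 * tmp with inactive lanes set to the identity, then combined by a ladder of
 * DPP steps (quad swaps, half-row and row mirrors, row broadcasts, or their
 * GFX10 permlane/readlane equivalents) that doubles the cluster size at each
 * step. */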
void emit_reduction(lower_context *ctx, aco_opcode op, ReduceOp reduce_op, unsigned cluster_size, PhysReg tmp,
                    PhysReg stmp, PhysReg vtmp, PhysReg sitmp, Operand src, Definition dst)
{
   assert(cluster_size == 64 || op == aco_opcode::p_reduce);

   Builder bld(ctx->program, &ctx->instructions);

   Operand identity[2];
   identity[0] = Operand(get_reduction_identity(reduce_op, 0));
   identity[1] = Operand(get_reduction_identity(reduce_op, 1));
   Operand vcndmask_identity[2] = {identity[0], identity[1]};

   /* First, copy the source to tmp and set inactive lanes to the identity */
   bld.sop1(aco_opcode::s_or_saveexec_b64, Definition(stmp, s2), Definition(scc, s1), Definition(exec, s2), Operand(UINT64_MAX), Operand(exec, s2));

   for (unsigned i = 0; i < src.size(); i++) {
      /* p_exclusive_scan needs it to be a sgpr or inline constant for the v_writelane_b32
       * except on GFX10, where v_writelane_b32 can take a literal. */
      if (identity[i].isLiteral() && op == aco_opcode::p_exclusive_scan && ctx->program->chip_class < GFX10) {
         bld.sop1(aco_opcode::s_mov_b32, Definition(PhysReg{sitmp+i}, s1), identity[i]);
         identity[i] = Operand(PhysReg{sitmp+i}, s1);

         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
         vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
      } else if (identity[i].isLiteral()) {
         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
         vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
      }
   }

   for (unsigned i = 0; i < src.size(); i++) {
      bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(PhysReg{tmp + i}, v1),
                   vcndmask_identity[i], Operand(PhysReg{src.physReg() + i}, v1),
                   Operand(stmp, s2));
   }

   bool exec_restored = false;
   bool dst_written = false;
   switch (op) {
   case aco_opcode::p_reduce:
      if (cluster_size == 1) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_quad_perm(1, 0, 3, 2), 0xf, 0xf, false);
      if (cluster_size == 2) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_quad_perm(2, 3, 0, 1), 0xf, 0xf, false);
      if (cluster_size == 4) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_half_mirror, 0xf, 0xf, false);
      if (cluster_size == 8) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_mirror, 0xf, 0xf, false);
      if (cluster_size == 16) break;
      if (cluster_size == 32) {
         for (unsigned i = 0; i < src.size(); i++)
            bld.ds(aco_opcode::ds_swizzle_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1), ds_pattern_bitmode(0x1f, 0, 0x10));
         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(stmp, s2));
         exec_restored = true;
         emit_op(ctx, dst.physReg(), vtmp, tmp, PhysReg{0}, reduce_op, src.size());
         dst_written = true;
      } else if (ctx->program->chip_class >= GFX10) {
         assert(cluster_size == 64);
         /* GFX10+ doesn't support row_bcast15 and row_bcast31 */
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop3(aco_opcode::v_permlanex16_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1), Operand(0u), Operand(0u));
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         for (unsigned i = 0; i < src.size(); i++)
            bld.vop3(aco_opcode::v_readlane_b32, Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
         emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
      } else {
         assert(cluster_size == 64);
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                     dpp_row_bcast15, 0xa, 0xf, false);
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                     dpp_row_bcast31, 0xc, 0xf, false);
      }
      break;
   case aco_opcode::p_exclusive_scan:
      if (ctx->program->chip_class >= GFX10) { /* gfx10 doesn't support wf_sr1, so emulate it */
         /* shift rows right */
         emit_dpp_mov(ctx, vtmp, tmp, src.size(), dpp_row_sr(1), 0xf, 0xf, true);

         /* fill in the gaps in rows 1 and 3 */
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x10000u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(0x10000u));
         for (unsigned i = 0; i < src.size(); i++) {
            Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
                                         Definition(PhysReg{vtmp+i}, v1),
                                         Operand(PhysReg{tmp+i}, v1),
                                         Operand(0xffffffffu), Operand(0xffffffffu)).instr;
            static_cast<VOP3A_instruction*>(perm)->opsel[0] = true; /* FI (Fetch Inactive) */
         }
         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));

         /* fill in the gap in row 2 */
         for (unsigned i = 0; i < src.size(); i++) {
            bld.vop3(aco_opcode::v_readlane_b32, Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
            bld.vop3(aco_opcode::v_writelane_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{sitmp+i}, s1), Operand(32u));
         }
         std::swap(tmp, vtmp);
      } else {
         emit_dpp_mov(ctx, tmp, tmp, src.size(), dpp_wf_sr1, 0xf, 0xf, true);
      }
      for (unsigned i = 0; i < src.size(); i++) {
         if (!identity[i].isConstant() || identity[i].constantValue()) { /* bound_ctrl should take care of this otherwise */
            if (ctx->program->chip_class < GFX10)
               assert((identity[i].isConstant() && !identity[i].isLiteral()) || identity[i].physReg() == PhysReg{sitmp+i});
            bld.vop3(aco_opcode::v_writelane_b32, Definition(PhysReg{tmp+i}, v1),
                     identity[i], Operand(0u));
         }
      }
      /* fall through */
   case aco_opcode::p_inclusive_scan:
      assert(cluster_size == 64);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(1), 0xf, 0xf, false, identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(2), 0xf, 0xf, false, identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(4), 0xf, 0xf, false, identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(8), 0xf, 0xf, false, identity);
      if (ctx->program->chip_class >= GFX10) {
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xffff0000u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(0xffff0000u));
         for (unsigned i = 0; i < src.size(); i++) {
            Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
                                         Definition(PhysReg{vtmp+i}, v1),
                                         Operand(PhysReg{tmp+i}, v1),
                                         Operand(0xffffffffu), Operand(0xffffffffu)).instr;
            static_cast<VOP3A_instruction*>(perm)->opsel[0] = true; /* FI (Fetch Inactive) */
         }
         emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());

         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(0xffffffffu));
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop3(aco_opcode::v_readlane_b32, Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
         emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
      } else {
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                     dpp_row_bcast15, 0xa, 0xf, false, identity);
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                     dpp_row_bcast31, 0xc, 0xf, false, identity);
      }
      break;
   default:
      unreachable("Invalid reduction mode");
   }

   if (!exec_restored)
      bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(stmp, s2));

   if (op == aco_opcode::p_reduce && cluster_size == 64) {
      for (unsigned k = 0; k < src.size(); k++) {
         bld.vop3(aco_opcode::v_readlane_b32, Definition(PhysReg{dst.physReg() + k}, s1),
                  Operand(PhysReg{tmp + k}, v1), Operand(63u));
      }
   } else if (!(dst.physReg() == tmp) && !dst_written) {
      for (unsigned k = 0; k < src.size(); k++) {
         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{dst.physReg() + k}, v1),
                  Operand(PhysReg{tmp + k}, v1));
      }
   }
}
struct copy_operation {
   Operand op;
   Definition def;
   unsigned uses;
   unsigned size;
};
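
/* Resolves a set of parallel copies. Copies whose destination is not used as
 * a source elsewhere (paths in the location transfer graph) are emitted
 * first; the remaining entries form cycles, which are broken by swapping a
 * source with its destination (v_swap_b32 on GFX9+, otherwise the three-xor
 * swap a ^= b; b ^= a; a ^= b). Constants are materialized last. */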
void handle_operands(std::map<PhysReg, copy_operation>& copy_map, lower_context* ctx, chip_class chip_class, Pseudo_instruction *pi)
{
   Builder bld(ctx->program, &ctx->instructions);
   aco_ptr<Instruction> mov;
   std::map<PhysReg, copy_operation>::iterator it = copy_map.begin();
   std::map<PhysReg, copy_operation>::iterator target;
   bool writes_scc = false;

   /* count the number of uses for each dst reg */
   while (it != copy_map.end()) {
      if (it->second.op.isConstant()) {
         ++it;
         continue;
      }

      if (it->second.def.physReg() == scc)
         writes_scc = true;

      assert(!pi->tmp_in_scc || !(it->second.def.physReg() == pi->scratch_sgpr));

      /* if src and dst reg are the same, remove operation */
      if (it->first == it->second.op.physReg()) {
         it = copy_map.erase(it);
         continue;
      }
      /* check if the operand reg may be overwritten by another copy operation */
      target = copy_map.find(it->second.op.physReg());
      if (target != copy_map.end()) {
         target->second.uses++;
      }

      ++it;
   }

   /* first, handle paths in the location transfer graph */
   bool preserve_scc = pi->tmp_in_scc && !writes_scc;
   it = copy_map.begin();
   while (it != copy_map.end()) {

      /* the target reg is not used as operand for any other copy */
      if (it->second.uses == 0) {

         /* try to coalesce 32-bit sgpr copies to 64-bit copies */
         if (it->second.def.getTemp().type() == RegType::sgpr && it->second.size == 1 &&
             !it->second.op.isConstant() && it->first % 2 == it->second.op.physReg() % 2) {

            PhysReg other_def_reg = PhysReg{it->first % 2 ? it->first - 1 : it->first + 1};
            PhysReg other_op_reg = PhysReg{it->first % 2 ? it->second.op.physReg() - 1 : it->second.op.physReg() + 1};
            std::map<PhysReg, copy_operation>::iterator other = copy_map.find(other_def_reg);

            if (other != copy_map.end() && !other->second.uses && other->second.size == 1 &&
                other->second.op.physReg() == other_op_reg && !other->second.op.isConstant()) {
               std::map<PhysReg, copy_operation>::iterator to_erase = it->first % 2 ? it : other;
               it = it->first % 2 ? other : it;
               copy_map.erase(to_erase);
               it->second.size = 2;
            }
         }

         if (it->second.def.physReg() == scc) {
            bld.sopc(aco_opcode::s_cmp_lg_i32, it->second.def, it->second.op, Operand(0u));
            preserve_scc = true;
         } else if (it->second.size == 2 && it->second.def.getTemp().type() == RegType::sgpr) {
            bld.sop1(aco_opcode::s_mov_b64, it->second.def, Operand(it->second.op.physReg(), s2));
         } else {
            bld.copy(it->second.def, it->second.op);
         }

         /* reduce the number of uses of the operand reg by one */
         if (!it->second.op.isConstant()) {
            for (unsigned i = 0; i < it->second.size; i++) {
               target = copy_map.find(PhysReg{it->second.op.physReg() + i});
               if (target != copy_map.end())
                  target->second.uses--;
            }
         }

         /* and remove the copy operation */
         copy_map.erase(it);
         it = copy_map.begin();
      } else {
         /* the target reg is used as operand, check the next entry */
         ++it;
      }
   }

   if (copy_map.empty())
      return;

   /* all target regs are needed as operand somewhere, which means all entries are part of a cycle */
   bool constants = false;
   for (it = copy_map.begin(); it != copy_map.end(); ++it) {
      assert(it->second.op.isFixed());
      if (it->first == it->second.op.physReg())
         continue;
      /* do constants later */
      if (it->second.op.isConstant()) {
         constants = true;
         continue;
      }

      if (preserve_scc && it->second.def.getTemp().type() == RegType::sgpr)
         assert(!(it->second.def.physReg() == pi->scratch_sgpr));

      /* to resolve the cycle, we have to swap the src reg with the dst reg */
      copy_operation swap = it->second;
      assert(swap.op.regClass() == swap.def.regClass());
      Operand def_as_op = Operand(swap.def.physReg(), swap.def.regClass());
      Definition op_as_def = Definition(swap.op.physReg(), swap.op.regClass());
      if (chip_class >= GFX9 && swap.def.getTemp().type() == RegType::vgpr) {
         bld.vop1(aco_opcode::v_swap_b32, swap.def, op_as_def, swap.op, def_as_op);
      } else if (swap.op.physReg() == scc || swap.def.physReg() == scc) {
         /* we need to swap scc and another sgpr */
         assert(!preserve_scc);

         PhysReg other = swap.op.physReg() == scc ? swap.def.physReg() : swap.op.physReg();

         bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
         bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(other, s1), Operand(0u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(other, s1), Operand(pi->scratch_sgpr, s1));
      } else if (swap.def.getTemp().type() == RegType::sgpr) {
         if (preserve_scc) {
            bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), swap.op);
            bld.sop1(aco_opcode::s_mov_b32, op_as_def, def_as_op);
            bld.sop1(aco_opcode::s_mov_b32, swap.def, Operand(pi->scratch_sgpr, s1));
         } else {
            bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), swap.op, def_as_op);
            bld.sop2(aco_opcode::s_xor_b32, swap.def, Definition(scc, s1), swap.op, def_as_op);
            bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), swap.op, def_as_op);
         }
      } else {
         bld.vop2(aco_opcode::v_xor_b32, op_as_def, swap.op, def_as_op);
         bld.vop2(aco_opcode::v_xor_b32, swap.def, swap.op, def_as_op);
         bld.vop2(aco_opcode::v_xor_b32, op_as_def, swap.op, def_as_op);
      }

      /* change the operand reg of the target's use */
      assert(swap.uses == 1);
      target = it;
      for (++target; target != copy_map.end(); ++target) {
         if (target->second.op.physReg() == it->first) {
            target->second.op.setFixed(swap.op.physReg());
            break;
         }
      }
   }

   /* copy constants into the registers that were operands */
   if (constants) {
      for (it = copy_map.begin(); it != copy_map.end(); ++it) {
         if (!it->second.op.isConstant())
            continue;
         if (it->second.def.physReg() == scc) {
            bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(0u), Operand(it->second.op.constantValue() ? 1u : 0u));
         } else {
            bld.copy(it->second.def, it->second.op);
         }
      }
   }
}
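
/* Entry point of the pass: replaces all pseudo instructions (parallel copies,
 * vector splits/concatenations, spills, reductions, branches) with real
 * hardware instructions and rebuilds each block's instruction list. */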
void lower_to_hw_instr(Program* program)
{
   Block *discard_block = NULL;

   for (size_t i = 0; i < program->blocks.size(); i++)
   {
      Block *block = &program->blocks[i];
      lower_context ctx;
      ctx.program = program;
      Builder bld(program, &ctx.instructions);

      bool set_mode = i == 0 && block->fp_mode.val != program->config->float_mode;
      for (unsigned pred : block->linear_preds) {
         if (program->blocks[pred].fp_mode.val != block->fp_mode.val) {
            set_mode = true;
            break;
         }
      }
      if (set_mode) {
         /* only allow changing modes at top-level blocks so this doesn't break
          * the "jump over empty blocks" optimization */
         assert(block->kind & block_kind_top_level);
         uint32_t mode = block->fp_mode.val;
         /* "((size - 1) << 11) | register" (MODE is encoded as register 1) */
         bld.sopk(aco_opcode::s_setreg_imm32_b32, Operand(mode), (7 << 11) | 1);
      }

      for (size_t j = 0; j < block->instructions.size(); j++) {
         aco_ptr<Instruction>& instr = block->instructions[j];
         aco_ptr<Instruction> mov;
         if (instr->format == Format::PSEUDO) {
            Pseudo_instruction *pi = (Pseudo_instruction*)instr.get();

            switch (instr->opcode)
            {
            case aco_opcode::p_extract_vector:
            {
               unsigned reg = instr->operands[0].physReg() + instr->operands[1].constantValue() * instr->definitions[0].size();
               RegClass rc = RegClass(instr->operands[0].getTemp().type(), 1);
               RegClass rc_def = RegClass(instr->definitions[0].getTemp().type(), 1);
               if (reg == instr->definitions[0].physReg())
                  break;

               std::map<PhysReg, copy_operation> copy_operations;
               for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
                  Definition def = Definition(PhysReg{instr->definitions[0].physReg() + i}, rc_def);
                  copy_operations[def.physReg()] = {Operand(PhysReg{reg + i}, rc), def, 0, 1};
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_create_vector:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               RegClass rc_def = RegClass(instr->definitions[0].getTemp().type(), 1);
               unsigned reg_idx = 0;
               for (const Operand& op : instr->operands) {
                  if (op.isConstant()) {
                     const PhysReg reg = PhysReg{instr->definitions[0].physReg() + reg_idx};
                     const Definition def = Definition(reg, rc_def);
                     copy_operations[reg] = {op, def, 0, 1};
                     reg_idx++;
                     continue;
                  }

                  RegClass rc_op = RegClass(op.getTemp().type(), 1);
                  for (unsigned j = 0; j < op.size(); j++)
                  {
                     const Operand copy_op = Operand(PhysReg{op.physReg() + j}, rc_op);
                     const Definition def = Definition(PhysReg{instr->definitions[0].physReg() + reg_idx}, rc_def);
                     copy_operations[def.physReg()] = {copy_op, def, 0, 1};
                     reg_idx++;
                  }
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_split_vector:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               RegClass rc_op = instr->operands[0].isConstant() ? s1 : RegClass(instr->operands[0].regClass().type(), 1);
               for (unsigned i = 0; i < instr->definitions.size(); i++) {
                  unsigned k = instr->definitions[i].size();
                  RegClass rc_def = RegClass(instr->definitions[i].getTemp().type(), 1);
                  for (unsigned j = 0; j < k; j++) {
                     Operand op = Operand(PhysReg{instr->operands[0].physReg() + (i*k+j)}, rc_op);
                     Definition def = Definition(PhysReg{instr->definitions[i].physReg() + j}, rc_def);
                     copy_operations[def.physReg()] = {op, def, 0, 1};
                  }
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_parallelcopy:
            case aco_opcode::p_wqm:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               for (unsigned i = 0; i < instr->operands.size(); i++)
               {
                  Operand operand = instr->operands[i];
                  if (operand.isConstant() || operand.size() == 1) {
                     assert(instr->definitions[i].size() == 1);
                     copy_operations[instr->definitions[i].physReg()] = {operand, instr->definitions[i], 0, 1};
                  } else {
                     RegClass def_rc = RegClass(instr->definitions[i].regClass().type(), 1);
                     RegClass op_rc = RegClass(operand.getTemp().type(), 1);
                     for (unsigned j = 0; j < operand.size(); j++)
                     {
                        Operand op = Operand(PhysReg{instr->operands[i].physReg() + j}, op_rc);
                        Definition def = Definition(PhysReg{instr->definitions[i].physReg() + j}, def_rc);
                        copy_operations[def.physReg()] = {op, def, 0, 1};
                     }
                  }
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_exit_early_if:
            {
               /* don't bother with an early exit at the end of the program */
               if (block->instructions[j + 1]->opcode == aco_opcode::p_logical_end &&
                   block->instructions[j + 2]->opcode == aco_opcode::s_endpgm) {
                  break;
               }

               if (!discard_block) {
                  discard_block = program->create_and_insert_block();
                  block = &program->blocks[i];

                  bld.reset(discard_block);
                  bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1),
                          0, V_008DFC_SQ_EXP_NULL, false, true, true);
                  if (program->wb_smem_l1_on_end)
                     bld.smem(aco_opcode::s_dcache_wb);
                  bld.sopp(aco_opcode::s_endpgm);

                  bld.reset(&ctx.instructions);
               }

               //TODO: exec can be zero here with block_kind_discard

               assert(instr->operands[0].physReg() == scc);
               bld.sopp(aco_opcode::s_cbranch_scc0, instr->operands[0], discard_block->index);

               discard_block->linear_preds.push_back(block->index);
               block->linear_succs.push_back(discard_block->index);
               break;
            }
            case aco_opcode::p_spill:
            {
               assert(instr->operands[0].regClass() == v1.as_linear());
               for (unsigned i = 0; i < instr->operands[2].size(); i++) {
                  bld.vop3(aco_opcode::v_writelane_b32, bld.def(v1, instr->operands[0].physReg()),
                           Operand(PhysReg{instr->operands[2].physReg() + i}, s1),
                           Operand(instr->operands[1].constantValue() + i));
               }
               break;
            }
            case aco_opcode::p_reload:
            {
               assert(instr->operands[0].regClass() == v1.as_linear());
               for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
                  bld.vop3(aco_opcode::v_readlane_b32,
                           bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
                           instr->operands[0], Operand(instr->operands[1].constantValue() + i));
               }
               break;
            }
            case aco_opcode::p_as_uniform:
            {
               if (instr->operands[0].isConstant() || instr->operands[0].regClass().type() == RegType::sgpr) {
                  std::map<PhysReg, copy_operation> copy_operations;
                  Operand operand = instr->operands[0];
                  if (operand.isConstant() || operand.size() == 1) {
                     assert(instr->definitions[0].size() == 1);
                     copy_operations[instr->definitions[0].physReg()] = {operand, instr->definitions[0], 0, 1};
                  } else {
                     for (unsigned i = 0; i < operand.size(); i++)
                     {
                        Operand op = Operand(PhysReg{operand.physReg() + i}, s1);
                        Definition def = Definition(PhysReg{instr->definitions[0].physReg() + i}, s1);
                        copy_operations[def.physReg()] = {op, def, 0, 1};
                     }
                  }
                  handle_operands(copy_operations, &ctx, program->chip_class, pi);
               } else {
                  assert(instr->operands[0].regClass().type() == RegType::vgpr);
                  assert(instr->definitions[0].regClass().type() == RegType::sgpr);
                  assert(instr->operands[0].size() == instr->definitions[0].size());
                  for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
                     bld.vop1(aco_opcode::v_readfirstlane_b32,
                              bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
                              Operand(PhysReg{instr->operands[0].physReg() + i}, v1));
                  }
               }
               break;
            }
            default:
               break;
            }
         } else if (instr->format == Format::PSEUDO_BRANCH) {
            Pseudo_branch_instruction* branch = static_cast<Pseudo_branch_instruction*>(instr.get());
            /* check if all blocks from current to target are empty */
            bool can_remove = block->index < branch->target[0];
            for (unsigned i = block->index + 1; can_remove && i < branch->target[0]; i++) {
               if (program->blocks[i].instructions.size())
                  can_remove = false;
            }
            if (can_remove)
               continue;

            switch (instr->opcode) {
               case aco_opcode::p_branch:
                  assert(block->linear_succs[0] == branch->target[0]);
                  bld.sopp(aco_opcode::s_branch, branch->target[0]);
                  break;
               case aco_opcode::p_cbranch_nz:
                  assert(block->linear_succs[1] == branch->target[0]);
                  if (branch->operands[0].physReg() == exec)
                     bld.sopp(aco_opcode::s_cbranch_execnz, branch->target[0]);
                  else if (branch->operands[0].physReg() == vcc)
                     bld.sopp(aco_opcode::s_cbranch_vccnz, branch->target[0]);
                  else {
                     assert(branch->operands[0].physReg() == scc);
                     bld.sopp(aco_opcode::s_cbranch_scc1, branch->target[0]);
                  }
                  break;
               case aco_opcode::p_cbranch_z:
                  assert(block->linear_succs[1] == branch->target[0]);
                  if (branch->operands[0].physReg() == exec)
                     bld.sopp(aco_opcode::s_cbranch_execz, branch->target[0]);
                  else if (branch->operands[0].physReg() == vcc)
                     bld.sopp(aco_opcode::s_cbranch_vccz, branch->target[0]);
                  else {
                     assert(branch->operands[0].physReg() == scc);
                     bld.sopp(aco_opcode::s_cbranch_scc0, branch->target[0]);
                  }
                  break;
               default:
                  unreachable("Unknown Pseudo branch instruction!");
            }

         } else if (instr->format == Format::PSEUDO_REDUCTION) {
            Pseudo_reduction_instruction* reduce = static_cast<Pseudo_reduction_instruction*>(instr.get());
            if (reduce->reduce_op == gfx10_wave64_bpermute) {
               /* Only makes sense on GFX10 wave64 */
               assert(program->chip_class >= GFX10);
               assert(program->info->wave_size == 64);
               assert(instr->definitions[0].regClass() == v1); /* Destination */
               assert(instr->definitions[1].regClass() == s2); /* Temp EXEC */
               assert(instr->definitions[1].physReg() != vcc);
               assert(instr->definitions[2].physReg() == scc); /* SCC clobber */
               assert(instr->operands[0].physReg() == vcc); /* Compare */
               assert(instr->operands[1].regClass() == v2.as_linear()); /* Temp VGPR pair */
               assert(instr->operands[2].regClass() == v1); /* Indices x4 */
               assert(instr->operands[3].regClass() == v1); /* Input data */

               PhysReg shared_vgpr_reg_lo = PhysReg(align(program->config->num_vgprs, 4) + 256);
               PhysReg shared_vgpr_reg_hi = PhysReg(shared_vgpr_reg_lo + 1);
               Operand compare = instr->operands[0];
               Operand tmp1(instr->operands[1].physReg(), v1);
               Operand tmp2(PhysReg(instr->operands[1].physReg() + 1), v1);
               Operand index_x4 = instr->operands[2];
               Operand input_data = instr->operands[3];
               Definition shared_vgpr_lo(shared_vgpr_reg_lo, v1);
               Definition shared_vgpr_hi(shared_vgpr_reg_hi, v1);
               Definition def_temp1(tmp1.physReg(), v1);
               Definition def_temp2(tmp2.physReg(), v1);

               /* Save EXEC and set it for all lanes */
               bld.sop1(aco_opcode::s_or_saveexec_b64, instr->definitions[1], instr->definitions[2],
                        Definition(exec, s2), Operand((uint64_t)-1), Operand(exec, s2));

               /* HI: Copy data from high lanes 32-63 to shared vgpr */
               bld.vop1_dpp(aco_opcode::v_mov_b32, shared_vgpr_hi, input_data, dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);

               /* LO: Copy data from low lanes 0-31 to shared vgpr */
               bld.vop1_dpp(aco_opcode::v_mov_b32, shared_vgpr_lo, input_data, dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);
               /* LO: Copy shared vgpr (high lanes' data) to output vgpr */
               bld.vop1_dpp(aco_opcode::v_mov_b32, def_temp1, Operand(shared_vgpr_reg_hi, v1), dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);
               /* HI: Copy shared vgpr (low lanes' data) to output vgpr */
               bld.vop1_dpp(aco_opcode::v_mov_b32, def_temp1, Operand(shared_vgpr_reg_lo, v1), dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);

               /* Permute the original input */
               bld.ds(aco_opcode::ds_bpermute_b32, def_temp2, index_x4, input_data);
               /* Permute the swapped input */
               bld.ds(aco_opcode::ds_bpermute_b32, def_temp1, index_x4, tmp1);

               /* Restore saved EXEC */
               bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(instr->definitions[1].physReg(), s2));
               /* Choose whether to use the original or swapped */
               bld.vop2(aco_opcode::v_cndmask_b32, instr->definitions[0], tmp1, tmp2, compare);
            } else {
               emit_reduction(&ctx, reduce->opcode, reduce->reduce_op, reduce->cluster_size,
                              reduce->operands[1].physReg(), // tmp
                              reduce->definitions[1].physReg(), // stmp
                              reduce->operands[2].physReg(), // vtmp
                              reduce->definitions[2].physReg(), // sitmp
                              reduce->operands[0], reduce->definitions[0]);
            }
         } else {
            ctx.instructions.emplace_back(std::move(instr));
         }
      }
      block->instructions.swap(ctx.instructions);
   }
}

}