/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
 *
 */
#include <map>

#include "aco_ir.h"
#include "aco_builder.h"
#include "util/u_math.h"
#include "sid.h"

namespace aco {
struct lower_context {
   Program *program;
   std::vector<aco_ptr<Instruction>> instructions;
};
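/* Emits "op" with the given DPP lane-shuffle control. VOP1/VOP2 opcodes can
 * carry the DPP modifier directly; VOP3 opcodes cannot, so for those the
 * shuffled source is first copied into vtmp with v_mov_b32+DPP and then
 * combined with a plain VOP3. row_mask/bank_mask disable writes to whole
 * rows/banks of lanes, and bound_ctrl_zero makes lanes whose source lane is
 * invalid or inactive read 0 instead of leaving the destination unmodified.
 */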
void emit_dpp_op(lower_context *ctx, PhysReg dst, PhysReg src0, PhysReg src1, PhysReg vtmp, PhysReg wrtmp,
                 aco_opcode op, Format format, bool clobber_vcc, unsigned dpp_ctrl,
                 unsigned row_mask, unsigned bank_mask, bool bound_ctrl_zero, unsigned size,
                 Operand *identity=NULL) /* for VOP3 with sparse writes */
{
   RegClass rc = RegClass(RegType::vgpr, size);
   if (format == Format::VOP3) {
      Builder bld(ctx->program, &ctx->instructions);

      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), identity[0]);
      if (identity && size >= 2)
         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), identity[1]);

      for (unsigned i = 0; i < size; i++)
         bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{src0+i}, v1),
                      dpp_ctrl, row_mask, bank_mask, bound_ctrl_zero);

      if (clobber_vcc)
         bld.vop3(op, Definition(dst, rc), Definition(vcc, s2), Operand(vtmp, rc), Operand(src1, rc));
      else
         bld.vop3(op, Definition(dst, rc), Operand(vtmp, rc), Operand(src1, rc));
   } else {
      assert(format == Format::VOP2 || format == Format::VOP1);
      assert(size == 1 || (op == aco_opcode::v_mov_b32));

      for (unsigned i = 0; i < size; i++) {
         aco_ptr<DPP_instruction> dpp{create_instruction<DPP_instruction>(
            op, (Format) ((uint32_t) format | (uint32_t) Format::DPP),
            format == Format::VOP2 ? 2 : 1, clobber_vcc ? 2 : 1)};
         dpp->operands[0] = Operand(PhysReg{src0+i}, rc);
         if (format == Format::VOP2)
            dpp->operands[1] = Operand(PhysReg{src1+i}, rc);
         dpp->definitions[0] = Definition(PhysReg{dst+i}, rc);
         if (clobber_vcc)
            dpp->definitions[1] = Definition(vcc, s2);
         dpp->dpp_ctrl = dpp_ctrl;
         dpp->row_mask = row_mask;
         dpp->bank_mask = bank_mask;
         dpp->bound_ctrl = bound_ctrl_zero;
         ctx->instructions.emplace_back(std::move(dpp));
      }
   }
}
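/* Returns one dword of the neutral element for the given reduction: idx
 * selects the low (0) or high (1) half, which only differs for 64-bit
 * operations. Inactive lanes are preset to this value so they don't affect
 * the result (e.g. 0 for adds, 1.0 for float multiplies, +inf for fmin).
 */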
uint32_t get_reduction_identity(ReduceOp op, unsigned idx)
{
   switch (op) {
   case iadd32:
   case iadd64:
   case fadd32:
   case fadd64:
   case ior32:
   case ior64:
   case ixor32:
   case ixor64:
   case umax32:
   case umax64:
      return 0;
   case imul32:
   case imul64:
      return idx ? 0 : 1;
   case fmul32:
      return 0x3f800000u; /* 1.0 */
   case fmul64:
      return idx ? 0x3ff00000u : 0u; /* 1.0 */
   case imin32:
      return INT32_MAX;
   case imin64:
      return idx ? 0x7fffffffu : 0xffffffffu;
   case imax32:
      return INT32_MIN;
   case imax64:
      return idx ? 0x80000000u : 0;
   case umin32:
   case umin64:
   case iand32:
   case iand64:
      return 0xffffffffu;
   case fmin32:
      return 0x7f800000u; /* infinity */
   case fmin64:
      return idx ? 0x7ff00000u : 0u; /* infinity */
   case fmax32:
      return 0xff800000u; /* negative infinity */
   case fmax64:
      return idx ? 0xfff00000u : 0u; /* negative infinity */
   }
   unreachable("Invalid reduction operation");
}
aco_opcode get_reduction_opcode(lower_context *ctx, ReduceOp op, bool *clobber_vcc, Format *format)
{
   *clobber_vcc = false;
   *format = Format::VOP2;
   switch (op) {
   case iadd32:
      *clobber_vcc = ctx->program->chip_class < GFX9;
      return ctx->program->chip_class < GFX9 ? aco_opcode::v_add_co_u32 : aco_opcode::v_add_u32;
   case imul32:
      *format = Format::VOP3;
      return aco_opcode::v_mul_lo_u32;
   case fadd32:
      return aco_opcode::v_add_f32;
   case fmul32:
      return aco_opcode::v_mul_f32;
   case imax32:
      return aco_opcode::v_max_i32;
   case imin32:
      return aco_opcode::v_min_i32;
   case umin32:
      return aco_opcode::v_min_u32;
   case umax32:
      return aco_opcode::v_max_u32;
   case fmin32:
      return aco_opcode::v_min_f32;
   case fmax32:
      return aco_opcode::v_max_f32;
   case iand32:
      return aco_opcode::v_and_b32;
   case ixor32:
      return aco_opcode::v_xor_b32;
   case ior32:
      return aco_opcode::v_or_b32;
   case iadd64:
   case imul64:
      assert(false);
      break;
   case fadd64:
      *format = Format::VOP3;
      return aco_opcode::v_add_f64;
   case fmul64:
      *format = Format::VOP3;
      return aco_opcode::v_mul_f64;
   case imin64:
   case imax64:
   case umin64:
   case umax64:
      assert(false);
      break;
   case fmin64:
      *format = Format::VOP3;
      return aco_opcode::v_min_f64;
   case fmax64:
      *format = Format::VOP3;
      return aco_opcode::v_max_f64;
   case iand64:
   case ior64:
   case ixor64:
      assert(false);
      break;
   }
   unreachable("Invalid reduction operation");
   return aco_opcode::v_min_u32;
}
void emit_vopn(lower_context *ctx, PhysReg dst, PhysReg src0, PhysReg src1,
               RegClass rc, aco_opcode op, Format format, bool clobber_vcc)
{
   aco_ptr<Instruction> instr;
   switch (format) {
   case Format::VOP2:
      instr.reset(create_instruction<VOP2_instruction>(op, format, 2, clobber_vcc ? 2 : 1));
      break;
   case Format::VOP3:
      instr.reset(create_instruction<VOP3A_instruction>(op, format, 2, clobber_vcc ? 2 : 1));
      break;
   default:
      assert(false);
   }
   instr->operands[0] = Operand(src0, rc);
   instr->operands[1] = Operand(src1, rc);
   instr->definitions[0] = Definition(dst, rc);
   if (clobber_vcc)
      instr->definitions[1] = Definition(vcc, s2);
   ctx->instructions.emplace_back(std::move(instr));
}
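/* Lowers p_reduce/p_inclusive_scan/p_exclusive_scan. Rough shape: save exec
 * and enable all lanes, preset inactive lanes of tmp with the identity via
 * v_cndmask_b32, combine lanes with DPP ops (plus ds_swizzle for clusters of
 * 32), restore exec, and move the result into dst (for a full 64-lane
 * reduction, by reading it back from lane 63).
 */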
void emit_reduction(lower_context *ctx, aco_opcode op, ReduceOp reduce_op, unsigned cluster_size, PhysReg tmp,
                    PhysReg stmp, PhysReg vtmp, PhysReg sitmp, Operand src, Definition dst)
{
   assert(cluster_size == 64 || op == aco_opcode::p_reduce);

   Builder bld(ctx->program, &ctx->instructions);

   PhysReg wrtmp{0}; /* should never be needed */

   Format format;
   bool should_clobber_vcc;
   aco_opcode reduce_opcode = get_reduction_opcode(ctx, reduce_op, &should_clobber_vcc, &format);
   Operand identity[2];
   identity[0] = Operand(get_reduction_identity(reduce_op, 0));
   identity[1] = Operand(get_reduction_identity(reduce_op, 1));
   Operand vcndmask_identity[2] = {identity[0], identity[1]};

   /* First, copy the source to tmp and set inactive lanes to the identity */
   // note: this clobbers SCC!
   bld.sop1(aco_opcode::s_or_saveexec_b64, Definition(stmp, s2), Definition(scc, s1), Definition(exec, s2), Operand(UINT64_MAX), Operand(exec, s2));
   for (unsigned i = 0; i < src.size(); i++) {
      /* p_exclusive_scan needs it to be an sgpr or inline constant for the v_writelane_b32 */
      if (identity[i].isLiteral() && op == aco_opcode::p_exclusive_scan) {
         bld.sop1(aco_opcode::s_mov_b32, Definition(PhysReg{sitmp+i}, s1), identity[i]);
         identity[i] = Operand(PhysReg{sitmp+i}, s1);

         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
         vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
      } else if (identity[i].isLiteral()) {
         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
         vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
      }
   }

   for (unsigned i = 0; i < src.size(); i++) {
      bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(PhysReg{tmp + i}, v1),
                   vcndmask_identity[i], Operand(PhysReg{src.physReg() + i}, v1),
                   Operand(stmp, s2));
   }
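   /* Butterfly-style combine: after each step, every lane of a cluster of N
    * holds the reduction of that cluster. quad_perm(1,0,3,2) pairs adjacent
    * lanes (N=2), quad_perm(2,3,0,1) pairs lane pairs (N=4), half-mirror and
    * mirror combine within rows of 16 (N=8, 16), ds_swizzle exchanges the two
    * halves of a 32-lane cluster, and the row broadcasts push partial results
    * upwards for the full 64-lane case.
    */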
   bool exec_restored = false;
   bool dst_written = false;
   switch (op) {
   case aco_opcode::p_reduce:
      if (cluster_size == 1) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, wrtmp, reduce_opcode, format, should_clobber_vcc,
                  dpp_quad_perm(1, 0, 3, 2), 0xf, 0xf, false, src.size());
      if (cluster_size == 2) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, wrtmp, reduce_opcode, format, should_clobber_vcc,
                  dpp_quad_perm(2, 3, 0, 1), 0xf, 0xf, false, src.size());
      if (cluster_size == 4) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, wrtmp, reduce_opcode, format, should_clobber_vcc,
                  dpp_row_half_mirror, 0xf, 0xf, false, src.size());
      if (cluster_size == 8) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, wrtmp, reduce_opcode, format, should_clobber_vcc,
                  dpp_row_mirror, 0xf, 0xf, false, src.size());
      if (cluster_size == 16) break;
      if (cluster_size == 32) {
         for (unsigned i = 0; i < src.size(); i++)
            bld.ds(aco_opcode::ds_swizzle_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1), ds_pattern_bitmode(0x1f, 0, 0x10));
         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(stmp, s2));
         exec_restored = true;
         emit_vopn(ctx, dst.physReg(), vtmp, tmp, src.regClass(), reduce_opcode, format, should_clobber_vcc);
         dst_written = true;
      } else {
         assert(cluster_size == 64);
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, wrtmp, reduce_opcode, format, should_clobber_vcc,
                     dpp_row_bcast15, 0xa, 0xf, false, src.size());
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, wrtmp, reduce_opcode, format, should_clobber_vcc,
                     dpp_row_bcast31, 0xc, 0xf, false, src.size());
      }
      break;
   case aco_opcode::p_exclusive_scan:
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, wrtmp, aco_opcode::v_mov_b32, Format::VOP1, false,
                  dpp_wf_sr1, 0xf, 0xf, true, src.size());
      for (unsigned i = 0; i < src.size(); i++) {
         if (!identity[i].isConstant() || identity[i].constantValue()) { /* bound_ctrl should take care of this otherwise */
            assert((identity[i].isConstant() && !identity[i].isLiteral()) || identity[i].physReg() == PhysReg{sitmp+i});
            bld.vop3(aco_opcode::v_writelane_b32, Definition(PhysReg{tmp+i}, v1),
                     identity[i], Operand(0u));
         }
      }
      /* fall through */
   case aco_opcode::p_inclusive_scan:
      assert(cluster_size == 64);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, wrtmp, reduce_opcode, format, should_clobber_vcc,
                  dpp_row_sr(1), 0xf, 0xf, false, src.size(), identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, wrtmp, reduce_opcode, format, should_clobber_vcc,
                  dpp_row_sr(2), 0xf, 0xf, false, src.size(), identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, wrtmp, reduce_opcode, format, should_clobber_vcc,
                  dpp_row_sr(4), 0xf, 0xf, false, src.size(), identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, wrtmp, reduce_opcode, format, should_clobber_vcc,
                  dpp_row_sr(8), 0xf, 0xf, false, src.size(), identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, wrtmp, reduce_opcode, format, should_clobber_vcc,
                  dpp_row_bcast15, 0xa, 0xf, false, src.size(), identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, wrtmp, reduce_opcode, format, should_clobber_vcc,
                  dpp_row_bcast31, 0xc, 0xf, false, src.size(), identity);
      break;
   default:
      unreachable("Invalid reduction mode");
   }
   if (!exec_restored)
      bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(stmp, s2));

   if (op == aco_opcode::p_reduce && cluster_size == 64) {
      for (unsigned k = 0; k < src.size(); k++) {
         bld.vop3(aco_opcode::v_readlane_b32, Definition(PhysReg{dst.physReg() + k}, s1),
                  Operand(PhysReg{tmp + k}, v1), Operand(63u));
      }
   } else if (!(dst.physReg() == tmp) && !dst_written) {
      for (unsigned k = 0; k < src.size(); k++) {
         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{dst.physReg() + k}, v1),
                  Operand(PhysReg{tmp + k}, v1));
      }
   }
}
struct copy_operation {
   Operand op;
   Definition def;
   unsigned uses;
   unsigned size;
};
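/* Lowers a set of parallel copies (a "location transfer graph"): each entry
 * maps a destination register to the operand that must end up there. Copies
 * whose destination is not read by any other copy form trees and are emitted
 * first; whatever remains consists of cycles, which are resolved by swapping
 * registers. Constants are written last, once their destination registers
 * are no longer needed as sources.
 */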
void handle_operands(std::map<PhysReg, copy_operation>& copy_map, lower_context* ctx, chip_class chip_class, Pseudo_instruction *pi)
{
   Builder bld(ctx->program, &ctx->instructions);
   aco_ptr<Instruction> mov;
   std::map<PhysReg, copy_operation>::iterator it = copy_map.begin();
   std::map<PhysReg, copy_operation>::iterator target;
   bool writes_scc = false;

   /* count the number of uses for each dst reg */
   while (it != copy_map.end()) {
      if (it->second.op.isConstant()) {
         ++it;
         continue;
      }

      if (it->second.def.physReg() == scc)
         writes_scc = true;

      assert(!pi->tmp_in_scc || !(it->second.def.physReg() == pi->scratch_sgpr));

      /* if src and dst reg are the same, remove operation */
      if (it->first == it->second.op.physReg()) {
         it = copy_map.erase(it);
         continue;
      }
      /* check if the operand reg may be overwritten by another copy operation */
      target = copy_map.find(it->second.op.physReg());
      if (target != copy_map.end()) {
         target->second.uses++;
      }

      ++it;
   }
   /* first, handle paths in the location transfer graph */
   bool preserve_scc = pi->tmp_in_scc && !writes_scc;
   it = copy_map.begin();
   while (it != copy_map.end()) {

      /* the target reg is not used as operand for any other copy */
      if (it->second.uses == 0) {

         /* try to coalesce 32-bit sgpr copies to 64-bit copies */
         if (it->second.def.getTemp().type() == RegType::sgpr && it->second.size == 1 &&
             !it->second.op.isConstant() && it->first % 2 == it->second.op.physReg() % 2) {

            PhysReg other_def_reg = PhysReg{it->first % 2 ? it->first - 1 : it->first + 1};
            PhysReg other_op_reg = PhysReg{it->first % 2 ? it->second.op.physReg() - 1 : it->second.op.physReg() + 1};
            std::map<PhysReg, copy_operation>::iterator other = copy_map.find(other_def_reg);

            if (other != copy_map.end() && !other->second.uses && other->second.size == 1 &&
                other->second.op.physReg() == other_op_reg && !other->second.op.isConstant()) {
               std::map<PhysReg, copy_operation>::iterator to_erase = it->first % 2 ? it : other;
               it = it->first % 2 ? other : it;
               copy_map.erase(to_erase);
               it->second.size = 2;
            }
         }

         if (it->second.def.physReg() == scc) {
            bld.sopc(aco_opcode::s_cmp_lg_i32, it->second.def, it->second.op, Operand(0u));
            preserve_scc = true;
         } else if (it->second.size == 2 && it->second.def.getTemp().type() == RegType::sgpr) {
            bld.sop1(aco_opcode::s_mov_b64, it->second.def, Operand(it->second.op.physReg(), s2));
         } else {
            bld.copy(it->second.def, it->second.op);
         }

         /* reduce the number of uses of the operand reg by one */
         if (!it->second.op.isConstant()) {
            for (unsigned i = 0; i < it->second.size; i++) {
               target = copy_map.find(PhysReg{it->second.op.physReg() + i});
               if (target != copy_map.end())
                  target->second.uses--;
            }
         }

         copy_map.erase(it);
         it = copy_map.begin();
         continue;
      } else {
         /* the target reg is used as operand, check the next entry */
         ++it;
      }
   }
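   /* Swap strategy for the remaining cycles: v_swap_b32 where the hardware
    * has it (VGPRs on GFX9+), the classic triple-XOR swap otherwise, and a
    * detour through pi->scratch_sgpr when SCC itself is one of the swapped
    * registers, since SCC can't be written by a plain move (it is set via
    * s_cmp below).
    */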
   if (copy_map.empty())
      return;

   /* all target regs are needed as operand somewhere which means, all entries are part of a cycle */
   bool constants = false;
   for (it = copy_map.begin(); it != copy_map.end(); ++it) {
      assert(it->second.op.isFixed());
      if (it->first == it->second.op.physReg())
         continue;
      /* do constants later */
      if (it->second.op.isConstant()) {
         constants = true;
         continue;
      }

      if (preserve_scc && it->second.def.getTemp().type() == RegType::sgpr)
         assert(!(it->second.def.physReg() == pi->scratch_sgpr));

      /* to resolve the cycle, we have to swap the src reg with the dst reg */
      copy_operation swap = it->second;
      assert(swap.op.regClass() == swap.def.regClass());
      Operand def_as_op = Operand(swap.def.physReg(), swap.def.regClass());
      Definition op_as_def = Definition(swap.op.physReg(), swap.op.regClass());
      if (chip_class >= GFX9 && swap.def.getTemp().type() == RegType::vgpr) {
         bld.vop1(aco_opcode::v_swap_b32, swap.def, op_as_def, swap.op, def_as_op);
      } else if (swap.op.physReg() == scc || swap.def.physReg() == scc) {
         /* we need to swap scc and another sgpr */
         assert(!preserve_scc);

         PhysReg other = swap.op.physReg() == scc ? swap.def.physReg() : swap.op.physReg();

         bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
         bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(other, s1), Operand(0u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(other, s1), Operand(pi->scratch_sgpr, s1));
      } else if (swap.def.getTemp().type() == RegType::sgpr) {
         if (preserve_scc) {
            bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), swap.op);
            bld.sop1(aco_opcode::s_mov_b32, op_as_def, def_as_op);
            bld.sop1(aco_opcode::s_mov_b32, swap.def, Operand(pi->scratch_sgpr, s1));
         } else {
            bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), swap.op, def_as_op);
            bld.sop2(aco_opcode::s_xor_b32, swap.def, Definition(scc, s1), swap.op, def_as_op);
            bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), swap.op, def_as_op);
         }
      } else {
         bld.vop2(aco_opcode::v_xor_b32, op_as_def, swap.op, def_as_op);
         bld.vop2(aco_opcode::v_xor_b32, swap.def, swap.op, def_as_op);
         bld.vop2(aco_opcode::v_xor_b32, op_as_def, swap.op, def_as_op);
      }

      /* change the operand reg of the target's use */
      assert(swap.uses == 1);
      target = it;
      for (++target; target != copy_map.end(); ++target) {
         if (target->second.op.physReg() == it->first) {
            target->second.op.setFixed(swap.op.physReg());
            break;
         }
      }
   }
   /* copy constants into the registers which were operands */
   if (constants) {
      for (it = copy_map.begin(); it != copy_map.end(); ++it) {
         if (!it->second.op.isConstant())
            continue;
         if (it->second.def.physReg() == scc) {
            bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(0u), Operand(it->second.op.constantValue() ? 1u : 0u));
         } else {
            bld.copy(it->second.def, it->second.op);
         }
      }
   }
}
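/* Replaces the remaining pseudo instructions (vector ops, parallel copies,
 * spills/reloads, reductions, pseudo branches) with real hardware
 * instructions and rebuilds each block's instruction list in place.
 */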
void lower_to_hw_instr(Program* program)
{
   Block *discard_block = NULL;

   for (size_t i = 0; i < program->blocks.size(); i++)
   {
      Block *block = &program->blocks[i];
      lower_context ctx;
      ctx.program = program;
      Builder bld(program, &ctx.instructions);
      for (size_t j = 0; j < block->instructions.size(); j++) {
         aco_ptr<Instruction>& instr = block->instructions[j];
         aco_ptr<Instruction> mov;
         if (instr->format == Format::PSEUDO) {
            Pseudo_instruction *pi = (Pseudo_instruction*)instr.get();

            switch (instr->opcode)
            {
            case aco_opcode::p_extract_vector:
            {
               unsigned reg = instr->operands[0].physReg() + instr->operands[1].constantValue() * instr->definitions[0].size();
               RegClass rc = RegClass(instr->operands[0].getTemp().type(), 1);
               RegClass rc_def = RegClass(instr->definitions[0].getTemp().type(), 1);
               if (reg == instr->definitions[0].physReg())
                  break;

               std::map<PhysReg, copy_operation> copy_operations;
               for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
                  Definition def = Definition(PhysReg{instr->definitions[0].physReg() + i}, rc_def);
                  copy_operations[def.physReg()] = {Operand(PhysReg{reg + i}, rc), def, 0, 1};
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_create_vector:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               RegClass rc_def = RegClass(instr->definitions[0].getTemp().type(), 1);
               unsigned reg_idx = 0;
               for (const Operand& op : instr->operands) {
                  if (op.isConstant()) {
                     const PhysReg reg = PhysReg{instr->definitions[0].physReg() + reg_idx};
                     const Definition def = Definition(reg, rc_def);
                     copy_operations[reg] = {op, def, 0, 1};
                     reg_idx++;
                     continue;
                  }

                  RegClass rc_op = RegClass(op.getTemp().type(), 1);
                  for (unsigned j = 0; j < op.size(); j++)
                  {
                     const Operand copy_op = Operand(PhysReg{op.physReg() + j}, rc_op);
                     const Definition def = Definition(PhysReg{instr->definitions[0].physReg() + reg_idx}, rc_def);
                     copy_operations[def.physReg()] = {copy_op, def, 0, 1};
                     reg_idx++;
                  }
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_split_vector:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               RegClass rc_op = instr->operands[0].isConstant() ? s1 : RegClass(instr->operands[0].regClass().type(), 1);
               for (unsigned i = 0; i < instr->definitions.size(); i++) {
                  unsigned k = instr->definitions[i].size();
                  RegClass rc_def = RegClass(instr->definitions[i].getTemp().type(), 1);
                  for (unsigned j = 0; j < k; j++) {
                     Operand op = Operand(PhysReg{instr->operands[0].physReg() + (i*k+j)}, rc_op);
                     Definition def = Definition(PhysReg{instr->definitions[i].physReg() + j}, rc_def);
                     copy_operations[def.physReg()] = {op, def, 0, 1};
                  }
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_parallelcopy:
            case aco_opcode::p_wqm:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               for (unsigned i = 0; i < instr->operands.size(); i++)
               {
                  Operand operand = instr->operands[i];
                  if (operand.isConstant() || operand.size() == 1) {
                     assert(instr->definitions[i].size() == 1);
                     copy_operations[instr->definitions[i].physReg()] = {operand, instr->definitions[i], 0, 1};
                  } else {
                     RegClass def_rc = RegClass(instr->definitions[i].regClass().type(), 1);
                     RegClass op_rc = RegClass(operand.getTemp().type(), 1);
                     for (unsigned j = 0; j < operand.size(); j++)
                     {
                        Operand op = Operand(PhysReg{instr->operands[i].physReg() + j}, op_rc);
                        Definition def = Definition(PhysReg{instr->definitions[i].physReg() + j}, def_rc);
                        copy_operations[def.physReg()] = {op, def, 0, 1};
                     }
                  }
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_exit_early_if:
            {
               /* don't bother with an early exit at the end of the program */
               if (block->instructions[j + 1]->opcode == aco_opcode::p_logical_end &&
                   block->instructions[j + 2]->opcode == aco_opcode::s_endpgm) {
                  break;
               }

               if (!discard_block) {
                  discard_block = program->create_and_insert_block();
                  block = &program->blocks[i];

                  bld.reset(discard_block);
                  bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1),
                          0, V_008DFC_SQ_EXP_NULL, false, true, true);
                  if (program->wb_smem_l1_on_end)
                     bld.smem(aco_opcode::s_dcache_wb);
                  bld.sopp(aco_opcode::s_endpgm);

                  bld.reset(&ctx.instructions);
               }

               //TODO: exec can be zero here with block_kind_discard

               assert(instr->operands[0].physReg() == scc);
               bld.sopp(aco_opcode::s_cbranch_scc0, instr->operands[0], discard_block->index);

               discard_block->linear_preds.push_back(block->index);
               block->linear_succs.push_back(discard_block->index);
               break;
            }
            case aco_opcode::p_spill:
            {
               assert(instr->operands[0].regClass() == v1.as_linear());
               for (unsigned i = 0; i < instr->operands[2].size(); i++) {
                  bld.vop3(aco_opcode::v_writelane_b32, bld.def(v1, instr->operands[0].physReg()),
                           Operand(PhysReg{instr->operands[2].physReg() + i}, s1),
                           Operand(instr->operands[1].constantValue() + i));
               }
               break;
            }
            case aco_opcode::p_reload:
            {
               assert(instr->operands[0].regClass() == v1.as_linear());
               for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
                  bld.vop3(aco_opcode::v_readlane_b32,
                           bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
                           instr->operands[0], Operand(instr->operands[1].constantValue() + i));
               }
               break;
            }
            case aco_opcode::p_as_uniform:
            {
               if (instr->operands[0].isConstant() || instr->operands[0].regClass().type() == RegType::sgpr) {
                  std::map<PhysReg, copy_operation> copy_operations;
                  Operand operand = instr->operands[0];
                  if (operand.isConstant() || operand.size() == 1) {
                     assert(instr->definitions[0].size() == 1);
                     copy_operations[instr->definitions[0].physReg()] = {operand, instr->definitions[0], 0, 1};
                  } else {
                     for (unsigned i = 0; i < operand.size(); i++)
                     {
                        Operand op = Operand(PhysReg{operand.physReg() + i}, s1);
                        Definition def = Definition(PhysReg{instr->definitions[0].physReg() + i}, s1);
                        copy_operations[def.physReg()] = {op, def, 0, 1};
                     }
                  }
                  handle_operands(copy_operations, &ctx, program->chip_class, pi);
               } else {
                  assert(instr->operands[0].regClass().type() == RegType::vgpr);
                  assert(instr->definitions[0].regClass().type() == RegType::sgpr);
                  assert(instr->operands[0].size() == instr->definitions[0].size());
                  for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
                     bld.vop1(aco_opcode::v_readfirstlane_b32,
                              bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
                              Operand(PhysReg{instr->operands[0].physReg() + i}, v1));
                  }
               }
               break;
            }
            default:
               break;
            }
         } else if (instr->format == Format::PSEUDO_BRANCH) {
            Pseudo_branch_instruction* branch = static_cast<Pseudo_branch_instruction*>(instr.get());
            /* check if all blocks from current to target are empty */
            bool can_remove = block->index < branch->target[0];
            for (unsigned i = block->index + 1; can_remove && i < branch->target[0]; i++) {
               if (program->blocks[i].instructions.size())
                  can_remove = false;
            }

            if (can_remove)
               continue;

            switch (instr->opcode) {
            case aco_opcode::p_branch:
               assert(block->linear_succs[0] == branch->target[0]);
               bld.sopp(aco_opcode::s_branch, branch->target[0]);
               break;
            case aco_opcode::p_cbranch_nz:
               assert(block->linear_succs[1] == branch->target[0]);
               if (branch->operands[0].physReg() == exec)
                  bld.sopp(aco_opcode::s_cbranch_execnz, branch->target[0]);
               else if (branch->operands[0].physReg() == vcc)
                  bld.sopp(aco_opcode::s_cbranch_vccnz, branch->target[0]);
               else {
                  assert(branch->operands[0].physReg() == scc);
                  bld.sopp(aco_opcode::s_cbranch_scc1, branch->target[0]);
               }
               break;
            case aco_opcode::p_cbranch_z:
               assert(block->linear_succs[1] == branch->target[0]);
               if (branch->operands[0].physReg() == exec)
                  bld.sopp(aco_opcode::s_cbranch_execz, branch->target[0]);
               else if (branch->operands[0].physReg() == vcc)
                  bld.sopp(aco_opcode::s_cbranch_vccz, branch->target[0]);
               else {
                  assert(branch->operands[0].physReg() == scc);
                  bld.sopp(aco_opcode::s_cbranch_scc0, branch->target[0]);
               }
               break;
            default:
               unreachable("Unknown Pseudo branch instruction!");
            }
         } else if (instr->format == Format::PSEUDO_REDUCTION) {
            Pseudo_reduction_instruction* reduce = static_cast<Pseudo_reduction_instruction*>(instr.get());
            emit_reduction(&ctx, reduce->opcode, reduce->reduce_op, reduce->cluster_size,
                           reduce->operands[1].physReg(), // tmp
                           reduce->definitions[1].physReg(), // stmp
                           reduce->operands[2].physReg(), // vtmp
                           reduce->definitions[2].physReg(), // sitmp
                           reduce->operands[0], reduce->definitions[0]);
         } else {
            ctx.instructions.emplace_back(std::move(instr));
         }

      }
      block->instructions.swap(ctx.instructions);
   }
}

} /* namespace aco */