/*
 * Copyright © 2019 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "aco_ir.h"
#include "aco_builder.h"
#include "util/u_math.h"

namespace aco {
namespace {

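/* This pass inserts the code that manipulates the exec mask. It keeps a
 * stack of exec masks per block (block_info::exec) and inserts transitions
 * between Whole Quad Mode (WQM) and Exact mode where needed. WQM enables the
 * helper invocations of a fragment-shader quad so derivative computations
 * see correct neighbor values; Exact restricts execution to the really
 * active lanes, which is required for side effects such as stores, exports
 * and discards. */
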
enum WQMState : uint8_t {
   Unspecified = 0,
   Exact = 1 << 0,
   WQM = 1 << 1, /* with control flow applied */
   Preserve_WQM = 1 << 2,
   Exact_Branch = 1 << 3,
};

enum mask_type : uint8_t {
   mask_type_global = 1 << 0,
   mask_type_exact = 1 << 1,
   mask_type_wqm = 1 << 2,
   mask_type_loop = 1 << 3, /* active lanes of a loop */
   mask_type_initial = 1 << 4, /* initially active lanes */
};

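/* Each entry on the per-block exec mask stack pairs a Temp with a
 * combination of these flags describing what the mask represents: for
 * example, the mask created at p_startpgm is both mask_type_global and
 * mask_type_exact until a WQM transition pushes a WQM mask on top. */
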
struct wqm_ctx {
   Program* program;
   /* state for WQM propagation */
   std::set<unsigned> worklist;
   std::vector<uint16_t> defined_in;
   std::vector<bool> needs_wqm;
   std::vector<bool> branch_wqm; /* true if the branch condition in this block should be in wqm */
   bool loop;
   bool wqm;
   wqm_ctx(Program* program) : program(program),
                               defined_in(program->peekAllocationId(), 0xFFFF),
                               needs_wqm(program->peekAllocationId()),
                               branch_wqm(program->blocks.size()),
                               loop(false),
                               wqm(false)
   {
      for (unsigned i = 0; i < program->blocks.size(); i++)
         worklist.insert(i);
   }
};

struct loop_info {
   Block* loop_header;
   uint16_t num_exec_masks;
   uint8_t needs;
   bool has_divergent_break;
   bool has_divergent_continue;
   bool has_discard; /* has a discard or demote */
   loop_info(Block* b, uint16_t num, uint8_t needs, bool breaks, bool cont, bool discard) :
             loop_header(b), num_exec_masks(num), needs(needs), has_divergent_break(breaks),
             has_divergent_continue(cont), has_discard(discard) {}
};

struct block_info {
   std::vector<std::pair<Temp, uint8_t>> exec;
   std::vector<WQMState> instr_needs;
   uint8_t block_needs;
   uint8_t ever_again_needs;
   bool logical_end_wqm;
};

struct exec_ctx {
   Program *program;
   std::vector<block_info> info;
   std::vector<loop_info> loop;
   bool handle_wqm = false;
   exec_ctx(Program *program) : program(program), info(program->blocks.size()) {}
};

bool pred_by_exec_mask(aco_ptr<Instruction>& instr) {
   if (instr->isSALU())
      return instr->reads_exec();
   if (instr->format == Format::SMEM || instr->isSALU())
      return false;
   if (instr->format == Format::PSEUDO_BARRIER)
      return false;

   if (instr->format == Format::PSEUDO) {
      switch (instr->opcode) {
      case aco_opcode::p_create_vector:
      case aco_opcode::p_extract_vector:
      case aco_opcode::p_split_vector:
         for (Definition def : instr->definitions) {
            if (def.getTemp().type() == RegType::vgpr)
               return true;
         }
         return false;
      case aco_opcode::p_spill:
      case aco_opcode::p_reload:
         return false;
      default:
         break;
      }
   }

   if (instr->opcode == aco_opcode::v_readlane_b32 ||
       instr->opcode == aco_opcode::v_readlane_b32_e64 ||
       instr->opcode == aco_opcode::v_writelane_b32 ||
       instr->opcode == aco_opcode::v_writelane_b32_e64)
      return false;

   return true;
}

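/* Instructions which must run with the exact mask: memory instructions with
 * disable_wqm set, exports, and the FS SMEM buffer store, so that helper
 * invocations cannot cause side effects. */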
bool needs_exact(aco_ptr<Instruction>& instr) {
   if (instr->format == Format::MUBUF) {
      MUBUF_instruction *mubuf = static_cast<MUBUF_instruction *>(instr.get());
      return mubuf->disable_wqm;
   } else if (instr->format == Format::MTBUF) {
      MTBUF_instruction *mtbuf = static_cast<MTBUF_instruction *>(instr.get());
      return mtbuf->disable_wqm;
   } else if (instr->format == Format::MIMG) {
      MIMG_instruction *mimg = static_cast<MIMG_instruction *>(instr.get());
      return mimg->disable_wqm;
   } else if (instr->format == Format::FLAT || instr->format == Format::GLOBAL) {
      FLAT_instruction *flat = static_cast<FLAT_instruction *>(instr.get());
      return flat->disable_wqm;
   } else {
      return instr->format == Format::EXP || instr->opcode == aco_opcode::p_fs_buffer_store_smem;
   }
}

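/* WQM propagation is a backwards data-flow problem: marking a temporary as
 * needs_wqm re-queues the block that defines it (defined_in) on the
 * worklist so that block's needs are recomputed. */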
void set_needs_wqm(wqm_ctx &ctx, Temp tmp)
{
   if (!ctx.needs_wqm[tmp.id()]) {
      ctx.needs_wqm[tmp.id()] = true;
      if (ctx.defined_in[tmp.id()] != 0xFFFF)
         ctx.worklist.insert(ctx.defined_in[tmp.id()]);
   }
}

void mark_block_wqm(wqm_ctx &ctx, unsigned block_idx)
{
   if (ctx.branch_wqm[block_idx])
      return;

   ctx.branch_wqm[block_idx] = true;
   Block& block = ctx.program->blocks[block_idx];
   aco_ptr<Instruction>& branch = block.instructions.back();

   if (branch->opcode != aco_opcode::p_branch) {
      assert(!branch->operands.empty() && branch->operands[0].isTemp());
      set_needs_wqm(ctx, branch->operands[0].getTemp());
   }

   /* TODO: this sets more branch conditions to WQM than it needs to
    * it should be enough to stop at the "exec mask top level" */
   if (block.kind & block_kind_top_level)
      return;

   for (unsigned pred_idx : block.logical_preds)
      mark_block_wqm(ctx, pred_idx);
}

void get_block_needs(wqm_ctx &ctx, exec_ctx &exec_ctx, Block* block)
{
   block_info& info = exec_ctx.info[block->index];

   std::vector<WQMState> instr_needs(block->instructions.size());

   if (block->kind & block_kind_top_level) {
      if (ctx.loop && ctx.wqm) {
         unsigned block_idx = block->index + 1;
         while (!(ctx.program->blocks[block_idx].kind & block_kind_top_level)) {
            /* flag all break conditions as WQM:
             * the conditions might be computed outside the nested CF */
            if (ctx.program->blocks[block_idx].kind & block_kind_break)
               mark_block_wqm(ctx, block_idx);
            /* flag all blocks as WQM to ensure we enter all (nested) loops in WQM */
            exec_ctx.info[block_idx].block_needs |= WQM;
            block_idx++;
         }
      } else if (ctx.loop && !ctx.wqm) {
         /* Ensure a branch never results in an exec mask with only helper
          * invocations (which can cause a loop to repeat infinitely if its
          * break branches are done in exact). */
         unsigned block_idx = block->index;
         do {
            if ((ctx.program->blocks[block_idx].kind & block_kind_branch))
               exec_ctx.info[block_idx].block_needs |= Exact_Branch;
            block_idx++;
         } while (!(ctx.program->blocks[block_idx].kind & block_kind_top_level));
      }

      ctx.loop = false;
      ctx.wqm = false;
   }

   for (int i = block->instructions.size() - 1; i >= 0; --i) {
      aco_ptr<Instruction>& instr = block->instructions[i];

      WQMState needs = needs_exact(instr) ? Exact : Unspecified;
      bool propagate_wqm = instr->opcode == aco_opcode::p_wqm;
      bool preserve_wqm = instr->opcode == aco_opcode::p_discard_if;
      bool pred_by_exec = pred_by_exec_mask(instr);
      for (const Definition& definition : instr->definitions) {
         if (!definition.isTemp())
            continue;
         const unsigned def = definition.tempId();
         ctx.defined_in[def] = block->index;
         if (needs == Unspecified && ctx.needs_wqm[def]) {
            needs = pred_by_exec ? WQM : Unspecified;
            propagate_wqm = true;
         }
      }

      if (propagate_wqm) {
         for (const Operand& op : instr->operands) {
            if (op.isTemp()) {
               set_needs_wqm(ctx, op.getTemp());
            }
         }
      } else if (preserve_wqm && info.block_needs & WQM) {
         needs = Preserve_WQM;
      }

      /* ensure the condition controlling the control flow for this phi is in WQM */
      if (needs == WQM && instr->opcode == aco_opcode::p_phi) {
         for (unsigned pred_idx : block->logical_preds) {
            mark_block_wqm(ctx, pred_idx);
            exec_ctx.info[pred_idx].logical_end_wqm = true;
            ctx.worklist.insert(pred_idx);
         }
      }

      if ((instr->opcode == aco_opcode::p_logical_end && info.logical_end_wqm) ||
          instr->opcode == aco_opcode::p_wqm) {
         assert(needs != Exact);
         needs = WQM;
      }

      instr_needs[i] = needs;
      info.block_needs |= needs;
   }

   info.instr_needs = instr_needs;

   /* for "if (<cond>) <wqm code>" or "while (<cond>) <wqm code>",
    * <cond> should be computed in WQM */
   if (info.block_needs & WQM && !(block->kind & block_kind_top_level)) {
      for (unsigned pred_idx : block->logical_preds)
         mark_block_wqm(ctx, pred_idx);
      ctx.wqm = true;
   }
   if (block->kind & block_kind_loop_header)
      ctx.loop = true;
}

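/* Drain the worklist, then walk the blocks in reverse to accumulate
 * ever_again_needs: whether any later block still needs WQM or Exact. This
 * decides where WQM handling can be disabled for the rest of the program. */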
void calculate_wqm_needs(exec_ctx& exec_ctx)
{
   wqm_ctx ctx(exec_ctx.program);

   while (!ctx.worklist.empty()) {
      unsigned block_index = *std::prev(ctx.worklist.end());
      ctx.worklist.erase(std::prev(ctx.worklist.end()));

      get_block_needs(ctx, exec_ctx, &exec_ctx.program->blocks[block_index]);
   }

   uint8_t ever_again_needs = 0;
   for (int i = exec_ctx.program->blocks.size() - 1; i >= 0; i--) {
      exec_ctx.info[i].ever_again_needs = ever_again_needs;
      Block& block = exec_ctx.program->blocks[i];

      if (block.kind & block_kind_needs_lowering)
         exec_ctx.info[i].block_needs |= Exact;

      /* if discard is used somewhere in nested CF, we need to preserve the WQM mask */
      if ((block.kind & block_kind_discard ||
           block.kind & block_kind_uses_discard_if) &&
          ever_again_needs & WQM)
         exec_ctx.info[i].block_needs |= Preserve_WQM;

      ever_again_needs |= exec_ctx.info[i].block_needs & ~Exact_Branch;
      if (block.kind & block_kind_discard ||
          block.kind & block_kind_uses_discard_if ||
          block.kind & block_kind_uses_demote)
         ever_again_needs |= Exact;

      /* don't propagate WQM preservation further than the next top_level block */
      if (block.kind & block_kind_top_level)
         ever_again_needs &= ~Preserve_WQM;
      else
         exec_ctx.info[i].block_needs &= ~Preserve_WQM;
   }
   exec_ctx.handle_wqm = true;
}

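/* The transition helpers lazily convert the top of the exec mask stack
 * between WQM and Exact: either by pushing a new mask (s_wqm on the global
 * mask, or s_and_saveexec against it) or by popping back to a mask that
 * already has the requested type. */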
void transition_to_WQM(exec_ctx& ctx, Builder bld, unsigned idx)
{
   if (ctx.info[idx].exec.back().second & mask_type_wqm)
      return;
   if (ctx.info[idx].exec.back().second & mask_type_global) {
      Temp exec_mask = ctx.info[idx].exec.back().first;
      /* TODO: we might generate better code if we pass the uncopied "exec_mask"
       * directly to the s_wqm (we still need to keep this parallelcopy for
       * potential later uses of exec_mask though). We currently can't do this
       * because of a RA bug. */
      exec_mask = bld.pseudo(aco_opcode::p_parallelcopy, bld.def(bld.lm), bld.exec(exec_mask));
      ctx.info[idx].exec.back().first = exec_mask;

      exec_mask = bld.sop1(Builder::s_wqm, bld.def(bld.lm, exec), bld.def(s1, scc), exec_mask);
      ctx.info[idx].exec.emplace_back(exec_mask, mask_type_global | mask_type_wqm);
      return;
   }

   /* otherwise, the WQM mask should be one below the current mask */
   ctx.info[idx].exec.pop_back();
   assert(ctx.info[idx].exec.back().second & mask_type_wqm);
   assert(ctx.info[idx].exec.back().first.size() == bld.lm.size());
   ctx.info[idx].exec.back().first = bld.pseudo(aco_opcode::p_parallelcopy, bld.def(bld.lm, exec),
                                                ctx.info[idx].exec.back().first);
}

void transition_to_Exact(exec_ctx& ctx, Builder bld, unsigned idx)
{
   if (ctx.info[idx].exec.back().second & mask_type_exact)
      return;
   /* We can't remove the loop exec mask, because that can cause exec.size() to
    * be less than num_exec_masks. The loop exec mask also needs to be kept
    * around for various uses. */
   if ((ctx.info[idx].exec.back().second & mask_type_global) &&
       !(ctx.info[idx].exec.back().second & mask_type_loop)) {
      ctx.info[idx].exec.pop_back();
      assert(ctx.info[idx].exec.back().second & mask_type_exact);
      assert(ctx.info[idx].exec.back().first.size() == bld.lm.size());
      ctx.info[idx].exec.back().first = bld.pseudo(aco_opcode::p_parallelcopy, bld.def(bld.lm, exec),
                                                   ctx.info[idx].exec.back().first);
      return;
   }

   /* otherwise, we create an exact mask and push to the stack */
   Temp wqm = ctx.info[idx].exec.back().first;
   Temp exact = bld.tmp(bld.lm);
   wqm = bld.sop1(Builder::s_and_saveexec, bld.def(bld.lm), bld.def(s1, scc),
                  bld.exec(Definition(exact)), ctx.info[idx].exec[0].first, bld.exec(wqm));
   ctx.info[idx].exec.back().first = wqm;
   ctx.info[idx].exec.emplace_back(exact, mask_type_exact);
}

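/* Emits the code a block needs at its start: the initial mask in the start
 * block, linear phis for exec masks at merge points, and the loop header and
 * loop exit bookkeeping. Returns the index of the first unprocessed
 * instruction in the block. */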
unsigned add_coupling_code(exec_ctx& ctx, Block* block,
                           std::vector<aco_ptr<Instruction>>& instructions)
{
   unsigned idx = block->index;
   Builder bld(ctx.program, &instructions);
   std::vector<unsigned>& preds = block->linear_preds;

   /* start block */
   if (idx == 0) {
      aco_ptr<Instruction>& startpgm = block->instructions[0];
      assert(startpgm->opcode == aco_opcode::p_startpgm);
      Temp exec_mask = startpgm->definitions.back().getTemp();
      bld.insert(std::move(startpgm));

      /* exec seems to need to be manually initialized with combined shaders */
      if (util_bitcount(ctx.program->stage & sw_mask) > 1 || (ctx.program->stage & hw_ngg_gs)) {
         bld.sop1(Builder::s_mov, bld.exec(Definition(exec_mask)), bld.lm == s2 ? Operand(UINT64_MAX) : Operand(UINT32_MAX));
         instructions[0]->definitions.pop_back();
      }

      if (ctx.handle_wqm) {
         ctx.info[0].exec.emplace_back(exec_mask, mask_type_global | mask_type_exact | mask_type_initial);
         /* if this block only needs WQM, initialize already */
         if (ctx.info[0].block_needs == WQM)
            transition_to_WQM(ctx, bld, 0);
      } else {
         uint8_t mask = mask_type_global;
         if (ctx.program->needs_wqm) {
            exec_mask = bld.sop1(Builder::s_wqm, bld.def(bld.lm, exec), bld.def(s1, scc), bld.exec(exec_mask));
            mask |= mask_type_wqm;
         } else {
            mask |= mask_type_exact;
         }
         ctx.info[0].exec.emplace_back(exec_mask, mask);
      }

      return 1;
   }

   /* loop entry block */
   if (block->kind & block_kind_loop_header) {
      assert(preds[0] == idx - 1);
      ctx.info[idx].exec = ctx.info[idx - 1].exec;
      loop_info& info = ctx.loop.back();
      while (ctx.info[idx].exec.size() > info.num_exec_masks)
         ctx.info[idx].exec.pop_back();

      /* create ssa names for outer exec masks */
      if (info.has_discard) {
         aco_ptr<Pseudo_instruction> phi;
         for (int i = 0; i < info.num_exec_masks - 1; i++) {
            phi.reset(create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, preds.size(), 1));
            phi->definitions[0] = bld.def(bld.lm);
            phi->operands[0] = Operand(ctx.info[preds[0]].exec[i].first);
            ctx.info[idx].exec[i].first = bld.insert(std::move(phi));
         }
      }

      /* create ssa name for restore mask */
      if (info.has_divergent_break) {
         /* this phi might be trivial but ensures a parallelcopy on the loop header */
         aco_ptr<Pseudo_instruction> phi{create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, preds.size(), 1)};
         phi->definitions[0] = bld.def(bld.lm);
         phi->operands[0] = Operand(ctx.info[preds[0]].exec[info.num_exec_masks - 1].first);
         ctx.info[idx].exec.back().first = bld.insert(std::move(phi));
      }

      /* create ssa name for loop active mask */
      aco_ptr<Pseudo_instruction> phi{create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, preds.size(), 1)};
      if (info.has_divergent_continue)
         phi->definitions[0] = bld.def(bld.lm);
      else
         phi->definitions[0] = bld.def(bld.lm, exec);
      phi->operands[0] = Operand(ctx.info[preds[0]].exec.back().first);
      Temp loop_active = bld.insert(std::move(phi));

      if (info.has_divergent_break) {
         uint8_t mask_type = (ctx.info[idx].exec.back().second & (mask_type_wqm | mask_type_exact)) | mask_type_loop;
         ctx.info[idx].exec.emplace_back(loop_active, mask_type);
      } else {
         ctx.info[idx].exec.back().first = loop_active;
         ctx.info[idx].exec.back().second |= mask_type_loop;
      }

      /* create a parallelcopy to move the active mask to exec */
      unsigned i = 0;
      if (info.has_divergent_continue) {
         while (block->instructions[i]->opcode != aco_opcode::p_logical_start) {
            bld.insert(std::move(block->instructions[i]));
            i++;
         }
         uint8_t mask_type = ctx.info[idx].exec.back().second & (mask_type_wqm | mask_type_exact);
         assert(ctx.info[idx].exec.back().first.size() == bld.lm.size());
         ctx.info[idx].exec.emplace_back(bld.pseudo(aco_opcode::p_parallelcopy, bld.def(bld.lm, exec),
                                                    ctx.info[idx].exec.back().first), mask_type);
      }

      return i;
   }

   /* loop exit block */
   if (block->kind & block_kind_loop_exit) {
      Block* header = ctx.loop.back().loop_header;
      loop_info& info = ctx.loop.back();

      for (ASSERTED unsigned pred : preds)
         assert(ctx.info[pred].exec.size() >= info.num_exec_masks);

      /* fill the loop header phis */
      std::vector<unsigned>& header_preds = header->linear_preds;
      int k = 0;
      if (info.has_discard) {
         while (k < info.num_exec_masks - 1) {
            aco_ptr<Instruction>& phi = header->instructions[k];
            assert(phi->opcode == aco_opcode::p_linear_phi);
            for (unsigned i = 1; i < phi->operands.size(); i++)
               phi->operands[i] = Operand(ctx.info[header_preds[i]].exec[k].first);
            k++;
         }
      }
      aco_ptr<Instruction>& phi = header->instructions[k++];
      assert(phi->opcode == aco_opcode::p_linear_phi);
      for (unsigned i = 1; i < phi->operands.size(); i++)
         phi->operands[i] = Operand(ctx.info[header_preds[i]].exec[info.num_exec_masks - 1].first);

      if (info.has_divergent_break) {
         aco_ptr<Instruction>& phi = header->instructions[k];
         assert(phi->opcode == aco_opcode::p_linear_phi);
         for (unsigned i = 1; i < phi->operands.size(); i++)
            phi->operands[i] = Operand(ctx.info[header_preds[i]].exec[info.num_exec_masks].first);
      }

      assert(!(block->kind & block_kind_top_level) || info.num_exec_masks <= 2);

      /* create the loop exit phis if not trivial */
      bool need_parallelcopy = false;
      for (unsigned k = 0; k < info.num_exec_masks; k++) {
         Temp same = ctx.info[preds[0]].exec[k].first;
         uint8_t type = ctx.info[header_preds[0]].exec[k].second;
         bool trivial = true;

         for (unsigned i = 1; i < preds.size() && trivial; i++) {
            if (ctx.info[preds[i]].exec[k].first != same)
               trivial = false;
         }

         if (k == info.num_exec_masks - 1u) {
            bool all_liveout_exec = true;
            bool all_not_liveout_exec = true;
            for (unsigned pred : preds) {
               all_liveout_exec = all_liveout_exec && same == ctx.program->blocks[pred].live_out_exec;
               all_not_liveout_exec = all_not_liveout_exec && same != ctx.program->blocks[pred].live_out_exec;
            }
            if (!all_liveout_exec && !all_not_liveout_exec)
               trivial = false;
            else if (all_not_liveout_exec)
               need_parallelcopy = true;

            need_parallelcopy |= !trivial;
         }

         if (trivial) {
            ctx.info[idx].exec.emplace_back(same, type);
         } else {
            /* create phi for loop footer */
            aco_ptr<Pseudo_instruction> phi{create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, preds.size(), 1)};
            phi->definitions[0] = bld.def(bld.lm);
            if (k == info.num_exec_masks - 1u) {
               phi->definitions[0].setFixed(exec);
               need_parallelcopy = false;
            }
            for (unsigned i = 0; i < phi->operands.size(); i++)
               phi->operands[i] = Operand(ctx.info[preds[i]].exec[k].first);
            ctx.info[idx].exec.emplace_back(bld.insert(std::move(phi)), type);
         }
      }
      assert(ctx.info[idx].exec.size() == info.num_exec_masks);

      /* create a parallelcopy to move the live mask to exec */
      unsigned i = 0;
      while (block->instructions[i]->opcode != aco_opcode::p_logical_start) {
         bld.insert(std::move(block->instructions[i]));
         i++;
      }

      if (ctx.handle_wqm) {
         if (block->kind & block_kind_top_level && ctx.info[idx].exec.size() == 2) {
            if ((ctx.info[idx].block_needs | ctx.info[idx].ever_again_needs) == 0 ||
                (ctx.info[idx].block_needs | ctx.info[idx].ever_again_needs) == Exact) {
               ctx.info[idx].exec.back().second |= mask_type_global;
               transition_to_Exact(ctx, bld, idx);
               ctx.handle_wqm = false;
            }
         }
         if (ctx.info[idx].block_needs == WQM)
            transition_to_WQM(ctx, bld, idx);
         else if (ctx.info[idx].block_needs == Exact)
            transition_to_Exact(ctx, bld, idx);
      }

      assert(ctx.info[idx].exec.back().first.size() == bld.lm.size());
      if (need_parallelcopy) {
         /* only create this parallelcopy if needed, since the operand isn't
          * fixed to exec which causes the spiller to miscalculate register demand */
         /* TODO: Fix register_demand calculation for spilling on loop exits.
          * The problem is only mitigated because the register demand could be
          * higher if the exec phi doesn't get assigned to exec. */
         ctx.info[idx].exec.back().first = bld.pseudo(aco_opcode::p_parallelcopy, bld.def(bld.lm, exec),
                                                      ctx.info[idx].exec.back().first);
      }

      ctx.loop.pop_back();
      return i;
   }

   if (preds.size() == 1) {
      ctx.info[idx].exec = ctx.info[preds[0]].exec;
   } else {
      assert(preds.size() == 2);
      /* if one of the predecessors ends in exact mask, we pop it from stack */
      unsigned num_exec_masks = std::min(ctx.info[preds[0]].exec.size(),
                                         ctx.info[preds[1]].exec.size());
      if (block->kind & block_kind_top_level && !(block->kind & block_kind_merge))
         num_exec_masks = std::min(num_exec_masks, 2u);

      /* create phis for diverged exec masks */
      for (unsigned i = 0; i < num_exec_masks; i++) {
         bool in_exec = i == num_exec_masks - 1 && !(block->kind & block_kind_merge);
         if (!in_exec && ctx.info[preds[0]].exec[i].first == ctx.info[preds[1]].exec[i].first) {
            assert(ctx.info[preds[0]].exec[i].second == ctx.info[preds[1]].exec[i].second);
            ctx.info[idx].exec.emplace_back(ctx.info[preds[0]].exec[i]);
            continue;
         }

         Temp phi = bld.pseudo(aco_opcode::p_linear_phi, in_exec ? bld.def(bld.lm, exec) : bld.def(bld.lm),
                               ctx.info[preds[0]].exec[i].first,
                               ctx.info[preds[1]].exec[i].first);
         uint8_t mask_type = ctx.info[preds[0]].exec[i].second & ctx.info[preds[1]].exec[i].second;
         ctx.info[idx].exec.emplace_back(phi, mask_type);
      }
   }

   unsigned i = 0;
   while (block->instructions[i]->opcode == aco_opcode::p_phi ||
          block->instructions[i]->opcode == aco_opcode::p_linear_phi) {
      bld.insert(std::move(block->instructions[i]));
      i++;
   }

   if (block->kind & block_kind_merge)
      ctx.info[idx].exec.pop_back();

   if (block->kind & block_kind_top_level && ctx.info[idx].exec.size() == 3) {
      assert(ctx.info[idx].exec.back().second == mask_type_exact);
      assert(block->kind & block_kind_merge);
      ctx.info[idx].exec.pop_back();
   }

   /* try to satisfy the block's needs */
   if (ctx.handle_wqm) {
      if (block->kind & block_kind_top_level && ctx.info[idx].exec.size() == 2) {
         if ((ctx.info[idx].block_needs | ctx.info[idx].ever_again_needs) == 0 ||
             (ctx.info[idx].block_needs | ctx.info[idx].ever_again_needs) == Exact) {
            ctx.info[idx].exec.back().second |= mask_type_global;
            transition_to_Exact(ctx, bld, idx);
            ctx.handle_wqm = false;
         }
      }
      if (ctx.info[idx].block_needs == WQM)
         transition_to_WQM(ctx, bld, idx);
      else if (ctx.info[idx].block_needs == Exact)
         transition_to_Exact(ctx, bld, idx);
   }

   if (block->kind & block_kind_merge) {
      Temp restore = ctx.info[idx].exec.back().first;
      assert(restore.size() == bld.lm.size());
      ctx.info[idx].exec.back().first = bld.pseudo(aco_opcode::p_parallelcopy, bld.def(bld.lm, exec), restore);
   }

   return i;
}

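/* Lower the p_fs_buffer_store_smem pseudo instruction to a real
 * s_buffer_store_dword*. SMEM stores execute regardless of the exec mask, so
 * if exec might be empty the offset is replaced with UINT32_MAX to turn the
 * store into a no-op. */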
void lower_fs_buffer_store_smem(Builder& bld, bool need_check, aco_ptr<Instruction>& instr, Temp cur_exec)
{
   Operand offset = instr->operands[1];
   if (need_check) {
      /* if exec is zero, then use UINT32_MAX as an offset and make this store a no-op */
      Temp nonempty = bld.sopc(Builder::s_cmp_lg, bld.def(s1, scc), cur_exec, Operand(0u));

      if (offset.isLiteral())
         offset = bld.sop1(aco_opcode::s_mov_b32, bld.def(s1), offset);

      offset = bld.sop2(aco_opcode::s_cselect_b32, bld.hint_m0(bld.def(s1)),
                        offset, Operand(UINT32_MAX), bld.scc(nonempty));
   } else if (offset.isConstant() && offset.constantValue() > 0xFFFFF) {
      offset = bld.sop1(aco_opcode::s_mov_b32, bld.hint_m0(bld.def(s1)), offset);
   }
   if (!offset.isConstant())
      offset.setFixed(m0);

   switch (instr->operands[2].size()) {
   case 1:
      instr->opcode = aco_opcode::s_buffer_store_dword;
      break;
   case 2:
      instr->opcode = aco_opcode::s_buffer_store_dwordx2;
      break;
   case 4:
      instr->opcode = aco_opcode::s_buffer_store_dwordx4;
      break;
   default:
      unreachable("Invalid SMEM buffer store size");
   }
   instr->operands[1] = offset;
   /* as_uniform() needs to be done here so it's done in exact mode and helper
    * lanes don't contribute. */
   instr->operands[2] = Operand(bld.as_uniform(instr->operands[2]));
}

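/* Walk the remaining instructions of a block, inserting WQM/Exact
 * transitions where instr_needs demands them, and lowering the discard,
 * demote and helper-invocation pseudo instructions against the exec mask
 * stack. */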
void process_instructions(exec_ctx& ctx, Block* block,
                          std::vector<aco_ptr<Instruction>>& instructions,
                          unsigned idx)
{
   WQMState state;
   if (ctx.info[block->index].exec.back().second & mask_type_wqm)
      state = WQM;
   else {
      assert(!ctx.handle_wqm || ctx.info[block->index].exec.back().second & mask_type_exact);
      state = Exact;
   }

   /* if the block doesn't need both, WQM and Exact, we can skip processing the instructions */
   bool process = (ctx.handle_wqm &&
                   (ctx.info[block->index].block_needs & state) !=
                   (ctx.info[block->index].block_needs & (WQM | Exact))) ||
                  block->kind & block_kind_uses_discard_if ||
                  block->kind & block_kind_uses_demote ||
                  block->kind & block_kind_needs_lowering;
   if (!process) {
      std::vector<aco_ptr<Instruction>>::iterator it = std::next(block->instructions.begin(), idx);
      instructions.insert(instructions.end(),
                          std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(it),
                          std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(block->instructions.end()));
      return;
   }

   Builder bld(ctx.program, &instructions);

   for (; idx < block->instructions.size(); idx++) {
      aco_ptr<Instruction> instr = std::move(block->instructions[idx]);

      WQMState needs = ctx.handle_wqm ? ctx.info[block->index].instr_needs[idx] : Unspecified;

      if (instr->opcode == aco_opcode::p_discard_if) {
         if (ctx.info[block->index].block_needs & Preserve_WQM) {
            assert(block->kind & block_kind_top_level);
            transition_to_WQM(ctx, bld, block->index);
            ctx.info[block->index].exec.back().second &= ~mask_type_global;
         }
         int num = ctx.info[block->index].exec.size();

         Operand cond = instr->operands[0];
         for (int i = num - 1; i >= 0; i--) {
            Instruction *andn2 = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc),
                                          ctx.info[block->index].exec[i].first, cond);
            if (i == num - 1) {
               andn2->operands[0].setFixed(exec);
               andn2->definitions[0].setFixed(exec);
            }
            if (i == 0) {
               instr->opcode = aco_opcode::p_exit_early_if;
               instr->operands[0] = bld.scc(andn2->definitions[1].getTemp());
            }
            ctx.info[block->index].exec[i].first = andn2->definitions[0].getTemp();
         }
         assert(!ctx.handle_wqm || (ctx.info[block->index].exec[0].second & mask_type_wqm) == 0);

      } else if (needs == WQM && state != WQM) {
         transition_to_WQM(ctx, bld, block->index);
         state = WQM;
      } else if (needs == Exact && state != Exact) {
         transition_to_Exact(ctx, bld, block->index);
         state = Exact;
      }

      if (instr->opcode == aco_opcode::p_is_helper || instr->opcode == aco_opcode::p_load_helper) {
         Definition dst = instr->definitions[0];
         assert(dst.size() == bld.lm.size());
         if (state == Exact) {
            instr.reset(create_instruction<SOP1_instruction>(bld.w64or32(Builder::s_mov), Format::SOP1, 1, 1));
            instr->operands[0] = Operand(0u);
            instr->definitions[0] = dst;
         } else {
            std::pair<Temp, uint8_t>& exact_mask = ctx.info[block->index].exec[0];
            if (instr->opcode == aco_opcode::p_load_helper &&
                !(ctx.info[block->index].exec[0].second & mask_type_initial)) {
               /* find last initial exact mask */
               for (int i = block->index; i >= 0; i--) {
                  if (ctx.program->blocks[i].kind & block_kind_top_level &&
                      ctx.info[i].exec[0].second & mask_type_initial) {
                     exact_mask = ctx.info[i].exec[0];
                     break;
                  }
               }
            }

            assert(instr->opcode == aco_opcode::p_is_helper || exact_mask.second & mask_type_initial);
            assert(exact_mask.second & mask_type_exact);

            instr.reset(create_instruction<SOP2_instruction>(bld.w64or32(Builder::s_andn2), Format::SOP2, 2, 2));
            instr->operands[0] = Operand(ctx.info[block->index].exec.back().first); /* current exec */
            instr->operands[1] = Operand(exact_mask.first);
            instr->definitions[0] = dst;
            instr->definitions[1] = bld.def(s1, scc);
         }
      } else if (instr->opcode == aco_opcode::p_demote_to_helper) {
         /* turn demote into discard_if with only exact masks */
         assert((ctx.info[block->index].exec[0].second & (mask_type_exact | mask_type_global)) == (mask_type_exact | mask_type_global));
         ctx.info[block->index].exec[0].second &= ~mask_type_initial;

         int num;
         Temp cond, exit_cond;
         if (instr->operands[0].isConstant()) {
            assert(instr->operands[0].constantValue() == -1u);
            /* transition to exact and set exec to zero */
            Temp old_exec = ctx.info[block->index].exec.back().first;
            Temp new_exec = bld.tmp(bld.lm);
            exit_cond = bld.tmp(s1);
            cond = bld.sop1(Builder::s_and_saveexec, bld.def(bld.lm), bld.scc(Definition(exit_cond)),
                            bld.exec(Definition(new_exec)), Operand(0u), bld.exec(old_exec));

            num = ctx.info[block->index].exec.size() - 2;
            if (ctx.info[block->index].exec.back().second & mask_type_exact) {
               ctx.info[block->index].exec.back().first = new_exec;
            } else {
               ctx.info[block->index].exec.back().first = cond;
               ctx.info[block->index].exec.emplace_back(new_exec, mask_type_exact);
            }
         } else {
            /* demote_if: transition to exact */
            transition_to_Exact(ctx, bld, block->index);
            assert(instr->operands[0].isTemp());
            cond = instr->operands[0].getTemp();
            num = ctx.info[block->index].exec.size() - 1;
         }

         for (int i = num; i >= 0; i--) {
            if (ctx.info[block->index].exec[i].second & mask_type_exact) {
               Instruction *andn2 = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc),
                                             ctx.info[block->index].exec[i].first, cond);
               if (i == (int)ctx.info[block->index].exec.size() - 1) {
                  andn2->operands[0].setFixed(exec);
                  andn2->definitions[0].setFixed(exec);
               }

               ctx.info[block->index].exec[i].first = andn2->definitions[0].getTemp();
               exit_cond = andn2->definitions[1].getTemp();
            }
         }
         instr->opcode = aco_opcode::p_exit_early_if;
         instr->operands[0] = bld.scc(exit_cond);
         state = Exact;

      } else if (instr->opcode == aco_opcode::p_fs_buffer_store_smem) {
         bool need_check = ctx.info[block->index].exec.size() != 1 &&
                           !(ctx.info[block->index].exec[ctx.info[block->index].exec.size() - 2].second & Exact);
         lower_fs_buffer_store_smem(bld, need_check, instr, ctx.info[block->index].exec.back().first);
      }

      bld.insert(std::move(instr));
   }
}

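/* Rewrites the branch at the end of a block: divergent ifs save the old mask
 * and reduce exec to the "then" lanes, invert blocks compute the "else" mask,
 * and break/continue blocks remove the leaving lanes from the surrounding
 * exec masks. */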
void add_branch_code(exec_ctx& ctx, Block* block)
{
   unsigned idx = block->index;
   Builder bld(ctx.program, block);

   if (idx == ctx.program->blocks.size() - 1)
      return;

   /* try to disable wqm handling */
   if (ctx.handle_wqm && block->kind & block_kind_top_level) {
      if (ctx.info[idx].exec.size() == 3) {
         assert(ctx.info[idx].exec[1].second == mask_type_wqm);
         ctx.info[idx].exec.pop_back();
      }
      assert(ctx.info[idx].exec.size() <= 2);

      if (ctx.info[idx].ever_again_needs == 0 ||
          ctx.info[idx].ever_again_needs == Exact) {
         /* transition to Exact */
         aco_ptr<Instruction> branch = std::move(block->instructions.back());
         block->instructions.pop_back();
         ctx.info[idx].exec.back().second |= mask_type_global;
         transition_to_Exact(ctx, bld, idx);
         bld.insert(std::move(branch));
         ctx.handle_wqm = false;

      } else if (ctx.info[idx].block_needs & Preserve_WQM) {
         /* transition to WQM and remove global flag */
         aco_ptr<Instruction> branch = std::move(block->instructions.back());
         block->instructions.pop_back();
         transition_to_WQM(ctx, bld, idx);
         ctx.info[idx].exec.back().second &= ~mask_type_global;
         bld.insert(std::move(branch));
      }
   }

   if (block->kind & block_kind_loop_preheader) {
      /* collect information about the succeeding loop */
      bool has_divergent_break = false;
      bool has_divergent_continue = false;
      bool has_discard = false;
      uint8_t needs = 0;
      unsigned loop_nest_depth = ctx.program->blocks[idx + 1].loop_nest_depth;

      for (unsigned i = idx + 1; ctx.program->blocks[i].loop_nest_depth >= loop_nest_depth; i++) {
         Block& loop_block = ctx.program->blocks[i];
         needs |= ctx.info[i].block_needs;

         if (loop_block.kind & block_kind_uses_discard_if ||
             loop_block.kind & block_kind_discard ||
             loop_block.kind & block_kind_uses_demote)
            has_discard = true;
         if (loop_block.loop_nest_depth != loop_nest_depth)
            continue;

         if (loop_block.kind & block_kind_uniform)
            continue;
         else if (loop_block.kind & block_kind_break)
            has_divergent_break = true;
         else if (loop_block.kind & block_kind_continue)
            has_divergent_continue = true;
      }

      if (ctx.handle_wqm) {
         if (needs & WQM) {
            aco_ptr<Instruction> branch = std::move(block->instructions.back());
            block->instructions.pop_back();
            transition_to_WQM(ctx, bld, idx);
            bld.insert(std::move(branch));
         } else {
            aco_ptr<Instruction> branch = std::move(block->instructions.back());
            block->instructions.pop_back();
            transition_to_Exact(ctx, bld, idx);
            bld.insert(std::move(branch));
         }
      }

      unsigned num_exec_masks = ctx.info[idx].exec.size();
      if (block->kind & block_kind_top_level)
         num_exec_masks = std::min(num_exec_masks, 2u);

      ctx.loop.emplace_back(&ctx.program->blocks[block->linear_succs[0]],
                            num_exec_masks,
                            needs,
                            has_divergent_break,
                            has_divergent_continue,
                            has_discard);
   }

   if (block->kind & block_kind_discard) {

      assert(block->instructions.back()->format == Format::PSEUDO_BRANCH);
      aco_ptr<Instruction> branch = std::move(block->instructions.back());
      block->instructions.pop_back();

      /* create a discard_if() instruction with the exec mask as condition */
      unsigned num = 0;
      if (ctx.loop.size()) {
         /* if we're in a loop, only discard from the outer exec masks */
         num = ctx.loop.back().num_exec_masks;
      } else {
         num = ctx.info[idx].exec.size() - 1;
      }

      Temp old_exec = ctx.info[idx].exec.back().first;
      Temp new_exec = bld.tmp(bld.lm);
      Temp cond = bld.sop1(Builder::s_and_saveexec, bld.def(bld.lm), bld.def(s1, scc),
                           bld.exec(Definition(new_exec)), Operand(0u), bld.exec(old_exec));
      ctx.info[idx].exec.back().first = new_exec;

      for (int i = num - 1; i >= 0; i--) {
         Instruction *andn2 = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc),
                                       ctx.info[block->index].exec[i].first, cond);
         if (i == (int)ctx.info[idx].exec.size() - 1)
            andn2->definitions[0].setFixed(exec);
         if (i == 0)
            bld.pseudo(aco_opcode::p_exit_early_if, bld.scc(andn2->definitions[1].getTemp()));
         ctx.info[block->index].exec[i].first = andn2->definitions[0].getTemp();
      }
      assert(!ctx.handle_wqm || (ctx.info[block->index].exec[0].second & mask_type_wqm) == 0);

      if ((block->kind & (block_kind_break | block_kind_uniform)) == block_kind_break)
         ctx.info[idx].exec.back().first = cond;
      bld.insert(std::move(branch));
      /* no return here as it can be followed by a divergent break */
   }

   if (block->kind & block_kind_continue_or_break) {
      assert(ctx.program->blocks[ctx.program->blocks[block->linear_succs[1]].linear_succs[0]].kind & block_kind_loop_header);
      assert(ctx.program->blocks[ctx.program->blocks[block->linear_succs[0]].linear_succs[0]].kind & block_kind_loop_exit);
      assert(block->instructions.back()->opcode == aco_opcode::p_branch);
      block->instructions.pop_back();

      bool need_parallelcopy = false;
      while (!(ctx.info[idx].exec.back().second & mask_type_loop)) {
         ctx.info[idx].exec.pop_back();
         need_parallelcopy = true;
      }

      if (need_parallelcopy)
         ctx.info[idx].exec.back().first = bld.pseudo(aco_opcode::p_parallelcopy, bld.def(bld.lm, exec), ctx.info[idx].exec.back().first);
      bld.branch(aco_opcode::p_cbranch_nz, bld.exec(ctx.info[idx].exec.back().first), block->linear_succs[1], block->linear_succs[0]);
      return;
   }

   if (block->kind & block_kind_uniform) {
      Pseudo_branch_instruction* branch = static_cast<Pseudo_branch_instruction*>(block->instructions.back().get());
      if (branch->opcode == aco_opcode::p_branch) {
         branch->target[0] = block->linear_succs[0];
      } else {
         branch->target[0] = block->linear_succs[1];
         branch->target[1] = block->linear_succs[0];
      }
      return;
   }

   if (block->kind & block_kind_branch) {

      if (ctx.handle_wqm &&
          ctx.info[idx].exec.size() >= 2 &&
          ctx.info[idx].exec.back().second == mask_type_exact &&
          !(ctx.info[idx].block_needs & Exact_Branch) &&
          ctx.info[idx].exec[ctx.info[idx].exec.size() - 2].second & mask_type_wqm) {
         /* return to wqm before branching */
         ctx.info[idx].exec.pop_back();
      }

      // orig = s_and_saveexec_b64
      assert(block->linear_succs.size() == 2);
      assert(block->instructions.back()->opcode == aco_opcode::p_cbranch_z);
      Temp cond = block->instructions.back()->operands[0].getTemp();
      block->instructions.pop_back();

      if (ctx.info[idx].block_needs & Exact_Branch)
         transition_to_Exact(ctx, bld, idx);

      Temp current_exec = ctx.info[idx].exec.back().first;
      uint8_t mask_type = ctx.info[idx].exec.back().second & (mask_type_wqm | mask_type_exact);

      Temp then_mask = bld.tmp(bld.lm);
      Temp old_exec = bld.sop1(Builder::s_and_saveexec, bld.def(bld.lm), bld.def(s1, scc),
                               bld.exec(Definition(then_mask)), cond, bld.exec(current_exec));

      ctx.info[idx].exec.back().first = old_exec;

      /* add next current exec to the stack */
      ctx.info[idx].exec.emplace_back(then_mask, mask_type);

      bld.branch(aco_opcode::p_cbranch_z, bld.exec(then_mask), block->linear_succs[1], block->linear_succs[0]);
      return;
   }

   if (block->kind & block_kind_invert) {
      // exec = s_andn2_b64 (original_exec, exec)
      assert(block->instructions.back()->opcode == aco_opcode::p_cbranch_nz);
      block->instructions.pop_back();
      Temp then_mask = ctx.info[idx].exec.back().first;
      uint8_t mask_type = ctx.info[idx].exec.back().second;
      ctx.info[idx].exec.pop_back();
      Temp orig_exec = ctx.info[idx].exec.back().first;
      Temp else_mask = bld.sop2(Builder::s_andn2, bld.def(bld.lm, exec),
                                bld.def(s1, scc), orig_exec, bld.exec(then_mask));

      /* add next current exec to the stack */
      ctx.info[idx].exec.emplace_back(else_mask, mask_type);

      bld.branch(aco_opcode::p_cbranch_z, bld.exec(else_mask), block->linear_succs[1], block->linear_succs[0]);
      return;
   }

   if (block->kind & block_kind_break) {
      // loop_mask = s_andn2_b64 (loop_mask, exec)
      assert(block->instructions.back()->opcode == aco_opcode::p_branch);
      block->instructions.pop_back();

      Temp current_exec = ctx.info[idx].exec.back().first;
      Temp cond = Temp();
      for (int exec_idx = ctx.info[idx].exec.size() - 2; exec_idx >= 0; exec_idx--) {
         cond = bld.tmp(s1);
         Temp exec_mask = ctx.info[idx].exec[exec_idx].first;
         exec_mask = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.scc(Definition(cond)),
                              exec_mask, bld.exec(current_exec));
         ctx.info[idx].exec[exec_idx].first = exec_mask;
         if (ctx.info[idx].exec[exec_idx].second & mask_type_loop)
            break;
      }

      /* check if the successor is the merge block, otherwise set exec to 0 */
      // TODO: this could be done better by directly branching to the merge block
      unsigned succ_idx = ctx.program->blocks[block->linear_succs[1]].linear_succs[0];
      Block& succ = ctx.program->blocks[succ_idx];
      if (!(succ.kind & block_kind_invert || succ.kind & block_kind_merge)) {
         ctx.info[idx].exec.back().first = bld.sop1(Builder::s_mov, bld.def(bld.lm, exec), Operand(0u));
      }

      bld.branch(aco_opcode::p_cbranch_nz, bld.scc(cond), block->linear_succs[1], block->linear_succs[0]);
      return;
   }

   if (block->kind & block_kind_continue) {
      assert(block->instructions.back()->opcode == aco_opcode::p_branch);
      block->instructions.pop_back();

      Temp current_exec = ctx.info[idx].exec.back().first;
      Temp cond = Temp();
      for (int exec_idx = ctx.info[idx].exec.size() - 2; exec_idx >= 0; exec_idx--) {
         if (ctx.info[idx].exec[exec_idx].second & mask_type_loop)
            break;
         cond = bld.tmp(s1);
         Temp exec_mask = ctx.info[idx].exec[exec_idx].first;
         exec_mask = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.scc(Definition(cond)),
                              exec_mask, bld.exec(current_exec));
         ctx.info[idx].exec[exec_idx].first = exec_mask;
      }
      assert(cond != Temp());

      /* check if the successor is the merge block, otherwise set exec to 0 */
      // TODO: this could be done better by directly branching to the merge block
      unsigned succ_idx = ctx.program->blocks[block->linear_succs[1]].linear_succs[0];
      Block& succ = ctx.program->blocks[succ_idx];
      if (!(succ.kind & block_kind_invert || succ.kind & block_kind_merge)) {
         ctx.info[idx].exec.back().first = bld.sop1(Builder::s_mov, bld.def(bld.lm, exec), Operand(0u));
      }

      bld.branch(aco_opcode::p_cbranch_nz, bld.scc(cond), block->linear_succs[1], block->linear_succs[0]);
      return;
   }
}

void process_block(exec_ctx& ctx, Block* block)
{
   std::vector<aco_ptr<Instruction>> instructions;
   instructions.reserve(block->instructions.size());

   unsigned idx = add_coupling_code(ctx, block, instructions);

   assert(block->index != ctx.program->blocks.size() - 1 ||
          ctx.info[block->index].exec.size() <= 2);

   process_instructions(ctx, block, instructions, idx);

   block->instructions = std::move(instructions);

   add_branch_code(ctx, block);

   block->live_out_exec = ctx.info[block->index].exec.back().first;
}

} /* end namespace */

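/* Entry point of the pass: compute the WQM needs first if the program
 * requires both WQM and Exact, then process every block in program order. */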
void insert_exec_mask(Program *program)
{
   exec_ctx ctx(program);

   if (program->needs_wqm && program->needs_exact)
      calculate_wqm_needs(ctx);

   for (Block& block : program->blocks)
      process_block(ctx, &block);
}

}