/*
 * Copyright © 2019 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "aco_ir.h"
#include "aco_builder.h"

namespace aco {
namespace {
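/* This pass lowers WQM/Exact semantics and divergent control flow onto the
 * hardware exec mask.  In rough terms (a summary, not a specification):
 * calculate_wqm_needs() runs a backwards worklist analysis to decide, per
 * block and per instruction, whether Whole Quad Mode or Exact execution is
 * required; the per-block stage (add_coupling_code, process_instructions,
 * add_branch_code) then maintains a stack of exec masks and rewrites the
 * pseudo instructions (p_discard_if, p_demote_to_helper, p_is_helper, ...)
 * and branches accordingly. */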
enum WQMState : uint8_t {
   Unspecified = 0,
   Exact = 1 << 0,
   WQM = 1 << 1, /* with control flow applied */
   Preserve_WQM = 1 << 2,
   Exact_Branch = 1 << 3,
};
enum mask_type : uint8_t {
   mask_type_global = 1 << 0,
   mask_type_exact = 1 << 1,
   mask_type_wqm = 1 << 2,
   mask_type_loop = 1 << 3, /* active lanes of a loop */
   mask_type_initial = 1 << 4, /* initially active lanes */
};
struct wqm_ctx {
   Program* program;
   /* state for WQM propagation */
   std::set<unsigned> worklist;
   std::vector<uint16_t> defined_in;
   std::vector<bool> needs_wqm;
   std::vector<bool> branch_wqm; /* true if the branch condition in this block should be in wqm */
   bool loop;
   bool wqm;
   wqm_ctx(Program* program) : program(program),
                               defined_in(program->peekAllocationId(), 0xFFFF),
                               needs_wqm(program->peekAllocationId()),
                               branch_wqm(program->blocks.size()),
                               loop(false),
                               wqm(false)
   {
      for (unsigned i = 0; i < program->blocks.size(); i++)
         worklist.insert(i);
   }
};
struct loop_info {
   Block* loop_header;
   uint16_t num_exec_masks;
   uint8_t needs;
   bool has_divergent_break;
   bool has_divergent_continue;
   bool has_discard; /* has a discard or demote */
   loop_info(Block* b, uint16_t num, uint8_t needs, bool breaks, bool cont, bool discard) :
             loop_header(b), num_exec_masks(num), needs(needs), has_divergent_break(breaks),
             has_divergent_continue(cont), has_discard(discard) {}
};
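/* Each block_info keeps a stack of (mask temporary, mask_type flags) pairs in
 * "exec".  As an illustration (not taken from a real shader): at the top level
 * of a pixel shader that needs both modes, the stack typically looks like
 *    exec[0] = global Exact mask   (mask_type_global | mask_type_exact)
 *    exec[1] = global WQM mask     (mask_type_global | mask_type_wqm)
 * and every divergent branch or loop pushes a further, smaller mask on top.
 * The innermost entry is the one currently living in the exec register. */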
struct block_info {
   std::vector<std::pair<Temp, uint8_t>> exec;
   std::vector<WQMState> instr_needs;
   uint8_t block_needs = 0;
   uint8_t ever_again_needs = 0;
};
struct exec_ctx {
   Program *program;
   std::vector<block_info> info;
   std::vector<loop_info> loop;
   bool handle_wqm = false;
   exec_ctx(Program *program) : program(program), info(program->blocks.size()) {}
};
bool pred_by_exec_mask(aco_ptr<Instruction>& instr) {
   if (instr->isSALU())
      return instr->reads_exec();
   if (instr->format == Format::SMEM || instr->isSALU())
      return false;
   if (instr->format == Format::PSEUDO_BARRIER)
      return false;

   if (instr->format == Format::PSEUDO) {
      switch (instr->opcode) {
      case aco_opcode::p_create_vector:
         return instr->definitions[0].getTemp().type() == RegType::vgpr;
      case aco_opcode::p_extract_vector:
      case aco_opcode::p_split_vector:
         return instr->operands[0].getTemp().type() == RegType::vgpr;
      case aco_opcode::p_spill:
      case aco_opcode::p_reload:
         return false;
      default:
         break;
      }
   }

   if (instr->opcode == aco_opcode::v_readlane_b32 ||
       instr->opcode == aco_opcode::v_readlane_b32_e64 ||
       instr->opcode == aco_opcode::v_writelane_b32 ||
       instr->opcode == aco_opcode::v_writelane_b32_e64)
      return false;

   return true;
}
bool needs_exact(aco_ptr<Instruction>& instr) {
   if (instr->format == Format::MUBUF) {
      MUBUF_instruction *mubuf = static_cast<MUBUF_instruction *>(instr.get());
      return mubuf->disable_wqm;
   } else if (instr->format == Format::MTBUF) {
      MTBUF_instruction *mtbuf = static_cast<MTBUF_instruction *>(instr.get());
      return mtbuf->disable_wqm;
   } else if (instr->format == Format::MIMG) {
      MIMG_instruction *mimg = static_cast<MIMG_instruction *>(instr.get());
      return mimg->disable_wqm;
   } else if (instr->format == Format::FLAT || instr->format == Format::GLOBAL) {
      FLAT_instruction *flat = static_cast<FLAT_instruction *>(instr.get());
      return flat->disable_wqm;
   } else {
      return instr->format == Format::EXP || instr->opcode == aco_opcode::p_fs_buffer_store_smem;
   }
}
void set_needs_wqm(wqm_ctx &ctx, Temp tmp)
{
   if (!ctx.needs_wqm[tmp.id()]) {
      ctx.needs_wqm[tmp.id()] = true;
      if (ctx.defined_in[tmp.id()] != 0xFFFF)
         ctx.worklist.insert(ctx.defined_in[tmp.id()]);
   }
}
void mark_block_wqm(wqm_ctx &ctx, unsigned block_idx)
{
   if (ctx.branch_wqm[block_idx])
      return;

   ctx.branch_wqm[block_idx] = true;
   Block& block = ctx.program->blocks[block_idx];
   aco_ptr<Instruction>& branch = block.instructions.back();

   if (branch->opcode != aco_opcode::p_branch) {
      assert(!branch->operands.empty() && branch->operands[0].isTemp());
      set_needs_wqm(ctx, branch->operands[0].getTemp());
   }

   /* TODO: this sets more branch conditions to WQM than it needs to;
    * it should be enough to stop at the "exec mask top level" */
   if (block.kind & block_kind_top_level)
      return;

   for (unsigned pred_idx : block.logical_preds)
      mark_block_wqm(ctx, pred_idx);
}
void get_block_needs(wqm_ctx &ctx, exec_ctx &exec_ctx, Block* block)
{
   block_info& info = exec_ctx.info[block->index];

   std::vector<WQMState> instr_needs(block->instructions.size());

   if (block->kind & block_kind_top_level) {
      if (ctx.loop && ctx.wqm) {
         /* mark all break conditions as WQM */
         unsigned block_idx = block->index + 1;
         while (!(ctx.program->blocks[block_idx].kind & block_kind_top_level)) {
            if (ctx.program->blocks[block_idx].kind & block_kind_break)
               mark_block_wqm(ctx, block_idx);
            block_idx++;
         }
      } else if (ctx.loop && !ctx.wqm) {
         /* Ensure a branch never results in an exec mask with only helper
          * invocations (which can cause a loop to repeat infinitely if its
          * break branches are done in exact). */
         unsigned block_idx = block->index;
         do {
            if ((ctx.program->blocks[block_idx].kind & block_kind_branch))
               exec_ctx.info[block_idx].block_needs |= Exact_Branch;
            block_idx++;
         } while (!(ctx.program->blocks[block_idx].kind & block_kind_top_level));
      }

      ctx.loop = false;
      ctx.wqm = false;
   }

   for (int i = block->instructions.size() - 1; i >= 0; --i) {
      aco_ptr<Instruction>& instr = block->instructions[i];

      WQMState needs = needs_exact(instr) ? Exact : Unspecified;
      bool propagate_wqm = instr->opcode == aco_opcode::p_wqm;
      bool preserve_wqm = instr->opcode == aco_opcode::p_discard_if;
      bool pred_by_exec = pred_by_exec_mask(instr);
      for (const Definition& definition : instr->definitions) {
         if (!definition.isTemp())
            continue;
         const unsigned def = definition.tempId();
         ctx.defined_in[def] = block->index;
         if (needs == Unspecified && ctx.needs_wqm[def]) {
            needs = pred_by_exec ? WQM : Unspecified;
            propagate_wqm = true;
         }
      }

      if (propagate_wqm) {
         for (const Operand& op : instr->operands) {
            if (op.isTemp()) {
               set_needs_wqm(ctx, op.getTemp());
            }
         }
      } else if (preserve_wqm && info.block_needs & WQM) {
         needs = Preserve_WQM;
      }

      /* ensure the condition controlling the control flow for this phi is in WQM */
      if (needs == WQM && instr->opcode == aco_opcode::p_phi) {
         for (unsigned pred_idx : block->logical_preds)
            mark_block_wqm(ctx, pred_idx);
      }

      instr_needs[i] = needs;
      info.block_needs |= needs;
   }

   info.instr_needs = instr_needs;

   /* for "if (<cond>) <wqm code>" or "while (<cond>) <wqm code>",
    * <cond> should be computed in WQM */
   if (info.block_needs & WQM && !(block->kind & block_kind_top_level)) {
      for (unsigned pred_idx : block->logical_preds)
         mark_block_wqm(ctx, pred_idx);
      ctx.wqm = true;
   }
   if (block->kind & block_kind_loop_header)
      ctx.loop = true;
}
void calculate_wqm_needs(exec_ctx& exec_ctx)
{
   wqm_ctx ctx(exec_ctx.program);

   while (!ctx.worklist.empty()) {
      unsigned block_index = *std::prev(ctx.worklist.end());
      ctx.worklist.erase(std::prev(ctx.worklist.end()));

      get_block_needs(ctx, exec_ctx, &exec_ctx.program->blocks[block_index]);
   }

   uint8_t ever_again_needs = 0;
   for (int i = exec_ctx.program->blocks.size() - 1; i >= 0; i--) {
      exec_ctx.info[i].ever_again_needs = ever_again_needs;
      Block& block = exec_ctx.program->blocks[i];

      if (block.kind & block_kind_needs_lowering)
         exec_ctx.info[i].block_needs |= Exact;

      /* if discard is used somewhere in nested CF, we need to preserve the WQM mask */
      if ((block.kind & block_kind_discard ||
           block.kind & block_kind_uses_discard_if) &&
          ever_again_needs & WQM)
         exec_ctx.info[i].block_needs |= Preserve_WQM;

      ever_again_needs |= exec_ctx.info[i].block_needs & ~Exact_Branch;
      if (block.kind & block_kind_discard ||
          block.kind & block_kind_uses_discard_if ||
          block.kind & block_kind_uses_demote)
         ever_again_needs |= Exact;

      /* don't propagate WQM preservation further than the next top_level block */
      if (block.kind & block_kind_top_level)
         ever_again_needs &= ~Preserve_WQM;
      else
         exec_ctx.info[i].block_needs &= ~Preserve_WQM;
   }
   exec_ctx.handle_wqm = true;
}
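/* Switching to WQM pushes a whole-quad-mode mask derived from the current
 * exec mask.  Roughly, for wave64 (wave32 uses the *_b32 forms via bld.lm):
 *    s_wqm_b64 exec, exec    ; enable every lane of any quad with a live lane
 * Popping back out of WQM only needs a copy of the saved mask into exec. */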
void transition_to_WQM(exec_ctx& ctx, Builder bld, unsigned idx)
{
   if (ctx.info[idx].exec.back().second & mask_type_wqm)
      return;
   if (ctx.info[idx].exec.back().second & mask_type_global) {
      Temp exec_mask = ctx.info[idx].exec.back().first;
      exec_mask = bld.sop1(Builder::s_wqm, bld.def(bld.lm, exec), bld.def(s1, scc), exec_mask);
      ctx.info[idx].exec.emplace_back(exec_mask, mask_type_global | mask_type_wqm);
      return;
   }
   /* otherwise, the WQM mask should be one below the current mask */
   ctx.info[idx].exec.pop_back();
   assert(ctx.info[idx].exec.back().second & mask_type_wqm);
   assert(ctx.info[idx].exec.back().first.size() == bld.lm.size());
   ctx.info[idx].exec.back().first = bld.pseudo(aco_opcode::p_parallelcopy, bld.def(bld.lm, exec),
                                                ctx.info[idx].exec.back().first);
}
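/* Switching to Exact either pops back to the exact mask directly below the
 * current WQM mask, or, if that is not possible, re-derives it from the
 * outermost exact mask.  A sketch of the second case for wave64 (register
 * names are illustrative only):
 *    s_and_saveexec_b64 s[2:3], s[0:1]   ; save the WQM exec in s[2:3],
 *                                        ; exec = global exact mask & exec
 * so the saved WQM mask can be restored once exact execution is done. */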
void transition_to_Exact(exec_ctx& ctx, Builder bld, unsigned idx)
{
   if (ctx.info[idx].exec.back().second & mask_type_exact)
      return;
   /* We can't remove the loop exec mask, because that can cause exec.size() to
    * be less than num_exec_masks. The loop exec mask also needs to be kept
    * around for various uses. */
   if ((ctx.info[idx].exec.back().second & mask_type_global) &&
       !(ctx.info[idx].exec.back().second & mask_type_loop)) {
      ctx.info[idx].exec.pop_back();
      assert(ctx.info[idx].exec.back().second & mask_type_exact);
      assert(ctx.info[idx].exec.back().first.size() == bld.lm.size());
      ctx.info[idx].exec.back().first = bld.pseudo(aco_opcode::p_parallelcopy, bld.def(bld.lm, exec),
                                                   ctx.info[idx].exec.back().first);
      return;
   }
   /* otherwise, we create an exact mask and push to the stack */
   Temp wqm = ctx.info[idx].exec.back().first;
   Temp exact = bld.tmp(bld.lm);
   wqm = bld.sop1(Builder::s_and_saveexec, bld.def(bld.lm), bld.def(s1, scc),
                  bld.exec(Definition(exact)), ctx.info[idx].exec[0].first, bld.exec(wqm));
   ctx.info[idx].exec.back().first = wqm;
   ctx.info[idx].exec.emplace_back(exact, mask_type_exact);
}
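/* add_coupling_code() materializes the exec-mask bookkeeping at block
 * boundaries: it seeds the stack at p_startpgm, copies or phis the stacks of
 * the linear predecessors at merge points, and builds the linear phis that
 * carry the masks around loop headers and loop exits.  It returns the index
 * of the first instruction of the block that still has to be processed. */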
unsigned add_coupling_code(exec_ctx& ctx, Block* block,
                           std::vector<aco_ptr<Instruction>>& instructions)
{
   unsigned idx = block->index;
   Builder bld(ctx.program, &instructions);
   std::vector<unsigned>& preds = block->linear_preds;

   /* start block */
   if (idx == 0) {
      aco_ptr<Instruction>& startpgm = block->instructions[0];
      assert(startpgm->opcode == aco_opcode::p_startpgm);
      Temp exec_mask = startpgm->definitions.back().getTemp();
      bld.insert(std::move(startpgm));

      if (ctx.handle_wqm) {
         ctx.info[0].exec.emplace_back(exec_mask, mask_type_global | mask_type_exact | mask_type_initial);
         /* if this block only needs WQM, initialize already */
         if (ctx.info[0].block_needs == WQM)
            transition_to_WQM(ctx, bld, 0);
      } else {
         uint8_t mask = mask_type_global;
         if (ctx.program->needs_wqm) {
            exec_mask = bld.sop1(Builder::s_wqm, bld.def(bld.lm, exec), bld.def(s1, scc), bld.exec(exec_mask));
            mask |= mask_type_wqm;
         } else {
            mask |= mask_type_exact;
         }
         ctx.info[0].exec.emplace_back(exec_mask, mask);
      }

      return 1;
   }
   /* loop entry block */
   if (block->kind & block_kind_loop_header) {
      assert(preds[0] == idx - 1);
      ctx.info[idx].exec = ctx.info[idx - 1].exec;
      loop_info& info = ctx.loop.back();
      while (ctx.info[idx].exec.size() > info.num_exec_masks)
         ctx.info[idx].exec.pop_back();

      /* create ssa names for outer exec masks */
      if (info.has_discard) {
         aco_ptr<Pseudo_instruction> phi;
         for (int i = 0; i < info.num_exec_masks - 1; i++) {
            phi.reset(create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, preds.size(), 1));
            phi->definitions[0] = bld.def(bld.lm);
            phi->operands[0] = Operand(ctx.info[preds[0]].exec[i].first);
            ctx.info[idx].exec[i].first = bld.insert(std::move(phi));
         }
      }

      /* create ssa name for restore mask */
      if (info.has_divergent_break) {
         /* this phi might be trivial but ensures a parallelcopy on the loop header */
         aco_ptr<Pseudo_instruction> phi{create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, preds.size(), 1)};
         phi->definitions[0] = bld.def(bld.lm);
         phi->operands[0] = Operand(ctx.info[preds[0]].exec[info.num_exec_masks - 1].first);
         ctx.info[idx].exec.back().first = bld.insert(std::move(phi));
      }

      /* create ssa name for loop active mask */
      aco_ptr<Pseudo_instruction> phi{create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, preds.size(), 1)};
      if (info.has_divergent_continue)
         phi->definitions[0] = bld.def(bld.lm);
      else
         phi->definitions[0] = bld.def(bld.lm, exec);
      phi->operands[0] = Operand(ctx.info[preds[0]].exec.back().first);
      Temp loop_active = bld.insert(std::move(phi));

      if (info.has_divergent_break) {
         uint8_t mask_type = (ctx.info[idx].exec.back().second & (mask_type_wqm | mask_type_exact)) | mask_type_loop;
         ctx.info[idx].exec.emplace_back(loop_active, mask_type);
      } else {
         ctx.info[idx].exec.back().first = loop_active;
         ctx.info[idx].exec.back().second |= mask_type_loop;
      }

      /* create a parallelcopy to move the active mask to exec */
      unsigned i = 0;
      if (info.has_divergent_continue) {
         while (block->instructions[i]->opcode != aco_opcode::p_logical_start) {
            bld.insert(std::move(block->instructions[i]));
            i++;
         }
         uint8_t mask_type = ctx.info[idx].exec.back().second & (mask_type_wqm | mask_type_exact);
         assert(ctx.info[idx].exec.back().first.size() == bld.lm.size());
         ctx.info[idx].exec.emplace_back(bld.pseudo(aco_opcode::p_parallelcopy, bld.def(bld.lm, exec),
                                                    ctx.info[idx].exec.back().first), mask_type);
      }

      return i;
   }
   /* loop exit block */
   if (block->kind & block_kind_loop_exit) {
      Block* header = ctx.loop.back().loop_header;
      loop_info& info = ctx.loop.back();

      for (ASSERTED unsigned pred : preds)
         assert(ctx.info[pred].exec.size() >= info.num_exec_masks);

      /* fill the loop header phis */
      std::vector<unsigned>& header_preds = header->linear_preds;
      int k = 0;
      if (info.has_discard) {
         while (k < info.num_exec_masks - 1) {
            aco_ptr<Instruction>& phi = header->instructions[k];
            assert(phi->opcode == aco_opcode::p_linear_phi);
            for (unsigned i = 1; i < phi->operands.size(); i++)
               phi->operands[i] = Operand(ctx.info[header_preds[i]].exec[k].first);
            k++;
         }
      }
      aco_ptr<Instruction>& phi = header->instructions[k++];
      assert(phi->opcode == aco_opcode::p_linear_phi);
      for (unsigned i = 1; i < phi->operands.size(); i++)
         phi->operands[i] = Operand(ctx.info[header_preds[i]].exec[info.num_exec_masks - 1].first);

      if (info.has_divergent_break) {
         aco_ptr<Instruction>& phi = header->instructions[k];
         assert(phi->opcode == aco_opcode::p_linear_phi);
         for (unsigned i = 1; i < phi->operands.size(); i++)
            phi->operands[i] = Operand(ctx.info[header_preds[i]].exec[info.num_exec_masks].first);
      }

      assert(!(block->kind & block_kind_top_level) || info.num_exec_masks <= 2);

      /* create the loop exit phis if not trivial */
      for (unsigned k = 0; k < info.num_exec_masks; k++) {
         Temp same = ctx.info[preds[0]].exec[k].first;
         uint8_t type = ctx.info[header_preds[0]].exec[k].second;
         bool trivial = true;

         for (unsigned i = 1; i < preds.size() && trivial; i++) {
            if (ctx.info[preds[i]].exec[k].first != same)
               trivial = false;
         }

         if (trivial) {
            ctx.info[idx].exec.emplace_back(same, type);
         } else {
            /* create phi for loop footer */
            aco_ptr<Pseudo_instruction> phi{create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, preds.size(), 1)};
            phi->definitions[0] = bld.def(bld.lm);
            for (unsigned i = 0; i < phi->operands.size(); i++)
               phi->operands[i] = Operand(ctx.info[preds[i]].exec[k].first);
            ctx.info[idx].exec.emplace_back(bld.insert(std::move(phi)), type);
         }
      }

      assert(ctx.info[idx].exec.size() == info.num_exec_masks);

      /* create a parallelcopy to move the live mask to exec */
      unsigned i = 0;
      while (block->instructions[i]->opcode != aco_opcode::p_logical_start) {
         bld.insert(std::move(block->instructions[i]));
         i++;
      }

      if (ctx.handle_wqm) {
         if (block->kind & block_kind_top_level && ctx.info[idx].exec.size() == 2) {
            if ((ctx.info[idx].block_needs | ctx.info[idx].ever_again_needs) == 0 ||
                (ctx.info[idx].block_needs | ctx.info[idx].ever_again_needs) == Exact) {
               ctx.info[idx].exec.back().second |= mask_type_global;
               transition_to_Exact(ctx, bld, idx);
               ctx.handle_wqm = false;
            }
         }
         if (ctx.info[idx].block_needs == WQM)
            transition_to_WQM(ctx, bld, idx);
         else if (ctx.info[idx].block_needs == Exact)
            transition_to_Exact(ctx, bld, idx);
      }

      assert(ctx.info[idx].exec.back().first.size() == bld.lm.size());
      ctx.info[idx].exec.back().first = bld.pseudo(aco_opcode::p_parallelcopy, bld.def(bld.lm, exec),
                                                   ctx.info[idx].exec.back().first);

      ctx.loop.pop_back();
      return i;
   }
   if (preds.size() == 1) {
      ctx.info[idx].exec = ctx.info[preds[0]].exec;
   } else {
      assert(preds.size() == 2);
      /* if one of the predecessors ends in exact mask, we pop it from stack */
      unsigned num_exec_masks = std::min(ctx.info[preds[0]].exec.size(),
                                         ctx.info[preds[1]].exec.size());
      if (block->kind & block_kind_top_level && !(block->kind & block_kind_merge))
         num_exec_masks = std::min(num_exec_masks, 2u);

      /* create phis for diverged exec masks */
      for (unsigned i = 0; i < num_exec_masks; i++) {
         bool in_exec = i == num_exec_masks - 1 && !(block->kind & block_kind_merge);
         if (!in_exec && ctx.info[preds[0]].exec[i].first == ctx.info[preds[1]].exec[i].first) {
            assert(ctx.info[preds[0]].exec[i].second == ctx.info[preds[1]].exec[i].second);
            ctx.info[idx].exec.emplace_back(ctx.info[preds[0]].exec[i]);
            continue;
         }

         Temp phi = bld.pseudo(aco_opcode::p_linear_phi, in_exec ? bld.def(bld.lm, exec) : bld.def(bld.lm),
                               ctx.info[preds[0]].exec[i].first,
                               ctx.info[preds[1]].exec[i].first);
         uint8_t mask_type = ctx.info[preds[0]].exec[i].second & ctx.info[preds[1]].exec[i].second;
         ctx.info[idx].exec.emplace_back(phi, mask_type);
      }
   }

   unsigned i = 0;
   while (block->instructions[i]->opcode == aco_opcode::p_phi ||
          block->instructions[i]->opcode == aco_opcode::p_linear_phi) {
      bld.insert(std::move(block->instructions[i]));
      i++;
   }

   if (block->kind & block_kind_merge)
      ctx.info[idx].exec.pop_back();

   if (block->kind & block_kind_top_level && ctx.info[idx].exec.size() == 3) {
      assert(ctx.info[idx].exec.back().second == mask_type_exact);
      assert(block->kind & block_kind_merge);
      ctx.info[idx].exec.pop_back();
   }

   /* try to satisfy the block's needs */
   if (ctx.handle_wqm) {
      if (block->kind & block_kind_top_level && ctx.info[idx].exec.size() == 2) {
         if ((ctx.info[idx].block_needs | ctx.info[idx].ever_again_needs) == 0 ||
             (ctx.info[idx].block_needs | ctx.info[idx].ever_again_needs) == Exact) {
            ctx.info[idx].exec.back().second |= mask_type_global;
            transition_to_Exact(ctx, bld, idx);
            ctx.handle_wqm = false;
         }
      }
      if (ctx.info[idx].block_needs == WQM)
         transition_to_WQM(ctx, bld, idx);
      else if (ctx.info[idx].block_needs == Exact)
         transition_to_Exact(ctx, bld, idx);
   }

   if (block->kind & block_kind_merge) {
      Temp restore = ctx.info[idx].exec.back().first;
      assert(restore.size() == bld.lm.size());
      ctx.info[idx].exec.back().first = bld.pseudo(aco_opcode::p_parallelcopy, bld.def(bld.lm, exec), restore);
   }

   return i;
}
void lower_fs_buffer_store_smem(Builder& bld, bool need_check, aco_ptr<Instruction>& instr, Temp cur_exec)
{
   Operand offset = instr->operands[1];
   if (need_check) {
      /* if exec is zero, then use UINT32_MAX as an offset and make this store a no-op */
      Temp nonempty = bld.sopc(Builder::s_cmp_lg, bld.def(s1, scc), cur_exec, Operand(0u));

      if (offset.isLiteral())
         offset = bld.sop1(aco_opcode::s_mov_b32, bld.def(s1), offset);

      offset = bld.sop2(aco_opcode::s_cselect_b32, bld.hint_m0(bld.def(s1)),
                        offset, Operand(UINT32_MAX), bld.scc(nonempty));
   } else if (offset.isConstant() && offset.constantValue() > 0xFFFFF) {
      offset = bld.sop1(aco_opcode::s_mov_b32, bld.hint_m0(bld.def(s1)), offset);
   }
   if (!offset.isConstant())
      offset.setFixed(m0);

   switch (instr->operands[2].size()) {
   case 1:
      instr->opcode = aco_opcode::s_buffer_store_dword;
      break;
   case 2:
      instr->opcode = aco_opcode::s_buffer_store_dwordx2;
      break;
   case 4:
      instr->opcode = aco_opcode::s_buffer_store_dwordx4;
      break;
   default:
      unreachable("Invalid SMEM buffer store size");
   }
   instr->operands[1] = offset;
   /* as_uniform() needs to be done here so it's done in exact mode and helper
    * lanes don't contribute. */
   instr->operands[2] = Operand(bld.as_uniform(instr->operands[2]));
}
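/* process_instructions() walks the remaining instructions of a block, switches
 * between WQM and Exact where instr_needs demands it, and lowers the discard
 * and helper-lane pseudo instructions.  For example, p_discard_if ends up as a
 * chain of (wave64 sketch, registers illustrative):
 *    s_andn2_b64 exec, exec, s[cond]          ; innermost mask loses the lanes
 *    s_andn2_b64 s[outer], s[outer], s[cond]  ; ... and so does every outer mask
 *    p_exit_early_if scc                      ; leave the shader if nothing is left
 * where the andn2 of the outermost mask provides the SCC for the early exit. */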
void process_instructions(exec_ctx& ctx, Block* block,
                          std::vector<aco_ptr<Instruction>>& instructions,
                          unsigned idx)
{
   WQMState state;
   if (ctx.info[block->index].exec.back().second & mask_type_wqm)
      state = WQM;
   else {
      assert(!ctx.handle_wqm || ctx.info[block->index].exec.back().second & mask_type_exact);
      state = Exact;
   }

   /* if the block doesn't need both, WQM and Exact, we can skip processing the instructions */
   bool process = (ctx.handle_wqm &&
                   (ctx.info[block->index].block_needs & state) !=
                   (ctx.info[block->index].block_needs & (WQM | Exact))) ||
                  block->kind & block_kind_uses_discard_if ||
                  block->kind & block_kind_uses_demote ||
                  block->kind & block_kind_needs_lowering;
   if (!process) {
      std::vector<aco_ptr<Instruction>>::iterator it = std::next(block->instructions.begin(), idx);
      instructions.insert(instructions.end(),
                          std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(it),
                          std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(block->instructions.end()));
      return;
   }

   Builder bld(ctx.program, &instructions);

   for (; idx < block->instructions.size(); idx++) {
      aco_ptr<Instruction> instr = std::move(block->instructions[idx]);

      WQMState needs = ctx.handle_wqm ? ctx.info[block->index].instr_needs[idx] : Unspecified;
      if (instr->opcode == aco_opcode::p_discard_if) {
         if (ctx.info[block->index].block_needs & Preserve_WQM) {
            assert(block->kind & block_kind_top_level);
            transition_to_WQM(ctx, bld, block->index);
            ctx.info[block->index].exec.back().second &= ~mask_type_global;
         }
         int num = ctx.info[block->index].exec.size();

         Operand cond = instr->operands[0];
         for (int i = num - 1; i >= 0; i--) {
            Instruction *andn2 = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc),
                                          ctx.info[block->index].exec[i].first, cond);
            if (i == num - 1) {
               andn2->operands[0].setFixed(exec);
               andn2->definitions[0].setFixed(exec);
            }
            if (i == 0) {
               instr->opcode = aco_opcode::p_exit_early_if;
               instr->operands[0] = bld.scc(andn2->definitions[1].getTemp());
            }
            ctx.info[block->index].exec[i].first = andn2->definitions[0].getTemp();
         }
         assert(!ctx.handle_wqm || (ctx.info[block->index].exec[0].second & mask_type_wqm) == 0);
      } else if (needs == WQM && state != WQM) {
         transition_to_WQM(ctx, bld, block->index);
         state = WQM;
      } else if (needs == Exact && state != Exact) {
         transition_to_Exact(ctx, bld, block->index);
         state = Exact;
      }
      if (instr->opcode == aco_opcode::p_is_helper || instr->opcode == aco_opcode::p_load_helper) {
         Definition dst = instr->definitions[0];
         assert(dst.size() == bld.lm.size());
         if (state == Exact) {
            instr.reset(create_instruction<SOP1_instruction>(bld.w64or32(Builder::s_mov), Format::SOP1, 1, 1));
            instr->operands[0] = Operand(0u);
            instr->definitions[0] = dst;
         } else {
            std::pair<Temp, uint8_t>& exact_mask = ctx.info[block->index].exec[0];
            if (instr->opcode == aco_opcode::p_load_helper &&
                !(ctx.info[block->index].exec[0].second & mask_type_initial)) {
               /* find last initial exact mask */
               for (int i = block->index; i >= 0; i--) {
                  if (ctx.program->blocks[i].kind & block_kind_top_level &&
                      ctx.info[i].exec[0].second & mask_type_initial) {
                     exact_mask = ctx.info[i].exec[0];
                     break;
                  }
               }
            }

            assert(instr->opcode == aco_opcode::p_is_helper || exact_mask.second & mask_type_initial);
            assert(exact_mask.second & mask_type_exact);

            instr.reset(create_instruction<SOP2_instruction>(bld.w64or32(Builder::s_andn2), Format::SOP2, 2, 2));
            instr->operands[0] = Operand(ctx.info[block->index].exec.back().first); /* current exec */
            instr->operands[1] = Operand(exact_mask.first);
            instr->definitions[0] = dst;
            instr->definitions[1] = bld.def(s1, scc);
         }
      } else if (instr->opcode == aco_opcode::p_demote_to_helper) {
         /* turn demote into discard_if with only exact masks */
         assert((ctx.info[block->index].exec[0].second & (mask_type_exact | mask_type_global)) == (mask_type_exact | mask_type_global));
         ctx.info[block->index].exec[0].second &= ~mask_type_initial;

         int num = 0;
         Temp cond;
         if (instr->operands.empty()) {
            /* transition to exact and set exec to zero */
            Temp old_exec = ctx.info[block->index].exec.back().first;
            Temp new_exec = bld.tmp(bld.lm);
            cond = bld.sop1(Builder::s_and_saveexec, bld.def(bld.lm), bld.def(s1, scc),
                            bld.exec(Definition(new_exec)), Operand(0u), bld.exec(old_exec));
            if (ctx.info[block->index].exec.back().second & mask_type_exact) {
               ctx.info[block->index].exec.back().first = new_exec;
            } else {
               ctx.info[block->index].exec.back().first = cond;
               ctx.info[block->index].exec.emplace_back(new_exec, mask_type_exact);
            }
         } else {
            /* demote_if: transition to exact */
            transition_to_Exact(ctx, bld, block->index);
            assert(instr->operands[0].isTemp());
            cond = instr->operands[0].getTemp();
            num = 1;
         }

         num += ctx.info[block->index].exec.size() - 1;
         for (int i = num - 1; i >= 0; i--) {
            if (ctx.info[block->index].exec[i].second & mask_type_exact) {
               Instruction *andn2 = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc),
                                             ctx.info[block->index].exec[i].first, cond);
               if (i == (int)ctx.info[block->index].exec.size() - 1) {
                  andn2->operands[0].setFixed(exec);
                  andn2->definitions[0].setFixed(exec);
               }
               if (i == 0) {
                  instr->opcode = aco_opcode::p_exit_early_if;
                  instr->operands[0] = bld.scc(andn2->definitions[1].getTemp());
               }
               ctx.info[block->index].exec[i].first = andn2->definitions[0].getTemp();
            } else {
               assert(i != 0);
            }
         }
      } else if (instr->opcode == aco_opcode::p_fs_buffer_store_smem) {
         bool need_check = ctx.info[block->index].exec.size() != 1 &&
                           !(ctx.info[block->index].exec[ctx.info[block->index].exec.size() - 2].second & Exact);
         lower_fs_buffer_store_smem(bld, need_check, instr, ctx.info[block->index].exec.back().first);
      }

      bld.insert(std::move(instr));
   }
}
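/* add_branch_code() rewrites the terminator of every block.  A divergent
 * "if (cond)" becomes, roughly (wave64 sketch, registers illustrative):
 *    s_and_saveexec_b64 s[0:1], s[cond]   ; save old exec, exec = cond & exec
 *    s_cbranch_execz    BB_invert         ; skip the then-side if it is empty
 * and the matching invert block restores the else-side lanes with
 *    s_andn2_b64 exec, s[0:1], exec       ; else mask = saved exec & ~then mask
 *    s_cbranch_execz    BB_endif
 * which mirrors the p_cbranch_z/p_cbranch_nz pseudo branches emitted below. */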
void add_branch_code(exec_ctx& ctx, Block* block)
{
   unsigned idx = block->index;
   Builder bld(ctx.program, block);

   if (idx == ctx.program->blocks.size() - 1)
      return;

   /* try to disable wqm handling */
   if (ctx.handle_wqm && block->kind & block_kind_top_level) {
      if (ctx.info[idx].exec.size() == 3) {
         assert(ctx.info[idx].exec[1].second == mask_type_wqm);
         ctx.info[idx].exec.pop_back();
      }
      assert(ctx.info[idx].exec.size() <= 2);

      if (ctx.info[idx].ever_again_needs == 0 ||
          ctx.info[idx].ever_again_needs == Exact) {
         /* transition to Exact */
         aco_ptr<Instruction> branch = std::move(block->instructions.back());
         block->instructions.pop_back();
         ctx.info[idx].exec.back().second |= mask_type_global;
         transition_to_Exact(ctx, bld, idx);
         bld.insert(std::move(branch));
         ctx.handle_wqm = false;

      } else if (ctx.info[idx].block_needs & Preserve_WQM) {
         /* transition to WQM and remove global flag */
         aco_ptr<Instruction> branch = std::move(block->instructions.back());
         block->instructions.pop_back();
         transition_to_WQM(ctx, bld, idx);
         ctx.info[idx].exec.back().second &= ~mask_type_global;
         bld.insert(std::move(branch));
      }
   }
   if (block->kind & block_kind_loop_preheader) {
      /* collect information about the succeeding loop */
      bool has_divergent_break = false;
      bool has_divergent_continue = false;
      bool has_discard = false;
      uint8_t needs = 0;
      unsigned loop_nest_depth = ctx.program->blocks[idx + 1].loop_nest_depth;

      for (unsigned i = idx + 1; ctx.program->blocks[i].loop_nest_depth >= loop_nest_depth; i++) {
         Block& loop_block = ctx.program->blocks[i];
         needs |= ctx.info[i].block_needs;

         if (loop_block.kind & block_kind_uses_discard_if ||
             loop_block.kind & block_kind_discard ||
             loop_block.kind & block_kind_uses_demote)
            has_discard = true;
         if (loop_block.loop_nest_depth != loop_nest_depth)
            continue;

         if (loop_block.kind & block_kind_uniform)
            continue;
         else if (loop_block.kind & block_kind_break)
            has_divergent_break = true;
         else if (loop_block.kind & block_kind_continue)
            has_divergent_continue = true;
      }

      if (ctx.handle_wqm) {
         if (needs & WQM) {
            aco_ptr<Instruction> branch = std::move(block->instructions.back());
            block->instructions.pop_back();
            transition_to_WQM(ctx, bld, idx);
            bld.insert(std::move(branch));
         } else {
            aco_ptr<Instruction> branch = std::move(block->instructions.back());
            block->instructions.pop_back();
            transition_to_Exact(ctx, bld, idx);
            bld.insert(std::move(branch));
         }
      }

      unsigned num_exec_masks = ctx.info[idx].exec.size();
      if (block->kind & block_kind_top_level)
         num_exec_masks = std::min(num_exec_masks, 2u);

      ctx.loop.emplace_back(&ctx.program->blocks[block->linear_succs[0]],
                            num_exec_masks,
                            needs,
                            has_divergent_break,
                            has_divergent_continue,
                            has_discard);
   }
   if (block->kind & block_kind_discard) {

      assert(block->instructions.back()->format == Format::PSEUDO_BRANCH);
      aco_ptr<Instruction> branch = std::move(block->instructions.back());
      block->instructions.pop_back();

      /* create a discard_if() instruction with the exec mask as condition */
      unsigned num = 0;
      if (ctx.loop.size()) {
         /* if we're in a loop, only discard from the outer exec masks */
         num = ctx.loop.back().num_exec_masks;
      } else {
         num = ctx.info[idx].exec.size() - 1;
      }

      Temp old_exec = ctx.info[idx].exec.back().first;
      Temp new_exec = bld.tmp(bld.lm);
      Temp cond = bld.sop1(Builder::s_and_saveexec, bld.def(bld.lm), bld.def(s1, scc),
                           bld.exec(Definition(new_exec)), Operand(0u), bld.exec(old_exec));
      ctx.info[idx].exec.back().first = new_exec;

      for (int i = num - 1; i >= 0; i--) {
         Instruction *andn2 = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc),
                                       ctx.info[block->index].exec[i].first, cond);
         if (i == (int)ctx.info[idx].exec.size() - 1)
            andn2->definitions[0].setFixed(exec);
         if (i == 0)
            bld.pseudo(aco_opcode::p_exit_early_if, bld.scc(andn2->definitions[1].getTemp()));
         ctx.info[block->index].exec[i].first = andn2->definitions[0].getTemp();
      }
      assert(!ctx.handle_wqm || (ctx.info[block->index].exec[0].second & mask_type_wqm) == 0);

      if ((block->kind & (block_kind_break | block_kind_uniform)) == block_kind_break)
         ctx.info[idx].exec.back().first = cond;
      bld.insert(std::move(branch));
      /* no return here as it can be followed by a divergent break */
   }
   if (block->kind & block_kind_continue_or_break) {
      assert(ctx.program->blocks[ctx.program->blocks[block->linear_succs[1]].linear_succs[0]].kind & block_kind_loop_header);
      assert(ctx.program->blocks[ctx.program->blocks[block->linear_succs[0]].linear_succs[0]].kind & block_kind_loop_exit);
      assert(block->instructions.back()->opcode == aco_opcode::p_branch);
      block->instructions.pop_back();

      while (!(ctx.info[idx].exec.back().second & mask_type_loop))
         ctx.info[idx].exec.pop_back();

      ctx.info[idx].exec.back().first = bld.pseudo(aco_opcode::p_parallelcopy, bld.def(bld.lm, exec), ctx.info[idx].exec.back().first);
      bld.branch(aco_opcode::p_cbranch_nz, bld.exec(ctx.info[idx].exec.back().first), block->linear_succs[1], block->linear_succs[0]);
      return;
   }
   if (block->kind & block_kind_uniform) {
      Pseudo_branch_instruction* branch = static_cast<Pseudo_branch_instruction*>(block->instructions.back().get());
      if (branch->opcode == aco_opcode::p_branch) {
         branch->target[0] = block->linear_succs[0];
      } else {
         branch->target[0] = block->linear_succs[1];
         branch->target[1] = block->linear_succs[0];
      }
      return;
   }
   if (block->kind & block_kind_branch) {

      if (ctx.handle_wqm &&
          ctx.info[idx].exec.size() >= 2 &&
          ctx.info[idx].exec.back().second == mask_type_exact &&
          !(ctx.info[idx].block_needs & Exact_Branch) &&
          ctx.info[idx].exec[ctx.info[idx].exec.size() - 2].second & mask_type_wqm) {
         /* return to wqm before branching */
         ctx.info[idx].exec.pop_back();
      }

      // orig = s_and_saveexec_b64
      assert(block->linear_succs.size() == 2);
      assert(block->instructions.back()->opcode == aco_opcode::p_cbranch_z);
      Temp cond = block->instructions.back()->operands[0].getTemp();
      block->instructions.pop_back();

      if (ctx.info[idx].block_needs & Exact_Branch)
         transition_to_Exact(ctx, bld, idx);

      Temp current_exec = ctx.info[idx].exec.back().first;
      uint8_t mask_type = ctx.info[idx].exec.back().second & (mask_type_wqm | mask_type_exact);

      Temp then_mask = bld.tmp(bld.lm);
      Temp old_exec = bld.sop1(Builder::s_and_saveexec, bld.def(bld.lm), bld.def(s1, scc),
                               bld.exec(Definition(then_mask)), cond, bld.exec(current_exec));

      ctx.info[idx].exec.back().first = old_exec;

      /* add next current exec to the stack */
      ctx.info[idx].exec.emplace_back(then_mask, mask_type);

      bld.branch(aco_opcode::p_cbranch_z, bld.exec(then_mask), block->linear_succs[1], block->linear_succs[0]);
      return;
   }
   if (block->kind & block_kind_invert) {
      // exec = s_andn2_b64 (original_exec, exec)
      assert(block->instructions.back()->opcode == aco_opcode::p_cbranch_nz);
      block->instructions.pop_back();
      Temp then_mask = ctx.info[idx].exec.back().first;
      uint8_t mask_type = ctx.info[idx].exec.back().second;
      ctx.info[idx].exec.pop_back();
      Temp orig_exec = ctx.info[idx].exec.back().first;
      Temp else_mask = bld.sop2(Builder::s_andn2, bld.def(bld.lm, exec),
                                bld.def(s1, scc), orig_exec, bld.exec(then_mask));

      /* add next current exec to the stack */
      ctx.info[idx].exec.emplace_back(else_mask, mask_type);

      bld.branch(aco_opcode::p_cbranch_z, bld.exec(else_mask), block->linear_succs[1], block->linear_succs[0]);
      return;
   }
   if (block->kind & block_kind_break) {
      // loop_mask = s_andn2_b64 (loop_mask, exec)
      assert(block->instructions.back()->opcode == aco_opcode::p_branch);
      block->instructions.pop_back();

      Temp current_exec = ctx.info[idx].exec.back().first;
      Temp cond = Temp();
      for (int exec_idx = ctx.info[idx].exec.size() - 2; exec_idx >= 0; exec_idx--) {
         cond = bld.tmp(s1);
         Temp exec_mask = ctx.info[idx].exec[exec_idx].first;
         exec_mask = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.scc(Definition(cond)),
                              exec_mask, current_exec);
         ctx.info[idx].exec[exec_idx].first = exec_mask;
         if (ctx.info[idx].exec[exec_idx].second & mask_type_loop)
            break;
      }

      /* check if the successor is the merge block, otherwise set exec to 0 */
      // TODO: this could be done better by directly branching to the merge block
      unsigned succ_idx = ctx.program->blocks[block->linear_succs[1]].linear_succs[0];
      Block& succ = ctx.program->blocks[succ_idx];
      if (!(succ.kind & block_kind_invert || succ.kind & block_kind_merge)) {
         ctx.info[idx].exec.back().first = bld.sop1(Builder::s_mov, bld.def(bld.lm, exec), Operand(0u));
      }

      bld.branch(aco_opcode::p_cbranch_nz, bld.scc(cond), block->linear_succs[1], block->linear_succs[0]);
      return;
   }
   if (block->kind & block_kind_continue) {
      assert(block->instructions.back()->opcode == aco_opcode::p_branch);
      block->instructions.pop_back();

      Temp current_exec = ctx.info[idx].exec.back().first;
      Temp cond = Temp();
      for (int exec_idx = ctx.info[idx].exec.size() - 2; exec_idx >= 0; exec_idx--) {
         if (ctx.info[idx].exec[exec_idx].second & mask_type_loop)
            break;
         cond = bld.tmp(s1);
         Temp exec_mask = ctx.info[idx].exec[exec_idx].first;
         exec_mask = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.scc(Definition(cond)),
                              exec_mask, bld.exec(current_exec));
         ctx.info[idx].exec[exec_idx].first = exec_mask;
      }
      assert(cond != Temp());

      /* check if the successor is the merge block, otherwise set exec to 0 */
      // TODO: this could be done better by directly branching to the merge block
      unsigned succ_idx = ctx.program->blocks[block->linear_succs[1]].linear_succs[0];
      Block& succ = ctx.program->blocks[succ_idx];
      if (!(succ.kind & block_kind_invert || succ.kind & block_kind_merge)) {
         ctx.info[idx].exec.back().first = bld.sop1(Builder::s_mov, bld.def(bld.lm, exec), Operand(0u));
      }

      bld.branch(aco_opcode::p_cbranch_nz, bld.scc(cond), block->linear_succs[1], block->linear_succs[0]);
      return;
   }
}
void process_block(exec_ctx& ctx, Block* block)
{
   std::vector<aco_ptr<Instruction>> instructions;
   instructions.reserve(block->instructions.size());

   unsigned idx = add_coupling_code(ctx, block, instructions);

   assert(block->index != ctx.program->blocks.size() - 1 ||
          ctx.info[block->index].exec.size() <= 2);

   process_instructions(ctx, block, instructions, idx);

   block->instructions = std::move(instructions);

   add_branch_code(ctx, block);

   block->live_out_exec = ctx.info[block->index].exec.back().first;
}

} /* end namespace */
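/* Entry point.  The WQM analysis is only run when the program needs both WQM
 * and Exact execution; otherwise every block can simply be processed with a
 * single global mask (made WQM up front if the program requires it). */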
void insert_exec_mask(Program *program)
{
   exec_ctx ctx(program);

   if (program->needs_wqm && program->needs_exact)
      calculate_wqm_needs(ctx);

   for (Block& block : program->blocks)
      process_block(ctx, &block);
}

}