/*
 * Copyright © 2019 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "aco_ir.h"
#include "aco_builder.h"

namespace aco {
namespace {

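/* This pass inserts exec mask bookkeeping: every block carries a stack of
 * exec masks (block_info::exec), instructions that require whole quad mode
 * (WQM) or exact execution cause transitions between the two, and divergent
 * control flow (branches, breaks, continues, discards and demotes) saves,
 * restores and reduces the masks on that stack. */
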
enum WQMState : uint8_t {
   Unspecified = 0,
   Exact = 1 << 0,
   WQM = 1 << 1, /* with control flow applied */
   Preserve_WQM = 1 << 2,
   Exact_Branch = 1 << 3,
};

enum mask_type : uint8_t {
   mask_type_global = 1 << 0,
   mask_type_exact = 1 << 1,
   mask_type_wqm = 1 << 2,
   mask_type_loop = 1 << 3, /* active lanes of a loop */
   mask_type_initial = 1 << 4, /* initially active lanes */
};

struct wqm_ctx {
   Program* program;
   /* state for WQM propagation */
   std::set<unsigned> worklist;
   std::vector<uint16_t> defined_in;
   std::vector<bool> needs_wqm;
   std::vector<bool> branch_wqm; /* true if the branch condition in this block should be in wqm */
   bool loop;
   bool wqm;
   wqm_ctx(Program* program) : program(program),
                               defined_in(program->peekAllocationId(), 0xFFFF),
                               needs_wqm(program->peekAllocationId()),
                               branch_wqm(program->blocks.size()),
                               loop(false),
                               wqm(false)
   {
      for (unsigned i = 0; i < program->blocks.size(); i++)
         worklist.insert(i);
   }
};

struct loop_info {
   Block* loop_header;
   uint16_t num_exec_masks;
   uint8_t needs;
   bool has_divergent_break;
   bool has_divergent_continue;
   bool has_discard; /* has a discard or demote */
   loop_info(Block* b, uint16_t num, uint8_t needs, bool breaks, bool cont, bool discard) :
             loop_header(b), num_exec_masks(num), needs(needs), has_divergent_break(breaks),
             has_divergent_continue(cont), has_discard(discard) {}
};

struct block_info {
   std::vector<std::pair<Temp, uint8_t>> exec;
   std::vector<WQMState> instr_needs;
   uint8_t block_needs;
   uint8_t ever_again_needs;
};

struct exec_ctx {
   Program *program;
   std::vector<block_info> info;
   std::vector<loop_info> loop;
   bool handle_wqm = false;
   exec_ctx(Program *program) : program(program), info(program->blocks.size()) {}
};

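/* Returns true if the result of the instruction depends on the exec mask,
 * so computing it in WQM (rather than Exact) makes a difference. */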
bool pred_by_exec_mask(aco_ptr<Instruction>& instr) {
   if (instr->isSALU())
      return instr->reads_exec();
   if (instr->format == Format::SMEM || instr->isSALU())
      return false;
   if (instr->format == Format::PSEUDO_BARRIER)
      return false;

   if (instr->format == Format::PSEUDO) {
      switch (instr->opcode) {
      case aco_opcode::p_create_vector:
         return instr->definitions[0].getTemp().type() == RegType::vgpr;
      case aco_opcode::p_extract_vector:
      case aco_opcode::p_split_vector:
         return instr->operands[0].getTemp().type() == RegType::vgpr;
      case aco_opcode::p_spill:
      case aco_opcode::p_reload:
         return false;
      default:
         break;
      }
   }

   if (instr->opcode == aco_opcode::v_readlane_b32 ||
       instr->opcode == aco_opcode::v_writelane_b32)
      return false;

   return true;
}

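/* Returns true if the instruction must run with the exact mask, i.e. without
 * helper invocations (its side effects are visible to memory or exports). */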
bool needs_exact(aco_ptr<Instruction>& instr) {
   if (instr->format == Format::MUBUF) {
      MUBUF_instruction *mubuf = static_cast<MUBUF_instruction *>(instr.get());
      return mubuf->disable_wqm;
   } else if (instr->format == Format::MTBUF) {
      MTBUF_instruction *mtbuf = static_cast<MTBUF_instruction *>(instr.get());
      return mtbuf->disable_wqm;
   } else if (instr->format == Format::MIMG) {
      MIMG_instruction *mimg = static_cast<MIMG_instruction *>(instr.get());
      return mimg->disable_wqm;
   } else if (instr->format == Format::FLAT || instr->format == Format::GLOBAL) {
      FLAT_instruction *flat = static_cast<FLAT_instruction *>(instr.get());
      return flat->disable_wqm;
   } else {
      return instr->format == Format::EXP || instr->opcode == aco_opcode::p_fs_buffer_store_smem;
   }
}

void set_needs_wqm(wqm_ctx &ctx, Temp tmp)
{
   if (!ctx.needs_wqm[tmp.id()]) {
      ctx.needs_wqm[tmp.id()] = true;
      if (ctx.defined_in[tmp.id()] != 0xFFFF)
         ctx.worklist.insert(ctx.defined_in[tmp.id()]);
   }
}

void mark_block_wqm(wqm_ctx &ctx, unsigned block_idx)
{
   if (ctx.branch_wqm[block_idx])
      return;

   ctx.branch_wqm[block_idx] = true;
   Block& block = ctx.program->blocks[block_idx];
   aco_ptr<Instruction>& branch = block.instructions.back();

   if (branch->opcode != aco_opcode::p_branch) {
      assert(!branch->operands.empty() && branch->operands[0].isTemp());
      set_needs_wqm(ctx, branch->operands[0].getTemp());
   }

   /* TODO: this sets more branch conditions to WQM than it needs to;
    * it should be enough to stop at the "exec mask top level" */
   if (block.kind & block_kind_top_level)
      return;

   for (unsigned pred_idx : block.logical_preds)
      mark_block_wqm(ctx, pred_idx);
}

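/* Determines for each instruction of the block whether it needs WQM or Exact
 * execution and propagates WQM requirements backwards through operands, phis
 * and branch conditions via the worklist. */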
void get_block_needs(wqm_ctx &ctx, exec_ctx &exec_ctx, Block* block)
{
   block_info& info = exec_ctx.info[block->index];

   std::vector<WQMState> instr_needs(block->instructions.size());

   if (block->kind & block_kind_top_level) {
      if (ctx.loop && ctx.wqm) {
         /* mark all break conditions as WQM */
         unsigned block_idx = block->index + 1;
         while (!(ctx.program->blocks[block_idx].kind & block_kind_top_level)) {
            if (ctx.program->blocks[block_idx].kind & block_kind_break)
               mark_block_wqm(ctx, block_idx);
            block_idx++;
         }
      } else if (ctx.loop && !ctx.wqm) {
         /* Ensure a branch never results in an exec mask with only helper
          * invocations (which can cause a loop to repeat infinitely if its
          * break branches are done in exact). */
         unsigned block_idx = block->index;
         do {
            if ((ctx.program->blocks[block_idx].kind & block_kind_branch))
               exec_ctx.info[block_idx].block_needs |= Exact_Branch;
            block_idx++;
         } while (!(ctx.program->blocks[block_idx].kind & block_kind_top_level));
      }

      ctx.loop = false;
      ctx.wqm = false;
   }

   for (int i = block->instructions.size() - 1; i >= 0; --i) {
      aco_ptr<Instruction>& instr = block->instructions[i];

      WQMState needs = needs_exact(instr) ? Exact : Unspecified;
      bool propagate_wqm = instr->opcode == aco_opcode::p_wqm;
      bool preserve_wqm = instr->opcode == aco_opcode::p_discard_if;
      bool pred_by_exec = pred_by_exec_mask(instr);
      for (const Definition& definition : instr->definitions) {
         if (!definition.isTemp())
            continue;
         const unsigned def = definition.tempId();
         ctx.defined_in[def] = block->index;
         if (needs == Unspecified && ctx.needs_wqm[def]) {
            needs = pred_by_exec ? WQM : Unspecified;
            propagate_wqm = true;
         }
      }

      if (propagate_wqm) {
         for (const Operand& op : instr->operands) {
            if (op.isTemp()) {
               set_needs_wqm(ctx, op.getTemp());
            }
         }
      } else if (preserve_wqm && info.block_needs & WQM) {
         needs = Preserve_WQM;
      }

      /* ensure the condition controlling the control flow for this phi is in WQM */
      if (needs == WQM && instr->opcode == aco_opcode::p_phi) {
         for (unsigned pred_idx : block->logical_preds)
            mark_block_wqm(ctx, pred_idx);
      }

      instr_needs[i] = needs;
      info.block_needs |= needs;
   }

   info.instr_needs = instr_needs;

   /* for "if (<cond>) <wqm code>" or "while (<cond>) <wqm code>",
    * <cond> should be computed in WQM */
   if (info.block_needs & WQM && !(block->kind & block_kind_top_level)) {
      for (unsigned pred_idx : block->logical_preds)
         mark_block_wqm(ctx, pred_idx);
      ctx.wqm = true;
   }
   if (block->kind & block_kind_loop_header)
      ctx.loop = true;
}

void calculate_wqm_needs(exec_ctx& exec_ctx)
{
   wqm_ctx ctx(exec_ctx.program);

   while (!ctx.worklist.empty()) {
      unsigned block_index = *std::prev(ctx.worklist.end());
      ctx.worklist.erase(std::prev(ctx.worklist.end()));

      get_block_needs(ctx, exec_ctx, &exec_ctx.program->blocks[block_index]);
   }

   uint8_t ever_again_needs = 0;
   for (int i = exec_ctx.program->blocks.size() - 1; i >= 0; i--) {
      exec_ctx.info[i].ever_again_needs = ever_again_needs;
      Block& block = exec_ctx.program->blocks[i];

      if (block.kind & block_kind_needs_lowering)
         exec_ctx.info[i].block_needs |= Exact;

      /* if discard is used somewhere in nested CF, we need to preserve the WQM mask */
      if ((block.kind & block_kind_discard ||
           block.kind & block_kind_uses_discard_if) &&
          ever_again_needs & WQM)
         exec_ctx.info[i].block_needs |= Preserve_WQM;

      ever_again_needs |= exec_ctx.info[i].block_needs & ~Exact_Branch;
      if (block.kind & block_kind_discard ||
          block.kind & block_kind_uses_discard_if ||
          block.kind & block_kind_uses_demote)
         ever_again_needs |= Exact;

      /* don't propagate WQM preservation further than the next top_level block */
      if (block.kind & block_kind_top_level)
         ever_again_needs &= ~Preserve_WQM;
      else
         exec_ctx.info[i].block_needs &= ~Preserve_WQM;
   }
   exec_ctx.handle_wqm = true;
}

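/* Makes the current exec mask a WQM mask: either computes s_wqm of the global
 * mask and pushes the result, or pops back to the WQM mask below the top. */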
void transition_to_WQM(exec_ctx& ctx, Builder bld, unsigned idx)
{
   if (ctx.info[idx].exec.back().second & mask_type_wqm)
      return;
   if (ctx.info[idx].exec.back().second & mask_type_global) {
      Temp exec_mask = ctx.info[idx].exec.back().first;
      exec_mask = bld.sop1(aco_opcode::s_wqm_b64, bld.def(s2, exec), bld.def(s1, scc), exec_mask);
      ctx.info[idx].exec.emplace_back(exec_mask, mask_type_global | mask_type_wqm);
      return;
   }
   /* otherwise, the WQM mask should be one below the current mask */
   ctx.info[idx].exec.pop_back();
   assert(ctx.info[idx].exec.back().second & mask_type_wqm);
   ctx.info[idx].exec.back().first = bld.pseudo(aco_opcode::p_parallelcopy, bld.def(s2, exec),
                                                ctx.info[idx].exec.back().first);
}

void transition_to_Exact(exec_ctx& ctx, Builder bld, unsigned idx)
{
   if (ctx.info[idx].exec.back().second & mask_type_exact)
      return;
   /* We can't remove the loop exec mask, because that can cause exec.size() to
    * be less than num_exec_masks. The loop exec mask also needs to be kept
    * around for various uses. */
   if ((ctx.info[idx].exec.back().second & mask_type_global) &&
       !(ctx.info[idx].exec.back().second & mask_type_loop)) {
      ctx.info[idx].exec.pop_back();
      assert(ctx.info[idx].exec.back().second & mask_type_exact);
      ctx.info[idx].exec.back().first = bld.pseudo(aco_opcode::p_parallelcopy, bld.def(s2, exec),
                                                   ctx.info[idx].exec.back().first);
      return;
   }
   /* otherwise, we create an exact mask and push to the stack */
   Temp wqm = ctx.info[idx].exec.back().first;
   Temp exact = bld.tmp(s2);
   wqm = bld.sop1(aco_opcode::s_and_saveexec_b64, bld.def(s2), bld.def(s1, scc),
                  bld.exec(Definition(exact)), ctx.info[idx].exec[0].first, bld.exec(wqm));
   ctx.info[idx].exec.back().first = wqm;
   ctx.info[idx].exec.emplace_back(exact, mask_type_exact);
}

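/* Emits the code needed at the top of a block: linear phis for the exec mask
 * stack at loop headers, loop exits and merge blocks, plus any WQM/Exact
 * transition the block needs. Returns the index of the first instruction of
 * the block that still has to be processed. */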
unsigned add_coupling_code(exec_ctx& ctx, Block* block,
                           std::vector<aco_ptr<Instruction>>& instructions)
{
   unsigned idx = block->index;
   Builder bld(ctx.program, &instructions);
   std::vector<unsigned>& preds = block->linear_preds;
   /* start block */
   if (idx == 0) {
      aco_ptr<Instruction>& startpgm = block->instructions[0];
      assert(startpgm->opcode == aco_opcode::p_startpgm);
      Temp exec_mask = startpgm->definitions.back().getTemp();
      bld.insert(std::move(startpgm));

      if (ctx.handle_wqm) {
         ctx.info[0].exec.emplace_back(exec_mask, mask_type_global | mask_type_exact | mask_type_initial);
         /* if this block only needs WQM, initialize already */
         if (ctx.info[0].block_needs == WQM)
            transition_to_WQM(ctx, bld, 0);
      } else {
         uint8_t mask = mask_type_global;
         if (ctx.program->needs_wqm) {
            exec_mask = bld.sop1(aco_opcode::s_wqm_b64, bld.def(s2, exec), bld.def(s1, scc), bld.exec(exec_mask));
            mask |= mask_type_wqm;
         } else {
            mask |= mask_type_exact;
         }
         ctx.info[0].exec.emplace_back(exec_mask, mask);
      }

      return 1;
   }
   /* loop entry block */
   if (block->kind & block_kind_loop_header) {
      assert(preds[0] == idx - 1);
      ctx.info[idx].exec = ctx.info[idx - 1].exec;
      loop_info& info = ctx.loop.back();
      while (ctx.info[idx].exec.size() > info.num_exec_masks)
         ctx.info[idx].exec.pop_back();

      /* create ssa names for outer exec masks */
      if (info.has_discard) {
         aco_ptr<Pseudo_instruction> phi;
         for (int i = 0; i < info.num_exec_masks - 1; i++) {
            phi.reset(create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, preds.size(), 1));
            phi->definitions[0] = bld.def(s2);
            phi->operands[0] = Operand(ctx.info[preds[0]].exec[i].first);
            ctx.info[idx].exec[i].first = bld.insert(std::move(phi));
         }
      }

      /* create ssa name for restore mask */
      if (info.has_divergent_break) {
         /* this phi might be trivial but ensures a parallelcopy on the loop header */
         aco_ptr<Pseudo_instruction> phi{create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, preds.size(), 1)};
         phi->definitions[0] = bld.def(s2);
         phi->operands[0] = Operand(ctx.info[preds[0]].exec[info.num_exec_masks - 1].first);
         ctx.info[idx].exec.back().first = bld.insert(std::move(phi));
      }

      /* create ssa name for loop active mask */
      aco_ptr<Pseudo_instruction> phi{create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, preds.size(), 1)};
      if (info.has_divergent_continue)
         phi->definitions[0] = bld.def(s2);
      else
         phi->definitions[0] = bld.def(s2, exec);
      phi->operands[0] = Operand(ctx.info[preds[0]].exec.back().first);
      Temp loop_active = bld.insert(std::move(phi));

      if (info.has_divergent_break) {
         uint8_t mask_type = (ctx.info[idx].exec.back().second & (mask_type_wqm | mask_type_exact)) | mask_type_loop;
         ctx.info[idx].exec.emplace_back(loop_active, mask_type);
      } else {
         ctx.info[idx].exec.back().first = loop_active;
         ctx.info[idx].exec.back().second |= mask_type_loop;
      }

      /* create a parallelcopy to move the active mask to exec */
      unsigned i = 0;
      if (info.has_divergent_continue) {
         while (block->instructions[i]->opcode != aco_opcode::p_logical_start) {
            bld.insert(std::move(block->instructions[i]));
            i++;
         }
         uint8_t mask_type = ctx.info[idx].exec.back().second & (mask_type_wqm | mask_type_exact);
         ctx.info[idx].exec.emplace_back(bld.pseudo(aco_opcode::p_parallelcopy, bld.def(s2, exec),
                                                    ctx.info[idx].exec.back().first), mask_type);
      }

      return i;
   }
   /* loop exit block */
   if (block->kind & block_kind_loop_exit) {
      Block* header = ctx.loop.back().loop_header;
      loop_info& info = ctx.loop.back();

      for (ASSERTED unsigned pred : preds)
         assert(ctx.info[pred].exec.size() >= info.num_exec_masks);

      /* fill the loop header phis */
      std::vector<unsigned>& header_preds = header->linear_preds;
      int k = 0;
      if (info.has_discard) {
         while (k < info.num_exec_masks - 1) {
            aco_ptr<Instruction>& phi = header->instructions[k];
            assert(phi->opcode == aco_opcode::p_linear_phi);
            for (unsigned i = 1; i < phi->operands.size(); i++)
               phi->operands[i] = Operand(ctx.info[header_preds[i]].exec[k].first);
            k++;
         }
      }
      aco_ptr<Instruction>& phi = header->instructions[k++];
      assert(phi->opcode == aco_opcode::p_linear_phi);
      for (unsigned i = 1; i < phi->operands.size(); i++)
         phi->operands[i] = Operand(ctx.info[header_preds[i]].exec[info.num_exec_masks - 1].first);

      if (info.has_divergent_break) {
         aco_ptr<Instruction>& phi = header->instructions[k];
         assert(phi->opcode == aco_opcode::p_linear_phi);
         for (unsigned i = 1; i < phi->operands.size(); i++)
            phi->operands[i] = Operand(ctx.info[header_preds[i]].exec[info.num_exec_masks].first);
      }

      assert(!(block->kind & block_kind_top_level) || info.num_exec_masks <= 2);

      /* create the loop exit phis if not trivial */
      for (unsigned k = 0; k < info.num_exec_masks; k++) {
         Temp same = ctx.info[preds[0]].exec[k].first;
         uint8_t type = ctx.info[header_preds[0]].exec[k].second;
         bool trivial = true;

         for (unsigned i = 1; i < preds.size() && trivial; i++) {
            if (ctx.info[preds[i]].exec[k].first != same)
               trivial = false;
         }

         if (trivial) {
            ctx.info[idx].exec.emplace_back(same, type);
         } else {
            /* create phi for loop footer */
            aco_ptr<Pseudo_instruction> phi{create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, preds.size(), 1)};
            phi->definitions[0] = bld.def(s2);
            for (unsigned i = 0; i < phi->operands.size(); i++)
               phi->operands[i] = Operand(ctx.info[preds[i]].exec[k].first);
            ctx.info[idx].exec.emplace_back(bld.insert(std::move(phi)), type);
         }
      }
      assert(ctx.info[idx].exec.size() == info.num_exec_masks);
      /* create a parallelcopy to move the live mask to exec */
      unsigned i = 0;
      while (block->instructions[i]->opcode != aco_opcode::p_logical_start) {
         bld.insert(std::move(block->instructions[i]));
         i++;
      }

      if (ctx.handle_wqm) {
         if (block->kind & block_kind_top_level && ctx.info[idx].exec.size() == 2) {
            if ((ctx.info[idx].block_needs | ctx.info[idx].ever_again_needs) == 0 ||
                (ctx.info[idx].block_needs | ctx.info[idx].ever_again_needs) == Exact) {
               ctx.info[idx].exec.back().second |= mask_type_global;
               transition_to_Exact(ctx, bld, idx);
               ctx.handle_wqm = false;
            }
         }
         if (ctx.info[idx].block_needs == WQM)
            transition_to_WQM(ctx, bld, idx);
         else if (ctx.info[idx].block_needs == Exact)
            transition_to_Exact(ctx, bld, idx);
      }

      ctx.info[idx].exec.back().first = bld.pseudo(aco_opcode::p_parallelcopy, bld.def(s2, exec),
                                                   ctx.info[idx].exec.back().first);

      ctx.loop.pop_back();
      return i;
   }
   if (preds.size() == 1) {
      ctx.info[idx].exec = ctx.info[preds[0]].exec;
   } else {
      assert(preds.size() == 2);
      /* if one of the predecessors ends in exact mask, we pop it from stack */
      unsigned num_exec_masks = std::min(ctx.info[preds[0]].exec.size(),
                                         ctx.info[preds[1]].exec.size());
      if (block->kind & block_kind_top_level && !(block->kind & block_kind_merge))
         num_exec_masks = std::min(num_exec_masks, 2u);

      /* create phis for diverged exec masks */
      for (unsigned i = 0; i < num_exec_masks; i++) {
         bool in_exec = i == num_exec_masks - 1 && !(block->kind & block_kind_merge);
         if (!in_exec && ctx.info[preds[0]].exec[i].first == ctx.info[preds[1]].exec[i].first) {
            assert(ctx.info[preds[0]].exec[i].second == ctx.info[preds[1]].exec[i].second);
            ctx.info[idx].exec.emplace_back(ctx.info[preds[0]].exec[i]);
            continue;
         }

         Temp phi = bld.pseudo(aco_opcode::p_linear_phi, in_exec ? bld.def(s2, exec) : bld.def(s2),
                               ctx.info[preds[0]].exec[i].first,
                               ctx.info[preds[1]].exec[i].first);
         uint8_t mask_type = ctx.info[preds[0]].exec[i].second & ctx.info[preds[1]].exec[i].second;
         ctx.info[idx].exec.emplace_back(phi, mask_type);
      }
   }
   unsigned i = 0;
   while (block->instructions[i]->opcode == aco_opcode::p_phi ||
          block->instructions[i]->opcode == aco_opcode::p_linear_phi) {
      bld.insert(std::move(block->instructions[i]));
      i++;
   }

   if (block->kind & block_kind_merge)
      ctx.info[idx].exec.pop_back();

   if (block->kind & block_kind_top_level && ctx.info[idx].exec.size() == 3) {
      assert(ctx.info[idx].exec.back().second == mask_type_exact);
      assert(block->kind & block_kind_merge);
      ctx.info[idx].exec.pop_back();
   }

   /* try to satisfy the block's needs */
   if (ctx.handle_wqm) {
      if (block->kind & block_kind_top_level && ctx.info[idx].exec.size() == 2) {
         if ((ctx.info[idx].block_needs | ctx.info[idx].ever_again_needs) == 0 ||
             (ctx.info[idx].block_needs | ctx.info[idx].ever_again_needs) == Exact) {
            ctx.info[idx].exec.back().second |= mask_type_global;
            transition_to_Exact(ctx, bld, idx);
            ctx.handle_wqm = false;
         }
      }
      if (ctx.info[idx].block_needs == WQM)
         transition_to_WQM(ctx, bld, idx);
      else if (ctx.info[idx].block_needs == Exact)
         transition_to_Exact(ctx, bld, idx);
   }

   if (block->kind & block_kind_merge) {
      Temp restore = ctx.info[idx].exec.back().first;
      ctx.info[idx].exec.back().first = bld.pseudo(aco_opcode::p_parallelcopy, bld.def(s2, exec), restore);
   }

   return i;
}

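/* Lowers p_fs_buffer_store_smem to s_buffer_store_dword*. If the current
 * exact mask might be empty, the store is neutralized by selecting an
 * out-of-bounds offset (UINT32_MAX). */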
void lower_fs_buffer_store_smem(Builder& bld, bool need_check, aco_ptr<Instruction>& instr, Temp cur_exec)
{
   Operand offset = instr->operands[1];
   if (need_check) {
      /* if exec is zero, then use UINT32_MAX as an offset and make this store a no-op */
      Temp nonempty = bld.sopc(aco_opcode::s_cmp_lg_u64, bld.def(s1, scc), cur_exec, Operand(0u));

      if (offset.isLiteral())
         offset = bld.sop1(aco_opcode::s_mov_b32, bld.def(s1), offset);

      offset = bld.sop2(aco_opcode::s_cselect_b32, bld.hint_m0(bld.def(s1)),
                        offset, Operand(UINT32_MAX), bld.scc(nonempty));
   } else if (offset.isConstant() && offset.constantValue() > 0xFFFFF) {
      offset = bld.sop1(aco_opcode::s_mov_b32, bld.hint_m0(bld.def(s1)), offset);
   }
   if (!offset.isConstant())
      offset.setFixed(m0);

   switch (instr->operands[2].size()) {
   case 1:
      instr->opcode = aco_opcode::s_buffer_store_dword;
      break;
   case 2:
      instr->opcode = aco_opcode::s_buffer_store_dwordx2;
      break;
   case 4:
      instr->opcode = aco_opcode::s_buffer_store_dwordx4;
      break;
   default:
      unreachable("Invalid SMEM buffer store size");
   }
   instr->operands[1] = offset;
   /* as_uniform() needs to be done here so it's done in exact mode and helper
    * lanes don't contribute. */
   instr->operands[2] = Operand(bld.as_uniform(instr->operands[2]));
}

void process_instructions(exec_ctx& ctx, Block* block,
                          std::vector<aco_ptr<Instruction>>& instructions,
                          unsigned idx)
{
   WQMState state;
   if (ctx.info[block->index].exec.back().second & mask_type_wqm)
      state = WQM;
   else {
      assert(!ctx.handle_wqm || ctx.info[block->index].exec.back().second & mask_type_exact);
      state = Exact;
   }

   /* if the block doesn't need both, WQM and Exact, we can skip processing the instructions */
   bool process = (ctx.handle_wqm &&
                   (ctx.info[block->index].block_needs & state) !=
                   (ctx.info[block->index].block_needs & (WQM | Exact))) ||
                  block->kind & block_kind_uses_discard_if ||
                  block->kind & block_kind_uses_demote ||
                  block->kind & block_kind_needs_lowering;
   if (!process) {
      std::vector<aco_ptr<Instruction>>::iterator it = std::next(block->instructions.begin(), idx);
      instructions.insert(instructions.end(),
                          std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(it),
                          std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(block->instructions.end()));
      return;
   }

   Builder bld(ctx.program, &instructions);
   for (; idx < block->instructions.size(); idx++) {
      aco_ptr<Instruction> instr = std::move(block->instructions[idx]);

      WQMState needs = ctx.handle_wqm ? ctx.info[block->index].instr_needs[idx] : Unspecified;

      if (instr->opcode == aco_opcode::p_discard_if) {
         if (ctx.info[block->index].block_needs & Preserve_WQM) {
            assert(block->kind & block_kind_top_level);
            transition_to_WQM(ctx, bld, block->index);
            ctx.info[block->index].exec.back().second &= ~mask_type_global;
         }
         int num = ctx.info[block->index].exec.size();

         Operand cond = instr->operands[0];
         for (int i = num - 1; i >= 0; i--) {
            Instruction *andn2 = bld.sop2(aco_opcode::s_andn2_b64, bld.def(s2), bld.def(s1, scc),
                                          ctx.info[block->index].exec[i].first, cond);
            if (i == num - 1) {
               andn2->operands[0].setFixed(exec);
               andn2->definitions[0].setFixed(exec);
            }
            if (i == 0) {
               instr->opcode = aco_opcode::p_exit_early_if;
               instr->operands[0] = bld.scc(andn2->definitions[1].getTemp());
            }
            ctx.info[block->index].exec[i].first = andn2->definitions[0].getTemp();
         }
         assert(!ctx.handle_wqm || (ctx.info[block->index].exec[0].second & mask_type_wqm) == 0);

      } else if (needs == WQM && state != WQM) {
         transition_to_WQM(ctx, bld, block->index);
         state = WQM;
      } else if (needs == Exact && state != Exact) {
         transition_to_Exact(ctx, bld, block->index);
         state = Exact;
      }
      if (instr->opcode == aco_opcode::p_is_helper || instr->opcode == aco_opcode::p_load_helper) {
         Definition dst = instr->definitions[0];
         if (state == Exact) {
            instr.reset(create_instruction<SOP1_instruction>(aco_opcode::s_mov_b64, Format::SOP1, 1, 1));
            instr->operands[0] = Operand(0u);
            instr->definitions[0] = dst;
         } else {
            std::pair<Temp, uint8_t>& exact_mask = ctx.info[block->index].exec[0];
            if (instr->opcode == aco_opcode::p_load_helper &&
                !(ctx.info[block->index].exec[0].second & mask_type_initial)) {
               /* find last initial exact mask */
               for (int i = block->index; i >= 0; i--) {
                  if (ctx.program->blocks[i].kind & block_kind_top_level &&
                      ctx.info[i].exec[0].second & mask_type_initial) {
                     exact_mask = ctx.info[i].exec[0];
                     break;
                  }
               }
            }

            assert(instr->opcode == aco_opcode::p_is_helper || exact_mask.second & mask_type_initial);
            assert(exact_mask.second & mask_type_exact);

            instr.reset(create_instruction<SOP2_instruction>(aco_opcode::s_andn2_b64, Format::SOP2, 2, 2));
            instr->operands[0] = Operand(ctx.info[block->index].exec.back().first); /* current exec */
            instr->operands[1] = Operand(exact_mask.first);
            instr->definitions[0] = dst;
            instr->definitions[1] = bld.def(s1, scc);
         }
      } else if (instr->opcode == aco_opcode::p_demote_to_helper) {
         /* turn demote into discard_if with only exact masks */
         assert((ctx.info[block->index].exec[0].second & (mask_type_exact | mask_type_global)) == (mask_type_exact | mask_type_global));
         ctx.info[block->index].exec[0].second &= ~mask_type_initial;

         int num = 0;
         Temp cond;
         if (instr->operands.empty()) {
            /* transition to exact and set exec to zero */
            Temp old_exec = ctx.info[block->index].exec.back().first;
            Temp new_exec = bld.tmp(s2);
            cond = bld.sop1(aco_opcode::s_and_saveexec_b64, bld.def(s2), bld.def(s1, scc),
                            bld.exec(Definition(new_exec)), Operand(0u), bld.exec(old_exec));
            if (ctx.info[block->index].exec.back().second & mask_type_exact) {
               ctx.info[block->index].exec.back().first = new_exec;
            } else {
               ctx.info[block->index].exec.back().first = cond;
               ctx.info[block->index].exec.emplace_back(new_exec, mask_type_exact);
            }
         } else {
            /* demote_if: transition to exact */
            transition_to_Exact(ctx, bld, block->index);
            assert(instr->operands[0].isTemp());
            cond = instr->operands[0].getTemp();
            num = 1;
         }

         num += ctx.info[block->index].exec.size() - 1;
         for (int i = num - 1; i >= 0; i--) {
            if (ctx.info[block->index].exec[i].second & mask_type_exact) {
               Instruction *andn2 = bld.sop2(aco_opcode::s_andn2_b64, bld.def(s2), bld.def(s1, scc),
                                             ctx.info[block->index].exec[i].first, cond);
               if (i == (int)ctx.info[block->index].exec.size() - 1) {
                  andn2->operands[0].setFixed(exec);
                  andn2->definitions[0].setFixed(exec);
               }
               if (i == 0) {
                  instr->opcode = aco_opcode::p_exit_early_if;
                  instr->operands[0] = bld.scc(andn2->definitions[1].getTemp());
               }
               ctx.info[block->index].exec[i].first = andn2->definitions[0].getTemp();
            } else {
               assert(i != 0);
            }
         }
         state = Exact;
      } else if (instr->opcode == aco_opcode::p_fs_buffer_store_smem) {
         bool need_check = ctx.info[block->index].exec.size() != 1 &&
                           !(ctx.info[block->index].exec[ctx.info[block->index].exec.size() - 2].second & Exact);
         lower_fs_buffer_store_smem(bld, need_check, instr, ctx.info[block->index].exec.back().first);
      }

      bld.insert(std::move(instr));
   }
}

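/* Lowers the branch at the end of the block: saves and restores exec for
 * divergent ifs, reduces the outer masks on breaks and continues, and turns
 * discards into early exits. */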
void add_branch_code(exec_ctx& ctx, Block* block)
{
   unsigned idx = block->index;
   Builder bld(ctx.program, block);

   if (idx == ctx.program->blocks.size() - 1)
      return;

   /* try to disable wqm handling */
   if (ctx.handle_wqm && block->kind & block_kind_top_level) {
      if (ctx.info[idx].exec.size() == 3) {
         assert(ctx.info[idx].exec[1].second == mask_type_wqm);
         ctx.info[idx].exec.pop_back();
      }
      assert(ctx.info[idx].exec.size() <= 2);

      if (ctx.info[idx].ever_again_needs == 0 ||
          ctx.info[idx].ever_again_needs == Exact) {
         /* transition to Exact */
         aco_ptr<Instruction> branch = std::move(block->instructions.back());
         block->instructions.pop_back();
         ctx.info[idx].exec.back().second |= mask_type_global;
         transition_to_Exact(ctx, bld, idx);
         bld.insert(std::move(branch));
         ctx.handle_wqm = false;

      } else if (ctx.info[idx].block_needs & Preserve_WQM) {
         /* transition to WQM and remove global flag */
         aco_ptr<Instruction> branch = std::move(block->instructions.back());
         block->instructions.pop_back();
         transition_to_WQM(ctx, bld, idx);
         ctx.info[idx].exec.back().second &= ~mask_type_global;
         bld.insert(std::move(branch));
      }
   }
   if (block->kind & block_kind_loop_preheader) {
      /* collect information about the succeeding loop */
      bool has_divergent_break = false;
      bool has_divergent_continue = false;
      bool has_discard = false;
      uint8_t needs = 0;
      unsigned loop_nest_depth = ctx.program->blocks[idx + 1].loop_nest_depth;

      for (unsigned i = idx + 1; ctx.program->blocks[i].loop_nest_depth >= loop_nest_depth; i++) {
         Block& loop_block = ctx.program->blocks[i];
         needs |= ctx.info[i].block_needs;

         if (loop_block.kind & block_kind_uses_discard_if ||
             loop_block.kind & block_kind_discard ||
             loop_block.kind & block_kind_uses_demote)
            has_discard = true;
         if (loop_block.loop_nest_depth != loop_nest_depth)
            continue;

         if (loop_block.kind & block_kind_uniform)
            continue;
         else if (loop_block.kind & block_kind_break)
            has_divergent_break = true;
         else if (loop_block.kind & block_kind_continue)
            has_divergent_continue = true;
      }

      if (ctx.handle_wqm) {
         if (needs & WQM) {
            aco_ptr<Instruction> branch = std::move(block->instructions.back());
            block->instructions.pop_back();
            transition_to_WQM(ctx, bld, idx);
            bld.insert(std::move(branch));
         } else {
            aco_ptr<Instruction> branch = std::move(block->instructions.back());
            block->instructions.pop_back();
            transition_to_Exact(ctx, bld, idx);
            bld.insert(std::move(branch));
         }
      }

      unsigned num_exec_masks = ctx.info[idx].exec.size();
      if (block->kind & block_kind_top_level)
         num_exec_masks = std::min(num_exec_masks, 2u);

      ctx.loop.emplace_back(&ctx.program->blocks[block->linear_succs[0]],
                            num_exec_masks,
                            needs,
                            has_divergent_break,
                            has_divergent_continue,
                            has_discard);
   }
   if (block->kind & block_kind_discard) {

      assert(block->instructions.back()->format == Format::PSEUDO_BRANCH);
      aco_ptr<Instruction> branch = std::move(block->instructions.back());
      block->instructions.pop_back();

      /* create a discard_if() instruction with the exec mask as condition */
      unsigned num = 0;
      if (ctx.loop.size()) {
         /* if we're in a loop, only discard from the outer exec masks */
         num = ctx.loop.back().num_exec_masks;
      } else {
         num = ctx.info[idx].exec.size() - 1;
      }

      Temp old_exec = ctx.info[idx].exec.back().first;
      Temp new_exec = bld.tmp(s2);
      Temp cond = bld.sop1(aco_opcode::s_and_saveexec_b64, bld.def(s2), bld.def(s1, scc),
                           bld.exec(Definition(new_exec)), Operand(0u), bld.exec(old_exec));
      ctx.info[idx].exec.back().first = new_exec;

      for (int i = num - 1; i >= 0; i--) {
         Instruction *andn2 = bld.sop2(aco_opcode::s_andn2_b64, bld.def(s2), bld.def(s1, scc),
                                       ctx.info[block->index].exec[i].first, cond);
         if (i == 0)
            bld.pseudo(aco_opcode::p_exit_early_if, bld.scc(andn2->definitions[1].getTemp()));
         ctx.info[block->index].exec[i].first = andn2->definitions[0].getTemp();
      }
      assert(!ctx.handle_wqm || (ctx.info[block->index].exec[0].second & mask_type_wqm) == 0);

      if ((block->kind & (block_kind_break | block_kind_uniform)) == block_kind_break)
         ctx.info[idx].exec.back().first = cond;
      bld.insert(std::move(branch));
      /* no return here as it can be followed by a divergent break */
   }
   if (block->kind & block_kind_continue_or_break) {
      assert(ctx.program->blocks[ctx.program->blocks[block->linear_succs[1]].linear_succs[0]].kind & block_kind_loop_header);
      assert(ctx.program->blocks[ctx.program->blocks[block->linear_succs[0]].linear_succs[0]].kind & block_kind_loop_exit);
      assert(block->instructions.back()->opcode == aco_opcode::p_branch);
      block->instructions.pop_back();

      if (ctx.info[idx].exec.back().second & mask_type_loop) {
         bld.branch(aco_opcode::p_cbranch_nz, bld.exec(ctx.info[idx].exec.back().first), block->linear_succs[1], block->linear_succs[0]);
      } else {
         Temp cond = Temp();
         for (int exec_idx = ctx.info[idx].exec.size() - 1; exec_idx >= 0; exec_idx--) {
            if (ctx.info[idx].exec[exec_idx].second & mask_type_loop) {
               cond = bld.sopc(aco_opcode::s_cmp_lg_u64, bld.def(s1, scc), ctx.info[idx].exec[exec_idx].first, Operand(0u));
               break;
            }
         }
         assert(cond != Temp());

         bld.branch(aco_opcode::p_cbranch_nz, bld.scc(cond), block->linear_succs[1], block->linear_succs[0]);
      }
      return;
   }
   if (block->kind & block_kind_uniform) {
      Pseudo_branch_instruction* branch = static_cast<Pseudo_branch_instruction*>(block->instructions.back().get());
      if (branch->opcode == aco_opcode::p_branch) {
         branch->target[0] = block->linear_succs[0];
      } else {
         branch->target[0] = block->linear_succs[1];
         branch->target[1] = block->linear_succs[0];
      }
      return;
   }
   if (block->kind & block_kind_branch) {

      if (ctx.handle_wqm &&
          ctx.info[idx].exec.size() >= 2 &&
          ctx.info[idx].exec.back().second == mask_type_exact &&
          !(ctx.info[idx].block_needs & Exact_Branch) &&
          ctx.info[idx].exec[ctx.info[idx].exec.size() - 2].second & mask_type_wqm) {
         /* return to wqm before branching */
         ctx.info[idx].exec.pop_back();
      }

      // orig = s_and_saveexec_b64
      assert(block->linear_succs.size() == 2);
      assert(block->instructions.back()->opcode == aco_opcode::p_cbranch_z);
      Temp cond = block->instructions.back()->operands[0].getTemp();
      block->instructions.pop_back();

      if (ctx.info[idx].block_needs & Exact_Branch)
         transition_to_Exact(ctx, bld, idx);

      Temp current_exec = ctx.info[idx].exec.back().first;
      uint8_t mask_type = ctx.info[idx].exec.back().second & (mask_type_wqm | mask_type_exact);

      Temp then_mask = bld.tmp(s2);
      Temp old_exec = bld.sop1(aco_opcode::s_and_saveexec_b64, bld.def(s2), bld.def(s1, scc),
                               bld.exec(Definition(then_mask)), cond, bld.exec(current_exec));

      ctx.info[idx].exec.back().first = old_exec;

      /* add next current exec to the stack */
      ctx.info[idx].exec.emplace_back(then_mask, mask_type);

      bld.branch(aco_opcode::p_cbranch_z, bld.exec(then_mask), block->linear_succs[1], block->linear_succs[0]);
      return;
   }
   if (block->kind & block_kind_invert) {
      // exec = s_andn2_b64 (original_exec, exec)
      assert(block->instructions.back()->opcode == aco_opcode::p_cbranch_nz);
      block->instructions.pop_back();
      Temp then_mask = ctx.info[idx].exec.back().first;
      uint8_t mask_type = ctx.info[idx].exec.back().second;
      ctx.info[idx].exec.pop_back();
      Temp orig_exec = ctx.info[idx].exec.back().first;
      Temp else_mask = bld.sop2(aco_opcode::s_andn2_b64, bld.def(s2, exec),
                                bld.def(s1, scc), orig_exec, bld.exec(then_mask));

      /* add next current exec to the stack */
      ctx.info[idx].exec.emplace_back(else_mask, mask_type);

      bld.branch(aco_opcode::p_cbranch_z, bld.exec(else_mask), block->linear_succs[1], block->linear_succs[0]);
      return;
   }
   if (block->kind & block_kind_break) {
      // loop_mask = s_andn2_b64 (loop_mask, exec)
      assert(block->instructions.back()->opcode == aco_opcode::p_branch);
      block->instructions.pop_back();

      Temp current_exec = ctx.info[idx].exec.back().first;
      Temp cond = Temp();
      for (int exec_idx = ctx.info[idx].exec.size() - 2; exec_idx >= 0; exec_idx--) {
         cond = bld.tmp(s1);
         Temp exec_mask = ctx.info[idx].exec[exec_idx].first;
         exec_mask = bld.sop2(aco_opcode::s_andn2_b64, bld.def(s2), bld.scc(Definition(cond)),
                              exec_mask, current_exec);
         ctx.info[idx].exec[exec_idx].first = exec_mask;
         if (ctx.info[idx].exec[exec_idx].second & mask_type_loop)
            break;
      }

      /* check if the successor is the merge block, otherwise set exec to 0 */
      // TODO: this could be done better by directly branching to the merge block
      unsigned succ_idx = ctx.program->blocks[block->linear_succs[1]].linear_succs[0];
      Block& succ = ctx.program->blocks[succ_idx];
      if (!(succ.kind & block_kind_invert || succ.kind & block_kind_merge)) {
         ctx.info[idx].exec.back().first = bld.sop1(aco_opcode::s_mov_b64, bld.def(s2, exec), Operand(0u));
      }

      bld.branch(aco_opcode::p_cbranch_nz, bld.scc(cond), block->linear_succs[1], block->linear_succs[0]);
      return;
   }
   if (block->kind & block_kind_continue) {
      assert(block->instructions.back()->opcode == aco_opcode::p_branch);
      block->instructions.pop_back();

      Temp current_exec = ctx.info[idx].exec.back().first;
      Temp cond = Temp();
      for (int exec_idx = ctx.info[idx].exec.size() - 2; exec_idx >= 0; exec_idx--) {
         if (ctx.info[idx].exec[exec_idx].second & mask_type_loop)
            break;
         cond = bld.tmp(s1);
         Temp exec_mask = ctx.info[idx].exec[exec_idx].first;
         exec_mask = bld.sop2(aco_opcode::s_andn2_b64, bld.def(s2), bld.scc(Definition(cond)),
                              exec_mask, bld.exec(current_exec));
         ctx.info[idx].exec[exec_idx].first = exec_mask;
      }
      assert(cond != Temp());

      /* check if the successor is the merge block, otherwise set exec to 0 */
      // TODO: this could be done better by directly branching to the merge block
      unsigned succ_idx = ctx.program->blocks[block->linear_succs[1]].linear_succs[0];
      Block& succ = ctx.program->blocks[succ_idx];
      if (!(succ.kind & block_kind_invert || succ.kind & block_kind_merge)) {
         ctx.info[idx].exec.back().first = bld.sop1(aco_opcode::s_mov_b64, bld.def(s2, exec), Operand(0u));
      }

      bld.branch(aco_opcode::p_cbranch_nz, bld.scc(cond), block->linear_succs[1], block->linear_succs[0]);
      return;
   }
}

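/* Processes one block: coupling code first, then the instructions, then the
 * branch, and finally records the live-out exec mask. */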
void process_block(exec_ctx& ctx, Block* block)
{
   std::vector<aco_ptr<Instruction>> instructions;
   instructions.reserve(block->instructions.size());

   unsigned idx = add_coupling_code(ctx, block, instructions);

   assert(block->index != ctx.program->blocks.size() - 1 ||
          ctx.info[block->index].exec.size() <= 2);

   process_instructions(ctx, block, instructions, idx);

   block->instructions = std::move(instructions);

   add_branch_code(ctx, block);

   block->live_out_exec = ctx.info[block->index].exec.back().first;
}

} /* end namespace */

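/* Entry point of the pass: WQM needs are only calculated when the program
 * mixes WQM and exact instructions; otherwise a single mask suffices. */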
void insert_exec_mask(Program *program)
{
   exec_ctx ctx(program);

   if (program->needs_wqm && program->needs_exact)
      calculate_wqm_needs(ctx);

   for (Block& block : program->blocks)
      process_block(ctx, &block);

}

}