/*
 * Copyright © 2019 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "aco_ir.h"
#include "aco_builder.h"
#include "util/u_math.h"

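/* Editor's overview, inferred from the pass below: fragment shaders on AMD
 * hardware sometimes have to run in whole quad mode (WQM), where all four
 * lanes of a 2x2 quad are enabled so that derivative computations work,
 * while side effects (stores, exports, discards) must use the exact set of
 * truly active lanes. This pass tracks, per block, a stack of exec masks
 * (one entry per nested divergent control-flow construct) and inserts the
 * SALU code that transitions between WQM and Exact where needed. */
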
namespace aco {

namespace {

enum WQMState : uint8_t {
   Unspecified = 0,
   Exact = 1 << 0,
   WQM = 1 << 1, /* with control flow applied */
   Preserve_WQM = 1 << 2,
   Exact_Branch = 1 << 3,
};

enum mask_type : uint8_t {
   mask_type_global = 1 << 0,
   mask_type_exact = 1 << 1,
   mask_type_wqm = 1 << 2,
   mask_type_loop = 1 << 3, /* active lanes of a loop */
   mask_type_initial = 1 << 4, /* initially active lanes */
};

struct wqm_ctx {
   Program* program;
   /* state for WQM propagation */
   std::set<unsigned> worklist;
   std::vector<uint16_t> defined_in;
   std::vector<bool> needs_wqm;
   std::vector<bool> branch_wqm; /* true if the branch condition in this block should be in wqm */
   bool loop;
   bool wqm;
   wqm_ctx(Program* program) : program(program),
                               defined_in(program->peekAllocationId(), 0xFFFF),
                               needs_wqm(program->peekAllocationId()),
                               branch_wqm(program->blocks.size()),
                               loop(false),
                               wqm(false)
   {
      for (unsigned i = 0; i < program->blocks.size(); i++)
         worklist.insert(i);
   }
};

struct loop_info {
   Block* loop_header;
   uint16_t num_exec_masks;
   uint8_t needs;
   bool has_divergent_break;
   bool has_divergent_continue;
   bool has_discard; /* has a discard or demote */
   loop_info(Block* b, uint16_t num, uint8_t needs, bool breaks, bool cont, bool discard) :
             loop_header(b), num_exec_masks(num), needs(needs), has_divergent_break(breaks),
             has_divergent_continue(cont), has_discard(discard) {}
};

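/* Per-block state. 'exec' is a stack of (mask, mask_type flags) pairs:
 * entry 0 is the outermost (global) mask, the top of the stack is the mask
 * currently supposed to live in the exec register, and each nested divergent
 * if/loop pushes one entry. */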
struct block_info {
   std::vector<std::pair<Temp, uint8_t>> exec;
   std::vector<WQMState> instr_needs;
   uint8_t block_needs;
   uint8_t ever_again_needs;
   bool logical_end_wqm;
   /* more... */
};

struct exec_ctx {
   Program *program;
   std::vector<block_info> info;
   std::vector<loop_info> loop;
   bool handle_wqm = false;
   exec_ctx(Program *program) : program(program), info(program->blocks.size()) {}
};

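/* Whether the instruction is implicitly predicated by the exec mask, i.e.
 * whether inactive lanes are masked out for it. SALU instructions only
 * depend on exec if they read it explicitly; SMEM, barriers, spills/reloads
 * and lane instructions never do. */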
bool pred_by_exec_mask(aco_ptr<Instruction>& instr) {
   if (instr->isSALU())
      return instr->reads_exec();
   if (instr->format == Format::SMEM)
      return false;
   if (instr->format == Format::PSEUDO_BARRIER)
      return false;

   if (instr->format == Format::PSEUDO) {
      switch (instr->opcode) {
      case aco_opcode::p_create_vector:
         return instr->definitions[0].getTemp().type() == RegType::vgpr;
      case aco_opcode::p_extract_vector:
      case aco_opcode::p_split_vector:
         return instr->operands[0].getTemp().type() == RegType::vgpr;
      case aco_opcode::p_spill:
      case aco_opcode::p_reload:
         return false;
      default:
         break;
      }
   }

   if (instr->opcode == aco_opcode::v_readlane_b32 ||
       instr->opcode == aco_opcode::v_readlane_b32_e64 ||
       instr->opcode == aco_opcode::v_writelane_b32 ||
       instr->opcode == aco_opcode::v_writelane_b32_e64)
      return false;

   return true;
}

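/* Whether the instruction must run with the exact mask: memory instructions
 * that opted out of WQM (disable_wqm), exports and the FS buffer-store
 * pseudo, since helper invocations must not cause visible side effects. */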
bool needs_exact(aco_ptr<Instruction>& instr) {
   if (instr->format == Format::MUBUF) {
      MUBUF_instruction *mubuf = static_cast<MUBUF_instruction *>(instr.get());
      return mubuf->disable_wqm;
   } else if (instr->format == Format::MTBUF) {
      MTBUF_instruction *mtbuf = static_cast<MTBUF_instruction *>(instr.get());
      return mtbuf->disable_wqm;
   } else if (instr->format == Format::MIMG) {
      MIMG_instruction *mimg = static_cast<MIMG_instruction *>(instr.get());
      return mimg->disable_wqm;
   } else if (instr->format == Format::FLAT || instr->format == Format::GLOBAL) {
      FLAT_instruction *flat = static_cast<FLAT_instruction *>(instr.get());
      return flat->disable_wqm;
   } else {
      return instr->format == Format::EXP || instr->opcode == aco_opcode::p_fs_buffer_store_smem;
   }
}

void set_needs_wqm(wqm_ctx &ctx, Temp tmp)
{
   if (!ctx.needs_wqm[tmp.id()]) {
      ctx.needs_wqm[tmp.id()] = true;
      if (ctx.defined_in[tmp.id()] != 0xFFFF)
         ctx.worklist.insert(ctx.defined_in[tmp.id()]);
   }
}

void mark_block_wqm(wqm_ctx &ctx, unsigned block_idx)
{
   if (ctx.branch_wqm[block_idx])
      return;

   ctx.branch_wqm[block_idx] = true;
   Block& block = ctx.program->blocks[block_idx];
   aco_ptr<Instruction>& branch = block.instructions.back();

   if (branch->opcode != aco_opcode::p_branch) {
      assert(!branch->operands.empty() && branch->operands[0].isTemp());
      set_needs_wqm(ctx, branch->operands[0].getTemp());
   }
   /* TODO: this sets more branch conditions to WQM than necessary;
    * it should be enough to stop at the "exec mask top level" */
   if (block.kind & block_kind_top_level)
      return;

   for (unsigned pred_idx : block.logical_preds)
      mark_block_wqm(ctx, pred_idx);
}

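/* Computes, for each instruction of the block, whether it needs WQM, Exact
 * or doesn't care. This is a backwards scan: a definition feeding a WQM use
 * makes the defining instruction and, transitively, its operands WQM too.
 * The result is accumulated in info.block_needs. */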
void get_block_needs(wqm_ctx &ctx, exec_ctx &exec_ctx, Block* block)
{
   block_info& info = exec_ctx.info[block->index];

   std::vector<WQMState> instr_needs(block->instructions.size());

   if (block->kind & block_kind_top_level) {
      if (ctx.loop && ctx.wqm) {
         /* mark all break conditions as WQM */
         unsigned block_idx = block->index + 1;
         while (!(ctx.program->blocks[block_idx].kind & block_kind_top_level)) {
            if (ctx.program->blocks[block_idx].kind & block_kind_break)
               mark_block_wqm(ctx, block_idx);
            block_idx++;
         }
      } else if (ctx.loop && !ctx.wqm) {
         /* Ensure a branch never results in an exec mask with only helper
          * invocations (which can cause a loop to repeat infinitely if its
          * break branches are done in exact). */
         unsigned block_idx = block->index;
         do {
            if ((ctx.program->blocks[block_idx].kind & block_kind_branch))
               exec_ctx.info[block_idx].block_needs |= Exact_Branch;
            block_idx++;
         } while (!(ctx.program->blocks[block_idx].kind & block_kind_top_level));
      }

      ctx.loop = false;
      ctx.wqm = false;
   }

   for (int i = block->instructions.size() - 1; i >= 0; --i) {
      aco_ptr<Instruction>& instr = block->instructions[i];

      WQMState needs = needs_exact(instr) ? Exact : Unspecified;
      bool propagate_wqm = instr->opcode == aco_opcode::p_wqm;
      bool preserve_wqm = instr->opcode == aco_opcode::p_discard_if;
      bool pred_by_exec = pred_by_exec_mask(instr);
      for (const Definition& definition : instr->definitions) {
         if (!definition.isTemp())
            continue;
         const unsigned def = definition.tempId();
         ctx.defined_in[def] = block->index;
         if (needs == Unspecified && ctx.needs_wqm[def]) {
            needs = pred_by_exec ? WQM : Unspecified;
            propagate_wqm = true;
         }
      }

      if (propagate_wqm) {
         for (const Operand& op : instr->operands) {
            if (op.isTemp()) {
               set_needs_wqm(ctx, op.getTemp());
            }
         }
      } else if (preserve_wqm && info.block_needs & WQM) {
         needs = Preserve_WQM;
      }

      /* ensure the condition controlling the control flow for this phi is in WQM */
      if (needs == WQM && instr->opcode == aco_opcode::p_phi) {
         for (unsigned pred_idx : block->logical_preds) {
            mark_block_wqm(ctx, pred_idx);
            exec_ctx.info[pred_idx].logical_end_wqm = true;
            ctx.worklist.insert(pred_idx);
         }
      }

      if ((instr->opcode == aco_opcode::p_logical_end && info.logical_end_wqm) ||
          instr->opcode == aco_opcode::p_wqm) {
         assert(needs != Exact);
         needs = WQM;
      }

      instr_needs[i] = needs;
      info.block_needs |= needs;
   }

   info.instr_needs = instr_needs;

   /* for "if (<cond>) <wqm code>" or "while (<cond>) <wqm code>",
    * <cond> should be computed in WQM */
   if (info.block_needs & WQM && !(block->kind & block_kind_top_level)) {
      for (unsigned pred_idx : block->logical_preds)
         mark_block_wqm(ctx, pred_idx);
      ctx.wqm = true;
   }
   if (block->kind & block_kind_loop_header)
      ctx.loop = true;
}

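/* Runs get_block_needs() to a fixpoint over the worklist, then walks the
 * blocks backwards so that every block also knows what any later block will
 * ever need again (ever_again_needs). */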
void calculate_wqm_needs(exec_ctx& exec_ctx)
{
   wqm_ctx ctx(exec_ctx.program);

   while (!ctx.worklist.empty()) {
      unsigned block_index = *std::prev(ctx.worklist.end());
      ctx.worklist.erase(std::prev(ctx.worklist.end()));

      get_block_needs(ctx, exec_ctx, &exec_ctx.program->blocks[block_index]);
   }

   uint8_t ever_again_needs = 0;
   for (int i = exec_ctx.program->blocks.size() - 1; i >= 0; i--) {
      exec_ctx.info[i].ever_again_needs = ever_again_needs;
      Block& block = exec_ctx.program->blocks[i];

      if (block.kind & block_kind_needs_lowering)
         exec_ctx.info[i].block_needs |= Exact;

      /* if discard is used somewhere in nested CF, we need to preserve the WQM mask */
      if ((block.kind & block_kind_discard ||
           block.kind & block_kind_uses_discard_if) &&
          ever_again_needs & WQM)
         exec_ctx.info[i].block_needs |= Preserve_WQM;

      ever_again_needs |= exec_ctx.info[i].block_needs & ~Exact_Branch;
      if (block.kind & block_kind_discard ||
          block.kind & block_kind_uses_discard_if ||
          block.kind & block_kind_uses_demote)
         ever_again_needs |= Exact;

      /* don't propagate WQM preservation further than the next top_level block */
      if (block.kind & block_kind_top_level)
         ever_again_needs &= ~Preserve_WQM;
      else
         exec_ctx.info[i].block_needs &= ~Preserve_WQM;
   }
   exec_ctx.handle_wqm = true;
}

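/* The two helpers below switch the top of the exec mask stack between WQM
 * and Exact. Each either computes the new mask from a global mask (s_wqm,
 * or s_and_saveexec with the outermost mask) and pushes it, or pops back to
 * an existing mask of the requested kind directly below the top. */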
void transition_to_WQM(exec_ctx& ctx, Builder bld, unsigned idx)
{
   if (ctx.info[idx].exec.back().second & mask_type_wqm)
      return;
   if (ctx.info[idx].exec.back().second & mask_type_global) {
      Temp exec_mask = ctx.info[idx].exec.back().first;
      exec_mask = bld.sop1(Builder::s_wqm, bld.def(bld.lm, exec), bld.def(s1, scc), exec_mask);
      ctx.info[idx].exec.emplace_back(exec_mask, mask_type_global | mask_type_wqm);
      return;
   }
   /* otherwise, the WQM mask should be one below the current mask */
   ctx.info[idx].exec.pop_back();
   assert(ctx.info[idx].exec.back().second & mask_type_wqm);
   assert(ctx.info[idx].exec.back().first.size() == bld.lm.size());
   ctx.info[idx].exec.back().first = bld.pseudo(aco_opcode::p_parallelcopy, bld.def(bld.lm, exec),
                                                ctx.info[idx].exec.back().first);
}

void transition_to_Exact(exec_ctx& ctx, Builder bld, unsigned idx)
{
   if (ctx.info[idx].exec.back().second & mask_type_exact)
      return;
   /* We can't remove the loop exec mask, because that can cause exec.size() to
    * be less than num_exec_masks. The loop exec mask also needs to be kept
    * around for various uses. */
   if ((ctx.info[idx].exec.back().second & mask_type_global) &&
       !(ctx.info[idx].exec.back().second & mask_type_loop)) {
      ctx.info[idx].exec.pop_back();
      assert(ctx.info[idx].exec.back().second & mask_type_exact);
      assert(ctx.info[idx].exec.back().first.size() == bld.lm.size());
      ctx.info[idx].exec.back().first = bld.pseudo(aco_opcode::p_parallelcopy, bld.def(bld.lm, exec),
                                                   ctx.info[idx].exec.back().first);
      return;
   }
   /* otherwise, we create an exact mask and push to the stack */
   Temp wqm = ctx.info[idx].exec.back().first;
   Temp exact = bld.tmp(bld.lm);
   wqm = bld.sop1(Builder::s_and_saveexec, bld.def(bld.lm), bld.def(s1, scc),
                  bld.exec(Definition(exact)), ctx.info[idx].exec[0].first, bld.exec(wqm));
   ctx.info[idx].exec.back().first = wqm;
   ctx.info[idx].exec.emplace_back(exact, mask_type_exact);
}

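/* Emits the code needed at the start of a block to reconstruct the exec mask
 * stack: the initial mask in the start block, linear phis at loop headers
 * and exits, and phis/parallelcopies at merge blocks. For example, after
 * "if (cond) { ... }" the merge block pops the 'then' mask and copies the
 * parent mask back into exec. Returns the index of the first instruction of
 * the block that still has to be processed. */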
unsigned add_coupling_code(exec_ctx& ctx, Block* block,
                           std::vector<aco_ptr<Instruction>>& instructions)
{
   unsigned idx = block->index;
   Builder bld(ctx.program, &instructions);
   std::vector<unsigned>& preds = block->linear_preds;

   /* start block */
   if (idx == 0) {
      aco_ptr<Instruction>& startpgm = block->instructions[0];
      assert(startpgm->opcode == aco_opcode::p_startpgm);
      Temp exec_mask = startpgm->definitions.back().getTemp();
      bld.insert(std::move(startpgm));

      /* exec seems to need to be manually initialized with combined shaders */
      if (util_bitcount(ctx.program->stage & sw_mask) > 1) {
         bld.sop1(Builder::s_mov, bld.exec(Definition(exec_mask)), bld.lm == s2 ? Operand(UINT64_MAX) : Operand(UINT32_MAX));
         instructions[0]->definitions.pop_back();
      }

      if (ctx.handle_wqm) {
         ctx.info[0].exec.emplace_back(exec_mask, mask_type_global | mask_type_exact | mask_type_initial);
         /* if this block only needs WQM, initialize already */
         if (ctx.info[0].block_needs == WQM)
            transition_to_WQM(ctx, bld, 0);
      } else {
         uint8_t mask = mask_type_global;
         if (ctx.program->needs_wqm) {
            exec_mask = bld.sop1(Builder::s_wqm, bld.def(bld.lm, exec), bld.def(s1, scc), bld.exec(exec_mask));
            mask |= mask_type_wqm;
         } else {
            mask |= mask_type_exact;
         }
         ctx.info[0].exec.emplace_back(exec_mask, mask);
      }

      return 1;
   }

   /* loop entry block */
   if (block->kind & block_kind_loop_header) {
      assert(preds[0] == idx - 1);
      ctx.info[idx].exec = ctx.info[idx - 1].exec;
      loop_info& info = ctx.loop.back();
      while (ctx.info[idx].exec.size() > info.num_exec_masks)
         ctx.info[idx].exec.pop_back();

      /* create ssa names for outer exec masks */
      if (info.has_discard) {
         aco_ptr<Pseudo_instruction> phi;
         for (int i = 0; i < info.num_exec_masks - 1; i++) {
            phi.reset(create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, preds.size(), 1));
            phi->definitions[0] = bld.def(bld.lm);
            phi->operands[0] = Operand(ctx.info[preds[0]].exec[i].first);
            ctx.info[idx].exec[i].first = bld.insert(std::move(phi));
         }
      }

      /* create ssa name for restore mask */
      if (info.has_divergent_break) {
         /* this phi might be trivial but ensures a parallelcopy on the loop header */
         aco_ptr<Pseudo_instruction> phi{create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, preds.size(), 1)};
         phi->definitions[0] = bld.def(bld.lm);
         phi->operands[0] = Operand(ctx.info[preds[0]].exec[info.num_exec_masks - 1].first);
         ctx.info[idx].exec.back().first = bld.insert(std::move(phi));
      }

      /* create ssa name for loop active mask */
      aco_ptr<Pseudo_instruction> phi{create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, preds.size(), 1)};
      if (info.has_divergent_continue)
         phi->definitions[0] = bld.def(bld.lm);
      else
         phi->definitions[0] = bld.def(bld.lm, exec);
      phi->operands[0] = Operand(ctx.info[preds[0]].exec.back().first);
      Temp loop_active = bld.insert(std::move(phi));

      if (info.has_divergent_break) {
         uint8_t mask_type = (ctx.info[idx].exec.back().second & (mask_type_wqm | mask_type_exact)) | mask_type_loop;
         ctx.info[idx].exec.emplace_back(loop_active, mask_type);
      } else {
         ctx.info[idx].exec.back().first = loop_active;
         ctx.info[idx].exec.back().second |= mask_type_loop;
      }

      /* create a parallelcopy to move the active mask to exec */
      unsigned i = 0;
      if (info.has_divergent_continue) {
         while (block->instructions[i]->opcode != aco_opcode::p_logical_start) {
            bld.insert(std::move(block->instructions[i]));
            i++;
         }
         uint8_t mask_type = ctx.info[idx].exec.back().second & (mask_type_wqm | mask_type_exact);
         assert(ctx.info[idx].exec.back().first.size() == bld.lm.size());
         ctx.info[idx].exec.emplace_back(bld.pseudo(aco_opcode::p_parallelcopy, bld.def(bld.lm, exec),
                                                    ctx.info[idx].exec.back().first), mask_type);
      }

      return i;
   }

   /* loop exit block */
   if (block->kind & block_kind_loop_exit) {
      Block* header = ctx.loop.back().loop_header;
      loop_info& info = ctx.loop.back();

      for (ASSERTED unsigned pred : preds)
         assert(ctx.info[pred].exec.size() >= info.num_exec_masks);

      /* fill the loop header phis */
      std::vector<unsigned>& header_preds = header->linear_preds;
      int k = 0;
      if (info.has_discard) {
         while (k < info.num_exec_masks - 1) {
            aco_ptr<Instruction>& phi = header->instructions[k];
            assert(phi->opcode == aco_opcode::p_linear_phi);
            for (unsigned i = 1; i < phi->operands.size(); i++)
               phi->operands[i] = Operand(ctx.info[header_preds[i]].exec[k].first);
            k++;
         }
      }
      aco_ptr<Instruction>& phi = header->instructions[k++];
      assert(phi->opcode == aco_opcode::p_linear_phi);
      for (unsigned i = 1; i < phi->operands.size(); i++)
         phi->operands[i] = Operand(ctx.info[header_preds[i]].exec[info.num_exec_masks - 1].first);

      if (info.has_divergent_break) {
         aco_ptr<Instruction>& phi = header->instructions[k];
         assert(phi->opcode == aco_opcode::p_linear_phi);
         for (unsigned i = 1; i < phi->operands.size(); i++)
            phi->operands[i] = Operand(ctx.info[header_preds[i]].exec[info.num_exec_masks].first);
      }

      assert(!(block->kind & block_kind_top_level) || info.num_exec_masks <= 2);

      /* create the loop exit phis if not trivial */
      for (unsigned k = 0; k < info.num_exec_masks; k++) {
         Temp same = ctx.info[preds[0]].exec[k].first;
         uint8_t type = ctx.info[header_preds[0]].exec[k].second;
         bool trivial = true;

         for (unsigned i = 1; i < preds.size() && trivial; i++) {
            if (ctx.info[preds[i]].exec[k].first != same)
               trivial = false;
         }

         if (trivial) {
            ctx.info[idx].exec.emplace_back(same, type);
         } else {
            /* create phi for loop footer */
            aco_ptr<Pseudo_instruction> phi{create_instruction<Pseudo_instruction>(aco_opcode::p_linear_phi, Format::PSEUDO, preds.size(), 1)};
            phi->definitions[0] = bld.def(bld.lm);
            for (unsigned i = 0; i < phi->operands.size(); i++)
               phi->operands[i] = Operand(ctx.info[preds[i]].exec[k].first);
            ctx.info[idx].exec.emplace_back(bld.insert(std::move(phi)), type);
         }
      }
      assert(ctx.info[idx].exec.size() == info.num_exec_masks);

      /* create a parallelcopy to move the live mask to exec */
      unsigned i = 0;
      while (block->instructions[i]->opcode != aco_opcode::p_logical_start) {
         bld.insert(std::move(block->instructions[i]));
         i++;
      }

      if (ctx.handle_wqm) {
         if (block->kind & block_kind_top_level && ctx.info[idx].exec.size() == 2) {
            if ((ctx.info[idx].block_needs | ctx.info[idx].ever_again_needs) == 0 ||
                (ctx.info[idx].block_needs | ctx.info[idx].ever_again_needs) == Exact) {
               ctx.info[idx].exec.back().second |= mask_type_global;
               transition_to_Exact(ctx, bld, idx);
               ctx.handle_wqm = false;
            }
         }
         if (ctx.info[idx].block_needs == WQM)
            transition_to_WQM(ctx, bld, idx);
         else if (ctx.info[idx].block_needs == Exact)
            transition_to_Exact(ctx, bld, idx);
      }

      assert(ctx.info[idx].exec.back().first.size() == bld.lm.size());
      ctx.info[idx].exec.back().first = bld.pseudo(aco_opcode::p_parallelcopy, bld.def(bld.lm, exec),
                                                   ctx.info[idx].exec.back().first);

      ctx.loop.pop_back();
      return i;
   }

   if (preds.size() == 1) {
      ctx.info[idx].exec = ctx.info[preds[0]].exec;
   } else {
      assert(preds.size() == 2);
      /* if one of the predecessors ends in an exact mask, we pop it from the stack */
      unsigned num_exec_masks = std::min(ctx.info[preds[0]].exec.size(),
                                         ctx.info[preds[1]].exec.size());
      if (block->kind & block_kind_top_level && !(block->kind & block_kind_merge))
         num_exec_masks = std::min(num_exec_masks, 2u);

      /* create phis for diverged exec masks */
      for (unsigned i = 0; i < num_exec_masks; i++) {
         bool in_exec = i == num_exec_masks - 1 && !(block->kind & block_kind_merge);
         if (!in_exec && ctx.info[preds[0]].exec[i].first == ctx.info[preds[1]].exec[i].first) {
            assert(ctx.info[preds[0]].exec[i].second == ctx.info[preds[1]].exec[i].second);
            ctx.info[idx].exec.emplace_back(ctx.info[preds[0]].exec[i]);
            continue;
         }

         Temp phi = bld.pseudo(aco_opcode::p_linear_phi, in_exec ? bld.def(bld.lm, exec) : bld.def(bld.lm),
                               ctx.info[preds[0]].exec[i].first,
                               ctx.info[preds[1]].exec[i].first);
         uint8_t mask_type = ctx.info[preds[0]].exec[i].second & ctx.info[preds[1]].exec[i].second;
         ctx.info[idx].exec.emplace_back(phi, mask_type);
      }
   }

   unsigned i = 0;
   while (block->instructions[i]->opcode == aco_opcode::p_phi ||
          block->instructions[i]->opcode == aco_opcode::p_linear_phi) {
      bld.insert(std::move(block->instructions[i]));
      i++;
   }

   if (block->kind & block_kind_merge)
      ctx.info[idx].exec.pop_back();

   if (block->kind & block_kind_top_level && ctx.info[idx].exec.size() == 3) {
      assert(ctx.info[idx].exec.back().second == mask_type_exact);
      assert(block->kind & block_kind_merge);
      ctx.info[idx].exec.pop_back();
   }

   /* try to satisfy the block's needs */
   if (ctx.handle_wqm) {
      if (block->kind & block_kind_top_level && ctx.info[idx].exec.size() == 2) {
         if ((ctx.info[idx].block_needs | ctx.info[idx].ever_again_needs) == 0 ||
             (ctx.info[idx].block_needs | ctx.info[idx].ever_again_needs) == Exact) {
            ctx.info[idx].exec.back().second |= mask_type_global;
            transition_to_Exact(ctx, bld, idx);
            ctx.handle_wqm = false;
         }
      }
      if (ctx.info[idx].block_needs == WQM)
         transition_to_WQM(ctx, bld, idx);
      else if (ctx.info[idx].block_needs == Exact)
         transition_to_Exact(ctx, bld, idx);
   }

   if (block->kind & block_kind_merge) {
      Temp restore = ctx.info[idx].exec.back().first;
      assert(restore.size() == bld.lm.size());
      ctx.info[idx].exec.back().first = bld.pseudo(aco_opcode::p_parallelcopy, bld.def(bld.lm, exec), restore);
   }

   return i;
}

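/* Lowers p_fs_buffer_store_smem into a real SMEM buffer store. SMEM is not
 * predicated by exec, so when exec may have become empty (need_check), the
 * offset is replaced with UINT32_MAX to turn the store into a no-op. */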
void lower_fs_buffer_store_smem(Builder& bld, bool need_check, aco_ptr<Instruction>& instr, Temp cur_exec)
{
   Operand offset = instr->operands[1];
   if (need_check) {
      /* if exec is zero, then use UINT32_MAX as an offset and make this store a no-op */
      Temp nonempty = bld.sopc(Builder::s_cmp_lg, bld.def(s1, scc), cur_exec, Operand(0u));

      if (offset.isLiteral())
         offset = bld.sop1(aco_opcode::s_mov_b32, bld.def(s1), offset);

      offset = bld.sop2(aco_opcode::s_cselect_b32, bld.hint_m0(bld.def(s1)),
                        offset, Operand(UINT32_MAX), bld.scc(nonempty));
   } else if (offset.isConstant() && offset.constantValue() > 0xFFFFF) {
      offset = bld.sop1(aco_opcode::s_mov_b32, bld.hint_m0(bld.def(s1)), offset);
   }
   if (!offset.isConstant())
      offset.setFixed(m0);

   switch (instr->operands[2].size()) {
   case 1:
      instr->opcode = aco_opcode::s_buffer_store_dword;
      break;
   case 2:
      instr->opcode = aco_opcode::s_buffer_store_dwordx2;
      break;
   case 4:
      instr->opcode = aco_opcode::s_buffer_store_dwordx4;
      break;
   default:
      unreachable("Invalid SMEM buffer store size");
   }
   instr->operands[1] = offset;
   /* as_uniform() needs to be done here so it's done in exact mode and helper
    * lanes don't contribute. */
   instr->operands[2] = Operand(bld.as_uniform(instr->operands[2]));
}

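/* Copies the block's instructions into 'instructions', inserting WQM/Exact
 * transitions wherever an instruction's needs differ from the current state
 * and lowering the discard/demote/helper-invocation pseudos on the way. */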
void process_instructions(exec_ctx& ctx, Block* block,
                          std::vector<aco_ptr<Instruction>>& instructions,
                          unsigned idx)
{
   WQMState state;
   if (ctx.info[block->index].exec.back().second & mask_type_wqm)
      state = WQM;
   else {
      assert(!ctx.handle_wqm || ctx.info[block->index].exec.back().second & mask_type_exact);
      state = Exact;
   }

   /* if the block doesn't need both WQM and Exact, we can skip processing the instructions */
   bool process = (ctx.handle_wqm &&
                   (ctx.info[block->index].block_needs & state) !=
                   (ctx.info[block->index].block_needs & (WQM | Exact))) ||
                  block->kind & block_kind_uses_discard_if ||
                  block->kind & block_kind_uses_demote ||
                  block->kind & block_kind_needs_lowering;
   if (!process) {
      std::vector<aco_ptr<Instruction>>::iterator it = std::next(block->instructions.begin(), idx);
      instructions.insert(instructions.end(),
                          std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(it),
                          std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(block->instructions.end()));
      return;
   }

   Builder bld(ctx.program, &instructions);

   for (; idx < block->instructions.size(); idx++) {
      aco_ptr<Instruction> instr = std::move(block->instructions[idx]);

      WQMState needs = ctx.handle_wqm ? ctx.info[block->index].instr_needs[idx] : Unspecified;

      if (instr->opcode == aco_opcode::p_discard_if) {
         if (ctx.info[block->index].block_needs & Preserve_WQM) {
            assert(block->kind & block_kind_top_level);
            transition_to_WQM(ctx, bld, block->index);
            ctx.info[block->index].exec.back().second &= ~mask_type_global;
         }
         int num = ctx.info[block->index].exec.size();
         assert(num);
         Operand cond = instr->operands[0];
         for (int i = num - 1; i >= 0; i--) {
            Instruction *andn2 = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc),
                                          ctx.info[block->index].exec[i].first, cond);
            if (i == num - 1) {
               andn2->operands[0].setFixed(exec);
               andn2->definitions[0].setFixed(exec);
            }
            if (i == 0) {
               instr->opcode = aco_opcode::p_exit_early_if;
               instr->operands[0] = bld.scc(andn2->definitions[1].getTemp());
            }
            ctx.info[block->index].exec[i].first = andn2->definitions[0].getTemp();
         }
         assert(!ctx.handle_wqm || (ctx.info[block->index].exec[0].second & mask_type_wqm) == 0);

      } else if (needs == WQM && state != WQM) {
         transition_to_WQM(ctx, bld, block->index);
         state = WQM;
      } else if (needs == Exact && state != Exact) {
         transition_to_Exact(ctx, bld, block->index);
         state = Exact;
      }

      if (instr->opcode == aco_opcode::p_is_helper || instr->opcode == aco_opcode::p_load_helper) {
         Definition dst = instr->definitions[0];
         assert(dst.size() == bld.lm.size());
         if (state == Exact) {
            instr.reset(create_instruction<SOP1_instruction>(bld.w64or32(Builder::s_mov), Format::SOP1, 1, 1));
            instr->operands[0] = Operand(0u);
            instr->definitions[0] = dst;
         } else {
            std::pair<Temp, uint8_t>& exact_mask = ctx.info[block->index].exec[0];
            if (instr->opcode == aco_opcode::p_load_helper &&
                !(ctx.info[block->index].exec[0].second & mask_type_initial)) {
               /* find last initial exact mask */
               for (int i = block->index; i >= 0; i--) {
                  if (ctx.program->blocks[i].kind & block_kind_top_level &&
                      ctx.info[i].exec[0].second & mask_type_initial) {
                     exact_mask = ctx.info[i].exec[0];
                     break;
                  }
               }
            }

            assert(instr->opcode == aco_opcode::p_is_helper || exact_mask.second & mask_type_initial);
            assert(exact_mask.second & mask_type_exact);

            instr.reset(create_instruction<SOP2_instruction>(bld.w64or32(Builder::s_andn2), Format::SOP2, 2, 2));
            instr->operands[0] = Operand(ctx.info[block->index].exec.back().first); /* current exec */
            instr->operands[1] = Operand(exact_mask.first);
            instr->definitions[0] = dst;
            instr->definitions[1] = bld.def(s1, scc);
         }
      } else if (instr->opcode == aco_opcode::p_demote_to_helper) {
         /* turn demote into discard_if with only exact masks */
         assert((ctx.info[block->index].exec[0].second & (mask_type_exact | mask_type_global)) == (mask_type_exact | mask_type_global));
         ctx.info[block->index].exec[0].second &= ~mask_type_initial;

         int num;
         Temp cond, exit_cond;
         if (instr->operands[0].isConstant()) {
            assert(instr->operands[0].constantValue() == -1u);
            /* transition to exact and set exec to zero */
            Temp old_exec = ctx.info[block->index].exec.back().first;
            Temp new_exec = bld.tmp(bld.lm);
            exit_cond = bld.tmp(s1);
            cond = bld.sop1(Builder::s_and_saveexec, bld.def(bld.lm), bld.scc(Definition(exit_cond)),
                            bld.exec(Definition(new_exec)), Operand(0u), bld.exec(old_exec));

            num = ctx.info[block->index].exec.size() - 2;
            if (ctx.info[block->index].exec.back().second & mask_type_exact) {
               ctx.info[block->index].exec.back().first = new_exec;
            } else {
               ctx.info[block->index].exec.back().first = cond;
               ctx.info[block->index].exec.emplace_back(new_exec, mask_type_exact);
            }
         } else {
            /* demote_if: transition to exact */
            transition_to_Exact(ctx, bld, block->index);
            assert(instr->operands[0].isTemp());
            cond = instr->operands[0].getTemp();
            num = ctx.info[block->index].exec.size() - 1;
         }

         for (int i = num; i >= 0; i--) {
            if (ctx.info[block->index].exec[i].second & mask_type_exact) {
               Instruction *andn2 = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc),
                                             ctx.info[block->index].exec[i].first, cond);
               if (i == (int)ctx.info[block->index].exec.size() - 1) {
                  andn2->operands[0].setFixed(exec);
                  andn2->definitions[0].setFixed(exec);
               }

               ctx.info[block->index].exec[i].first = andn2->definitions[0].getTemp();
               exit_cond = andn2->definitions[1].getTemp();
            } else {
               assert(i != 0);
            }
         }
         instr->opcode = aco_opcode::p_exit_early_if;
         instr->operands[0] = bld.scc(exit_cond);
         state = Exact;

      } else if (instr->opcode == aco_opcode::p_fs_buffer_store_smem) {
         bool need_check = ctx.info[block->index].exec.size() != 1 &&
                           !(ctx.info[block->index].exec[ctx.info[block->index].exec.size() - 2].second & mask_type_exact);
         lower_fs_buffer_store_smem(bld, need_check, instr, ctx.info[block->index].exec.back().first);
      }

      bld.insert(std::move(instr));
   }
}

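/* Rewrites the branch at the end of the block according to the block kind:
 * divergent branches save the old exec mask and move the 'then' mask into
 * exec, invert blocks compute the 'else' mask with s_andn2, and break/
 * continue blocks subtract the currently active lanes from the outer masks. */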
void add_branch_code(exec_ctx& ctx, Block* block)
{
   unsigned idx = block->index;
   Builder bld(ctx.program, block);

   if (idx == ctx.program->blocks.size() - 1)
      return;

   /* try to disable wqm handling */
   if (ctx.handle_wqm && block->kind & block_kind_top_level) {
      if (ctx.info[idx].exec.size() == 3) {
         assert(ctx.info[idx].exec[1].second == mask_type_wqm);
         ctx.info[idx].exec.pop_back();
      }
      assert(ctx.info[idx].exec.size() <= 2);

      if (ctx.info[idx].ever_again_needs == 0 ||
          ctx.info[idx].ever_again_needs == Exact) {
         /* transition to Exact */
         aco_ptr<Instruction> branch = std::move(block->instructions.back());
         block->instructions.pop_back();
         ctx.info[idx].exec.back().second |= mask_type_global;
         transition_to_Exact(ctx, bld, idx);
         bld.insert(std::move(branch));
         ctx.handle_wqm = false;

      } else if (ctx.info[idx].block_needs & Preserve_WQM) {
         /* transition to WQM and remove global flag */
         aco_ptr<Instruction> branch = std::move(block->instructions.back());
         block->instructions.pop_back();
         transition_to_WQM(ctx, bld, idx);
         ctx.info[idx].exec.back().second &= ~mask_type_global;
         bld.insert(std::move(branch));
      }
   }

   if (block->kind & block_kind_loop_preheader) {
      /* collect information about the succeeding loop */
      bool has_divergent_break = false;
      bool has_divergent_continue = false;
      bool has_discard = false;
      uint8_t needs = 0;
      unsigned loop_nest_depth = ctx.program->blocks[idx + 1].loop_nest_depth;

      for (unsigned i = idx + 1; ctx.program->blocks[i].loop_nest_depth >= loop_nest_depth; i++) {
         Block& loop_block = ctx.program->blocks[i];
         needs |= ctx.info[i].block_needs;

         if (loop_block.kind & block_kind_uses_discard_if ||
             loop_block.kind & block_kind_discard ||
             loop_block.kind & block_kind_uses_demote)
            has_discard = true;
         if (loop_block.loop_nest_depth != loop_nest_depth)
            continue;

         if (loop_block.kind & block_kind_uniform)
            continue;
         else if (loop_block.kind & block_kind_break)
            has_divergent_break = true;
         else if (loop_block.kind & block_kind_continue)
            has_divergent_continue = true;
      }

      if (ctx.handle_wqm) {
         if (needs & WQM) {
            aco_ptr<Instruction> branch = std::move(block->instructions.back());
            block->instructions.pop_back();
            transition_to_WQM(ctx, bld, idx);
            bld.insert(std::move(branch));
         } else {
            aco_ptr<Instruction> branch = std::move(block->instructions.back());
            block->instructions.pop_back();
            transition_to_Exact(ctx, bld, idx);
            bld.insert(std::move(branch));
         }
      }

      unsigned num_exec_masks = ctx.info[idx].exec.size();
      if (block->kind & block_kind_top_level)
         num_exec_masks = std::min(num_exec_masks, 2u);

      ctx.loop.emplace_back(&ctx.program->blocks[block->linear_succs[0]],
                            num_exec_masks,
                            needs,
                            has_divergent_break,
                            has_divergent_continue,
                            has_discard);
   }

   if (block->kind & block_kind_discard) {

      assert(block->instructions.back()->format == Format::PSEUDO_BRANCH);
      aco_ptr<Instruction> branch = std::move(block->instructions.back());
      block->instructions.pop_back();

      /* create a discard_if() instruction with the exec mask as condition */
      unsigned num = 0;
      if (ctx.loop.size()) {
         /* if we're in a loop, only discard from the outer exec masks */
         num = ctx.loop.back().num_exec_masks;
      } else {
         num = ctx.info[idx].exec.size() - 1;
      }

      Temp old_exec = ctx.info[idx].exec.back().first;
      Temp new_exec = bld.tmp(bld.lm);
      Temp cond = bld.sop1(Builder::s_and_saveexec, bld.def(bld.lm), bld.def(s1, scc),
                           bld.exec(Definition(new_exec)), Operand(0u), bld.exec(old_exec));
      ctx.info[idx].exec.back().first = new_exec;

      for (int i = num - 1; i >= 0; i--) {
         Instruction *andn2 = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.def(s1, scc),
                                       ctx.info[block->index].exec[i].first, cond);
         if (i == (int)ctx.info[idx].exec.size() - 1)
            andn2->definitions[0].setFixed(exec);
         if (i == 0)
            bld.pseudo(aco_opcode::p_exit_early_if, bld.scc(andn2->definitions[1].getTemp()));
         ctx.info[block->index].exec[i].first = andn2->definitions[0].getTemp();
      }
      assert(!ctx.handle_wqm || (ctx.info[block->index].exec[0].second & mask_type_wqm) == 0);

      if ((block->kind & (block_kind_break | block_kind_uniform)) == block_kind_break)
         ctx.info[idx].exec.back().first = cond;
      bld.insert(std::move(branch));
      /* no return here as it can be followed by a divergent break */
   }

   if (block->kind & block_kind_continue_or_break) {
      assert(ctx.program->blocks[ctx.program->blocks[block->linear_succs[1]].linear_succs[0]].kind & block_kind_loop_header);
      assert(ctx.program->blocks[ctx.program->blocks[block->linear_succs[0]].linear_succs[0]].kind & block_kind_loop_exit);
      assert(block->instructions.back()->opcode == aco_opcode::p_branch);
      block->instructions.pop_back();

      while (!(ctx.info[idx].exec.back().second & mask_type_loop))
         ctx.info[idx].exec.pop_back();

      ctx.info[idx].exec.back().first = bld.pseudo(aco_opcode::p_parallelcopy, bld.def(bld.lm, exec), ctx.info[idx].exec.back().first);
      bld.branch(aco_opcode::p_cbranch_nz, bld.exec(ctx.info[idx].exec.back().first), block->linear_succs[1], block->linear_succs[0]);
      return;
   }

   if (block->kind & block_kind_uniform) {
      Pseudo_branch_instruction* branch = static_cast<Pseudo_branch_instruction*>(block->instructions.back().get());
      if (branch->opcode == aco_opcode::p_branch) {
         branch->target[0] = block->linear_succs[0];
      } else {
         branch->target[0] = block->linear_succs[1];
         branch->target[1] = block->linear_succs[0];
      }
      return;
   }

   if (block->kind & block_kind_branch) {

      if (ctx.handle_wqm &&
          ctx.info[idx].exec.size() >= 2 &&
          ctx.info[idx].exec.back().second == mask_type_exact &&
          !(ctx.info[idx].block_needs & Exact_Branch) &&
          ctx.info[idx].exec[ctx.info[idx].exec.size() - 2].second & mask_type_wqm) {
         /* return to wqm before branching */
         ctx.info[idx].exec.pop_back();
      }

      // orig = s_and_saveexec_b64
      assert(block->linear_succs.size() == 2);
      assert(block->instructions.back()->opcode == aco_opcode::p_cbranch_z);
      Temp cond = block->instructions.back()->operands[0].getTemp();
      block->instructions.pop_back();

      if (ctx.info[idx].block_needs & Exact_Branch)
         transition_to_Exact(ctx, bld, idx);

      Temp current_exec = ctx.info[idx].exec.back().first;
      uint8_t mask_type = ctx.info[idx].exec.back().second & (mask_type_wqm | mask_type_exact);

      Temp then_mask = bld.tmp(bld.lm);
      Temp old_exec = bld.sop1(Builder::s_and_saveexec, bld.def(bld.lm), bld.def(s1, scc),
                               bld.exec(Definition(then_mask)), cond, bld.exec(current_exec));

      ctx.info[idx].exec.back().first = old_exec;

      /* add next current exec to the stack */
      ctx.info[idx].exec.emplace_back(then_mask, mask_type);

      bld.branch(aco_opcode::p_cbranch_z, bld.exec(then_mask), block->linear_succs[1], block->linear_succs[0]);
      return;
   }

   if (block->kind & block_kind_invert) {
      // exec = s_andn2_b64 (original_exec, exec)
      assert(block->instructions.back()->opcode == aco_opcode::p_cbranch_nz);
      block->instructions.pop_back();
      Temp then_mask = ctx.info[idx].exec.back().first;
      uint8_t mask_type = ctx.info[idx].exec.back().second;
      ctx.info[idx].exec.pop_back();
      Temp orig_exec = ctx.info[idx].exec.back().first;
      Temp else_mask = bld.sop2(Builder::s_andn2, bld.def(bld.lm, exec),
                                bld.def(s1, scc), orig_exec, bld.exec(then_mask));

      /* add next current exec to the stack */
      ctx.info[idx].exec.emplace_back(else_mask, mask_type);

      bld.branch(aco_opcode::p_cbranch_z, bld.exec(else_mask), block->linear_succs[1], block->linear_succs[0]);
      return;
   }

   if (block->kind & block_kind_break) {
      // loop_mask = s_andn2_b64 (loop_mask, exec)
      assert(block->instructions.back()->opcode == aco_opcode::p_branch);
      block->instructions.pop_back();

      Temp current_exec = ctx.info[idx].exec.back().first;
      Temp cond = Temp();
      for (int exec_idx = ctx.info[idx].exec.size() - 2; exec_idx >= 0; exec_idx--) {
         cond = bld.tmp(s1);
         Temp exec_mask = ctx.info[idx].exec[exec_idx].first;
         exec_mask = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.scc(Definition(cond)),
                              exec_mask, current_exec);
         ctx.info[idx].exec[exec_idx].first = exec_mask;
         if (ctx.info[idx].exec[exec_idx].second & mask_type_loop)
            break;
      }

      /* check if the successor is the merge block, otherwise set exec to 0 */
      // TODO: this could be done better by directly branching to the merge block
      unsigned succ_idx = ctx.program->blocks[block->linear_succs[1]].linear_succs[0];
      Block& succ = ctx.program->blocks[succ_idx];
      if (!(succ.kind & block_kind_invert || succ.kind & block_kind_merge)) {
         ctx.info[idx].exec.back().first = bld.sop1(Builder::s_mov, bld.def(bld.lm, exec), Operand(0u));
      }

      bld.branch(aco_opcode::p_cbranch_nz, bld.scc(cond), block->linear_succs[1], block->linear_succs[0]);
      return;
   }

   if (block->kind & block_kind_continue) {
      assert(block->instructions.back()->opcode == aco_opcode::p_branch);
      block->instructions.pop_back();

      Temp current_exec = ctx.info[idx].exec.back().first;
      Temp cond = Temp();
      for (int exec_idx = ctx.info[idx].exec.size() - 2; exec_idx >= 0; exec_idx--) {
         if (ctx.info[idx].exec[exec_idx].second & mask_type_loop)
            break;
         cond = bld.tmp(s1);
         Temp exec_mask = ctx.info[idx].exec[exec_idx].first;
         exec_mask = bld.sop2(Builder::s_andn2, bld.def(bld.lm), bld.scc(Definition(cond)),
                              exec_mask, bld.exec(current_exec));
         ctx.info[idx].exec[exec_idx].first = exec_mask;
      }
      assert(cond != Temp());

      /* check if the successor is the merge block, otherwise set exec to 0 */
      // TODO: this could be done better by directly branching to the merge block
      unsigned succ_idx = ctx.program->blocks[block->linear_succs[1]].linear_succs[0];
      Block& succ = ctx.program->blocks[succ_idx];
      if (!(succ.kind & block_kind_invert || succ.kind & block_kind_merge)) {
         ctx.info[idx].exec.back().first = bld.sop1(Builder::s_mov, bld.def(bld.lm, exec), Operand(0u));
      }

      bld.branch(aco_opcode::p_cbranch_nz, bld.scc(cond), block->linear_succs[1], block->linear_succs[0]);
      return;
   }
}

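/* Per-block driver: emits the coupling code, processes the instructions,
 * rewrites the branch and records the mask that is live in exec at the end
 * of the block. */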
void process_block(exec_ctx& ctx, Block* block)
{
   std::vector<aco_ptr<Instruction>> instructions;
   instructions.reserve(block->instructions.size());

   unsigned idx = add_coupling_code(ctx, block, instructions);

   assert(block->index != ctx.program->blocks.size() - 1 ||
          ctx.info[block->index].exec.size() <= 2);

   process_instructions(ctx, block, instructions, idx);

   block->instructions = std::move(instructions);

   add_branch_code(ctx, block);

   block->live_out_exec = ctx.info[block->index].exec.back().first;
}

} /* end namespace */

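/* Entry point of the pass. The WQM analysis is only required when the
 * program mixes WQM and exact code; otherwise each block keeps a single
 * kind of mask throughout. */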
void insert_exec_mask(Program *program)
{
   exec_ctx ctx(program);

   if (program->needs_wqm && program->needs_exact)
      calculate_wqm_needs(ctx);

   for (Block& block : program->blocks)
      process_block(ctx, &block);
}

}