/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#include "util/ralloc.h"
#include "util/u_math.h"

#include "ir3.h"
#include "ir3_compiler.h"
/*
 * Legalize:
 *
 * We currently require that scheduling ensures that we have enough nop's
 * in all the right places.  The legalize step mostly handles fixing up
 * instruction flags ((ss)/(sy)/(ei)), and collapses sequences of nop's
 * into fewer nop's w/ rpt flag.
 */
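
/* NOTE: a rough summary of the sync flags this pass deals with:  (ss)
 * stalls until outstanding results from SFU/local-memory instructions
 * have landed, (sy) stalls until outstanding texture-fetch results have
 * landed, (ei) marks the last read of varying inputs (so their storage
 * can be released), and (jp) marks points where the execution mask may
 * have changed.
 */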
struct ir3_legalize_ctx {
	struct ir3_compiler *compiler;
	struct ir3_shader_variant *so;
	gl_shader_stage type;
	int max_bary;
};
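
/* NOTE: regmask_t is (roughly) one bit per (half-)register.  needs_ss
 * and needs_sy track registers whose producing instruction has not yet
 * been synced against, while needs_ss_war tracks registers which may
 * still be read by an in-flight instruction:
 */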
struct ir3_legalize_state {
	regmask_t needs_ss;
	regmask_t needs_ss_war;  /* write after read */
	regmask_t needs_sy;
};
struct ir3_legalize_block_data {
	bool valid;
	struct ir3_legalize_state state;
};
/* We want to evaluate each block from the position of any other
 * predecessor block, in order that the flags set are the union of
 * all possible program paths.
 *
 * To do this, we need to know the output state (needs_ss/ss_war/sy)
 * of all predecessor blocks.  The tricky thing is loops, which mean
 * that we can't simply recursively process each predecessor block
 * before legalizing the current block.
 *
 * How we handle that is by looping over all the blocks until the
 * results converge.  If the output state of a given block changes
 * in a given pass, this means that all successor blocks are not
 * yet fully legalized.
 */
static bool
legalize_block(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
{
	struct ir3_legalize_block_data *bd = block->data;

	if (bd->valid)
		return false;

	struct ir3_instruction *last_input = NULL;
	struct ir3_instruction *last_rel = NULL;
	struct ir3_instruction *last_n = NULL;
	struct list_head instr_list;
	struct ir3_legalize_state prev_state = bd->state;
	struct ir3_legalize_state *state = &bd->state;
	bool last_input_needs_ss = false;
	bool has_tex_prefetch = false;
	/* our input state is the OR of all predecessor blocks' state: */
	set_foreach(block->predecessors, entry) {
		struct ir3_block *predecessor = (struct ir3_block *)entry->key;
		struct ir3_legalize_block_data *pbd = predecessor->data;
		struct ir3_legalize_state *pstate = &pbd->state;

		/* Our input (ss)/(sy) state is based on OR'ing the output
		 * state of all our predecessor blocks
		 */
		regmask_or(&state->needs_ss,
				&state->needs_ss, &pstate->needs_ss);
		regmask_or(&state->needs_ss_war,
				&state->needs_ss_war, &pstate->needs_ss_war);
		regmask_or(&state->needs_sy,
				&state->needs_sy, &pstate->needs_sy);
	}
	/* remove all the instructions from the list, we'll be adding
	 * them back in as we go
	 */
	list_replace(&block->instr_list, &instr_list);
	list_inithead(&block->instr_list);

	foreach_instr_safe (n, &instr_list) {
		struct ir3_register *reg;
		unsigned i;

		n->flags &= ~(IR3_INSTR_SS | IR3_INSTR_SY);

		/* _meta::tex_prefetch instructions removed later in
		 * collect_tex_prefetches()
		 */
		if (is_meta(n) && (n->opc != OPC_META_TEX_PREFETCH))
			continue;
		if (is_input(n)) {
			struct ir3_register *inloc = n->regs[1];
			assert(inloc->flags & IR3_REG_IMMED);
			ctx->max_bary = MAX2(ctx->max_bary, inloc->iim_val);
		}
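
		/* A barrier acts as a full sync point, so (conservatively)
		 * the instruction following it waits on everything
		 * outstanding and then starts from a clean slate:
		 */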
		if (last_n && is_barrier(last_n)) {
			n->flags |= IR3_INSTR_SS | IR3_INSTR_SY;
			last_input_needs_ss = false;
			regmask_init(&state->needs_ss_war);
			regmask_init(&state->needs_ss);
			regmask_init(&state->needs_sy);
		}
		if (last_n && (last_n->opc == OPC_PREDT)) {
			n->flags |= IR3_INSTR_SS;
			regmask_init(&state->needs_ss_war);
			regmask_init(&state->needs_ss);
		}
		/* NOTE: consider dst register too.. it could happen that
		 * texture sample instruction (for example) writes some
		 * components which are unused.  A subsequent instruction
		 * that writes the same register can race w/ the sam instr
		 * resulting in undefined results:
		 */
		for (i = 0; i < n->regs_count; i++) {
			struct ir3_register *reg = n->regs[i];

			if (reg_gpr(reg)) {
				/* TODO: we probably only need (ss) for alu
				 * instr consuming sfu result.. need to make
				 * some tests for both this and (sy)..
				 */
				if (regmask_get(&state->needs_ss, reg)) {
					n->flags |= IR3_INSTR_SS;
					last_input_needs_ss = false;
					regmask_init(&state->needs_ss_war);
					regmask_init(&state->needs_ss);
				}

				if (regmask_get(&state->needs_sy, reg)) {
					n->flags |= IR3_INSTR_SY;
					regmask_init(&state->needs_sy);
				}
			}

			/* TODO: is it valid to have address reg loaded from a
			 * relative src (ie. mova a0, c<a0.x+4>)?  If so, the
			 * last_rel check below should be moved ahead of this:
			 */
			if (reg->flags & IR3_REG_RELATIV)
				last_rel = n;
		}
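
		/* also handle the write-after-read hazard on the dst: if an
		 * earlier tex/sfu/mem instruction may still read this
		 * register, sync before overwriting it:
		 */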
		if (n->regs_count > 0) {
			struct ir3_register *reg = n->regs[0];
			if (regmask_get(&state->needs_ss_war, reg)) {
				n->flags |= IR3_INSTR_SS;
				last_input_needs_ss = false;
				regmask_init(&state->needs_ss_war);
				regmask_init(&state->needs_ss);
			}

			if (last_rel && (reg->num == regid(REG_A0, 0))) {
				last_rel->flags |= IR3_INSTR_UL;
				last_rel = NULL;
			}
		}
		/* cat5+ does not have an (ss) bit, if needed we need to
		 * insert a nop to carry the sync flag.  Would be kinda
		 * clever if we were aware of this during scheduling, but
		 * this should be a pretty rare case:
		 */
		if ((n->flags & IR3_INSTR_SS) && (opc_cat(n->opc) >= 5)) {
			struct ir3_instruction *nop;
			nop = ir3_NOP(block);
			nop->flags |= IR3_INSTR_SS;
			n->flags &= ~IR3_INSTR_SS;
		}
		/* need to be able to set (ss) on first instruction: */
		if (list_is_empty(&block->instr_list) && (opc_cat(n->opc) >= 5))
			ir3_NOP(block);
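
		/* On GPUs needing the workaround, SAMGQ is expanded into four
		 * per-quad-lane SAMGP0..SAMGP3 instructions:
		 */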
		if (ctx->compiler->samgq_workaround &&
			ctx->type == MESA_SHADER_VERTEX && n->opc == OPC_SAMGQ) {
			struct ir3_instruction *samgp;

			list_delinit(&n->node);

			for (i = 0; i < 4; i++) {
				samgp = ir3_instr_clone(n);
				samgp->opc = OPC_SAMGP0 + i;
				if (i > 1)
					samgp->flags |= IR3_INSTR_SY;
			}
		} else {
			list_addtail(&n->node, &block->instr_list);
		}
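
		/* For fine derivatives, dsxpp.1/dsypp.1 come in pairs: clone
		 * the instruction and mark the copy with the (p) flag so both
		 * halves of the pixel-pair get computed:
		 */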
		if (n->opc == OPC_DSXPP_1 || n->opc == OPC_DSYPP_1) {
			struct ir3_instruction *op_p = ir3_instr_clone(n);
			op_p->flags = IR3_INSTR_P;

			ctx->so->need_fine_derivatives = true;
		}
		if (is_sfu(n))
			regmask_set(&state->needs_ss, n->regs[0]);
		if (is_tex_or_prefetch(n)) {
			regmask_set(&state->needs_sy, n->regs[0]);
			if (n->opc == OPC_META_TEX_PREFETCH)
				has_tex_prefetch = true;
		} else if (n->opc == OPC_RESINFO) {
			regmask_set(&state->needs_ss, n->regs[0]);
			ir3_NOP(block)->flags |= IR3_INSTR_SS;
			last_input_needs_ss = false;
		} else if (is_load(n)) {
			/* seems like ldlv needs (ss) bit instead??  which is odd but
			 * makes a bunch of flat-varying tests start working on a4xx.
			 */
			if ((n->opc == OPC_LDLV) || (n->opc == OPC_LDL) || (n->opc == OPC_LDLW))
				regmask_set(&state->needs_ss, n->regs[0]);
			else
				regmask_set(&state->needs_sy, n->regs[0]);
		} else if (is_atomic(n->opc)) {
			if (n->flags & IR3_INSTR_G) {
				if (ctx->compiler->gpu_id >= 600) {
					/* New encoding, returns result via second src: */
					regmask_set(&state->needs_sy, n->regs[3]);
				} else {
					regmask_set(&state->needs_sy, n->regs[0]);
				}
			} else {
				regmask_set(&state->needs_ss, n->regs[0]);
			}
		}
		if (is_ssbo(n->opc) || (is_atomic(n->opc) && (n->flags & IR3_INSTR_G)))
			ctx->so->has_ssbo = true;
		/* both tex/sfu appear to not always immediately consume
		 * their src register(s):
		 */
		if (is_tex(n) || is_sfu(n) || is_mem(n)) {
			foreach_src (reg, n) {
				if (reg_gpr(reg))
					regmask_set(&state->needs_ss_war, reg);
			}
		}

		if (is_input(n)) {
			last_input = n;
			last_input_needs_ss |= (n->opc == OPC_LDLV);
		}

		last_n = n;
	}
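
	/* If the shader has varying inputs, the last input fetch needs the
	 * (ei) "end input" flag so the varying storage can be released:
	 */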
	if (last_input) {
		assert(block == list_first_entry(&block->shader->block_list,
				struct ir3_block, node));

		/* special hack.. if using ldlv to bypass interpolation,
		 * we need to insert a dummy bary.f on which we can set
		 * the (ei) flag:
		 */
		if (is_mem(last_input) && (last_input->opc == OPC_LDLV)) {
			struct ir3_instruction *baryf;

			/* (ss)bary.f (ei)r63.x, 0, r0.x */
			baryf = ir3_instr_create(block, OPC_BARY_F);
			ir3_reg_create(baryf, regid(63, 0), 0);
			ir3_reg_create(baryf, 0, IR3_REG_IMMED)->iim_val = 0;
			ir3_reg_create(baryf, regid(0, 0), 0);

			/* insert the dummy bary.f after last_input: */
			list_delinit(&baryf->node);
			list_add(&baryf->node, &last_input->node);

			last_input = baryf;

			/* by definition, we need (ss) since we are inserting
			 * the dummy bary.f immediately after the ldlv:
			 */
			last_input_needs_ss = true;
		}

		last_input->regs[0]->flags |= IR3_REG_EI;
		if (last_input_needs_ss)
			last_input->flags |= IR3_INSTR_SS;
	} else if (has_tex_prefetch) {
		/* texture prefetch, but *no* inputs.. we need to insert a
		 * dummy bary.f at the top of the shader to unblock varying
		 * storage:
		 */
		struct ir3_instruction *baryf;

		/* (ss)bary.f (ei)r63.x, 0, r0.x */
		baryf = ir3_instr_create(block, OPC_BARY_F);
		ir3_reg_create(baryf, regid(63, 0), 0)->flags |= IR3_REG_EI;
		ir3_reg_create(baryf, 0, IR3_REG_IMMED)->iim_val = 0;
		ir3_reg_create(baryf, regid(0, 0), 0);

		/* insert the dummy bary.f at head: */
		list_delinit(&baryf->node);
		list_add(&baryf->node, &block->instr_list);
	}
	if (last_rel)
		last_rel->flags |= IR3_INSTR_UL;
	bd->valid = true;

	if (memcmp(&prev_state, state, sizeof(*state))) {
		/* our output state changed, this invalidates all of our
		 * successors:
		 */
		for (unsigned i = 0; i < ARRAY_SIZE(block->successors); i++) {
			if (!block->successors[i])
				break;
			struct ir3_legalize_block_data *pbd = block->successors[i]->data;
			pbd->valid = false;
		}
	}

	return true;
}
/* NOTE: branch instructions are always the last instruction(s)
 * in the block.  We take advantage of this as we resolve the
 * branches, since "if (foo) break;" constructs turn into
 * something like:
 *
 *   block3 {
 *   	...
 *   	0029:021: mov.s32s32 r62.x, r1.y
 *   	0082:022: br !p0.x, target=block5
 *   	0083:023: br p0.x, target=block4
 *   	// succs: if _[0029:021: mov.s32s32] block4; else block5;
 *   }
 *   block4 {
 *   	0084:024: jump, target=block6
 *   	// succs: block6;
 *   }
 *   block5 {
 *   	0085:025: jump, target=block7
 *   	// succs: block7;
 *   }
 *
 * ie. only instruction in block4/block5 is a jump, so when
 * resolving branches we can easily detect this by checking
 * that the first instruction in the target block is itself
 * a jump, and setup the br directly to the jump's target
 * (and strip back out the now unreached jump)
 *
 * TODO sometimes we end up with things like:
 *
 *    br !p0.x, #2
 *    br p0.x, #12
 *    add.u r0.y, r0.y, 1
 *
 * If we swapped the order of the branches, we could drop one.
 */
static struct ir3_block *
resolve_dest_block(struct ir3_block *block)
{
	/* special case for last block: */
	if (!block->successors[0])
		return block;

	/* NOTE that we may or may not have inserted the jump
	 * in the target block yet, so conditions to resolve
	 * the dest to the dest block's successor are:
	 *
	 *   (1) successor[1] == NULL &&
	 *   (2) (block-is-empty || only-instr-is-jump)
	 */
	if (block->successors[1] == NULL) {
		if (list_is_empty(&block->instr_list)) {
			return block->successors[0];
		} else if (list_length(&block->instr_list) == 1) {
			struct ir3_instruction *instr = list_first_entry(
					&block->instr_list, struct ir3_instruction, node);
			if (instr->opc == OPC_JUMP)
				return block->successors[0];
		}
	}
	return block;
}
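
/* NOTE: resolve_dest_block() only follows one level of trivial jump;
 * longer chains get collapsed because resolve_jumps() below is re-run
 * until it makes no more progress:
 */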
static void
remove_unused_block(struct ir3_block *old_target)
{
	list_delinit(&old_target->node);

	/* cleanup dangling predecessors: */
	for (unsigned i = 0; i < ARRAY_SIZE(old_target->successors); i++) {
		if (old_target->successors[i]) {
			struct ir3_block *succ = old_target->successors[i];
			_mesa_set_remove_key(succ->predecessors, old_target);
		}
	}
}
static void
retarget_jump(struct ir3_instruction *instr, struct ir3_block *new_target)
{
	struct ir3_block *old_target = instr->cat0.target;
	struct ir3_block *cur_block = instr->block;

	/* update current block's successors to reflect the retargeting: */
	if (cur_block->successors[0] == old_target) {
		cur_block->successors[0] = new_target;
	} else {
		debug_assert(cur_block->successors[1] == old_target);
		cur_block->successors[1] = new_target;
	}

	/* update new target's predecessors: */
	_mesa_set_add(new_target->predecessors, cur_block);

	/* and remove old_target's predecessor: */
	debug_assert(_mesa_set_search(old_target->predecessors, cur_block));
	_mesa_set_remove_key(old_target->predecessors, cur_block);

	if (old_target->predecessors->entries == 0)
		remove_unused_block(old_target);

	instr->cat0.target = new_target;
}
static bool
resolve_jump(struct ir3_instruction *instr)
{
	struct ir3_block *tblock =
		resolve_dest_block(instr->cat0.target);
	struct ir3_instruction *target;

	if (tblock != instr->cat0.target) {
		retarget_jump(instr, tblock);
		return true;
	}

	target = list_first_entry(&tblock->instr_list,
			struct ir3_instruction, node);

	/* TODO maybe a less fragile way to do this.  But we are expecting
	 * a pattern from sched_block() that looks like:
	 *
	 *   br !p0.x, #else-block
	 *   br p0.x, #if-block
	 *
	 * if the first branch target is +2, or if 2nd branch target is +1
	 * then we can just drop the jump.
	 */
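	/* The inverted (!p0.x) branch is emitted first, so its fall-thru
	 * point is two instructions ahead (past the following br p0.x),
	 * while for the second branch it is only one:
	 */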
	unsigned next_block;
	if (instr->cat0.inv == true)
		next_block = 2;
	else
		next_block = 1;

	if (target->ip == (instr->ip + next_block)) {
		list_delinit(&instr->node);
		return true;
	} else {
		instr->cat0.immed =
			(int)target->ip - (int)instr->ip;
	}
	return false;
}
/* resolve jumps, removing jumps/branches to immediately following
 * instruction which we end up with from earlier stages.  Since
 * removing an instruction can invalidate earlier instruction's
 * branch offsets, we need to do this iteratively until no more
 * branches are removed.
 */
static bool
resolve_jumps(struct ir3 *ir)
{
	foreach_block (block, &ir->block_list)
		foreach_instr (instr, &block->instr_list)
			if (is_flow(instr) && instr->cat0.target)
				if (resolve_jump(instr))
					return true;

	return false;
}
static void mark_jp(struct ir3_block *block)
{
	struct ir3_instruction *target = list_first_entry(&block->instr_list,
			struct ir3_instruction, node);
	target->flags |= IR3_INSTR_JP;
}
/* Mark points where control flow converges or diverges.
 *
 * Divergence points could actually be re-convergence points where
 * "parked" threads are re-converged with threads that took the opposite
 * path last time around.  Possibly it is easier to think of (jp) as
 * "the execution mask might have changed".
 */
static void
mark_xvergence_points(struct ir3 *ir)
{
	foreach_block (block, &ir->block_list) {
		if (block->predecessors->entries > 1) {
			/* if a block has more than one possible predecessor, then
			 * the first instruction is a convergence point.
			 */
			mark_jp(block);
		} else if (block->predecessors->entries == 1) {
			/* If a block has one predecessor, which has multiple possible
			 * successors, it is a divergence point.
			 */
			set_foreach(block->predecessors, entry) {
				struct ir3_block *predecessor = (struct ir3_block *)entry->key;
				if (predecessor->successors[1]) {
					mark_jp(block);
				}
			}
		}
	}
}
/* Insert the branch/jump instructions for flow control between blocks.
 * Initially this is done naively, without considering if the successor
 * block immediately follows the current block (ie. so no jump required),
 * but that is cleaned up in resolve_jumps().
 *
 * TODO what ensures that the last write to p0.x in a block is the
 * branch condition?  Have we been getting lucky all this time?
 */
static void
block_sched(struct ir3 *ir)
{
	foreach_block (block, &ir->block_list) {
		if (block->successors[1]) {
			/* if/else, conditional branches to "then" or "else": */
			struct ir3_instruction *br;

			debug_assert(block->condition);

			/* create "else" branch first (since "then" block should
			 * frequently/always end up being a fall-thru):
			 */
			br = ir3_B(block, block->condition, 0);
			br->cat0.inv = true;
			br->cat0.target = block->successors[1];

			/* "then" branch: */
			br = ir3_B(block, block->condition, 0);
			br->cat0.target = block->successors[0];

		} else if (block->successors[0]) {
			/* otherwise unconditional jump to next block: */
			struct ir3_instruction *jmp;

			jmp = ir3_JUMP(block);
			jmp->cat0.target = block->successors[0];
		}
	}
}
/* Here we workaround the fact that kill doesn't actually kill the thread as
 * GL expects.  The last instruction always needs to be an end instruction,
 * which means that if we're stuck in a loop where kill is the only way out,
 * then we may have to jump out to the end.  kill may also have the d3d
 * semantics of converting the thread to a helper thread, rather than setting
 * the exec mask to 0, in which case the helper thread could get stuck in an
 * infinite loop.
 *
 * We do this late, both to give the scheduler the opportunity to reschedule
 * kill instructions earlier and to avoid having to create a separate basic
 * block.
 *
 * TODO: Assuming that the wavefront doesn't stop as soon as all threads are
 * killed, we might benefit by doing this more aggressively when the remaining
 * part of the program after the kill is large, since that would let us
 * skip over the instructions when there are no non-killed threads left.
 */
static void
kill_sched(struct ir3 *ir, struct ir3_shader_variant *so)
{
	/* True if we know that this block will always eventually lead to the end
	 * block:
	 */
	bool always_ends = true;
	bool added = false;
	struct ir3_block *last_block =
		list_last_entry(&ir->block_list, struct ir3_block, node);

	foreach_block_rev (block, &ir->block_list) {
		for (unsigned i = 0; i < 2 && block->successors[i]; i++) {
			if (block->successors[i]->start_ip <= block->end_ip)
				always_ends = false;
		}

		if (always_ends)
			continue;

		foreach_instr_safe (instr, &block->instr_list) {
			if (instr->opc != OPC_KILL)
				continue;

			struct ir3_instruction *br = ir3_instr_create(block, OPC_B);
			br->regs[1] = instr->regs[1];
			br->cat0.target =
				list_last_entry(&ir->block_list, struct ir3_block, node);

			list_add(&br->node, &instr->node);
			added = true;
		}
	}

	if (added) {
		/* I'm not entirely sure how the branchstack works, but we probably
		 * need to add at least one entry for the divergence which is resolved
		 * at the end:
		 */
		so->branchstack++;

		/* We don't update predecessors/successors, so we have to do this
		 * manually:
		 */
		mark_jp(last_block);
	}
}
/* Insert nop's required to make this a legal/valid shader program: */
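/* For example: if 5 cycles of delay are needed after a preceding cat2
 * instruction on a6xx, up to 3 of them can be folded into the cat2's
 * nopN encoding, and the remaining 2 become a nop with (rpt1) (a nop
 * repeated N times covers N+1 cycles):
 */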
static void
nop_sched(struct ir3 *ir)
{
	foreach_block (block, &ir->block_list) {
		struct ir3_instruction *last = NULL;
		struct list_head instr_list;

		/* remove all the instructions from the list, we'll be adding
		 * them back in as we go
		 */
		list_replace(&block->instr_list, &instr_list);
		list_inithead(&block->instr_list);

		foreach_instr_safe (instr, &instr_list) {
			unsigned delay = ir3_delay_calc(block, instr, false, true);

			/* NOTE: I think the nopN encoding works for a5xx and
			 * probably a4xx, but not a3xx.  So far only tested on
			 * a6xx.
			 */
			if ((delay > 0) && (ir->compiler->gpu_id >= 600) && last &&
					((opc_cat(last->opc) == 2) || (opc_cat(last->opc) == 3))) {
				/* the previous cat2/cat3 instruction can encode at most 3 nop's: */
				unsigned transfer = MIN2(delay, 3 - last->nop);
				last->nop += transfer;
				delay -= transfer;
			}

			if ((delay > 0) && last && (last->opc == OPC_NOP)) {
				/* the previous nop can encode at most 5 repeats: */
				unsigned transfer = MIN2(delay, 5 - last->repeat);
				last->repeat += transfer;
				delay -= transfer;
			}

			if (delay > 0) {
				debug_assert(delay <= 6);
				ir3_NOP(block)->repeat = delay - 1;
			}

			list_addtail(&instr->node, &block->instr_list);
			last = instr;
		}
	}
}
void
ir3_legalize(struct ir3 *ir, struct ir3_shader_variant *so, int *max_bary)
{
	struct ir3_legalize_ctx *ctx = rzalloc(ir, struct ir3_legalize_ctx);
	bool progress;

	ctx->so = so;
	ctx->max_bary = -1;
	ctx->compiler = ir->compiler;
	ctx->type = ir->type;

	/* allocate per-block data: */
	foreach_block (block, &ir->block_list) {
		block->data = rzalloc(ctx, struct ir3_legalize_block_data);
	}

	/* process each block: */
	do {
		progress = false;
		foreach_block (block, &ir->block_list) {
			progress |= legalize_block(ctx, block);
		}
	} while (progress);

	*max_bary = ctx->max_bary;

	block_sched(ir);
	if (so->type == MESA_SHADER_FRAGMENT)
		kill_sched(ir, so);
	nop_sched(ir);

	do {
		ir3_count_instructions(ir);
	} while (resolve_jumps(ir));

	mark_xvergence_points(ir);
}