/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#include "util/ralloc.h"
#include "util/u_math.h"

#include "ir3.h"
#include "ir3_compiler.h"
/*
 * Legalize:
 *
 * We currently require that scheduling ensures that we have enough nop's
 * in all the right places.  The legalize step mostly handles fixing up
 * instruction flags ((ss)/(sy)/(ei)), and collapses sequences of nop's
 * into fewer nop's w/ rpt flag.
 */
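
/* For example, an instruction that consumes the result of a texture
 * fetch needs the (sy) flag set on it, roughly:
 *
 *    sam (f32)(xyzw)r0.x, r0.z, s#0, t#0
 *    (sy)add.f r1.x, r0.x, r0.y
 *
 * (illustrative sketch, not actual compiler output)
 */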
struct ir3_legalize_ctx {
	struct ir3_compiler *compiler;
	gl_shader_stage type;
	bool has_ssbo;
	bool need_pixlod;
	int max_bary;
};

struct ir3_legalize_state {
	regmask_t needs_ss;
	regmask_t needs_ss_war;  /* write after read */
	regmask_t needs_sy;
};

struct ir3_legalize_block_data {
	bool valid;
	struct ir3_legalize_state state;
};
/* We want to evaluate each block from the position of any other
 * predecessor block, in order that the flags set are the union of
 * all possible program paths.
 *
 * To do this, we need to know the output state (needs_ss/ss_war/sy)
 * of all predecessor blocks.  The tricky thing is loops, which mean
 * that we can't simply recursively process each predecessor block
 * before legalizing the current block.
 *
 * How we handle that is by looping over all the blocks until the
 * results converge.  If the output state of a given block changes
 * in a given pass, this means that all successor blocks are not
 * yet fully legalized.
 */
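
/* E.g. for a loop A -> B -> A, the first pass over A sees B's output
 * state from before B was legalized against A's own output, so A's
 * input state can still grow on a later pass; the do/while loop in
 * ir3_legalize() keeps re-running legalize_block() until no block's
 * output state changes.
 */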
static bool
legalize_block(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
{
	struct ir3_legalize_block_data *bd = block->data;

	if (bd->valid)
		return false;

	struct ir3_instruction *last_input = NULL;
	struct ir3_instruction *last_rel = NULL;
	struct ir3_instruction *last_n = NULL;
	struct list_head instr_list;
	struct ir3_legalize_state prev_state = bd->state;
	struct ir3_legalize_state *state = &bd->state;
	bool last_input_needs_ss = false;
	/* our input state is the OR of all predecessor blocks' state: */
	set_foreach(block->predecessors, entry) {
		struct ir3_block *predecessor = (struct ir3_block *)entry->key;
		struct ir3_legalize_block_data *pbd = predecessor->data;
		struct ir3_legalize_state *pstate = &pbd->state;

		/* Our input (ss)/(sy) state is based on OR'ing the output
		 * state of all our predecessor blocks:
		 */
		regmask_or(&state->needs_ss,
				&state->needs_ss, &pstate->needs_ss);
		regmask_or(&state->needs_ss_war,
				&state->needs_ss_war, &pstate->needs_ss_war);
		regmask_or(&state->needs_sy,
				&state->needs_sy, &pstate->needs_sy);
	}

	/* remove all the instructions from the list, we'll be adding
	 * them back in as we go
	 */
	list_replace(&block->instr_list, &instr_list);
	list_inithead(&block->instr_list);
	list_for_each_entry_safe (struct ir3_instruction, n, &instr_list, node) {
		struct ir3_register *reg;
		unsigned i;

		n->flags &= ~(IR3_INSTR_SS | IR3_INSTR_SY);

		if (is_meta(n))
			continue;

		if (is_input(n)) {
			struct ir3_register *inloc = n->regs[1];
			assert(inloc->flags & IR3_REG_IMMED);
			ctx->max_bary = MAX2(ctx->max_bary, inloc->iim_val);
		}

		if (last_n && is_barrier(last_n)) {
			n->flags |= IR3_INSTR_SS | IR3_INSTR_SY;
			last_input_needs_ss = false;
		}
		/* NOTE: consider dst register too.. it could happen that
		 * texture sample instruction (for example) writes some
		 * components which are unused.  A subsequent instruction
		 * that writes the same register can race w/ the sam instr
		 * resulting in undefined results:
		 */
		for (i = 0; i < n->regs_count; i++) {
			reg = n->regs[i];

			if (reg_gpr(reg)) {

				/* TODO: we probably only need (ss) for alu
				 * instr consuming sfu result.. need to make
				 * some tests for both this and (sy)..
				 */
				if (regmask_get(&state->needs_ss, reg)) {
					n->flags |= IR3_INSTR_SS;
					last_input_needs_ss = false;
					regmask_init(&state->needs_ss_war);
					regmask_init(&state->needs_ss);
				}

				if (regmask_get(&state->needs_sy, reg)) {
					n->flags |= IR3_INSTR_SY;
					regmask_init(&state->needs_sy);
				}
			}

			/* TODO: is it valid to have address reg loaded from a
			 * relative src (ie. mova a0, c<a0.x+4>)?  If so, the
			 * last_rel check below should be moved ahead of this:
			 */
			if (reg->flags & IR3_REG_RELATIV)
				last_rel = n;
		}
		if (n->regs_count > 0) {
			reg = n->regs[0];
			if (regmask_get(&state->needs_ss_war, reg)) {
				n->flags |= IR3_INSTR_SS;
				last_input_needs_ss = false;
				regmask_init(&state->needs_ss_war);
				regmask_init(&state->needs_ss);
			}

			if (last_rel && (reg->num == regid(REG_A0, 0))) {
				last_rel->flags |= IR3_INSTR_UL;
				last_rel = NULL;
			}
		}
		/* cat5+ does not have an (ss) bit, if needed we need to
		 * insert a nop to carry the sync flag.  Would be kinda
		 * clever if we were aware of this during scheduling, but
		 * this should be a pretty rare case:
		 */
		if ((n->flags & IR3_INSTR_SS) && (opc_cat(n->opc) >= 5)) {
			struct ir3_instruction *nop;
			nop = ir3_NOP(block);
			nop->flags |= IR3_INSTR_SS;
			n->flags &= ~IR3_INSTR_SS;
		}

		/* need to be able to set (ss) on first instruction: */
		if (list_empty(&block->instr_list) && (opc_cat(n->opc) >= 5))
			ir3_NOP(block);
		if (is_nop(n) && !list_empty(&block->instr_list)) {
			struct ir3_instruction *last = list_last_entry(&block->instr_list,
					struct ir3_instruction, node);
			if (is_nop(last) && (last->repeat < 5)) {
				last->repeat++;
				last->flags |= n->flags;
				continue;
			}

			/* NOTE: I think the nopN encoding works for a5xx and
			 * probably a4xx, but not a3xx.  So far only tested on
			 * a6xx.
			 */
			if ((ctx->compiler->gpu_id >= 600) && !n->flags && (last->nop < 3) &&
					((opc_cat(last->opc) == 2) || (opc_cat(last->opc) == 3))) {
				last->nop++;
				continue;
			}
		}
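
		/* E.g. two adjacent plain nop's become a single (rpt1)nop, and
		 * so on up to (rpt5)nop covering a run of six, which is why
		 * the collapsing above stops at (last->repeat < 5).
		 */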
		if (ctx->compiler->samgq_workaround &&
			ctx->type == MESA_SHADER_VERTEX && n->opc == OPC_SAMGQ) {
			/* expand samgq into a sequence of samgp0..samgp3: */
			struct ir3_instruction *samgp;

			for (i = 0; i < 4; i++) {
				samgp = ir3_instr_clone(n);
				samgp->opc = OPC_SAMGP0 + i;
				if (i > 1)
					samgp->flags |= IR3_INSTR_SY;
			}
			list_delinit(&n->node);
		} else {
			list_addtail(&n->node, &block->instr_list);
		}
		if (is_sfu(n))
			regmask_set(&state->needs_ss, n->regs[0]);

		if (is_tex(n)) {
			regmask_set(&state->needs_sy, n->regs[0]);
			ctx->need_pixlod = true;
		} else if (n->opc == OPC_RESINFO) {
			regmask_set(&state->needs_ss, n->regs[0]);
			ir3_NOP(block)->flags |= IR3_INSTR_SS;
			last_input_needs_ss = false;
		} else if (is_load(n)) {
			/* seems like ldlv needs (ss) bit instead??  which is odd but
			 * makes a bunch of flat-varying tests start working on a4xx.
			 */
			if ((n->opc == OPC_LDLV) || (n->opc == OPC_LDL))
				regmask_set(&state->needs_ss, n->regs[0]);
			else
				regmask_set(&state->needs_sy, n->regs[0]);
		} else if (is_atomic(n->opc)) {
			if (n->flags & IR3_INSTR_G) {
				if (ctx->compiler->gpu_id >= 600) {
					/* New encoding, returns result via second src: */
					regmask_set(&state->needs_sy, n->regs[3]);
				} else {
					regmask_set(&state->needs_sy, n->regs[0]);
				}
			} else {
				regmask_set(&state->needs_ss, n->regs[0]);
			}
		}
		if (is_ssbo(n->opc) || (is_atomic(n->opc) && (n->flags & IR3_INSTR_G)))
			ctx->has_ssbo = true;

		/* both tex/sfu appear to not always immediately consume
		 * their src register(s):
		 */
		if (is_tex(n) || is_sfu(n) || is_mem(n)) {
			foreach_src(reg, n) {
				if (reg_gpr(reg))
					regmask_set(&state->needs_ss_war, reg);
			}
		}

		if (is_input(n)) {
			last_input = n;
			last_input_needs_ss |= (n->opc == OPC_LDLV);
		}

		last_n = n;
	}

	if (last_input) {
		assert(block == list_first_entry(&block->shader->block_list,
				struct ir3_block, node));
		/* special hack.. if using ldlv to bypass interpolation,
		 * we need to insert a dummy bary.f on which we can set
		 * the (ei) flag:
		 */
		if (is_mem(last_input) && (last_input->opc == OPC_LDLV)) {
			struct ir3_instruction *baryf;

			/* (ss)bary.f (ei)r63.x, 0, r0.x */
			baryf = ir3_instr_create(block, OPC_BARY_F);
			ir3_reg_create(baryf, regid(63, 0), 0);
			ir3_reg_create(baryf, 0, IR3_REG_IMMED)->iim_val = 0;
			ir3_reg_create(baryf, regid(0, 0), 0);

			/* insert the dummy bary.f after last_input: */
			list_delinit(&baryf->node);
			list_add(&baryf->node, &last_input->node);

			last_input = baryf;

			/* by definition, we need (ss) since we are inserting
			 * the dummy bary.f immediately after the ldlv:
			 */
			last_input_needs_ss = true;
		}
		last_input->regs[0]->flags |= IR3_REG_EI;
		if (last_input_needs_ss)
			last_input->flags |= IR3_INSTR_SS;
	}
	if (last_rel)
		last_rel->flags |= IR3_INSTR_UL;

	bd->valid = true;

	if (memcmp(&prev_state, state, sizeof(*state))) {
		/* our output state changed, this invalidates all of our
		 * successors:
		 */
		for (unsigned i = 0; i < ARRAY_SIZE(block->successors); i++) {
			if (!block->successors[i])
				break;
			struct ir3_legalize_block_data *pbd = block->successors[i]->data;
			pbd->valid = false;
		}
	}

	return true;
}
/* NOTE: branch instructions are always the last instruction(s)
 * in the block.  We take advantage of this as we resolve the
 * branches, since "if (foo) break;" constructs turn into
 * something like:
 *
 *   block3 {
 *   	...
 *   	0029:021: mov.s32s32 r62.x, r1.y
 *   	0082:022: br !p0.x, target=block5
 *   	0083:023: br p0.x, target=block4
 *   	// succs: if _[0029:021: mov.s32s32] block4; else block5;
 *   }
 *   block4 {
 *   	0084:024: jump, target=block6
 *   	// succs: block6;
 *   }
 *   block5 {
 *   	0085:025: jump, target=block7
 *   	// succs: block7;
 *   }
 *
 * ie. only instruction in block4/block5 is a jump, so when
 * resolving branches we can easily detect this by checking
 * that the first instruction in the target block is itself
 * a jump, and setup the br directly to the jump's target
 * (and strip back out the now unreached jump)
 *
 * TODO sometimes we end up with things like:
 *
 *    br !p0.x, #2
 *    br p0.x, #12
 *    add.u r0.y, r0.y, 1
 *
 * If we swapped the order of the branches, we could drop one.
 */
static struct ir3_block *
resolve_dest_block(struct ir3_block *block)
{
	/* special case for last block: */
	if (!block->successors[0])
		return block;

	/* NOTE that we may or may not have inserted the jump
	 * in the target block yet, so conditions to resolve
	 * the dest to the dest block's successor are:
	 *
	 *   (1) successor[1] == NULL &&
	 *   (2) (block-is-empty || only-instr-is-jump)
	 */
	if (block->successors[1] == NULL) {
		if (list_empty(&block->instr_list)) {
			return block->successors[0];
		} else if (list_length(&block->instr_list) == 1) {
			struct ir3_instruction *instr = list_first_entry(
					&block->instr_list, struct ir3_instruction, node);
			if (instr->opc == OPC_JUMP)
				return block->successors[0];
		}
	}
	return block;
}
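
/* Note that resolve_dest_block() only follows a single level of
 * indirection; longer chains of empty/jump-only blocks still collapse,
 * since ir3_legalize() re-runs resolve_jumps() until it stops making
 * progress.
 */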
static void
remove_unused_block(struct ir3_block *old_target)
{
	list_delinit(&old_target->node);

	/* cleanup dangling predecessors: */
	for (unsigned i = 0; i < ARRAY_SIZE(old_target->successors); i++) {
		if (old_target->successors[i]) {
			struct ir3_block *succ = old_target->successors[i];
			_mesa_set_remove_key(succ->predecessors, old_target);
		}
	}
}
static void
retarget_jump(struct ir3_instruction *instr, struct ir3_block *new_target)
{
	struct ir3_block *old_target = instr->cat0.target;
	struct ir3_block *cur_block = instr->block;

	/* update current blocks successors to reflect the retargetting: */
	if (cur_block->successors[0] == old_target) {
		cur_block->successors[0] = new_target;
	} else {
		debug_assert(cur_block->successors[1] == old_target);
		cur_block->successors[1] = new_target;
	}

	/* update new target's predecessors: */
	_mesa_set_add(new_target->predecessors, cur_block);

	/* and remove old_target's predecessor: */
	debug_assert(_mesa_set_search(old_target->predecessors, cur_block));
	_mesa_set_remove_key(old_target->predecessors, cur_block);

	if (old_target->predecessors->entries == 0)
		remove_unused_block(old_target);

	instr->cat0.target = new_target;
}
static bool
resolve_jump(struct ir3_instruction *instr)
{
	struct ir3_block *tblock =
		resolve_dest_block(instr->cat0.target);
	struct ir3_instruction *target;

	if (tblock != instr->cat0.target) {
		retarget_jump(instr, tblock);
		return true;
	}

	target = list_first_entry(&tblock->instr_list,
			struct ir3_instruction, node);

	/* TODO maybe a less fragile way to do this.  But we are expecting
	 * a pattern from sched_block() that looks like:
	 *
	 *   br !p0.x, #else-block
	 *   br #if-block
	 *
	 * if the first branch target is +2, or if 2nd branch target is +1
	 * then we can just drop the jump.
	 */
	unsigned next_block;
	if (instr->cat0.inv == true)
		next_block = 2;
	else
		next_block = 1;

	if (target->ip == (instr->ip + next_block)) {
		list_delinit(&instr->node);
		return true;
	} else {
		instr->cat0.immed =
			(int)target->ip - (int)instr->ip;
	}

	return false;
}
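
/* Worked example of the next_block check in resolve_jump(), with
 * illustrative ip numbering: given "br !p0.x, #else" at ip=10 followed
 * by "br p0.x, #if" at ip=11, an else-block starting at ip=12 is
 * reached by fall-through anyway (the complementary branch at ip=11
 * covers the other case), so the inverted branch is droppable when its
 * target is at ip+2, and the non-inverted one when its target is at
 * ip+1.
 */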
/* resolve jumps, removing jumps/branches to immediately following
 * instruction which we end up with from earlier stages.  Since
 * removing an instruction can invalidate earlier instruction's
 * branch offsets, we need to do this iteratively until no more
 * branches are removed.
 */
static bool
resolve_jumps(struct ir3 *ir)
{
	list_for_each_entry (struct ir3_block, block, &ir->block_list, node)
		list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node)
			if (is_flow(instr) && instr->cat0.target)
				if (resolve_jump(instr))
					return true;

	return false;
}
static void mark_jp(struct ir3_block *block)
{
	struct ir3_instruction *target = list_first_entry(&block->instr_list,
			struct ir3_instruction, node);
	target->flags |= IR3_INSTR_JP;
}
/* Mark points where control flow converges or diverges.
 *
 * Divergence points could actually be re-convergence points where
 * "parked" threads are reconverged with threads that took the opposite
 * path last time around.  Possibly it is easier to think of (jp) as
 * "the execution mask might have changed".
 */
static void
mark_xvergence_points(struct ir3 *ir)
{
	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		if (block->predecessors->entries > 1) {
			/* if a block has more than one possible predecessor, then
			 * the first instruction is a convergence point.
			 */
			mark_jp(block);
		} else if (block->predecessors->entries == 1) {
			/* If a block has one predecessor, which has multiple possible
			 * successors, it is a divergence point.
			 */
			set_foreach(block->predecessors, entry) {
				struct ir3_block *predecessor = (struct ir3_block *)entry->key;
				if (predecessor->successors[1]) {
					mark_jp(block);
				}
			}
		}
	}
}
void
ir3_legalize(struct ir3 *ir, bool *has_ssbo, bool *need_pixlod, int *max_bary)
{
	struct ir3_legalize_ctx *ctx = rzalloc(ir, struct ir3_legalize_ctx);
	bool progress;

	ctx->max_bary = -1;
	ctx->compiler = ir->compiler;
	ctx->type = ir->type;

	/* allocate per-block data: */
	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		block->data = rzalloc(ctx, struct ir3_legalize_block_data);
	}

	/* process each block: */
	do {
		progress = false;
		list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
			progress |= legalize_block(ctx, block);
		}
	} while (progress);

	*has_ssbo = ctx->has_ssbo;
	*need_pixlod = ctx->need_pixlod;
	*max_bary = ctx->max_bary;

	do {
		ir3_count_instructions(ir);
	} while (resolve_jumps(ir));

	mark_xvergence_points(ir);
}