/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/u_math.h"

#include "freedreno_util.h"

#include "ir3.h"

/*
 * Legalize:
 *
 * We currently require that scheduling ensures that we have enough nop's
 * in all the right places.  The legalize step mostly handles fixing up
 * instruction flags ((ss)/(sy)/(ei)), and collapses sequences of nop's
 * into fewer nop's w/ rpt flag.
 */
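
/* For example (illustrative, not from an actual shader trace): an alu
 * instruction consuming an sfu result needs (ss), and one consuming a
 * texture fetch result needs (sy), roughly:
 *
 *    rcp r0.x, r0.y
 *    (ss)mul.f r0.z, r0.x, c0.x             <= waits on the sfu write
 *    sam (f32)(xyzw)r1.x, r0.z, s#0, t#0
 *    (sy)add.f r2.x, r1.x, r1.y             <= waits on the tex write
 */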

struct ir3_legalize_ctx {
	bool has_samp;
	int max_bary;
};

/* We want to evaluate each block from the position of any other
 * predecessor block, in order that the flags set are the union
 * of all possible program paths.  For a stopping condition, we
 * want to stop when the pair of <pred-block, current-block> has
 * been visited already.
 *
 * XXX is that completely true?  We could have different needs_xyz
 * flags set depending on the path leading to pred-block.. we could
 * do *most* of this based on chasing src instruction ptrs (and
 * following all phi srcs).. except for the write-after-read hazard.
 *
 * For now we just set the ss/sy flags on the first instruction of the
 * block, and handle everything within the block as before.
 */
static void
legalize_block(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
{
	struct ir3_instruction *last_input = NULL;
	struct ir3_instruction *last_rel = NULL;
	struct list_head instr_list;
	regmask_t needs_ss_war;       /* write after read */
	regmask_t needs_ss;
	regmask_t needs_sy;
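
	/* needs_ss/needs_sy track registers written by not-yet-synced sfu
	 * resp. tex/mem instructions; needs_ss_war tracks registers *read*
	 * by them, so that a later write to one can be flagged (the
	 * write-after-read case).
	 */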
	regmask_init(&needs_ss_war);
	regmask_init(&needs_ss);
	regmask_init(&needs_sy);

	/* remove all the instructions from the list, we'll be adding
	 * them back in as we go
	 */
	list_replace(&block->instr_list, &instr_list);
	list_inithead(&block->instr_list);

	list_for_each_entry_safe (struct ir3_instruction, n, &instr_list, node) {
		struct ir3_register *reg;
		unsigned i;

		if (is_meta(n))
			continue;

		if (is_input(n)) {
			struct ir3_register *inloc = n->regs[1];
			assert(inloc->flags & IR3_REG_IMMED);
			ctx->max_bary = MAX2(ctx->max_bary, inloc->iim_val);
		}

		/* NOTE: consider dst register too.. it could happen that
		 * texture sample instruction (for example) writes some
		 * components which are unused.  A subsequent instruction
		 * that writes the same register can race w/ the sam instr
		 * resulting in undefined results:
		 */
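		/* e.g. (illustrative): if "sam (f32)(xy)r0.x, ..." writes r0.x
		 * and r0.y but only r0.x is consumed, a later instruction that
		 * writes r0.y must still sync, as its write races with the
		 * in-flight fetch's write to r0.y.  This is why the loop below
		 * starts at regs[0] (the dst) rather than the srcs:
		 */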
		for (i = 0; i < n->regs_count; i++) {
			reg = n->regs[i];

			if (reg_gpr(reg)) {

				/* TODO: we probably only need (ss) for alu
				 * instr consuming sfu result.. need to make
				 * some tests for both this and (sy)..
				 */
				if (regmask_get(&needs_ss, reg)) {
					n->flags |= IR3_INSTR_SS;
					regmask_init(&needs_ss);
				}

				if (regmask_get(&needs_sy, reg)) {
					n->flags |= IR3_INSTR_SY;
					regmask_init(&needs_sy);
				}
			}

			/* TODO: is it valid to have address reg loaded from a
			 * relative src (ie. mova a0, c<a0.x+4>)?  If so, the
			 * last_rel check below should be moved ahead of this:
			 */
			if (reg->flags & IR3_REG_RELATIV)
				last_rel = n;
		}

		if (n->regs_count > 0) {
			reg = n->regs[0];
			if (regmask_get(&needs_ss_war, reg)) {
				n->flags |= IR3_INSTR_SS;
				regmask_init(&needs_ss_war); // ??? I assume?
			}

			if (last_rel && (reg->num == regid(REG_A0, 0))) {
				last_rel->flags |= IR3_INSTR_UL;
				last_rel = NULL;
			}
		}

		/* cat5+ does not have an (ss) bit, if needed we need to
		 * insert a nop to carry the sync flag.  Would be kinda
		 * clever if we were aware of this during scheduling, but
		 * this should be a pretty rare case:
		 */
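		/* e.g. (illustrative) an (ss) that lands on a sam gets moved
		 * onto a nop inserted just ahead of it:
		 *
		 *    (ss)nop
		 *    sam (f32)(xyzw)r0.x, r0.z, s#0, t#0
		 */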
		if ((n->flags & IR3_INSTR_SS) && (n->category >= 5)) {
			struct ir3_instruction *nop;
			nop = ir3_NOP(block);
			nop->flags |= IR3_INSTR_SS;
			n->flags &= ~IR3_INSTR_SS;
		}

		/* need to be able to set (ss) on first instruction: */
		if (list_empty(&block->instr_list) && (n->category >= 5))
			ir3_NOP(block);
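
		/* collapse runs of nop's into a single nop w/ (rptN), e.g.
		 * (illustrative) four consecutive nop's become "(rpt3)nop":
		 */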
		if (is_nop(n) && !list_empty(&block->instr_list)) {
			struct ir3_instruction *last = list_last_entry(&block->instr_list,
					struct ir3_instruction, node);
			if (is_nop(last) && (last->repeat < 5)) {
				last->repeat++;
				last->flags |= n->flags;
				continue;
			}
		}

		list_addtail(&n->node, &block->instr_list);

		if (is_sfu(n))
			regmask_set(&needs_ss, n->regs[0]);

		if (is_tex(n)) {
			/* this ends up being the # of samp instructions.. but that
			 * is ok, everything else only cares whether it is zero or
			 * not.  We do this here, rather than when we encounter a
			 * SAMP decl, because (especially in binning pass shader)
			 * the samp instruction(s) could get eliminated if the
			 * result is not used.
			 */
			ctx->has_samp = true;
			regmask_set(&needs_sy, n->regs[0]);
		} else if (is_mem(n)) {
			regmask_set(&needs_sy, n->regs[0]);
		}

		/* both tex/sfu appear to not always immediately consume
		 * their src register(s):
		 */
		if (is_tex(n) || is_sfu(n) || is_mem(n)) {
			foreach_src(reg, n) {
				if (reg_gpr(reg))
					regmask_set(&needs_ss_war, reg);
			}
		}

		if (is_input(n))
			last_input = n;
	}

	if (last_input) {
		/* special hack.. if using ldlv to bypass interpolation,
		 * we need to insert a dummy bary.f on which we can set
		 * the (ei) flag:
		 */
		if (is_mem(last_input) && (last_input->opc == OPC_LDLV)) {
			struct ir3_instruction *baryf;

			/* (ss)bary.f (ei)r63.x, 0, r0.x */
			baryf = ir3_instr_create(block, 2, OPC_BARY_F);
			baryf->flags |= IR3_INSTR_SS;
			ir3_reg_create(baryf, regid(63, 0), 0);
			ir3_reg_create(baryf, 0, IR3_REG_IMMED)->iim_val = 0;
			ir3_reg_create(baryf, regid(0, 0), 0);

			/* insert the dummy bary.f after last_input: */
			list_delinit(&baryf->node);
			list_add(&baryf->node, &last_input->node);

			last_input = baryf;
		}
		last_input->regs[0]->flags |= IR3_REG_EI;
	}

	if (last_rel)
		last_rel->flags |= IR3_INSTR_UL;

	list_first_entry(&block->instr_list, struct ir3_instruction, node)
		->flags |= IR3_INSTR_SS | IR3_INSTR_SY;
}

/* NOTE: branch instructions are always the last instruction(s)
 * in the block.  We take advantage of this as we resolve the
 * branches, since "if (foo) break;" constructs turn into
 * something like:
 *
 *   block3 {
 *   	...
 *   	0029:021: mov.s32s32 r62.x, r1.y
 *   	0082:022: br !p0.x, target=block5
 *   	0083:023: br p0.x, target=block4
 *   	// succs: if _[0029:021: mov.s32s32] block4; else block5;
 *   }
 *   block4 {
 *   	0084:024: jump, target=block6
 *   	// succs: block6;
 *   }
 *   block5 {
 *   	0085:025: jump, target=block7
 *   	// succs: block7;
 *   }
 *
 * ie. only instruction in block4/block5 is a jump, so when
 * resolving branches we can easily detect this by checking
 * that the first instruction in the target block is itself
 * a jump, and setup the br directly to the jump's target
 * (and strip back out the now unreached jump)
 *
 * TODO sometimes we end up with things like:
 *
 *    br !p0.x, #2
 *    br p0.x, #12
 *    add.u r0.y, r0.y, 1
 *
 * If we swapped the order of the branches, we could drop one.
 */
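
/* e.g. (illustrative) after resolving the example above, the br's in
 * block3 point at block6/block7 directly, and the bare jump's left in
 * block4/block5 become unreached and get stripped back out.
 */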

static struct ir3_block *
resolve_dest_block(struct ir3_block *block)
{
	/* special case for last block: */
	if (!block->successors[0])
		return block;

	/* NOTE that we may or may not have inserted the jump
	 * in the target block yet, so conditions to resolve
	 * the dest to the dest block's successor are:
	 *
	 *   (1) successor[1] == NULL &&
	 *   (2) (block-is-empty || only-instr-is-jump)
	 */
	if (block->successors[1] == NULL) {
		if (list_empty(&block->instr_list)) {
			return block->successors[0];
		} else if (list_length(&block->instr_list) == 1) {
			struct ir3_instruction *instr = list_first_entry(
					&block->instr_list, struct ir3_instruction, node);
			if (is_flow(instr) && (instr->opc == OPC_JUMP))
				return block->successors[0];
		}
	}
	return block;
}

static bool
resolve_jump(struct ir3_instruction *instr)
{
	struct ir3_block *tblock =
		resolve_dest_block(instr->cat0.target);
	struct ir3_instruction *target;

	if (tblock != instr->cat0.target) {
		list_delinit(&instr->cat0.target->node);
		instr->cat0.target = tblock;
		return true;
	}

	target = list_first_entry(&tblock->instr_list,
			struct ir3_instruction, node);

	if ((!target) || (target->ip == (instr->ip + 1))) {
		list_delinit(&instr->node);
		return true;
	}

	instr->cat0.immed =
		(int)target->ip - (int)instr->ip;

	return false;
}

/* resolve jumps, removing jumps/branches to the immediately following
 * instruction, which we end up with from earlier stages.  Since
 * removing an instruction can invalidate earlier instructions'
 * branch offsets, we need to do this iteratively until no more
 * branches are removed.
 */
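/* e.g. (illustrative): removing a jump whose target is the immediately
 * following instruction shifts every later instruction's ip, which can
 * leave some other branch pointing at its own next instruction, so the
 * caller re-counts ip's and runs another pass.
 */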
static bool
resolve_jumps(struct ir3 *ir)
{
	list_for_each_entry (struct ir3_block, block, &ir->block_list, node)
		list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node)
			if (is_flow(instr) && instr->cat0.target)
				if (resolve_jump(instr))
					return true;

	return false;
}

/* we want to mark points where divergent flow control re-converges
 * with (jp) flags.  For now, since we don't do any optimization for
 * things that start out as a 'do {} while()', re-convergence points
 * will always be a branch or jump target.  Note that this is overly
 * conservative, since unconditional jump targets are not necessarily
 * convergence points; we are just assuming that the other path to
 * reach the jump target was divergent.  If we were clever enough to
 * optimize the jump at the end of a loop back to a conditional branch
 * into a single conditional branch, ie. like:
 *
 *    add.f r1.w, r0.x, (neg)(r)c2.x   <= loop start
 *    mul.f r1.z, r1.z, r0.x
 *    mul.f r1.y, r1.y, r0.x
 *    mul.f r0.z, r1.x, r0.x
 *    mul.f r0.w, r0.y, r0.x
 *    cmps.f.ge r0.x, (r)c2.y, (r)r1.w
 *    add.s r0.x, (r)r0.x, (r)-1
 *    sel.f32 r0.x, (r)c3.y, (r)r0.x, c3.x
 *    cmps.f.eq p0.x, r0.x, c3.y
 *    mov.f32f32 r0.x, r1.w
 *    mov.f32f32 r0.y, r0.w
 *    mov.f32f32 r1.x, r0.z
 *    (rpt2)nop
 *    br !p0.x, #-13
 *    (jp)mul.f r0.x, c263.y, r1.y
 *
 * Then we'd have to be more clever, as the convergence point is no
 * longer a branch or jump target.
 */
static void
mark_convergence_points(struct ir3 *ir)
{
	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
			if (is_flow(instr) && instr->cat0.target) {
				struct ir3_instruction *target =
					list_first_entry(&instr->cat0.target->instr_list,
							struct ir3_instruction, node);
				target->flags |= IR3_INSTR_JP;
			}
		}
	}
}

void
ir3_legalize(struct ir3 *ir, bool *has_samp, int *max_bary)
{
	struct ir3_legalize_ctx ctx = {
			.max_bary = -1,
	};

	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		legalize_block(&ctx, block);
	}

	*has_samp = ctx.has_samp;
	*max_bary = ctx.max_bary;
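
	/* resolve_jumps() returns true whenever it changes anything, in
	 * which case the ip's must be re-counted before resolving again:
	 */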
	do {
		ir3_count_instructions(ir);
	} while(resolve_jumps(ir));

	mark_convergence_points(ir);
}