freedreno/ir3: add barriers
[mesa.git] src/gallium/drivers/freedreno/ir3/ir3_legalize.c
/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/u_math.h"

#include "freedreno_util.h"

#include "ir3.h"

/*
 * Legalize:
 *
 * We currently require that scheduling ensures that we have enough nops
 * in all the right places.  The legalize step mostly handles fixing up
 * instruction flags ((ss)/(sy)/(ei)), and collapses sequences of nops
 * into fewer nops w/ the rpt flag.
 */
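/* For illustration (made-up registers, not from a real shader), the sync
 * flags look like this in the disassembly:
 *
 *    sam (f32)(xyzw)r0.x, r0.z, s#0, t#0
 *    ...
 *    (sy)mul.f r1.x, r0.x, c0.x     <- (sy) waits on the sam result
 *
 *    rcp r0.y, r0.y
 *    (ss)add.f r1.y, r0.y, c0.y     <- (ss) waits on the sfu result
 */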
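/* per-shader state collected while legalizing, and reported back out to
 * the compiler (see ir3_legalize() at the bottom): whether any tex-sample
 * or ssbo instructions were seen, and the highest varying-fetch (bary.f)
 * input location used:
 */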
struct ir3_legalize_ctx {
    bool has_samp;
    bool has_ssbo;
    int max_bary;
};

/* We want to evaluate each block from the position of each predecessor
 * block, so that the flags set are the union over all possible program
 * paths.  For a stopping condition, we want to stop when the pair of
 * <pred-block, current-block> has been visited already.
 *
 * XXX is that completely true?  We could have different needs_xyz
 * flags set depending on the path leading to pred-block.. we could
 * do *most* of this based on chasing src instruction ptrs (and
 * following all phi srcs).. except for the write-after-read hazard.
 *
 * For now we just set the ss/sy flags on the first instruction of the
 * block, and handle everything within the block as before.
 */

static void
legalize_block(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
{
    struct ir3_instruction *last_input = NULL;
    struct ir3_instruction *last_rel = NULL;
    struct ir3_instruction *last_n = NULL;
    struct list_head instr_list;
    regmask_t needs_ss_war;        /* write after read */
    regmask_t needs_ss;
    regmask_t needs_sy;

    regmask_init(&needs_ss_war);
    regmask_init(&needs_ss);
    regmask_init(&needs_sy);

    /* remove all the instructions from the list, we'll be adding
     * them back in as we go
     */
    list_replace(&block->instr_list, &instr_list);
    list_inithead(&block->instr_list);

    list_for_each_entry_safe (struct ir3_instruction, n, &instr_list, node) {
        struct ir3_register *reg;
        unsigned i;

        if (is_meta(n))
            continue;

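        /* track the highest varying-fetch (bary.f) input location
         * (inloc) seen; this gets reported back out, presumably so
         * the driver knows how many varyings the shader actually
         * consumes:
         */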
        if (is_input(n)) {
            struct ir3_register *inloc = n->regs[1];
            assert(inloc->flags & IR3_REG_IMMED);
            ctx->max_bary = MAX2(ctx->max_bary, inloc->iim_val);
        }

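        /* the instruction following a barrier must wait on everything
         * still in flight, so set both (ss) and (sy):
         */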
        if (last_n && is_barrier(last_n))
            n->flags |= IR3_INSTR_SS | IR3_INSTR_SY;

        /* NOTE: consider the dst register too.. it could happen that
         * a texture sample instruction (for example) writes some
         * components which are unused.  A subsequent instruction that
         * writes the same register can race w/ the sam instruction,
         * resulting in undefined results:
         */
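        /* eg. (made-up registers):
         *
         *    sam (f32)(xyzw)r0.x, ...
         *    ...                          <- r0.w never read
         *    (sy)mov.f32f32 r0.w, c0.x    <- without (sy), this write
         *                                    races w/ the sam writing
         *                                    r0.w
         */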
        for (i = 0; i < n->regs_count; i++) {
            reg = n->regs[i];

            if (reg_gpr(reg)) {

                /* TODO: we probably only need (ss) for alu
                 * instr consuming sfu result.. need to make
                 * some tests for both this and (sy)..
                 */
                if (regmask_get(&needs_ss, reg)) {
                    n->flags |= IR3_INSTR_SS;
                    regmask_init(&needs_ss);
                }

                if (regmask_get(&needs_sy, reg)) {
                    n->flags |= IR3_INSTR_SY;
                    regmask_init(&needs_sy);
                }
            }

            /* TODO: is it valid to have address reg loaded from a
             * relative src (ie. mova a0, c<a0.x+4>)?  If so, the
             * last_rel check below should be moved ahead of this:
             */
            if (reg->flags & IR3_REG_RELATIV)
                last_rel = n;
        }

        if (n->regs_count > 0) {
            reg = n->regs[0];
            if (regmask_get(&needs_ss_war, reg)) {
                n->flags |= IR3_INSTR_SS;
                regmask_init(&needs_ss_war); // ??? I assume?
            }

            if (last_rel && (reg->num == regid(REG_A0, 0))) {
                last_rel->flags |= IR3_INSTR_UL;
                last_rel = NULL;
            }
        }

        /* cat5+ does not have an (ss) bit, if needed we need to
         * insert a nop to carry the sync flag.  Would be kinda
         * clever if we were aware of this during scheduling, but
         * this should be a pretty rare case:
         */
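        /* ie. an "(ss)sam ..." coming out of scheduling becomes:
         *
         *    (ss)nop
         *    sam ...
         */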
        if ((n->flags & IR3_INSTR_SS) && (opc_cat(n->opc) >= 5)) {
            struct ir3_instruction *nop;
            nop = ir3_NOP(block);
            nop->flags |= IR3_INSTR_SS;
            n->flags &= ~IR3_INSTR_SS;
        }

        /* need to be able to set (ss) on first instruction: */
        if (list_empty(&block->instr_list) && (opc_cat(n->opc) >= 5))
            ir3_NOP(block);

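        /* collapse runs of nops into a single nop w/ the rpt flag, eg.
         * "nop; nop; nop" becomes "(rpt2)nop" (the repeat count maxes
         * out at 5):
         */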
        if (is_nop(n) && !list_empty(&block->instr_list)) {
            struct ir3_instruction *last = list_last_entry(&block->instr_list,
                    struct ir3_instruction, node);
            if (is_nop(last) && (last->repeat < 5)) {
                last->repeat++;
                last->flags |= n->flags;
                continue;
            }
        }

        list_addtail(&n->node, &block->instr_list);

        if (is_sfu(n))
            regmask_set(&needs_ss, n->regs[0]);

        if (is_tex(n)) {
            /* we only care whether there are any samp instructions at
             * all, not how many.  We figure this out here, rather than
             * when we encounter a SAMP decl, because (especially in the
             * binning pass shader) the samp instruction(s) could get
             * eliminated if the result is not used.
             */
            ctx->has_samp = true;
            regmask_set(&needs_sy, n->regs[0]);
        } else if (is_load(n)) {
            /* seems like ldlv needs the (ss) bit instead?? which is odd,
             * but it makes a bunch of flat-varying tests start working
             * on a4xx.
             */
            if (n->opc == OPC_LDLV)
                regmask_set(&needs_ss, n->regs[0]);
            else
                regmask_set(&needs_sy, n->regs[0]);
        }

        if ((n->opc == OPC_LDGB) || (n->opc == OPC_STGB) || is_atomic(n->opc))
            ctx->has_ssbo = true;

        /* tex/sfu (and mem loads) appear to not always immediately
         * consume their src register(s):
         */
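        /* eg. (made-up registers):
         *
         *    sam (f32)(xyzw)r1.x, r0.z, s#0, t#0  <- still reading r0.z
         *    (ss)mov.f32f32 r0.z, c1.x            <- (ss) so the write
         *                                            doesn't land early
         */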
        if (is_tex(n) || is_sfu(n) || is_load(n)) {
            foreach_src(reg, n) {
                if (reg_gpr(reg))
                    regmask_set(&needs_ss_war, reg);
            }
        }

        if (is_input(n))
            last_input = n;

        last_n = n;
    }

    if (last_input) {
        /* special hack.. if using ldlv to bypass interpolation,
         * we need to insert a dummy bary.f on which we can set
         * the (ei) flag:
         */
        if (is_mem(last_input) && (last_input->opc == OPC_LDLV)) {
            struct ir3_instruction *baryf;

            /* (ss)bary.f (ei)r63.x, 0, r0.x */
            baryf = ir3_instr_create(block, OPC_BARY_F);
            baryf->flags |= IR3_INSTR_SS;
            ir3_reg_create(baryf, regid(63, 0), 0);
            ir3_reg_create(baryf, 0, IR3_REG_IMMED)->iim_val = 0;
            ir3_reg_create(baryf, regid(0, 0), 0);

            /* insert the dummy bary.f after last_input: */
            list_delinit(&baryf->node);
            list_add(&baryf->node, &last_input->node);

            last_input = baryf;
        }
        last_input->regs[0]->flags |= IR3_REG_EI;
    }

    if (last_rel)
        last_rel->flags |= IR3_INSTR_UL;

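    /* per the note at the top, be conservative about cross-block
     * hazards for now and just sync at the start of every block:
     */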
    list_first_entry(&block->instr_list, struct ir3_instruction, node)
            ->flags |= IR3_INSTR_SS | IR3_INSTR_SY;
}

/* NOTE: branch instructions are always the last instruction(s)
 * in the block.  We take advantage of this as we resolve the
 * branches, since "if (foo) break;" constructs turn into
 * something like:
 *
 *    block3 {
 *        ...
 *        0029:021: mov.s32s32 r62.x, r1.y
 *        0082:022: br !p0.x, target=block5
 *        0083:023: br p0.x, target=block4
 *        // succs: if _[0029:021: mov.s32s32] block4; else block5;
 *    }
 *    block4 {
 *        0084:024: jump, target=block6
 *        // succs: block6;
 *    }
 *    block5 {
 *        0085:025: jump, target=block7
 *        // succs: block7;
 *    }
 *
 * ie. the only instruction in block4/block5 is a jump, so when
 * resolving branches we can easily detect this by checking that
 * the first instruction in the target block is itself a jump, and
 * set up the br directly to the jump's target (and strip back out
 * the now unreachable jump).
 *
 * TODO sometimes we end up with things like:
 *
 *    br !p0.x, #2
 *    br p0.x, #12
 *    add.u r0.y, r0.y, 1
 *
 * If we swapped the order of the branches, we could drop one.
 */
static struct ir3_block *
resolve_dest_block(struct ir3_block *block)
{
    /* special case for the last block: */
    if (!block->successors[0])
        return block;

    /* NOTE that we may or may not have inserted the jump
     * in the target block yet, so the conditions to resolve
     * the dest to the dest block's successor are:
     *
     *   (1) successor[1] == NULL &&
     *   (2) (block-is-empty || only-instr-is-jump)
     */
    if (block->successors[1] == NULL) {
        if (list_empty(&block->instr_list)) {
            return block->successors[0];
        } else if (list_length(&block->instr_list) == 1) {
            struct ir3_instruction *instr = list_first_entry(
                    &block->instr_list, struct ir3_instruction, node);
            if (instr->opc == OPC_JUMP)
                return block->successors[0];
        }
    }
    return block;
}

static bool
resolve_jump(struct ir3_instruction *instr)
{
    struct ir3_block *tblock =
            resolve_dest_block(instr->cat0.target);
    struct ir3_instruction *target;

    if (tblock != instr->cat0.target) {
        list_delinit(&instr->cat0.target->node);
        instr->cat0.target = tblock;
        return true;
    }

    target = list_first_entry(&tblock->instr_list,
            struct ir3_instruction, node);

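    /* if the branch target turns out to be the immediately following
     * instruction, the branch is a no-op and can simply be removed.
     * Otherwise (re)compute the branch offset, which is expressed in
     * units of instructions:
     */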
    if ((!target) || (target->ip == (instr->ip + 1))) {
        list_delinit(&instr->node);
        return true;
    } else {
        instr->cat0.immed =
                (int)target->ip - (int)instr->ip;
    }
    return false;
}

/* Resolve jumps, removing jumps/branches to the immediately following
 * instruction, which we can end up with from earlier stages.  Since
 * removing an instruction can invalidate earlier instructions' branch
 * offsets, we need to do this iteratively until no more branches are
 * removed.
 */
static bool
resolve_jumps(struct ir3 *ir)
{
    list_for_each_entry (struct ir3_block, block, &ir->block_list, node)
        list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node)
            if (is_flow(instr) && instr->cat0.target)
                if (resolve_jump(instr))
                    return true;

    return false;
}

/* we want to mark points where divergent flow control re-converges
 * with (jp) flags.  For now, since we don't do any optimization for
 * things that start out as a 'do {} while()', re-convergence points
 * will always be a branch or jump target.  Note that this is overly
 * conservative: unconditional jump targets are not necessarily
 * convergence points, we are just assuming that the other path to
 * reach the jump target was divergent.  If we were clever enough to
 * optimize a jump at the end of a loop whose target is a conditional
 * branch into a single conditional branch, ie. like:
 *
 *    add.f r1.w, r0.x, (neg)(r)c2.x   <= loop start
 *    mul.f r1.z, r1.z, r0.x
 *    mul.f r1.y, r1.y, r0.x
 *    mul.f r0.z, r1.x, r0.x
 *    mul.f r0.w, r0.y, r0.x
 *    cmps.f.ge r0.x, (r)c2.y, (r)r1.w
 *    add.s r0.x, (r)r0.x, (r)-1
 *    sel.f32 r0.x, (r)c3.y, (r)r0.x, c3.x
 *    cmps.f.eq p0.x, r0.x, c3.y
 *    mov.f32f32 r0.x, r1.w
 *    mov.f32f32 r0.y, r0.w
 *    mov.f32f32 r1.x, r0.z
 *    (rpt2)nop
 *    br !p0.x, #-13
 *    (jp)mul.f r0.x, c263.y, r1.y
 *
 * then we'd have to be more clever, as the convergence point would no
 * longer be a branch or jump target.
 */
static void
mark_convergence_points(struct ir3 *ir)
{
    list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
        list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
            if (is_flow(instr) && instr->cat0.target) {
                struct ir3_instruction *target =
                        list_first_entry(&instr->cat0.target->instr_list,
                                struct ir3_instruction, node);
                target->flags |= IR3_INSTR_JP;
            }
        }
    }
}

void
ir3_legalize(struct ir3 *ir, bool *has_samp, bool *has_ssbo, int *max_bary)
{
    struct ir3_legalize_ctx ctx = {
            .max_bary = -1,
    };

    list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
        legalize_block(&ctx, block);
    }

    *has_samp = ctx.has_samp;
    *has_ssbo = ctx.has_ssbo;
    *max_bary = ctx.max_bary;

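    /* removing one branch can change instruction ips and turn other
     * branches into no-ops, so renumber and re-resolve until a pass
     * removes nothing:
     */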
    do {
        ir3_count_instructions(ir);
    } while (resolve_jumps(ir));

    mark_convergence_points(ir);
}