freedreno/ir3: all mem instructions have WAR hazard
[mesa.git] / src / gallium / drivers / freedreno / ir3 / ir3_legalize.c
/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/u_math.h"

#include "freedreno_util.h"

#include "ir3.h"

/*
 * Legalize:
 *
 * We currently require that scheduling ensures that we have enough nop's
 * in all the right places. The legalize step mostly handles fixing up
 * instruction flags ((ss)/(sy)/(ei)), and collapses sequences of nop's
 * into fewer nop's w/ rpt flag.
 */
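
/* A quick reference for the flags dealt with below (based on my
 * reading of the ir3 conventions, so treat as notes rather than
 * authoritative docs):
 *
 *   (ss) - sync w/ results arriving on the "ss" path, ie. SFU
 *          results and local/shared memory loads
 *   (sy) - sync w/ results arriving on the "sy" path, ie. texture
 *          fetch and global memory loads
 *   (ei) - "end input", marks the last instruction consuming varying
 *          inputs, so the hw can release the varying storage
 *
 * So, for example, a sequence like:
 *
 *    rcp r0.x, r0.y
 *    (ss)mul.f r0.z, r0.x, c0.x
 *
 * needs the (ss) on the mul so it does not consume the rcp result
 * before it is ready.
 */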

struct ir3_legalize_ctx {
	bool has_samp;
	bool has_ssbo;
	int max_bary;
};

/* We want to evaluate each block from the position of any other
 * predecessor block, so that the flags set are the union of all
 * possible program paths. The stopping condition is when the pair
 * of <pred-block, current-block> has already been visited.
 *
 * XXX is that completely true? We could have different needs_xyz
 * flags set depending on path leading to pred-block.. we could
 * do *most* of this based on chasing src instructions ptrs (and
 * following all phi srcs).. except the write-after-read hazard.
 *
 * For now we just set ss/sy flag on first instruction on block,
 * and handle everything within the block as before.
 */

static void
legalize_block(struct ir3_legalize_ctx *ctx, struct ir3_block *block)
{
	struct ir3_instruction *last_input = NULL;
	struct ir3_instruction *last_rel = NULL;
	struct ir3_instruction *last_n = NULL;
	struct list_head instr_list;
	regmask_t needs_ss_war;       /* write after read */
	regmask_t needs_ss;
	regmask_t needs_sy;

	regmask_init(&needs_ss_war);
	regmask_init(&needs_ss);
	regmask_init(&needs_sy);

	/* remove all the instructions from the list, we'll be adding
	 * them back in as we go
	 */
	list_replace(&block->instr_list, &instr_list);
	list_inithead(&block->instr_list);

	list_for_each_entry_safe (struct ir3_instruction, n, &instr_list, node) {
		struct ir3_register *reg;
		unsigned i;

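		/* meta instructions don't map to any actual hw instruction,
		 * so simply drop them here (note that they are not re-added
		 * to block->instr_list):
		 */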
		if (is_meta(n))
			continue;

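		/* track the highest (immediate) inloc consumed by an input
		 * (bary.f) instruction; the caller uses this to know how
		 * many varyings are actually fetched:
		 */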
		if (is_input(n)) {
			struct ir3_register *inloc = n->regs[1];
			assert(inloc->flags & IR3_REG_IMMED);
			ctx->max_bary = MAX2(ctx->max_bary, inloc->iim_val);
		}

		if (last_n && is_barrier(last_n))
			n->flags |= IR3_INSTR_SS | IR3_INSTR_SY;

		/* NOTE: consider dst register too.. it could happen that
		 * texture sample instruction (for example) writes some
		 * components which are unused. A subsequent instruction
		 * that writes the same register can race w/ the sam instr
		 * resulting in undefined results:
		 */
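		/* (e.g., hypothetically, if a sam wrote (xyzw)r0 but only
		 * r0.x is actually consumed, a later write to r0.w still
		 * needs the sync flag, since the sam could land afterwards
		 * and clobber it.)
		 */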
		for (i = 0; i < n->regs_count; i++) {
			reg = n->regs[i];

			if (reg_gpr(reg)) {

				/* TODO: we probably only need (ss) for alu
				 * instr consuming sfu result.. need to make
				 * some tests for both this and (sy)..
				 */
				if (regmask_get(&needs_ss, reg)) {
					n->flags |= IR3_INSTR_SS;
					regmask_init(&needs_ss);
				}

				if (regmask_get(&needs_sy, reg)) {
					n->flags |= IR3_INSTR_SY;
					regmask_init(&needs_sy);
				}
			}

			/* TODO: is it valid to have address reg loaded from a
			 * relative src (ie. mova a0, c<a0.x+4>)? If so, the
			 * last_rel check below should be moved ahead of this:
			 */
			if (reg->flags & IR3_REG_RELATIV)
				last_rel = n;
		}

		if (n->regs_count > 0) {
			reg = n->regs[0];
			if (regmask_get(&needs_ss_war, reg)) {
				n->flags |= IR3_INSTR_SS;
				regmask_init(&needs_ss_war); /* ??? I assume? */
			}

			if (last_rel && (reg->num == regid(REG_A0, 0))) {
				last_rel->flags |= IR3_INSTR_UL;
				last_rel = NULL;
			}
		}

		/* cat5+ does not have an (ss) bit, if needed we need to
		 * insert a nop to carry the sync flag. Would be kinda
		 * clever if we were aware of this during scheduling, but
		 * this should be a pretty rare case:
		 */
		if ((n->flags & IR3_INSTR_SS) && (opc_cat(n->opc) >= 5)) {
			struct ir3_instruction *nop;
			nop = ir3_NOP(block);
			nop->flags |= IR3_INSTR_SS;
			n->flags &= ~IR3_INSTR_SS;
		}

		/* need to be able to set (ss) on first instruction: */
		if (list_empty(&block->instr_list) && (opc_cat(n->opc) >= 5))
			ir3_NOP(block);

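		/* collapse a run of nop's into a single nop w/ (rptN) flag
		 * (a repeat of 5, plus the nop itself, covers six nop's):
		 */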
		if (is_nop(n) && !list_empty(&block->instr_list)) {
			struct ir3_instruction *last = list_last_entry(
					&block->instr_list, struct ir3_instruction, node);
			if (is_nop(last) && (last->repeat < 5)) {
				last->repeat++;
				last->flags |= n->flags;
				continue;
			}
		}

		list_addtail(&n->node, &block->instr_list);

		if (is_sfu(n))
			regmask_set(&needs_ss, n->regs[0]);

		if (is_tex(n)) {
			/* we set has_samp once per sam instruction.. which is
			 * redundant, but harmless, since everything else only
			 * cares whether it is set or not. We do it here, rather
			 * than when we encounter a SAMP decl, because (especially
			 * in binning pass shader) the samp instruction(s) could
			 * get eliminated if the result is not used.
			 */
			ctx->has_samp = true;
			regmask_set(&needs_sy, n->regs[0]);
		} else if (n->opc == OPC_RESINFO) {
			regmask_set(&needs_ss, n->regs[0]);
			ir3_NOP(block)->flags |= IR3_INSTR_SS;
		} else if (is_load(n)) {
			/* seems like ldlv needs (ss) bit instead?? which is odd but
			 * makes a bunch of flat-varying tests start working on a4xx.
			 */
			if ((n->opc == OPC_LDLV) || (n->opc == OPC_LDL))
				regmask_set(&needs_ss, n->regs[0]);
			else
				regmask_set(&needs_sy, n->regs[0]);
		} else if (is_atomic(n->opc)) {
			if (n->flags & IR3_INSTR_G)
				regmask_set(&needs_sy, n->regs[0]);
			else
				regmask_set(&needs_ss, n->regs[0]);
		}

		if (is_ssbo(n->opc) || (is_atomic(n->opc) && (n->flags & IR3_INSTR_G)))
			ctx->has_ssbo = true;

		/* tex/sfu/mem instructions do not necessarily consume their
		 * src register(s) immediately, so a subsequent write to one
		 * of those srcs is a WAR hazard:
		 */
		if (is_tex(n) || is_sfu(n) || is_mem(n)) {
			foreach_src(reg, n) {
				if (reg_gpr(reg))
					regmask_set(&needs_ss_war, reg);
			}
		}
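
		/* (a sketch, not taken from a real trace: if something like
		 *
		 *    sam (f32)(xyzw)r0.x, r2.x, s#0, t#0
		 *    mov.f32f32 r2.x, c0.x
		 *
		 * came out of scheduling, the mov would need an (ss) so it
		 * does not overwrite r2.x while the sam is still reading it;
		 * that is what the needs_ss_war tracking above arranges.)
		 */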

		if (is_input(n))
			last_input = n;

		last_n = n;
	}

	if (last_input) {
		/* special hack.. if using ldlv to bypass interpolation,
		 * we need to insert a dummy bary.f on which we can set
		 * the (ei) flag:
		 */
		if (is_mem(last_input) && (last_input->opc == OPC_LDLV)) {
			struct ir3_instruction *baryf;

			/* (ss)bary.f (ei)r63.x, 0, r0.x */
			baryf = ir3_instr_create(block, OPC_BARY_F);
			baryf->flags |= IR3_INSTR_SS;
			ir3_reg_create(baryf, regid(63, 0), 0);
			ir3_reg_create(baryf, 0, IR3_REG_IMMED)->iim_val = 0;
			ir3_reg_create(baryf, regid(0, 0), 0);

			/* insert the dummy bary.f after last_input: */
			list_delinit(&baryf->node);
			list_add(&baryf->node, &last_input->node);

			last_input = baryf;
		}
		last_input->regs[0]->flags |= IR3_REG_EI;
	}

	if (last_rel)
		last_rel->flags |= IR3_INSTR_UL;

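	/* as noted above, we don't (yet) track flag state across blocks,
	 * so conservatively sync everything at the start of each block:
	 */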
	list_first_entry(&block->instr_list, struct ir3_instruction, node)
		->flags |= IR3_INSTR_SS | IR3_INSTR_SY;
}

/* NOTE: branch instructions are always the last instruction(s)
 * in the block. We take advantage of this as we resolve the
 * branches, since "if (foo) break;" constructs turn into
 * something like:
 *
 *   block3 {
 *   	...
 *   	0029:021: mov.s32s32 r62.x, r1.y
 *   	0082:022: br !p0.x, target=block5
 *   	0083:023: br p0.x, target=block4
 *   	// succs: if _[0029:021: mov.s32s32] block4; else block5;
 *   }
 *   block4 {
 *   	0084:024: jump, target=block6
 *   	// succs: block6;
 *   }
 *   block5 {
 *   	0085:025: jump, target=block7
 *   	// succs: block7;
 *   }
 *
 * ie. the only instruction in block4/block5 is a jump, so when
 * resolving branches we can easily detect this by checking that
 * the first instruction in the target block is itself a jump,
 * and set up the br directly to the jump's target (and strip
 * back out the now unreached jump)
 *
 * TODO sometimes we end up with things like:
 *
 *    br !p0.x, #2
 *    br p0.x, #12
 *    add.u r0.y, r0.y, 1
 *
 * If we swapped the order of the branches, we could drop one.
 */
static struct ir3_block *
resolve_dest_block(struct ir3_block *block)
{
	/* special case for last block: */
	if (!block->successors[0])
		return block;

	/* NOTE that we may or may not have inserted the jump
	 * in the target block yet, so conditions to resolve
	 * the dest to the dest block's successor are:
	 *
	 *   (1) successors[1] == NULL &&
	 *   (2) (block-is-empty || only-instr-is-jump)
	 */
	if (block->successors[1] == NULL) {
		if (list_empty(&block->instr_list)) {
			return block->successors[0];
		} else if (list_length(&block->instr_list) == 1) {
			struct ir3_instruction *instr = list_first_entry(
					&block->instr_list, struct ir3_instruction, node);
			if (instr->opc == OPC_JUMP)
				return block->successors[0];
		}
	}
	return block;
}

static bool
resolve_jump(struct ir3_instruction *instr)
{
	struct ir3_block *tblock =
			resolve_dest_block(instr->cat0.target);
	struct ir3_instruction *target;

	if (tblock != instr->cat0.target) {
		list_delinit(&instr->cat0.target->node);
		instr->cat0.target = tblock;
		return true;
	}

	target = list_first_entry(&tblock->instr_list,
			struct ir3_instruction, node);

	if ((!target) || (target->ip == (instr->ip + 1))) {
		list_delinit(&instr->node);
		return true;
	} else {
		instr->cat0.immed =
			(int)target->ip - (int)instr->ip;
	}
	return false;
}

/* Resolve jumps, removing jumps/branches to the immediately following
 * instruction, which we can end up with from earlier stages. Since
 * removing an instruction can invalidate earlier instructions' branch
 * offsets, we need to do this iteratively until no more branches are
 * removed.
 */
static bool
resolve_jumps(struct ir3 *ir)
{
	list_for_each_entry (struct ir3_block, block, &ir->block_list, node)
		list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node)
			if (is_flow(instr) && instr->cat0.target)
				if (resolve_jump(instr))
					return true;

	return false;
}

/* we want to mark points where divergent flow control re-converges
 * with (jp) flags. For now, since we don't do any optimization for
 * things that start out as a 'do {} while()', re-convergence points
 * will always be a branch or jump target. Note that this is overly
 * conservative; since unconditional jump targets are not convergence
 * points, we are just assuming that the other path to reach the jump
 * target was divergent. If we were clever enough to optimize the
 * jump at end of a loop back to a conditional branch into a single
 * conditional branch, ie. like:
 *
 *    add.f r1.w, r0.x, (neg)(r)c2.x   <= loop start
 *    mul.f r1.z, r1.z, r0.x
 *    mul.f r1.y, r1.y, r0.x
 *    mul.f r0.z, r1.x, r0.x
 *    mul.f r0.w, r0.y, r0.x
 *    cmps.f.ge r0.x, (r)c2.y, (r)r1.w
 *    add.s r0.x, (r)r0.x, (r)-1
 *    sel.f32 r0.x, (r)c3.y, (r)r0.x, c3.x
 *    cmps.f.eq p0.x, r0.x, c3.y
 *    mov.f32f32 r0.x, r1.w
 *    mov.f32f32 r0.y, r0.w
 *    mov.f32f32 r1.x, r0.z
 *    (rpt2)nop
 *    br !p0.x, #-13
 *    (jp)mul.f r0.x, c263.y, r1.y
 *
 * then we'd have to be more clever, as the convergence point is no
 * longer a branch or jump target.
 */
static void
mark_convergence_points(struct ir3 *ir)
{
	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
			if (is_flow(instr) && instr->cat0.target) {
				struct ir3_instruction *target =
					list_first_entry(&instr->cat0.target->instr_list,
							struct ir3_instruction, node);
				target->flags |= IR3_INSTR_JP;
			}
		}
	}
}

void
ir3_legalize(struct ir3 *ir, bool *has_samp, bool *has_ssbo, int *max_bary)
{
	struct ir3_legalize_ctx ctx = {
			.max_bary = -1,
	};

	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		legalize_block(&ctx, block);
	}

	*has_samp = ctx.has_samp;
	*has_ssbo = ctx.has_ssbo;
	*max_bary = ctx.max_bary;

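	/* resolve_jumps() returns true whenever it removes or retargets
	 * a jump, which shifts instruction ip's, so recount and retry
	 * until things settle:
	 */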
	do {
		ir3_count_instructions(ir);
	} while (resolve_jumps(ir));

	mark_convergence_points(ir);
}