freedreno/ir3: don't DCE ij_pix if used for pre-fs-texture-fetch
[mesa.git] / src / freedreno / ir3 / ir3_depth.c
1 /*
2 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #include "util/u_math.h"
28
29 #include "ir3.h"
30 #include "ir3_shader.h"
31
32 /*
33 * Instruction Depth:
34 *
 * Calculates weighted instruction depth, i.e. the sum of # of needed
 * instructions plus delay slots back to original input (i.e. INPUT or
 * CONST).  That is to say, an instruction's depth is:
38 *
39 * depth(instr) {
40 * d = 0;
41 * // for each src register:
42 * foreach (src in instr->regs[1..n])
43 * d = max(d, delayslots(src->instr, n) + depth(src->instr));
44 * return d + 1;
45 * }
46 *
47 * After an instruction's depth is calculated, it is inserted into the
48 * blocks depth sorted list, which is used by the scheduling pass.
49 */
50
51 /* generally don't count false dependencies, since this can just be
52 * something like a barrier, or SSBO store. The exception is array
53 * dependencies if the assigner is an array write and the consumer
54 * reads the same array.
55 */
56 static bool
57 ignore_dep(struct ir3_instruction *assigner,
58 struct ir3_instruction *consumer, unsigned n)
59 {
60 if (!__is_false_dep(consumer, n))
61 return false;
62
63 if (assigner->barrier_class & IR3_BARRIER_ARRAY_W) {
64 struct ir3_register *dst = assigner->regs[0];
65 struct ir3_register *src;
66
67 debug_assert(dst->flags & IR3_REG_ARRAY);
68
69 foreach_src(src, consumer) {
70 if ((src->flags & IR3_REG_ARRAY) &&
71 (dst->array.id == src->array.id)) {
72 return false;
73 }
74 }
75 }
76
77 return true;
78 }
79
/* calculate required # of delay slots between the instruction that
 * assigns a value and the one that consumes it.
 *
 * 'n' is the consumer's register slot that the value feeds; it is used
 * both for false-dep detection and for the cat3 3rd-src special case
 * below.
 */
int ir3_delayslots(struct ir3_instruction *assigner,
		struct ir3_instruction *consumer, unsigned n)
{
	/* ignorable false deps (see ignore_dep()) need no delay: */
	if (ignore_dep(assigner, consumer, n))
		return 0;

	/* worst case is cat1-3 (alu) -> cat4/5 needing 6 cycles, normal
	 * alu -> alu needs 3 cycles, cat4 -> alu and texture fetch
	 * handled with sync bits
	 */

	/* meta instructions don't map to real hw instructions, so no delay: */
	if (is_meta(assigner) || is_meta(consumer))
		return 0;

	if (writes_addr(assigner))
		return 6;

	/* handled via sync flags: */
	if (is_sfu(assigner) || is_tex(assigner) || is_mem(assigner))
		return 0;

	/* assigner must be alu: */
	if (is_flow(consumer) || is_sfu(consumer) || is_tex(consumer) ||
			is_mem(consumer)) {
		return 6;
	} else if ((is_mad(consumer->opc) || is_madsh(consumer->opc)) &&
			(n == 3)) {
		/* special case, 3rd src to cat3 not required on first cycle */
		return 1;
	} else {
		return 3;
	}
}
116
117 void
118 ir3_insert_by_depth(struct ir3_instruction *instr, struct list_head *list)
119 {
120 /* remove from existing spot in list: */
121 list_delinit(&instr->node);
122
123 /* find where to re-insert instruction: */
124 list_for_each_entry (struct ir3_instruction, pos, list, node) {
125 if (pos->depth > instr->depth) {
126 list_add(&instr->node, &pos->node);
127 return;
128 }
129 }
130 /* if we get here, we didn't find an insertion spot: */
131 list_addtail(&instr->node, list);
132 }
133
/* Recursively compute 'instr's weighted depth (see file header comment)
 * from its ssa sources, clear its UNUSED flag, and re-insert it into its
 * block's depth-sorted list.
 *
 * 'boost' is added to every source's delay contribution; the only
 * non-zero user is the if-condition case in
 * compute_depth_and_remove_unused() below.
 * 'falsedep' is true when this instruction was reached only through a
 * false (ordering-only) dependency, in which case it is not marked used.
 */
static void
ir3_instr_depth(struct ir3_instruction *instr, unsigned boost, bool falsedep)
{
	struct ir3_instruction *src;

	/* don't mark falsedep's as used, but otherwise process them normally: */
	if (!falsedep)
		instr->flags &= ~IR3_INSTR_UNUSED;

	/* visit each instruction at most once: */
	if (ir3_instr_check_mark(instr))
		return;

	instr->depth = 0;

	foreach_ssa_src_n(src, i, instr) {
		unsigned sd;

		/* visit child to compute its depth: */
		ir3_instr_depth(src, boost, __is_false_dep(instr, i));

		/* for array writes, no need to delay on previous write:
		 * NOTE(review): this skips src index 0 for *every* instruction,
		 * not just array writes — presumably intentional, but confirm
		 * against foreach_ssa_src_n's numbering.
		 */
		if (i == 0)
			continue;

		sd = ir3_delayslots(src, instr, i) + src->depth;
		sd += boost;

		instr->depth = MAX2(instr->depth, sd);
	}

	/* meta instructions don't become real hw instructions, so they
	 * contribute no depth of their own:
	 */
	if (!is_meta(instr))
		instr->depth++;

	ir3_insert_by_depth(instr, &instr->block->instr_list);
}
169
/* Delete the instructions in 'block' that are still flagged
 * IR3_INSTR_UNUSED after the depth/marking pass.  Returns true if
 * anything was removed, so the caller can iterate to a fixed point.
 */
static bool
remove_unused_by_block(struct ir3_block *block)
{
	bool progress = false;
	list_for_each_entry_safe (struct ir3_instruction, instr, &block->instr_list, node) {
		/* shader-end and shader-chaining instructions are implicitly
		 * live even though nothing consumes a result from them:
		 */
		if (instr->opc == OPC_END || instr->opc == OPC_CHSH || instr->opc == OPC_CHMASK)
			continue;
		if (instr->flags & IR3_INSTR_UNUSED) {
			/* an unused fan-out (OPC_META_FO) means one component of
			 * its src's writemask is no longer needed:
			 */
			if (instr->opc == OPC_META_FO) {
				struct ir3_instruction *src = ssa(instr->regs[1]);
				/* leave inputs alone.. we can't optimize out components of
				 * an input, since the hw is still going to be writing all
				 * of the components, and we could end up in a situation
				 * where multiple inputs overlap.
				 */
				if ((src->opc != OPC_META_INPUT) &&
						(src->regs[0]->wrmask > 1)) {
					src->regs[0]->wrmask &= ~(1 << instr->fo.off);

					/* prune no-longer needed right-neighbors.  We could
					 * probably do the same for left-neighbors (ie. tex
					 * fetch that only need .yw components), but that
					 * makes RA a bit more confusing than it already is
					 */
					struct ir3_instruction *n = instr;
					while (n && n->cp.right)
						n = n->cp.right;
					while (n->flags & IR3_INSTR_UNUSED) {
						n = n->cp.left;
						if (!n)
							break;
						n->cp.right = NULL;
					}
				}
			}
			list_delinit(&instr->node);
			progress = true;
		}
	}
	return progress;
}
211
/* One combined depth-computation + dead-code-elimination pass:
 *
 *  1) flag every instruction IR3_INSTR_UNUSED (except the barycentric
 *     i/j input when pre-fs texture fetch is in use — the prefetch
 *     needs it even if the shader body doesn't),
 *  2) walk the dependency graph from outputs, "keeps" and branch
 *     conditions, which computes depths and clears UNUSED on
 *     everything reachable,
 *  3) delete whatever is still flagged UNUSED and null out stale
 *     indirect/input pointers.
 *
 * Returns true if any instruction was removed.
 */
static bool
compute_depth_and_remove_unused(struct ir3 *ir, struct ir3_shader_variant *so)
{
	unsigned i;
	bool progress = false;

	ir3_clear_mark(ir);

	/* initially mark everything as unused, we'll clear the flag as we
	 * visit the instructions:
	 */
	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
			/* special case, if pre-fs texture fetch used, we cannot
			 * eliminate the barycentric i/j input
			 */
			if (so->num_sampler_prefetch &&
					(instr->opc == OPC_META_INPUT) &&
					(instr->input.sysval == SYSTEM_VALUE_BARYCENTRIC_PIXEL))
				continue;
			instr->flags |= IR3_INSTR_UNUSED;
		}
	}

	/* anything reachable from a shader output is used: */
	for (i = 0; i < ir->noutputs; i++)
		if (ir->outputs[i])
			ir3_instr_depth(ir->outputs[i], 0, false);

	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		/* "keeps" are instructions with side effects that must stay: */
		for (i = 0; i < block->keeps_count; i++)
			ir3_instr_depth(block->keeps[i], 0, false);

		/* We also need to account for if-condition: */
		if (block->condition)
			ir3_instr_depth(block->condition, 6, false);
	}

	/* remove instructions still marked un-used: */
	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		progress |= remove_unused_by_block(block);
	}

	/* note that we can end up with unused indirects, but we should
	 * not end up with unused predicates.
	 */
	for (i = 0; i < ir->indirects_count; i++) {
		struct ir3_instruction *instr = ir->indirects[i];
		if (instr && (instr->flags & IR3_INSTR_UNUSED))
			ir->indirects[i] = NULL;
	}

	/* cleanup unused inputs: */
	for (i = 0; i < ir->ninputs; i++) {
		struct ir3_instruction *in = ir->inputs[i];
		if (in && (in->flags & IR3_INSTR_UNUSED))
			ir->inputs[i] = NULL;
	}

	return progress;
}
272
/* Entry point: compute instruction depths and strip dead code,
 * repeating until a fixed point is reached (removing one instruction
 * can make others become unused).
 */
void
ir3_depth(struct ir3 *ir, struct ir3_shader_variant *so)
{
	while (compute_depth_and_remove_unused(ir, so))
		;
}