/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/u_math.h"

#include "ir3.h"
/*
 * Instruction Depth:
 *
 * Calculates weighted instruction depth, i.e. the sum of the # of needed
 * instructions plus delay slots back to the original input (i.e. INPUT or
 * CONST).  That is to say, an instruction's depth is:
 *
 *   depth(instr) {
 *     d = 0;
 *     // for each src register:
 *     foreach (src in instr->regs[1..n])
 *       d = max(d, delayslots(src->instr, n) + depth(src->instr));
 *     return d + 1;
 *   }
 *
 * After an instruction's depth is calculated, it is inserted into the
 * block's depth-sorted list, which is used by the scheduling pass.
 */
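
/*
 * A rough, hypothetical worked example of the recursion above, using the
 * delay values from ir3_delayslots() below and assuming the input is a
 * meta instruction (so it contributes neither delay nor depth of its own):
 *
 *   (meta) INPUT -> add.f -> mul.f
 *
 *   depth(INPUT) = 0
 *   depth(add.f) = max(0 + 0) + 1 = 1    // no delay behind a meta src
 *   depth(mul.f) = max(3 + 1) + 1 = 5    // 3 delay slots, alu -> alu
 *
 * Instructions sitting behind long or delay-heavy dependency chains thus
 * end up with larger depth values.
 */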

/* Generally don't count false dependencies, since these can just be
 * something like a barrier or an SSBO store.  The exception is array
 * dependencies, where the assigner is an array write and the consumer
 * reads the same array.
 */
static bool
ignore_dep(struct ir3_instruction *assigner,
        struct ir3_instruction *consumer, unsigned n)
{
    if (!__is_false_dep(consumer, n))
        return false;

    if (assigner->barrier_class & IR3_BARRIER_ARRAY_W) {
        struct ir3_register *dst = assigner->regs[0];
        struct ir3_register *src;

        debug_assert(dst->flags & IR3_REG_ARRAY);

        foreach_src(src, consumer) {
            if ((src->flags & IR3_REG_ARRAY) &&
                    (dst->array.id == src->array.id)) {
                return false;
            }
        }
    }

    return true;
}
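
/* A hypothetical sketch of the exception ignore_dep() carves out, in
 * pseudocode:
 *
 *   arr[i] = foo;        <-- assigner: array write (IR3_BARRIER_ARRAY_W)
 *   bar    = arr[j];     <-- consumer: reads the same array id
 *
 * Since the consumer may really be reading the value just written, the
 * false dependency between the two still has to be honoured for delay
 * purposes.  By contrast, a false dep against, say, a barrier or an
 * unrelated SSBO store contributes no delay slots.
 */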

/* Calculate the required # of delay slots between the instruction that
 * assigns a value and the one that consumes it.
 */
int ir3_delayslots(struct ir3_instruction *assigner,
        struct ir3_instruction *consumer, unsigned n)
{
    if (ignore_dep(assigner, consumer, n))
        return 0;

    /* worst case is cat1-3 (alu) -> cat4/5 needing 6 cycles; normal
     * alu -> alu needs 3 cycles; cat4 -> alu and texture fetch are
     * handled with sync bits
     */

    if (is_meta(assigner))
        return 0;

    if (writes_addr(assigner))
        return 6;

    /* handled via sync flags: */
    if (is_sfu(assigner) || is_tex(assigner) || is_mem(assigner))
        return 0;

    /* assigner must be alu: */
    if (is_flow(consumer) || is_sfu(consumer) || is_tex(consumer) ||
            is_mem(consumer)) {
        return 6;
    } else if ((is_mad(consumer->opc) || is_madsh(consumer->opc)) &&
            (n == 3)) {
        /* special case, 3rd src to cat3 not required on first cycle */
        return 1;
    } else {
        return 3;
    }
}
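
/* As a quick illustrative summary of the rules above (values taken from
 * the code, the opcode pairings are hypothetical examples):
 *
 *   add.f -> mad.f32 (3rd src)  -> 1 delay slot
 *   add.f -> mul.f   (any src)  -> 3 delay slots
 *   add.f -> sam                -> 6 delay slots
 *   mova  -> anything           -> 6 delay slots (writes_addr)
 *   sam   -> anything           -> 0 (covered by (sy)/(ss) sync bits)
 */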

void
ir3_insert_by_depth(struct ir3_instruction *instr, struct list_head *list)
{
    /* remove from existing spot in list: */
    list_delinit(&instr->node);

    /* find where to re-insert instruction: */
    list_for_each_entry (struct ir3_instruction, pos, list, node) {
        if (pos->depth > instr->depth) {
            list_add(&instr->node, &pos->node);
            return;
        }
    }
    /* if we get here, we didn't find an insertion spot: */
    list_addtail(&instr->node, list);
}
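
/* For illustration, with hypothetical depths: re-inserting an instruction
 * of depth 3 into a list whose entries currently have depths 1, 5, 9 walks
 * forward until it finds the first entry deeper than 3 (the depth-5 entry)
 * and links the instruction in at that point; if no deeper entry exists it
 * simply goes to the tail.
 */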

static void
ir3_instr_depth(struct ir3_instruction *instr, unsigned boost, bool falsedep)
{
    struct ir3_instruction *src;

    /* don't mark falsedep's as used, but otherwise process them normally: */
    if (!falsedep)
        instr->flags &= ~IR3_INSTR_UNUSED;

    if (ir3_instr_check_mark(instr))
        return;

    instr->depth = 0;

    foreach_ssa_src_n(src, i, instr) {
        unsigned sd;

        /* visit child to compute its depth: */
        ir3_instr_depth(src, boost, __is_false_dep(instr, i));

        /* for array writes, no need to delay on previous write: */
        if (i == 0)
            continue;

        sd = ir3_delayslots(src, instr, i) + src->depth;
        sd += boost;

        instr->depth = MAX2(instr->depth, sd);
    }

    if (!is_meta(instr))
        instr->depth++;

    ir3_insert_by_depth(instr, &instr->block->instr_list);
}
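
/* The 'boost' parameter inflates the depth contributed by every source on
 * the path, which is how block conditions get extra priority below: a
 * condition visited with boost=6 is treated as if each link in its chain
 * cost 6 extra cycles.  As a hypothetical example, a cmps.f.ge used as a
 * branch condition and fed by an add.f would compute its depth as
 * (3 + 6) + depth(add.f) + 1 rather than 3 + depth(add.f) + 1, with the
 * same boost applied again while computing depth(add.f) itself.
 */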

static bool
remove_unused_by_block(struct ir3_block *block)
{
    bool progress = false;
    list_for_each_entry_safe (struct ir3_instruction, instr, &block->instr_list, node) {
        if (instr->opc == OPC_END)
            continue;
        if (instr->flags & IR3_INSTR_UNUSED) {
            list_delinit(&instr->node);
            progress = true;
        }
    }
    return progress;
}

static bool
compute_depth_and_remove_unused(struct ir3 *ir)
{
    unsigned i;
    bool progress = false;

    ir3_clear_mark(ir);

    /* initially mark everything as unused, we'll clear the flag as we
     * visit the instructions:
     */
    list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
        list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
            instr->flags |= IR3_INSTR_UNUSED;
        }
    }

    for (i = 0; i < ir->noutputs; i++)
        if (ir->outputs[i])
            ir3_instr_depth(ir->outputs[i], 0, false);

    list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
        for (i = 0; i < block->keeps_count; i++)
            ir3_instr_depth(block->keeps[i], 0, false);

        /* We also need to account for the if-condition: */
        if (block->condition)
            ir3_instr_depth(block->condition, 6, false);
    }

    /* remove unused instructions: */
    list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
        progress |= remove_unused_by_block(block);
    }

    /* note that we can end up with unused indirects, but we should
     * not end up with unused predicates.
     */
    for (i = 0; i < ir->indirects_count; i++) {
        struct ir3_instruction *instr = ir->indirects[i];
        if (instr && (instr->flags & IR3_INSTR_UNUSED))
            ir->indirects[i] = NULL;
    }

    /* cleanup unused inputs: */
    for (i = 0; i < ir->ninputs; i++) {
        struct ir3_instruction *in = ir->inputs[i];
        if (in && (in->flags & IR3_INSTR_UNUSED))
            ir->inputs[i] = NULL;
    }

    return progress;
}
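
/* In effect this is a mark-and-sweep DCE that rides along with the depth
 * computation: everything starts out flagged IR3_INSTR_UNUSED, the depth
 * walk from the roots (outputs, keeps, block conditions) clears the flag
 * on whatever it reaches through real (non-false) dependencies, and the
 * sweep then deletes whatever is still flagged.
 */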

void
ir3_depth(struct ir3 *ir)
{
    bool progress;
    do {
        progress = compute_depth_and_remove_unused(ir);
    } while (progress);
}
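
/* Note that a single pass is not necessarily enough: an instruction that
 * is only reachable through a false dependency is itself left flagged as
 * unused (and removed), but its own sources still get visited and marked
 * used on that pass.  Once it is gone, those sources may become
 * unreachable too, so we simply repeat until a pass removes nothing.
 */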