/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
27 #include "util/u_math.h"
30 #include "ir3_shader.h"
/*
 * Instruction Depth:
 *
 * Calculates weighted instruction depth, ie. the sum of # of needed
 * instructions plus delay slots back to original input (ie INPUT or
 * CONST).  That is to say, an instruction's depth is:
 *
 *   depth(instr) {
 *     d = 0;
 *     // for each src register:
 *     foreach (src in instr->regs[1..n])
 *       d = max(d, delayslots(src->instr, n) + depth(src->instr));
 *     return d + 1;
 *   }
 *
 * After an instruction's depth is calculated, it is inserted into the
 * block's depth sorted list, which is used by the scheduling pass.
 */
51 /* generally don't count false dependencies, since this can just be
52 * something like a barrier, or SSBO store. The exception is array
53 * dependencies if the assigner is an array write and the consumer
54 * reads the same array.
57 ignore_dep(struct ir3_instruction
*assigner
,
58 struct ir3_instruction
*consumer
, unsigned n
)
60 if (!__is_false_dep(consumer
, n
))
63 if (assigner
->barrier_class
& IR3_BARRIER_ARRAY_W
) {
64 struct ir3_register
*dst
= assigner
->regs
[0];
65 struct ir3_register
*src
;
67 debug_assert(dst
->flags
& IR3_REG_ARRAY
);
69 foreach_src(src
, consumer
) {
70 if ((src
->flags
& IR3_REG_ARRAY
) &&
71 (dst
->array
.id
== src
->array
.id
)) {
80 /* calculate required # of delay slots between the instruction that
81 * assigns a value and the one that consumes
83 int ir3_delayslots(struct ir3_instruction
*assigner
,
84 struct ir3_instruction
*consumer
, unsigned n
)
86 if (ignore_dep(assigner
, consumer
, n
))
89 /* worst case is cat1-3 (alu) -> cat4/5 needing 6 cycles, normal
90 * alu -> alu needs 3 cycles, cat4 -> alu and texture fetch
91 * handled with sync bits
94 if (is_meta(assigner
) || is_meta(consumer
))
97 if (writes_addr(assigner
))
100 /* handled via sync flags: */
101 if (is_sfu(assigner
) || is_tex(assigner
) || is_mem(assigner
))
104 /* assigner must be alu: */
105 if (is_flow(consumer
) || is_sfu(consumer
) || is_tex(consumer
) ||
108 } else if ((is_mad(consumer
->opc
) || is_madsh(consumer
->opc
)) &&
110 /* special case, 3rd src to cat3 not required on first cycle */
118 ir3_insert_by_depth(struct ir3_instruction
*instr
, struct list_head
*list
)
120 /* remove from existing spot in list: */
121 list_delinit(&instr
->node
);
123 /* find where to re-insert instruction: */
124 list_for_each_entry (struct ir3_instruction
, pos
, list
, node
) {
125 if (pos
->depth
> instr
->depth
) {
126 list_add(&instr
->node
, &pos
->node
);
130 /* if we get here, we didn't find an insertion spot: */
131 list_addtail(&instr
->node
, list
);
135 ir3_instr_depth(struct ir3_instruction
*instr
, unsigned boost
, bool falsedep
)
137 struct ir3_instruction
*src
;
139 /* don't mark falsedep's as used, but otherwise process them normally: */
141 instr
->flags
&= ~IR3_INSTR_UNUSED
;
143 if (ir3_instr_check_mark(instr
))
148 foreach_ssa_src_n(src
, i
, instr
) {
151 /* visit child to compute it's depth: */
152 ir3_instr_depth(src
, boost
, __is_false_dep(instr
, i
));
154 /* for array writes, no need to delay on previous write: */
158 sd
= ir3_delayslots(src
, instr
, i
) + src
->depth
;
161 instr
->depth
= MAX2(instr
->depth
, sd
);
167 ir3_insert_by_depth(instr
, &instr
->block
->instr_list
);
171 remove_unused_by_block(struct ir3_block
*block
)
173 bool progress
= false;
174 list_for_each_entry_safe (struct ir3_instruction
, instr
, &block
->instr_list
, node
) {
175 if (instr
->opc
== OPC_END
|| instr
->opc
== OPC_CHSH
|| instr
->opc
== OPC_CHMASK
)
177 if (instr
->flags
& IR3_INSTR_UNUSED
) {
178 if (instr
->opc
== OPC_META_FO
) {
179 struct ir3_instruction
*src
= ssa(instr
->regs
[1]);
180 /* leave inputs alone.. we can't optimize out components of
181 * an input, since the hw is still going to be writing all
182 * of the components, and we could end up in a situation
183 * where multiple inputs overlap.
185 if ((src
->opc
!= OPC_META_INPUT
) &&
186 (src
->regs
[0]->wrmask
> 1)) {
187 src
->regs
[0]->wrmask
&= ~(1 << instr
->fo
.off
);
189 /* prune no-longer needed right-neighbors. We could
190 * probably do the same for left-neighbors (ie. tex
191 * fetch that only need .yw components), but that
192 * makes RA a bit more confusing than it already is
194 struct ir3_instruction
*n
= instr
;
195 while (n
&& n
->cp
.right
)
197 while (n
->flags
& IR3_INSTR_UNUSED
) {
205 list_delinit(&instr
->node
);
213 compute_depth_and_remove_unused(struct ir3
*ir
, struct ir3_shader_variant
*so
)
216 bool progress
= false;
220 /* initially mark everything as unused, we'll clear the flag as we
221 * visit the instructions:
223 list_for_each_entry (struct ir3_block
, block
, &ir
->block_list
, node
) {
224 list_for_each_entry (struct ir3_instruction
, instr
, &block
->instr_list
, node
) {
225 /* special case, if pre-fs texture fetch used, we cannot
226 * eliminate the barycentric i/j input
228 if (so
->num_sampler_prefetch
&&
229 (instr
->opc
== OPC_META_INPUT
) &&
230 (instr
->input
.sysval
== SYSTEM_VALUE_BARYCENTRIC_PIXEL
))
232 instr
->flags
|= IR3_INSTR_UNUSED
;
236 for (i
= 0; i
< ir
->noutputs
; i
++)
238 ir3_instr_depth(ir
->outputs
[i
], 0, false);
240 list_for_each_entry (struct ir3_block
, block
, &ir
->block_list
, node
) {
241 for (i
= 0; i
< block
->keeps_count
; i
++)
242 ir3_instr_depth(block
->keeps
[i
], 0, false);
244 /* We also need to account for if-condition: */
245 if (block
->condition
)
246 ir3_instr_depth(block
->condition
, 6, false);
249 /* mark un-used instructions: */
250 list_for_each_entry (struct ir3_block
, block
, &ir
->block_list
, node
) {
251 progress
|= remove_unused_by_block(block
);
254 /* note that we can end up with unused indirects, but we should
255 * not end up with unused predicates.
257 for (i
= 0; i
< ir
->indirects_count
; i
++) {
258 struct ir3_instruction
*instr
= ir
->indirects
[i
];
259 if (instr
&& (instr
->flags
& IR3_INSTR_UNUSED
))
260 ir
->indirects
[i
] = NULL
;
263 /* cleanup unused inputs: */
264 for (i
= 0; i
< ir
->ninputs
; i
++) {
265 struct ir3_instruction
*in
= ir
->inputs
[i
];
266 if (in
&& (in
->flags
& IR3_INSTR_UNUSED
))
267 ir
->inputs
[i
] = NULL
;
274 ir3_depth(struct ir3
*ir
, struct ir3_shader_variant
*so
)
278 progress
= compute_depth_and_remove_unused(ir
, so
);