freedreno: Track the set of UBOs to be uploaded in UBO analysis.
[mesa.git] src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c
/*
 * Copyright © 2019 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ir3_nir.h"
#include "ir3_compiler.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "mesa/main/macros.h"

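/* Compute the [start, end) byte range touched by a constant-offset UBO
 * load, aligned out to the 16-dword granularity that ranges are tracked
 * at.
 */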
static inline struct ir3_ubo_range
get_ubo_load_range(nir_intrinsic_instr *instr)
{
	struct ir3_ubo_range r;

	const int offset = nir_src_as_uint(instr->src[1]);
	const int bytes = nir_intrinsic_dest_components(instr) * 4;

	r.start = ROUND_DOWN_TO(offset, 16 * 4);
	r.end = ALIGN(offset + bytes, 16 * 4);

	return r;
}

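/* Accumulate, per UBO block, the byte range accessed with constant
 * offsets, so we know how much of each block would need to be uploaded
 * for its loads to be lowered to load_uniform.
 */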
static void
gather_ubo_ranges(nir_shader *nir, nir_intrinsic_instr *instr,
		  struct ir3_ubo_analysis_state *state)
{
	if (!nir_src_is_const(instr->src[0]))
		return;

	if (!nir_src_is_const(instr->src[1])) {
		if (nir_src_as_uint(instr->src[0]) == 0) {
			/* If this is an indirect on UBO 0, we'll still lower it back to
			 * load_uniform. Set the range to cover all of UBO 0.
			 */
			state->range[0].end = align(nir->num_uniforms * 16, 16 * 4);
		}

		return;
	}

	const struct ir3_ubo_range r = get_ubo_load_range(instr);
	const uint32_t block = nir_src_as_uint(instr->src[0]);

	/* If UBO lowering is disabled, we still want to lower block 0
	 * (which is normal uniforms):
	 */
	if ((block > 0) && (ir3_shader_debug & IR3_DBG_NOUBOOPT))
		return;

	if (r.start < state->range[block].start)
		state->range[block].start = r.start;
	if (state->range[block].end < r.end)
		state->range[block].end = r.end;
}

/* For an indirect offset, it is common to see a pattern of multiple
 * loads with the same base but different constant offsets, ie:
 *
 *    vec1 32 ssa_33 = iadd ssa_base, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Detect this, and peel out the const_offset part, to end up with:
 *
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset, 0, 0)
 *
 * Or similarly:
 *
 *    vec1 32 ssa_33 = imad24_ir3 a, b, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * can be converted to:
 *
 *    vec1 32 ssa_base = imul24 a, b
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset, 0, 0)
 *
 * This gives the other opt passes something much easier to work
 * with (ie. not requiring value range tracking).
 */
static void
handle_partial_const(nir_builder *b, nir_ssa_def **srcp, unsigned *offp)
{
	if ((*srcp)->parent_instr->type != nir_instr_type_alu)
		return;

	nir_alu_instr *alu = nir_instr_as_alu((*srcp)->parent_instr);

	if (alu->op == nir_op_imad24_ir3) {
		/* This case is slightly more complicated as we need to
		 * replace the imad24_ir3 with an imul24:
		 */
		if (!nir_src_is_const(alu->src[2].src))
			return;

		*offp += nir_src_as_uint(alu->src[2].src);
		*srcp = nir_imul24(b, nir_ssa_for_alu_src(b, alu, 0),
				   nir_ssa_for_alu_src(b, alu, 1));

		return;
	}

	if (alu->op != nir_op_iadd)
		return;

	if (!(alu->src[0].src.is_ssa && alu->src[1].src.is_ssa))
		return;

	if (nir_src_is_const(alu->src[0].src)) {
		*offp += nir_src_as_uint(alu->src[0].src);
		*srcp = alu->src[1].src.ssa;
	} else if (nir_src_is_const(alu->src[1].src)) {
		*srcp = alu->src[0].src.ssa;
		*offp += nir_src_as_uint(alu->src[1].src);
	}
}

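/* Rewrite a load_ubo into a load_uniform reading from the block's
 * assigned offset within the uploaded range, provided the access falls
 * inside the range gathered above.
 */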
static void
lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
			  struct ir3_ubo_analysis_state *state)
{
	/* We don't lower dynamic block index UBO loads to load_uniform, but we
	 * could probably with some effort determine a block stride in number of
	 * registers.
	 */
	if (!nir_src_is_const(instr->src[0]))
		return;

	const uint32_t block = nir_src_as_uint(instr->src[0]);

	if (block > 0) {
		/* We don't lower dynamic array indexing either, but we definitely should.
		 * We don't have a good way of determining the range of the dynamic
		 * access, so for now just fall back to pulling.
		 */
		if (!nir_src_is_const(instr->src[1]))
			return;

		/* After gathering the UBO access ranges, we limit the total
		 * upload. Reject if we're now outside the range.
		 */
		const struct ir3_ubo_range r = get_ubo_load_range(instr);
		if (!(state->range[block].start <= r.start &&
		      r.end <= state->range[block].end))
			return;
	}

	b->cursor = nir_before_instr(&instr->instr);

	nir_ssa_def *ubo_offset = nir_ssa_for_src(b, instr->src[1], 1);
	unsigned const_offset = 0;

	handle_partial_const(b, &ubo_offset, &const_offset);

	/* UBO offset is in bytes, but uniform offset is in units of
	 * dwords, so we need to divide by 4 (right-shift by 2). And
	 * also the same for the constant part of the offset:
	 */
	nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, ubo_offset, -2);
	nir_ssa_def *uniform_offset = NULL;
	if (new_offset) {
		uniform_offset = new_offset;
	} else {
		uniform_offset = nir_ushr(b, ubo_offset, nir_imm_int(b, 2));
	}

	debug_assert(!(const_offset & 0x3));
	const_offset >>= 2;

	const int range_offset =
		(state->range[block].offset - state->range[block].start) / 4;
	const_offset += range_offset;

	nir_intrinsic_instr *uniform =
		nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
	uniform->num_components = instr->num_components;
	uniform->src[0] = nir_src_for_ssa(uniform_offset);
	nir_intrinsic_set_base(uniform, const_offset);
	nir_ssa_dest_init(&uniform->instr, &uniform->dest,
			  uniform->num_components, instr->dest.ssa.bit_size,
			  instr->dest.ssa.name);
	nir_builder_instr_insert(b, &uniform->instr);
	nir_ssa_def_rewrite_uses(&instr->dest.ssa,
				 nir_src_for_ssa(&uniform->dest.ssa));

	nir_instr_remove(&instr->instr);

	state->lower_count++;
}

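/* Gather the UBO ranges accessed with constant offsets, pack them into
 * the upload budget (max_upload), and lower the covered load_ubo
 * instructions to load_uniform. Returns true if any loads were lowered.
 */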
bool
ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader *shader)
{
	struct ir3_ubo_analysis_state *state = &shader->ubo_state;

	memset(state, 0, sizeof(*state));

	nir_foreach_function(function, nir) {
		if (function->impl) {
			nir_foreach_block(block, function->impl) {
				nir_foreach_instr(instr, block) {
					if (instr->type == nir_instr_type_intrinsic &&
					    nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_ubo)
						gather_ubo_ranges(nir, nir_instr_as_intrinsic(instr), state);
				}
			}
		}
	}

	/* For now, everything we upload is accessed statically and thus will be
	 * used by the shader. Once we can upload dynamically indexed data, we may
	 * upload sparsely accessed arrays, at which point we probably want to
	 * give priority to smaller UBOs, on the assumption that big UBOs will be
	 * accessed dynamically. Alternatively, we could track statically and
	 * dynamically accessed ranges separately and upload static ranges
	 * first.
	 */
	const uint32_t max_upload = 16 * 1024;
	uint32_t offset = 0;
	for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
		uint32_t range_size = state->range[i].end - state->range[i].start;

		debug_assert(offset <= max_upload);
		state->range[i].offset = offset;
		if (offset + range_size > max_upload) {
			range_size = max_upload - offset;
			state->range[i].end = state->range[i].start + range_size;
		}
		offset += range_size;

		if (state->range[i].start < state->range[i].end)
			state->enabled |= 1 << i;
	}
	state->size = offset;

	nir_foreach_function(function, nir) {
		if (function->impl) {
			nir_builder builder;
			nir_builder_init(&builder, function->impl);
			nir_foreach_block(block, function->impl) {
				nir_foreach_instr_safe(instr, block) {
					if (instr->type == nir_instr_type_intrinsic &&
					    nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_ubo)
						lower_ubo_load_to_uniform(nir_instr_as_intrinsic(instr), &builder, state);
				}
			}

			nir_metadata_preserve(function->impl, nir_metadata_block_index |
					      nir_metadata_dominance);
		}
	}

	return state->lower_count > 0;
}