freedreno/a3xx: parameterize ubo optimization
[mesa.git] src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c
/*
 * Copyright © 2019 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ir3_nir.h"
#include "ir3_compiler.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_math.h"

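/* Compute the [start, end) byte range touched by a constant-offset UBO
 * load, rounded out to the const upload granularity ("alignment" is in
 * units of vec4s, hence the * 16 to convert to bytes).
 */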
static inline struct ir3_ubo_range
get_ubo_load_range(nir_intrinsic_instr *instr, uint32_t alignment)
{
   struct ir3_ubo_range r;

   int offset = nir_src_as_uint(instr->src[1]);
   const int bytes = nir_intrinsic_dest_components(instr) * 4;

   r.start = ROUND_DOWN_TO(offset, alignment * 16);
   r.end = ALIGN(offset + bytes, alignment * 16);

   return r;
}

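/* Find the range slot already tracking this load's UBO block (same block
 * index, bindless base and bindless-ness), or, when create_new is set,
 * claim the first unused slot for it.  Returns NULL if the block index
 * isn't a compile-time constant or all IR3_MAX_UBO_PUSH_RANGES slots are
 * in use.
 */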
static struct ir3_ubo_range *
get_existing_range(nir_intrinsic_instr *instr,
                   struct ir3_ubo_analysis_state *state,
                   bool create_new)
{
   unsigned block, base = 0;
   bool bindless;
   if (nir_src_is_const(instr->src[0])) {
      block = nir_src_as_uint(instr->src[0]);
      bindless = false;
   } else {
      nir_intrinsic_instr *rsrc = ir3_bindless_resource(instr->src[0]);
      if (rsrc && nir_src_is_const(rsrc->src[0])) {
         block = nir_src_as_uint(rsrc->src[0]);
         base = nir_intrinsic_desc_set(rsrc);
         bindless = true;
      } else {
         return NULL;
      }
   }
   for (int i = 0; i < IR3_MAX_UBO_PUSH_RANGES; i++) {
      struct ir3_ubo_range *range = &state->range[i];
      if (range->end < range->start) {
         /* We don't have a matching range, but there are more available.
          */
         if (create_new) {
            range->block = block;
            range->bindless_base = base;
            range->bindless = bindless;
            return range;
         } else {
            return NULL;
         }
      } else if (range->block == block && range->bindless_base == base &&
                 range->bindless == bindless) {
         return range;
      }
   }

   return NULL;
}

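/* Analysis pass: grow the tracked range for this load's UBO block to
 * cover the bytes it reads.  Indirect offsets are only handled for UBO 0
 * (normal uniforms), where the whole block is marked as used.
 */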
static void
gather_ubo_ranges(nir_shader *nir, nir_intrinsic_instr *instr,
                  struct ir3_ubo_analysis_state *state, uint32_t alignment)
{
   struct ir3_ubo_range *old_r = get_existing_range(instr, state, true);
   if (!old_r)
      return;

   if (!nir_src_is_const(instr->src[1])) {
      if (!old_r->bindless && old_r->block == 0) {
         /* If this is an indirect on UBO 0, we'll still lower it back to
          * load_uniform.  Set the range to cover all of UBO 0.
          */
         old_r->start = 0;
         old_r->end = ALIGN(nir->num_uniforms * 16, alignment * 16);
      }

      return;
   }

   const struct ir3_ubo_range r = get_ubo_load_range(instr, alignment);

   /* if UBO lowering is disabled, we still want to lower block 0
    * (which is normal uniforms):
    */
   if ((old_r->bindless || old_r->block != 0) && (ir3_shader_debug & IR3_DBG_NOUBOOPT))
      return;

   if (r.start < old_r->start)
      old_r->start = r.start;
   if (old_r->end < r.end)
      old_r->end = r.end;
}

/* For indirect offset, it is common to see a pattern of multiple
 * loads with the same base, but different constant offset, ie:
 *
 *    vec1 32 ssa_33 = iadd ssa_base, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Detect this, and peel out the const_offset part, to end up with:
 *
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset, 0, 0)
 *
 * Or similarly:
 *
 *    vec1 32 ssa_33 = imad24_ir3 a, b, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Can be converted to:
 *
 *    vec1 32 ssa_base = imul24 a, b
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset, 0, 0)
 *
 * This gives the other opt passes something much easier to work
 * with (ie. not requiring value range tracking)
 */
static void
handle_partial_const(nir_builder *b, nir_ssa_def **srcp, int *offp)
{
   if ((*srcp)->parent_instr->type != nir_instr_type_alu)
      return;

   nir_alu_instr *alu = nir_instr_as_alu((*srcp)->parent_instr);

   if (alu->op == nir_op_imad24_ir3) {
      /* This case is slightly more complicated as we need to
       * replace the imad24_ir3 with an imul24:
       */
      if (!nir_src_is_const(alu->src[2].src))
         return;

      *offp += nir_src_as_uint(alu->src[2].src);
      *srcp = nir_imul24(b, nir_ssa_for_alu_src(b, alu, 0),
                         nir_ssa_for_alu_src(b, alu, 1));

      return;
   }

   if (alu->op != nir_op_iadd)
      return;

   if (!(alu->src[0].src.is_ssa && alu->src[1].src.is_ssa))
      return;

   if (nir_src_is_const(alu->src[0].src)) {
      *offp += nir_src_as_uint(alu->src[0].src);
      *srcp = alu->src[1].src.ssa;
   } else if (nir_src_is_const(alu->src[1].src)) {
      *srcp = alu->src[0].src.ssa;
      *offp += nir_src_as_uint(alu->src[1].src);
   }
}

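/* For loads that remain load_ubo, renumber the (GL) UBO index down by one
 * to account for block 0 having been lowered to the constbuf, and keep
 * the running num_ubos count in sync.
 */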
static void
lower_ubo_block_decrement(nir_intrinsic_instr *instr, nir_builder *b, int *num_ubos)
{
   /* Skip shifting things for turnip's bindless resources. */
   if (ir3_bindless_resource(instr->src[0])) {
      assert(!b->shader->info.first_ubo_is_default_ubo); /* only set for GL */
      return;
   }

   /* Shift all GL nir_intrinsic_load_ubo UBO indices down by 1, because we
    * have lowered block 0 off of load_ubo to constbuf and ir3_const only
    * uploads pointers for block 1-N.  This is also where we update the NIR
    * num_ubos to reflect the UBOs that remain in use after others got
    * lowered to constbuf access.
    */
   if (nir_src_is_const(instr->src[0])) {
      int block = nir_src_as_uint(instr->src[0]) - 1;
      *num_ubos = MAX2(*num_ubos, block + 1);
   } else {
      *num_ubos = b->shader->info.num_ubos - 1;
   }

   nir_ssa_def *old_idx = nir_ssa_for_src(b, instr->src[0], 1);
   nir_ssa_def *new_idx = nir_iadd_imm(b, old_idx, -1);
   nir_instr_rewrite_src(&instr->instr, &instr->src[0],
                         nir_src_for_ssa(new_idx));
}

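/* Lowering pass: rewrite a load_ubo whose bytes made it into the upload
 * window as a load_uniform at the corresponding const-file offset.
 * Anything we can't (or chose not to) push falls back to
 * lower_ubo_block_decrement() and stays a pulled UBO load.
 */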
static void
lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
                          struct ir3_ubo_analysis_state *state, int *num_ubos, uint32_t alignment)
{
   b->cursor = nir_before_instr(&instr->instr);

   /* We don't lower dynamic block index UBO loads to load_uniform, but we
    * could probably with some effort determine a block stride in number of
    * registers.
    */
   struct ir3_ubo_range *range = get_existing_range(instr, state, false);
   if (!range) {
      lower_ubo_block_decrement(instr, b, num_ubos);
      return;
   }

   if (range->bindless || range->block > 0) {
      /* We don't lower dynamic array indexing either, but we definitely should.
       * We don't have a good way of determining the range of the dynamic
       * access, so for now just fall back to pulling.
       */
      if (!nir_src_is_const(instr->src[1])) {
         lower_ubo_block_decrement(instr, b, num_ubos);
         return;
      }

      /* After gathering the UBO access ranges, we limit the total
       * upload.  Reject if we're now outside the range.
       */
      const struct ir3_ubo_range r = get_ubo_load_range(instr, alignment);
      if (!(range->start <= r.start && r.end <= range->end)) {
         lower_ubo_block_decrement(instr, b, num_ubos);
         return;
      }
   }

   nir_ssa_def *ubo_offset = nir_ssa_for_src(b, instr->src[1], 1);
   int const_offset = 0;

   handle_partial_const(b, &ubo_offset, &const_offset);

   /* UBO offset is in bytes, but uniform offset is in units of
    * dwords, so we need to divide by 4 (right-shift by 2).  For ldc the
    * offset is in units of 16 bytes, so we need to multiply by 4.  And
    * also the same for the constant part of the offset:
    */
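   /* For example, with an offset of "iadd ssa_base, 64", handle_partial_const
    * peels off const_offset = 64, which becomes 64 >> 2 = 16 dwords folded
    * into the load_uniform base below, while ssa_base itself is shifted
    * right by 2 to convert bytes to dwords.
    */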
   const int shift = -2;
   nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, ubo_offset, -2);
   nir_ssa_def *uniform_offset = NULL;
   if (new_offset) {
      uniform_offset = new_offset;
   } else {
      uniform_offset = shift > 0 ?
         nir_ishl(b, ubo_offset, nir_imm_int(b, shift)) :
         nir_ushr(b, ubo_offset, nir_imm_int(b, -shift));
   }

   debug_assert(!(const_offset & 0x3));
   const_offset >>= 2;

   const int range_offset = ((int)range->offset - (int)range->start) / 4;
   const_offset += range_offset;

   /* The range_offset could be negative: if only part of the UBO
    * block is accessed, range->start can be greater than range->offset.
    * But we can't underflow const_offset.  If necessary we need to
    * insert nir instructions to compensate (which can hopefully be
    * optimized away).
    */
   if (const_offset < 0) {
      uniform_offset = nir_iadd_imm(b, uniform_offset, const_offset);
      const_offset = 0;
   }

   nir_intrinsic_instr *uniform =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
   uniform->num_components = instr->num_components;
   uniform->src[0] = nir_src_for_ssa(uniform_offset);
   nir_intrinsic_set_base(uniform, const_offset);
   nir_ssa_dest_init(&uniform->instr, &uniform->dest,
                     uniform->num_components, instr->dest.ssa.bit_size,
                     instr->dest.ssa.name);
   nir_builder_instr_insert(b, &uniform->instr);
   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                            nir_src_for_ssa(&uniform->dest.ssa));

   nir_instr_remove(&instr->instr);

   state->lower_count++;
}

static bool
instr_is_load_ubo(nir_instr *instr)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_op op = nir_instr_as_intrinsic(instr)->intrinsic;

   /* ir3_nir_lower_io_offsets happens after this pass. */
   assert(op != nir_intrinsic_load_ubo_ir3);

   return op == nir_intrinsic_load_ubo;
}

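/* Entry point: gather the ranges accessed with constant offsets, pack
 * them into the const file up to the max_upload budget, then rewrite the
 * qualifying load_ubo instructions to load_uniform.  Returns true if any
 * load was lowered.
 */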
bool
ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader *shader)
{
   struct ir3_ubo_analysis_state *state = &shader->ubo_state;

   memset(state, 0, sizeof(*state));
   for (int i = 0; i < IR3_MAX_UBO_PUSH_RANGES; i++) {
      state->range[i].start = UINT32_MAX;
   }

   nir_foreach_function (function, nir) {
      if (function->impl) {
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr (instr, block) {
               if (instr_is_load_ubo(instr))
                  gather_ubo_ranges(nir, nir_instr_as_intrinsic(instr),
                                    state, shader->compiler->const_upload_unit);
            }
         }
      }
   }

   /* For now, everything we upload is accessed statically and thus will be
    * used by the shader.  Once we can upload dynamically indexed data, we may
    * upload sparsely accessed arrays, at which point we probably want to
    * give priority to smaller UBOs, on the assumption that big UBOs will be
    * accessed dynamically.  Alternatively, we can track statically and
    * dynamically accessed ranges separately and upload static ranges
    * first.
    */
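   /* Note: max_const and num_reserved_user_consts are counted in vec4
    * units, hence the * 16 to get byte offsets that match the ranges.
    */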
   const uint32_t max_upload = shader->compiler->max_const * 16;
   uint32_t offset = shader->const_state.num_reserved_user_consts * 16;
   state->num_enabled = ARRAY_SIZE(state->range);
   for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
      if (state->range[i].start >= state->range[i].end) {
         state->num_enabled = i;
         break;
      }

      uint32_t range_size = state->range[i].end - state->range[i].start;

      debug_assert(offset <= max_upload);
      state->range[i].offset = offset;
      if (offset + range_size > max_upload) {
         range_size = max_upload - offset;
         state->range[i].end = state->range[i].start + range_size;
      }
      offset += range_size;
   }
   state->size = offset;

   int num_ubos = 0;
   nir_foreach_function (function, nir) {
      if (function->impl) {
         nir_builder builder;
         nir_builder_init(&builder, function->impl);
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr_safe (instr, block) {
               if (instr_is_load_ubo(instr))
                  lower_ubo_load_to_uniform(nir_instr_as_intrinsic(instr),
                                            &builder, state, &num_ubos,
                                            shader->compiler->const_upload_unit);
            }
         }

         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                               nir_metadata_dominance);
      }
   }

   /* Update the num_ubos field for GL (first_ubo_is_default_ubo).  With
    * Vulkan's bindless, we don't use the num_ubos field, so we can leave it
    * incremented.
    */
   if (nir->info.first_ubo_is_default_ubo)
      nir->info.num_ubos = num_ubos;

   return state->lower_count > 0;
}