/*
 * Copyright © 2019 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ir3_nir.h"
#include "ir3_compiler.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_math.h"

static bool
ubo_is_gl_uniforms(const struct ir3_ubo_info *ubo)
{
   return !ubo->bindless && ubo->block == 0;
}

static inline struct ir3_ubo_range
get_ubo_load_range(nir_shader *nir, nir_intrinsic_instr *instr, uint32_t alignment)
{
   struct ir3_ubo_range r;

   if (nir_src_is_const(instr->src[1])) {
      int offset = nir_src_as_uint(instr->src[1]);
      const int bytes = nir_intrinsic_dest_components(instr) * 4;

      r.start = ROUND_DOWN_TO(offset, alignment * 16);
      r.end = ALIGN(offset + bytes, alignment * 16);
   } else {
      /* The other valid place to call this is on the GL default uniform block */
      assert(nir_src_as_uint(instr->src[0]) == 0);
      r.start = 0;
      r.end = ALIGN(nir->num_uniforms * 16, alignment * 16);
   }
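   /* A minimal worked example with hypothetical values, assuming
    * alignment == 1 (i.e. a vec4 upload granularity): a vec4 load at byte
    * offset 20 covers bytes [20, 36), so the range is widened to the
    * enclosing 16-byte units:
    *
    *    r.start = ROUND_DOWN_TO(20, 16) = 16
    *    r.end   = ALIGN(20 + 16, 16)    = 48
    *
    * Larger const_upload_unit values simply round to coarser boundaries.
    */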

   return r;
}

static bool
get_ubo_info(nir_intrinsic_instr *instr, struct ir3_ubo_info *ubo)
{
   if (nir_src_is_const(instr->src[0])) {
      ubo->block = nir_src_as_uint(instr->src[0]);
      ubo->bindless_base = 0;
      ubo->bindless = false;
      return true;
   } else {
      nir_intrinsic_instr *rsrc = ir3_bindless_resource(instr->src[0]);
      if (rsrc && nir_src_is_const(rsrc->src[0])) {
         ubo->block = nir_src_as_uint(rsrc->src[0]);
         ubo->bindless_base = nir_intrinsic_desc_set(rsrc);
         ubo->bindless = true;
         return true;
      }
   }
   return false;
}

/**
 * Get an existing range associated with the ubo, but don't create a new
 * one if it does not already exist.
 */
static const struct ir3_ubo_range *
get_existing_range(nir_intrinsic_instr *instr,
                   const struct ir3_ubo_analysis_state *state)
{
   struct ir3_ubo_info ubo = {};

   if (!get_ubo_info(instr, &ubo))
      return NULL;

   for (int i = 0; i < IR3_MAX_UBO_PUSH_RANGES; i++) {
      const struct ir3_ubo_range *range = &state->range[i];
      if (range->end < range->start) {
         break;
      } else if (!memcmp(&range->ubo, &ubo, sizeof(ubo))) {
         return range;
      }
   }

   return NULL;
}

/**
 * Get an existing range, or create a new one if necessary/possible.
 */
static struct ir3_ubo_range *
get_range(nir_intrinsic_instr *instr, struct ir3_ubo_analysis_state *state)
{
   struct ir3_ubo_info ubo = {};

   if (!get_ubo_info(instr, &ubo))
      return NULL;

   for (int i = 0; i < IR3_MAX_UBO_PUSH_RANGES; i++) {
      struct ir3_ubo_range *range = &state->range[i];
      if (range->end < range->start) {
         /* We don't have a matching range, but there are more available.
          */
         range->ubo = ubo;
         return range;
      } else if (!memcmp(&range->ubo, &ubo, sizeof(ubo))) {
         return range;
      }
   }

   return NULL;
}

static void
gather_ubo_ranges(nir_shader *nir, nir_intrinsic_instr *instr,
                  struct ir3_ubo_analysis_state *state, uint32_t alignment)
{
   if (ir3_shader_debug & IR3_DBG_NOUBOOPT)
      return;

   struct ir3_ubo_range *old_r = get_range(instr, state);
   if (!old_r)
      return;

   /* We don't know how to get the size of UBOs being indirected on, other
    * than on the GL uniforms where we have some other shader_info data.
    */
   if (!nir_src_is_const(instr->src[1]) && !ubo_is_gl_uniforms(&old_r->ubo))
      return;

   const struct ir3_ubo_range r = get_ubo_load_range(nir, instr, alignment);

   if (r.start < old_r->start)
      old_r->start = r.start;
   if (old_r->end < r.end)
      old_r->end = r.end;
}

/* For indirect offset, it is common to see a pattern of multiple
 * loads with the same base, but different constant offset, ie:
 *
 *    vec1 32 ssa_33 = iadd ssa_base, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Detect this, and peel out the const_offset part, to end up with:
 *
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset, 0, 0)
 *
 * Or similarly:
 *
 *    vec1 32 ssa_33 = imad24_ir3 a, b, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Can be converted to:
 *
 *    vec1 32 ssa_base = imul24 a, b
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset, 0, 0)
 *
 * This gives the other opt passes something much easier to work
 * with (ie. not requiring value range tracking)
 */
static void
handle_partial_const(nir_builder *b, nir_ssa_def **srcp, int *offp)
{
   if ((*srcp)->parent_instr->type != nir_instr_type_alu)
      return;

   nir_alu_instr *alu = nir_instr_as_alu((*srcp)->parent_instr);

   if (alu->op == nir_op_imad24_ir3) {
      /* This case is slightly more complicated as we need to
       * replace the imad24_ir3 with an imul24:
       */
      if (!nir_src_is_const(alu->src[2].src))
         return;

      *offp += nir_src_as_uint(alu->src[2].src);
      *srcp = nir_imul24(b, nir_ssa_for_alu_src(b, alu, 0),
                         nir_ssa_for_alu_src(b, alu, 1));

      return;
   }

   if (alu->op != nir_op_iadd)
      return;

   if (!(alu->src[0].src.is_ssa && alu->src[1].src.is_ssa))
      return;

   if (nir_src_is_const(alu->src[0].src)) {
      *offp += nir_src_as_uint(alu->src[0].src);
      *srcp = alu->src[1].src.ssa;
   } else if (nir_src_is_const(alu->src[1].src)) {
      *srcp = alu->src[0].src.ssa;
      *offp += nir_src_as_uint(alu->src[1].src);
   }
}

/* Tracks the maximum bindful UBO accessed so that we reduce the UBO
 * descriptors emitted in the fast path for GL.
 */
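/* For example (a hypothetical shader, illustrative only): a constant-index
 * load from block 3 raises *num_ubos to at least 4, while a dynamically
 * indexed (non-bindless) block index falls back to shader_info::num_ubos,
 * since we can't tell which descriptors might be touched.
 */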
static void
track_ubo_use(nir_intrinsic_instr *instr, nir_builder *b, int *num_ubos)
{
   if (ir3_bindless_resource(instr->src[0])) {
      assert(!b->shader->info.first_ubo_is_default_ubo); /* only set for GL */
      return;
   }

   if (nir_src_is_const(instr->src[0])) {
      int block = nir_src_as_uint(instr->src[0]);
      *num_ubos = MAX2(*num_ubos, block + 1);
   } else {
      *num_ubos = b->shader->info.num_ubos;
   }
}

static void
lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
                          struct ir3_ubo_analysis_state *state, int *num_ubos, uint32_t alignment)
{
   b->cursor = nir_before_instr(&instr->instr);

   /* We don't lower dynamic block index UBO loads to load_uniform, but we
    * could probably with some effort determine a block stride in number of
    * registers.
    */
   const struct ir3_ubo_range *range = get_existing_range(instr, state);
   if (!range) {
      track_ubo_use(instr, b, num_ubos);
      return;
   }

   /* We don't have a good way of determining the range of the dynamic
    * access in general, so for now just fall back to pulling.
    */
   if (!nir_src_is_const(instr->src[1]) && !ubo_is_gl_uniforms(&range->ubo))
      return;

   /* After gathering the UBO access ranges, we limit the total
    * upload. Don't lower if this load is outside the range.
    */
   const struct ir3_ubo_range r = get_ubo_load_range(b->shader,
                                                     instr, alignment);
   if (!(range->start <= r.start && r.end <= range->end)) {
      track_ubo_use(instr, b, num_ubos);
      return;
   }

   nir_ssa_def *ubo_offset = nir_ssa_for_src(b, instr->src[1], 1);
   int const_offset = 0;

   handle_partial_const(b, &ubo_offset, &const_offset);

   /* UBO offset is in bytes, but uniform offset is in units of
    * dwords, so we need to divide by 4 (right-shift by 2). For ldc the
    * offset is in units of 16 bytes, so we need to multiply by 4. And
    * also the same for the constant part of the offset:
    */
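   /* A worked example with hypothetical values: if handle_partial_const()
    * left us with a dynamic byte offset ssa_x plus a constant part of 12
    * bytes, then below we end up with roughly
    *
    *    uniform_offset = ushr(ssa_x, 2)   (bytes -> dwords)
    *    const_offset   = 12 / 4 = 3       (dwords)
    *
    * before range_offset is folded in.
    */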
   const int shift = -2;
   nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, ubo_offset, -2);
   nir_ssa_def *uniform_offset = NULL;
   if (new_offset) {
      uniform_offset = new_offset;
   } else {
      uniform_offset = shift > 0 ?
         nir_ishl(b, ubo_offset, nir_imm_int(b, shift)) :
         nir_ushr(b, ubo_offset, nir_imm_int(b, -shift));
   }

   debug_assert(!(const_offset & 0x3));
   const_offset >>= 2;

   const int range_offset = ((int)range->offset - (int)range->start) / 4;
   const_offset += range_offset;

   /* The range_offset could be negative: if only part of the UBO
    * block is accessed, range->start can be greater than range->offset.
    * But we can't underflow const_offset. If necessary we need to
    * insert nir instructions to compensate (which can hopefully be
    * optimized away).
    */
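   /* To illustrate with hypothetical numbers: if range->start is 32 bytes
    * but range->offset is 16 bytes, range_offset is (16 - 32) / 4 = -4
    * dwords. With no constant part, const_offset would become -4, so we
    * instead fold the -4 into uniform_offset and clamp const_offset to 0.
    */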
   if (const_offset < 0) {
      uniform_offset = nir_iadd_imm(b, uniform_offset, const_offset);
      const_offset = 0;
   }

   nir_intrinsic_instr *uniform =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
   uniform->num_components = instr->num_components;
   uniform->src[0] = nir_src_for_ssa(uniform_offset);
   nir_intrinsic_set_base(uniform, const_offset);
   nir_ssa_dest_init(&uniform->instr, &uniform->dest,
                     uniform->num_components, instr->dest.ssa.bit_size,
                     instr->dest.ssa.name);
   nir_builder_instr_insert(b, &uniform->instr);
   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                            nir_src_for_ssa(&uniform->dest.ssa));

   nir_instr_remove(&instr->instr);

   state->lower_count++;
}

static bool
instr_is_load_ubo(nir_instr *instr)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_op op = nir_instr_as_intrinsic(instr)->intrinsic;

   /* ir3_nir_lower_io_offsets happens after this pass. */
   assert(op != nir_intrinsic_load_ubo_ir3);

   return op == nir_intrinsic_load_ubo;
}

bool
ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_const_state *const_state = ir3_const_state(v);
   struct ir3_ubo_analysis_state *state = &const_state->ubo_state;
   struct ir3_compiler *compiler = v->shader->compiler;

   memset(state, 0, sizeof(*state));
   for (int i = 0; i < IR3_MAX_UBO_PUSH_RANGES; i++) {
      state->range[i].start = UINT32_MAX;
   }

   nir_foreach_function (function, nir) {
      if (function->impl) {
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr (instr, block) {
               if (instr_is_load_ubo(instr))
                  gather_ubo_ranges(nir, nir_instr_as_intrinsic(instr),
                                    state, compiler->const_upload_unit);
            }
         }
      }
   }

   /* For now, everything we upload is accessed statically and thus will be
    * used by the shader. Once we can upload dynamically indexed data, we may
    * upload sparsely accessed arrays, at which point we probably want to
    * give priority to smaller UBOs, on the assumption that big UBOs will be
    * accessed dynamically. Alternatively, we can track statically and
    * dynamically accessed ranges separately and upload static ranges
    * first.
    */

   /* Limit our uploads to the amount of constant buffer space available in
    * the hardware, minus what the shader compiler may need for various
    * driver params. We do this UBO-to-push-constant lowering before the real
    * allocation of the driver params' const space, because UBO pointers can
    * be driver params but this pass usually eliminates them.
    */
   struct ir3_const_state worst_case_const_state = { };
   ir3_setup_const_state(nir, v, &worst_case_const_state);
   const uint32_t max_upload = (compiler->max_const -
                                worst_case_const_state.offsets.immediate) * 16;
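   /* A rough example with made-up numbers: if max_const were 1024 vec4s and
    * the worst-case immediates started at vec4 512, then max_upload would be
    * (1024 - 512) * 16 = 8192 bytes of UBO data that can be pushed.
    */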

   uint32_t offset = v->shader->num_reserved_user_consts * 16;
   state->num_enabled = ARRAY_SIZE(state->range);
   for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
      if (state->range[i].start >= state->range[i].end) {
         state->num_enabled = i;
         break;
      }

      uint32_t range_size = state->range[i].end - state->range[i].start;

      debug_assert(offset <= max_upload);
      state->range[i].offset = offset;
      if (offset + range_size > max_upload) {
         range_size = max_upload - offset;
         state->range[i].end = state->range[i].start + range_size;
      }
      offset += range_size;
   }
   state->size = offset;

   int num_ubos = 0;
   nir_foreach_function (function, nir) {
      if (function->impl) {
         nir_builder builder;
         nir_builder_init(&builder, function->impl);
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr_safe (instr, block) {
               if (instr_is_load_ubo(instr))
                  lower_ubo_load_to_uniform(nir_instr_as_intrinsic(instr),
                                            &builder, state, &num_ubos,
                                            compiler->const_upload_unit);
            }
         }

         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                               nir_metadata_dominance);
      }
   }

   /* Update the num_ubos field for GL (first_ubo_is_default_ubo). With
    * Vulkan's bindless, we don't use the num_ubos field, so we can leave it
    * incorrect.
    */
   if (nir->info.first_ubo_is_default_ubo)
      nir->info.num_ubos = num_ubos;

   return state->lower_count > 0;
}