src/freedreno/ir3/ir3_nir_analyze_ubo_ranges.c (mesa.git)
/*
 * Copyright © 2019 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ir3_nir.h"
#include "ir3_compiler.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_math.h"

static inline struct ir3_ubo_range
get_ubo_load_range(nir_intrinsic_instr *instr)
{
   struct ir3_ubo_range r;

   int offset = nir_src_as_uint(instr->src[1]);
   if (instr->intrinsic == nir_intrinsic_load_ubo_ir3)
      offset *= 16;
   const int bytes = nir_intrinsic_dest_components(instr) * 4;

   r.start = ROUND_DOWN_TO(offset, 16 * 4);
   r.end = ALIGN(offset + bytes, 16 * 4);

   return r;
}
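/* Worked example (illustrative): a 4-component load_ubo with a constant
 * byte offset of 72 covers bytes 72..88 of the block, so the range
 * returned by get_ubo_load_range() is start = 64, end = 128, since
 * ranges are tracked at 16 * 4 = 64 byte (four vec4) granularity.
 */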

static struct ir3_ubo_range *
get_existing_range(nir_intrinsic_instr *instr,
                   struct ir3_ubo_analysis_state *state,
                   bool create_new)
{
   unsigned block, base = 0;
   bool bindless;
   if (nir_src_is_const(instr->src[0])) {
      block = nir_src_as_uint(instr->src[0]);
      bindless = false;
   } else {
      nir_intrinsic_instr *rsrc = ir3_bindless_resource(instr->src[0]);
      if (rsrc && nir_src_is_const(rsrc->src[0])) {
         block = nir_src_as_uint(rsrc->src[0]);
         base = nir_intrinsic_desc_set(rsrc);
         bindless = true;
      } else {
         return NULL;
      }
   }
   for (int i = 0; i < IR3_MAX_UBO_PUSH_RANGES; i++) {
      struct ir3_ubo_range *range = &state->range[i];
      if (range->end < range->start) {
         /* We don't have a matching range, but there are more available.
          */
         if (create_new) {
            range->block = block;
            range->bindless_base = base;
            range->bindless = bindless;
            return range;
         } else {
            return NULL;
         }
      } else if (range->block == block && range->bindless_base == base &&
                 range->bindless == bindless) {
         return range;
      }
   }

   return NULL;
}
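/* Note: a slot in state->range[] counts as unused while end < start;
 * ir3_nir_analyze_ubo_ranges() initializes start to UINT32_MAX (and the
 * memset leaves end at 0), so the first never-written slot terminates
 * the search in get_existing_range() and is handed out when create_new
 * is set.
 */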

static void
gather_ubo_ranges(nir_shader *nir, nir_intrinsic_instr *instr,
                  struct ir3_ubo_analysis_state *state)
{
   struct ir3_ubo_range *old_r = get_existing_range(instr, state, true);
   if (!old_r)
      return;

   if (!nir_src_is_const(instr->src[1])) {
      if (!old_r->bindless && old_r->block == 0) {
         /* If this is an indirect on UBO 0, we'll still lower it back to
          * load_uniform. Set the range to cover all of UBO 0.
          */
         state->range[0].start = 0;
         state->range[0].end = ALIGN(nir->num_uniforms * 16, 16 * 4);
      }

      return;
   }

   const struct ir3_ubo_range r = get_ubo_load_range(instr);

   /* if UBO lowering is disabled, we still want to lower block 0
    * (which is normal uniforms):
    */
   if ((old_r->bindless || old_r->block != 0) && (ir3_shader_debug & IR3_DBG_NOUBOOPT))
      return;

   if (r.start < old_r->start)
      old_r->start = r.start;
   if (old_r->end < r.end)
      old_r->end = r.end;
}

/* For indirect offset, it is common to see a pattern of multiple
 * loads with the same base, but different constant offset, ie:
 *
 *    vec1 32 ssa_33 = iadd ssa_base, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Detect this, and peel out the const_offset part, to end up with:
 *
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset, 0, 0)
 *
 * Or similarly:
 *
 *    vec1 32 ssa_33 = imad24_ir3 a, b, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Can be converted to:
 *
 *    vec1 32 ssa_base = imul24 a, b
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset, 0, 0)
 *
 * This gives the other opt passes something much easier to work
 * with (ie. not requiring value range tracking)
 */
static void
handle_partial_const(nir_builder *b, nir_ssa_def **srcp, unsigned *offp)
{
   if ((*srcp)->parent_instr->type != nir_instr_type_alu)
      return;

   nir_alu_instr *alu = nir_instr_as_alu((*srcp)->parent_instr);

   if (alu->op == nir_op_imad24_ir3) {
      /* This case is slightly more complicated as we need to
       * replace the imad24_ir3 with an imul24:
       */
      if (!nir_src_is_const(alu->src[2].src))
         return;

      *offp += nir_src_as_uint(alu->src[2].src);
      *srcp = nir_imul24(b, nir_ssa_for_alu_src(b, alu, 0),
                         nir_ssa_for_alu_src(b, alu, 1));

      return;
   }

   if (alu->op != nir_op_iadd)
      return;

   if (!(alu->src[0].src.is_ssa && alu->src[1].src.is_ssa))
      return;

   if (nir_src_is_const(alu->src[0].src)) {
      *offp += nir_src_as_uint(alu->src[0].src);
      *srcp = alu->src[1].src.ssa;
   } else if (nir_src_is_const(alu->src[1].src)) {
      *srcp = alu->src[0].src.ssa;
      *offp += nir_src_as_uint(alu->src[1].src);
   }
}
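/* Note: handle_partial_const() peels a single level only: either one
 * constant iadd source or the constant third source of an imad24_ir3.
 * Chains of adds are not walked recursively.
 */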

static void
lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
                          struct ir3_ubo_analysis_state *state)
{
   /* We don't lower dynamic block index UBO loads to load_uniform, but we
    * could probably with some effort determine a block stride in number of
    * registers.
    */
   struct ir3_ubo_range *range = get_existing_range(instr, state, false);
   if (!range)
      return;

   if (range->bindless || range->block > 0) {
      /* We don't lower dynamic array indexing either, but we definitely should.
       * We don't have a good way of determining the range of the dynamic
       * access, so for now just fall back to pulling.
       */
      if (!nir_src_is_const(instr->src[1]))
         return;

      /* After gathering the UBO access ranges, we limit the total
       * upload. Reject if we're now outside the range.
       */
      const struct ir3_ubo_range r = get_ubo_load_range(instr);
      if (!(range->start <= r.start && r.end <= range->end))
         return;
   }

   b->cursor = nir_before_instr(&instr->instr);

   nir_ssa_def *ubo_offset = nir_ssa_for_src(b, instr->src[1], 1);
   unsigned const_offset = 0;

   handle_partial_const(b, &ubo_offset, &const_offset);

   /* UBO offset is in bytes, but uniform offset is in units of
    * dwords, so we need to divide by 4 (right-shift by 2). For ldc the
    * offset is in units of 16 bytes, so we need to multiply by 4. And
    * also the same for the constant part of the offset:
    */

   const int shift = instr->intrinsic == nir_intrinsic_load_ubo_ir3 ? 2 : -2;
   nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, ubo_offset, shift);
   nir_ssa_def *uniform_offset = NULL;
   if (new_offset) {
      uniform_offset = new_offset;
   } else {
      uniform_offset = shift > 0 ?
         nir_ishl(b, ubo_offset, nir_imm_int(b, shift)) :
         nir_ushr(b, ubo_offset, nir_imm_int(b, -shift));
   }
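   /* Whichever path was taken, uniform_offset now holds the dynamic part
    * of the offset in units of dwords: either the shift was folded into
    * the instruction producing ubo_offset, or an explicit ishl/ushr was
    * emitted above.
    */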

   if (instr->intrinsic == nir_intrinsic_load_ubo_ir3) {
      const_offset <<= 2;
      const_offset += nir_intrinsic_base(instr);
   } else {
      debug_assert(!(const_offset & 0x3));
      const_offset >>= 2;
   }

   const int range_offset = (range->offset - range->start) / 4;
   const_offset += range_offset;
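   /* Illustrative example for the plain load_ubo (byte offset) path: a
    * range starting at byte 64 of the UBO that was assigned upload offset
    * 256 gives range_offset = (256 - 64) / 4 = 48 dwords; a constant byte
    * offset of 16 contributes 16 >> 2 = 4 dwords, for a load_uniform base
    * of 52 on top of the dynamic dword offset.
    */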

   nir_intrinsic_instr *uniform =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
   uniform->num_components = instr->num_components;
   uniform->src[0] = nir_src_for_ssa(uniform_offset);
   nir_intrinsic_set_base(uniform, const_offset);
   nir_ssa_dest_init(&uniform->instr, &uniform->dest,
                     uniform->num_components, instr->dest.ssa.bit_size,
                     instr->dest.ssa.name);
   nir_builder_instr_insert(b, &uniform->instr);
   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                            nir_src_for_ssa(&uniform->dest.ssa));

   nir_instr_remove(&instr->instr);

   state->lower_count++;
}

static bool
instr_is_load_ubo(nir_instr *instr)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_op op = nir_instr_as_intrinsic(instr)->intrinsic;
   return op == nir_intrinsic_load_ubo || op == nir_intrinsic_load_ubo_ir3;
}

bool
ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader *shader)
{
   struct ir3_ubo_analysis_state *state = &shader->ubo_state;

   memset(state, 0, sizeof(*state));
   for (int i = 0; i < IR3_MAX_UBO_PUSH_RANGES; i++) {
      state->range[i].start = UINT32_MAX;
   }

   nir_foreach_function (function, nir) {
      if (function->impl) {
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr (instr, block) {
               if (instr_is_load_ubo(instr))
                  gather_ubo_ranges(nir, nir_instr_as_intrinsic(instr), state);
            }
         }
      }
   }

   /* For now, everything we upload is accessed statically and thus will be
    * used by the shader. Once we can upload dynamically indexed data, we may
    * upload sparsely accessed arrays, at which point we probably want to
    * give priority to smaller UBOs, on the assumption that big UBOs will be
    * accessed dynamically. Alternatively, we can track statically and
    * dynamically accessed ranges separately and upload static ranges
    * first.
    */
   const uint32_t max_upload = 16 * 1024;
   uint32_t offset = shader->const_state.num_reserved_user_consts * 16;
   state->num_enabled = ARRAY_SIZE(state->range);
   for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
      if (state->range[i].start >= state->range[i].end) {
         state->num_enabled = i;
         break;
      }

      uint32_t range_size = state->range[i].end - state->range[i].start;

      debug_assert(offset <= max_upload);
      state->range[i].offset = offset;
      if (offset + range_size > max_upload) {
         range_size = max_upload - offset;
         state->range[i].end = state->range[i].start + range_size;
      }
      offset += range_size;
   }
   state->size = offset;
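   /* Illustrative example: with 4 reserved user consts, offset starts at
    * 4 * 16 = 64 bytes; a 128-byte range[0] is then placed at offset 64
    * and a 64-byte range[1] at offset 192, for state->size = 256 bytes of
    * pushed UBO data. A range that would cross max_upload is truncated
    * rather than dropped.
    */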

   nir_foreach_function (function, nir) {
      if (function->impl) {
         nir_builder builder;
         nir_builder_init(&builder, function->impl);
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr_safe (instr, block) {
               if (instr_is_load_ubo(instr))
                  lower_ubo_load_to_uniform(nir_instr_as_intrinsic(instr), &builder, state);
            }
         }

         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                               nir_metadata_dominance);
      }
   }

   return state->lower_count > 0;
}