freedreno/ir3: fix lowered ubo region alignment
[mesa.git] / src / freedreno / ir3 / ir3_nir_analyze_ubo_ranges.c
/*
 * Copyright © 2019 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ir3_nir.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_dynarray.h"
#include "mesa/main/macros.h"

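/* Compute the byte interval of the UBO touched by this load_ubo.  Both the
 * start and the end of the range are rounded out to 16 * 4 bytes (four
 * vec4s), presumably to keep the lowered region aligned the way the const
 * upload expects.
 */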
static inline struct ir3_ubo_range
get_ubo_load_range(nir_intrinsic_instr *instr)
{
	struct ir3_ubo_range r;

	const int bytes = nir_intrinsic_dest_components(instr) *
		(nir_dest_bit_size(instr->dest) / 8);

	r.start = ROUND_DOWN_TO(nir_src_as_uint(instr->src[1]), 16 * 4);
	r.end = ALIGN(r.start + bytes, 16 * 4);

	return r;
}

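/* Grow the per-block range to cover this load.  Loads with a non-constant
 * block index or offset are skipped here and handled by the pull path.
 */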
static void
gather_ubo_ranges(nir_intrinsic_instr *instr,
		  struct ir3_ubo_analysis_state *state)
{
	if (!nir_src_is_const(instr->src[0]))
		return;

	if (!nir_src_is_const(instr->src[1]))
		return;

	const struct ir3_ubo_range r = get_ubo_load_range(instr);
	const uint32_t block = nir_src_as_uint(instr->src[0]);

	if (r.start < state->range[block].start)
		state->range[block].start = r.start;
	if (state->range[block].end < r.end)
		state->range[block].end = r.end;
}

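/* Rewrite a load_ubo whose range was picked for upload into a load_uniform,
 * converting the offset from bytes to dwords and rebasing it onto the
 * block's slot in the upload.
 */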
static void
lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
			  struct ir3_ubo_analysis_state *state)
{
	/* We don't lower dynamic block index UBO loads to load_uniform, but we
	 * could probably with some effort determine a block stride in number of
	 * registers.
	 */
	if (!nir_src_is_const(instr->src[0]))
		return;

	const uint32_t block = nir_src_as_uint(instr->src[0]);

	if (block > 0) {
		/* We don't lower dynamic array indexing either, but we definitely should.
		 * We don't have a good way of determining the range of the dynamic
		 * access, so for now just fall back to pulling.
		 */
		if (!nir_src_is_const(instr->src[1]))
			return;

		/* After gathering the UBO access ranges, we limit the total
		 * upload. Reject if we're now outside the range.
		 */
		const struct ir3_ubo_range r = get_ubo_load_range(instr);
		if (!(state->range[block].start <= r.start &&
		      r.end <= state->range[block].end))
			return;
	}

	b->cursor = nir_before_instr(&instr->instr);

	nir_ssa_def *ubo_offset = nir_ssa_for_src(b, instr->src[1], 1);
	nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, ubo_offset, -2);
	if (new_offset)
		ubo_offset = new_offset;
	else
		ubo_offset = nir_ushr(b, ubo_offset, nir_imm_int(b, 2));

	const int range_offset =
		(state->range[block].offset - state->range[block].start) / 4;
	nir_ssa_def *uniform_offset =
		nir_iadd(b, ubo_offset, nir_imm_int(b, range_offset));

	nir_intrinsic_instr *uniform =
		nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
	uniform->num_components = instr->num_components;
	uniform->src[0] = nir_src_for_ssa(uniform_offset);
	nir_ssa_dest_init(&uniform->instr, &uniform->dest,
			  uniform->num_components, instr->dest.ssa.bit_size,
			  instr->dest.ssa.name);
	nir_builder_instr_insert(b, &uniform->instr);
	nir_ssa_def_rewrite_uses(&instr->dest.ssa,
				 nir_src_for_ssa(&uniform->dest.ssa));

	nir_instr_remove(&instr->instr);

	state->lower_count++;
}

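/* Gather the statically addressed UBO ranges used by the shader, pack them
 * into at most max_upload bytes of const space, then rewrite the qualifying
 * load_ubo instructions into load_uniform.  Returns true if anything was
 * lowered.
 */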
bool
ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader *shader)
{
	struct ir3_ubo_analysis_state *state = &shader->ubo_state;

	memset(state, 0, sizeof(*state));
	state->range[0].end = align(nir->num_uniforms * 16, 16 * 4); /* align to 4*vec4 */

	nir_foreach_function(function, nir) {
		if (function->impl) {
			nir_foreach_block(block, function->impl) {
				nir_foreach_instr(instr, block) {
					if (instr->type == nir_instr_type_intrinsic &&
					    nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_ubo)
						gather_ubo_ranges(nir_instr_as_intrinsic(instr), state);
				}
			}
		}
	}

	/* For now, everything we upload is accessed statically and thus will be
	 * used by the shader. Once we can upload dynamically indexed data, we may
	 * upload sparsely accessed arrays, at which point we probably want to
	 * give priority to smaller UBOs, on the assumption that big UBOs will be
	 * accessed dynamically. Alternatively, we can track statically and
	 * dynamically accessed ranges separately and upload static ranges
	 * first.
	 */
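	/* Assign each gathered range an offset in the upload, back to back,
	 * truncating any range that would push the total past max_upload.
	 */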
	const uint32_t max_upload = 16 * 1024;
	uint32_t offset = 0;
	for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
		uint32_t range_size = state->range[i].end - state->range[i].start;

		debug_assert(offset <= max_upload);
		state->range[i].offset = offset;
		if (offset + range_size > max_upload) {
			range_size = max_upload - offset;
			state->range[i].end = state->range[i].start + range_size;
		}
		offset += range_size;
	}
	state->size = offset;

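	/* Second pass: rewrite the load_ubo instructions that fall inside the
	 * uploaded ranges into load_uniform.
	 */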
	nir_foreach_function(function, nir) {
		if (function->impl) {
			nir_builder builder;
			nir_builder_init(&builder, function->impl);
			nir_foreach_block(block, function->impl) {
				nir_foreach_instr_safe(instr, block) {
					if (instr->type == nir_instr_type_intrinsic &&
					    nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_ubo)
						lower_ubo_load_to_uniform(nir_instr_as_intrinsic(instr), &builder, state);
				}
			}

			nir_metadata_preserve(function->impl, nir_metadata_block_index |
					      nir_metadata_dominance);
		}
	}

	return state->lower_count > 0;
}