/*
 * Copyright © 2019 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ir3_nir.h"
#include "ir3_compiler.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "mesa/main/macros.h"
static inline struct ir3_ubo_range
get_ubo_load_range(nir_intrinsic_instr *instr)
{
   struct ir3_ubo_range r;

   const int offset = nir_src_as_uint(instr->src[1]);
   const int bytes = nir_intrinsic_dest_components(instr) * 4;

   r.start = ROUND_DOWN_TO(offset, 16 * 4);
   r.end = ALIGN(offset + bytes, 16 * 4);

   return r;
}
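
/* Worked example (illustrative numbers, not from the driver itself): a
 * vec4 load_ubo at byte offset 72 has bytes = 4 * 4 = 16, so
 * r.start = ROUND_DOWN_TO(72, 64) = 64 and r.end = ALIGN(72 + 16, 64) = 128.
 * Ranges therefore always cover whole 64-byte (16 dword) granules.
 */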
static void
gather_ubo_ranges(nir_shader *nir, nir_intrinsic_instr *instr,
                  struct ir3_ubo_analysis_state *state)
{
   if (!nir_src_is_const(instr->src[0]))
      return;

   if (!nir_src_is_const(instr->src[1])) {
      if (nir_src_as_uint(instr->src[0]) == 0) {
         /* If this is an indirect on UBO 0, we'll still lower it back to
          * load_uniform.  Set the range to cover all of UBO 0.
          */
         state->range[0].end = align(nir->num_uniforms * 16, 16 * 4);
      }

      return;
   }

   const struct ir3_ubo_range r = get_ubo_load_range(instr);
   const uint32_t block = nir_src_as_uint(instr->src[0]);

   /* if UBO lowering is disabled, we still want to lower block 0
    * (which is normal uniforms):
    */
   if ((block > 0) && (ir3_shader_debug & IR3_DBG_NOUBOOPT))
      return;

   if (r.start < state->range[block].start)
      state->range[block].start = r.start;
   if (state->range[block].end < r.end)
      state->range[block].end = r.end;
}
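
/* Example of how ranges grow (hypothetical shader, for illustration):
 * two constant-offset vec4 loads from block 1 at bytes 0..16 and 72..88
 * leave state->range[1] = [0, 128), since each access is first widened
 * to a 64-byte granule by get_ubo_load_range() and then merged into the
 * running min/max for that block.
 */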
/* For indirect offset, it is common to see a pattern of multiple
 * loads with the same base, but different constant offset, ie:
 *
 *    vec1 32 ssa_33 = iadd ssa_base, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Detect this, and peel out the const_offset part, to end up with:
 *
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset, 0, 0)
 *
 * This gives the other opt passes something much easier to work
 * with (ie. not requiring value range tracking)
 */
static void
handle_partial_const(nir_ssa_def **srcp, unsigned *offp)
{
   if ((*srcp)->parent_instr->type != nir_instr_type_alu)
      return;

   nir_alu_instr *alu = nir_instr_as_alu((*srcp)->parent_instr);
   if (alu->op != nir_op_iadd)
      return;

   if (!(alu->src[0].src.is_ssa && alu->src[1].src.is_ssa))
      return;

   if (nir_src_is_const(alu->src[0].src)) {
      *offp += nir_src_as_uint(alu->src[0].src);
      *srcp = alu->src[1].src.ssa;
   } else if (nir_src_is_const(alu->src[1].src)) {
      *srcp = alu->src[0].src.ssa;
      *offp += nir_src_as_uint(alu->src[1].src);
   }
}
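
/* For example (hypothetical SSA names): starting from
 *
 *    ssa_33 = iadd ssa_base, 16
 *
 * a call with *srcp == ssa_33 and *offp == 0 leaves *srcp == ssa_base
 * and *offp == 16.  Note the offset stays in bytes here; the caller
 * converts to dwords afterwards.
 */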
static void
lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
                          struct ir3_ubo_analysis_state *state)
{
   /* We don't lower dynamic block index UBO loads to load_uniform, but we
    * could probably with some effort determine a block stride in number of
    * registers.
    */
   if (!nir_src_is_const(instr->src[0]))
      return;

   const uint32_t block = nir_src_as_uint(instr->src[0]);

   if (block > 0) {
      /* We don't lower dynamic array indexing either, but we definitely
       * should.  We don't have a good way of determining the range of the
       * dynamic access, so for now just fall back to pulling.
       */
      if (!nir_src_is_const(instr->src[1]))
         return;

      /* After gathering the UBO access ranges, we limit the total
       * upload.  Reject if we're now outside the range.
       */
      const struct ir3_ubo_range r = get_ubo_load_range(instr);
      if (!(state->range[block].start <= r.start &&
            r.end <= state->range[block].end))
         return;
   }

   b->cursor = nir_before_instr(&instr->instr);

   nir_ssa_def *ubo_offset = nir_ssa_for_src(b, instr->src[1], 1);
   unsigned const_offset = 0;

   handle_partial_const(&ubo_offset, &const_offset);

   /* UBO offset is in bytes, but uniform offset is in units of
    * dwords, so we need to divide by 4 (right-shift by 2).  And
    * also the same for the constant part of the offset:
    */
   nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, ubo_offset, -2);
   nir_ssa_def *uniform_offset = NULL;
   if (new_offset)
      uniform_offset = new_offset;
   else
      uniform_offset = nir_ushr(b, ubo_offset, nir_imm_int(b, 2));

   debug_assert(!(const_offset & 0x3));
   const_offset >>= 2;

   const int range_offset =
      (state->range[block].offset - state->range[block].start) / 4;
   const_offset += range_offset;

   nir_intrinsic_instr *uniform =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
   uniform->num_components = instr->num_components;
   uniform->src[0] = nir_src_for_ssa(uniform_offset);
   nir_intrinsic_set_base(uniform, const_offset);
   nir_ssa_dest_init(&uniform->instr, &uniform->dest,
                     uniform->num_components, instr->dest.ssa.bit_size,
                     instr->dest.ssa.name);
   nir_builder_instr_insert(b, &uniform->instr);
   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                            nir_src_for_ssa(&uniform->dest.ssa));

   nir_instr_remove(&instr->instr);

   state->lower_count++;
}
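
/* Worked example (illustrative, assuming hypothetical SSA names): an
 * indirect load from UBO 0 with offset "iadd ssa_base, 80" is rewritten
 * as load_uniform with src = ssa_base >> 2 and base = (80 >> 2) +
 * range_offset; with range[0].offset == range[0].start == 0 that gives
 * base = 20 dwords.
 */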
bool
ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader *shader)
{
   struct ir3_ubo_analysis_state *state = &shader->ubo_state;

   memset(state, 0, sizeof(*state));

   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_foreach_block(block, function->impl) {
            nir_foreach_instr(instr, block) {
               if (instr->type == nir_instr_type_intrinsic &&
                   nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_ubo)
                  gather_ubo_ranges(nir, nir_instr_as_intrinsic(instr), state);
            }
         }
      }
   }

   /* For now, everything we upload is accessed statically and thus will be
    * used by the shader. Once we can upload dynamically indexed data, we may
    * upload sparsely accessed arrays, at which point we probably want to
    * give priority to smaller UBOs, on the assumption that big UBOs will be
    * accessed dynamically.  Alternatively, we can track statically and
    * dynamically accessed ranges separately and upload static ranges first.
    */
   const uint32_t max_upload = 16 * 1024;
   uint32_t offset = 0;
   for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
      uint32_t range_size = state->range[i].end - state->range[i].start;

      debug_assert(offset <= max_upload);
      state->range[i].offset = offset;
      if (offset + range_size > max_upload) {
         range_size = max_upload - offset;
         state->range[i].end = state->range[i].start + range_size;
      }
      offset += range_size;
   }
   state->size = offset;
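
   /* Worked example of the packing above (illustrative numbers): with
    * range[0] = [0, 256) and range[1] = [64, 192) and all other ranges
    * empty, the loop assigns range[0].offset = 0 and range[1].offset =
    * 256, and state->size ends up as 256 + 128 = 384 bytes to upload.
    */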
   nir_foreach_function(function, nir) {
      if (function->impl) {
         nir_builder builder;
         nir_builder_init(&builder, function->impl);
         nir_foreach_block(block, function->impl) {
            nir_foreach_instr_safe(instr, block) {
               if (instr->type == nir_instr_type_intrinsic &&
                   nir_instr_as_intrinsic(instr)->intrinsic == nir_intrinsic_load_ubo)
                  lower_ubo_load_to_uniform(nir_instr_as_intrinsic(instr), &builder, state);
            }
         }

         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                               nir_metadata_dominance);
      }
   }

   return state->lower_count > 0;
}
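
/* Sketch of how the pass is typically driven (hypothetical caller; the
 * real call site lives elsewhere in the ir3 code, not in this file):
 *
 *    bool progress = ir3_nir_analyze_ubo_ranges(nir, shader);
 *
 * A true return means at least one load_ubo became load_uniform, so a
 * caller would normally follow up with the usual NIR cleanup passes
 * (copy propagation, DCE) to fold the rewritten offsets.
 */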