/*
 * Copyright © 2019 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ir3_nir.h"
#include "ir3_compiler.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_math.h"
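
/* This pass analyzes the UBO loads in the shader, packs the most-used
 * ranges into the constant file, and rewrites the corresponding
 * load_ubo/load_ubo_ir3 intrinsics into load_uniform.
 *
 * Ranges are tracked at 64-byte (16 * 4, i.e. four-vec4) granularity.
 * For example, a 4-component 32-bit load at byte offset 72 yields the
 * range [ROUND_DOWN_TO(72, 64), ALIGN(72 + 16, 64)) = [64, 128).  Note
 * that the offset source of nir_intrinsic_load_ubo_ir3 is in units of
 * 16 bytes, so it is scaled to bytes first.
 */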
static inline struct ir3_ubo_range
get_ubo_load_range(nir_intrinsic_instr *instr)
{
   struct ir3_ubo_range r;

   int offset = nir_src_as_uint(instr->src[1]);
   if (instr->intrinsic == nir_intrinsic_load_ubo_ir3)
      offset *= 16;
   const int bytes = nir_intrinsic_dest_components(instr) * 4;

   r.start = ROUND_DOWN_TO(offset, 16 * 4);
   r.end = ALIGN(offset + bytes, 16 * 4);

   return r;
}
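
/* Finds the range already tracking the given UBO block, or (when
 * create_new is set) claims a fresh slot for it.  Unused slots are
 * recognized by the sentinel initialization in
 * ir3_nir_analyze_ubo_ranges() (start = UINT32_MAX, end = 0, so
 * end < start).
 */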
static struct ir3_ubo_range *
get_existing_range(nir_intrinsic_instr *instr,
                   struct ir3_ubo_analysis_state *state,
                   bool create_new)
{
   unsigned block, base = 0;
   bool bindless;
   if (nir_src_is_const(instr->src[0])) {
      block = nir_src_as_uint(instr->src[0]);
      bindless = false;
   } else {
      nir_intrinsic_instr *rsrc = ir3_bindless_resource(instr->src[0]);
      if (rsrc && nir_src_is_const(rsrc->src[0])) {
         block = nir_src_as_uint(rsrc->src[0]);
         base = nir_intrinsic_desc_set(rsrc);
         bindless = true;
      } else {
         return NULL;
      }
   }

   for (int i = 0; i < IR3_MAX_UBO_PUSH_RANGES; i++) {
      struct ir3_ubo_range *range = &state->range[i];
      if (range->end < range->start) {
         /* We don't have a matching range, but there are more available.
          */
         if (create_new) {
            range->block = block;
            range->bindless_base = base;
            range->bindless = bindless;
            return range;
         } else {
            return NULL;
         }
      } else if (range->block == block && range->bindless_base == base &&
                 range->bindless == bindless) {
         return range;
      }
   }

   return NULL;
}
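
/* Accumulates the vec4-aligned byte range accessed by a single UBO load
 * into the matching (or newly claimed) range.  Indirect loads on UBO 0
 * are still lowered to load_uniform later, so they force the range to
 * cover all of UBO 0; indirect loads on other blocks don't widen their
 * range.
 */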
static void
gather_ubo_ranges(nir_shader *nir, nir_intrinsic_instr *instr,
                  struct ir3_ubo_analysis_state *state)
{
   struct ir3_ubo_range *old_r = get_existing_range(instr, state, true);
   if (!old_r)
      return;

   if (!nir_src_is_const(instr->src[1])) {
      if (!old_r->bindless && old_r->block == 0) {
         /* If this is an indirect on UBO 0, we'll still lower it back to
          * load_uniform.  Set the range to cover all of UBO 0.
          */
         old_r->start = 0;
         old_r->end = ALIGN(nir->num_uniforms * 16, 16 * 4);
      }

      return;
   }

   const struct ir3_ubo_range r = get_ubo_load_range(instr);

   /* if UBO lowering is disabled, we still want to lower block 0
    * (which is normal uniforms):
    */
   if ((old_r->bindless || old_r->block != 0) && (ir3_shader_debug & IR3_DBG_NOUBOOPT))
      return;

   if (r.start < old_r->start)
      old_r->start = r.start;
   if (old_r->end < r.end)
      old_r->end = r.end;
}
/* For indirect offset, it is common to see a pattern of multiple
 * loads with the same base, but different constant offset, ie:
 *
 *    vec1 32 ssa_33 = iadd ssa_base, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Detect this, and peel out the const_offset part, to end up with:
 *
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset, 0, 0)
 *
 * Or, similarly:
 *
 *    vec1 32 ssa_33 = imad24_ir3 a, b, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Can be converted to:
 *
 *    vec1 32 ssa_base = imul24 a, b
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset, 0, 0)
 *
 * This gives the other opt passes something much easier to work
 * with (ie. not requiring value range tracking)
 */
static void
handle_partial_const(nir_builder *b, nir_ssa_def **srcp, int *offp)
{
   if ((*srcp)->parent_instr->type != nir_instr_type_alu)
      return;

   nir_alu_instr *alu = nir_instr_as_alu((*srcp)->parent_instr);

   if (alu->op == nir_op_imad24_ir3) {
      /* This case is slightly more complicated as we need to
       * replace the imad24_ir3 with an imul24:
       */
      if (!nir_src_is_const(alu->src[2].src))
         return;

      *offp += nir_src_as_uint(alu->src[2].src);
      *srcp = nir_imul24(b, nir_ssa_for_alu_src(b, alu, 0),
                         nir_ssa_for_alu_src(b, alu, 1));

      return;
   }

   if (alu->op != nir_op_iadd)
      return;

   if (!(alu->src[0].src.is_ssa && alu->src[1].src.is_ssa))
      return;

   if (nir_src_is_const(alu->src[0].src)) {
      *offp += nir_src_as_uint(alu->src[0].src);
      *srcp = alu->src[1].src.ssa;
   } else if (nir_src_is_const(alu->src[1].src)) {
      *srcp = alu->src[0].src.ssa;
      *offp += nir_src_as_uint(alu->src[1].src);
   }
}
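
/* Rewrites the block index src of a (non-bindless) load_ubo to account
 * for UBO 0 having been lowered away: e.g. a GL-level load from block 2
 * ends up reading the ir3_const pointer uploaded for slot 1.
 */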
static void
lower_ubo_block_decrement(nir_intrinsic_instr *instr, nir_builder *b)
{
   /* Skip shifting things for turnip's bindless resources. */
   if (ir3_bindless_resource(instr->src[0]))
      return;

   /* Shift all GL nir_intrinsic_load_ubo UBO indices down by 1, because we
    * have lowered block 0 off of load_ubo to constbuf and ir3_const only
    * uploads pointers for block 1-N.
    */
   nir_ssa_def *old_idx = nir_ssa_for_src(b, instr->src[0], 1);
   nir_ssa_def *new_idx = nir_iadd_imm(b, old_idx, -1);
   nir_instr_rewrite_src(&instr->instr, &instr->src[0],
                         nir_src_for_ssa(new_idx));
}
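
/* Rewrites a single load_ubo/load_ubo_ir3 whose range was selected for
 * upload into a load_uniform from the packed constant file.  Loads that
 * can't be lowered (dynamic block index, dynamic array index, or access
 * outside the clamped range) keep their load_ubo form and only get their
 * block index decremented.
 */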
static void
lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
                          struct ir3_ubo_analysis_state *state)
{
   b->cursor = nir_before_instr(&instr->instr);

   /* We don't lower dynamic block index UBO loads to load_uniform, but we
    * could probably with some effort determine a block stride in number of
    * registers.
    */
   struct ir3_ubo_range *range = get_existing_range(instr, state, false);
   if (!range) {
      lower_ubo_block_decrement(instr, b);
      return;
   }

   if (range->bindless || range->block > 0) {
      /* We don't lower dynamic array indexing either, but we definitely
       * should.  We don't have a good way of determining the range of the
       * dynamic access, so for now just fall back to pulling.
       */
      if (!nir_src_is_const(instr->src[1])) {
         lower_ubo_block_decrement(instr, b);
         return;
      }

      /* After gathering the UBO access ranges, we limit the total
       * upload.  Reject if we're now outside the range.
       */
      const struct ir3_ubo_range r = get_ubo_load_range(instr);
      if (!(range->start <= r.start && r.end <= range->end)) {
         lower_ubo_block_decrement(instr, b);
         return;
      }
   }

   nir_ssa_def *ubo_offset = nir_ssa_for_src(b, instr->src[1], 1);
   int const_offset = 0;

   handle_partial_const(b, &ubo_offset, &const_offset);

   /* UBO offset is in bytes, but uniform offset is in units of
    * dwords, so we need to divide by 4 (right-shift by 2).  For ldc the
    * offset is in units of 16 bytes, so we need to multiply by 4.  And
    * also the same for the constant part of the offset:
    */

   const int shift = instr->intrinsic == nir_intrinsic_load_ubo_ir3 ? 2 : -2;
   nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, ubo_offset, shift);
   nir_ssa_def *uniform_offset = NULL;
   if (new_offset) {
      uniform_offset = new_offset;
   } else {
      uniform_offset = shift > 0 ?
         nir_ishl(b, ubo_offset, nir_imm_int(b, shift)) :
         nir_ushr(b, ubo_offset, nir_imm_int(b, -shift));
   }
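
   /* For example, a regular load_ubo at byte offset 20 becomes uniform
    * dword 20 >> 2 = 5, while an ldc (load_ubo_ir3) offset of 5 16-byte
    * units becomes uniform dword 5 << 2 = 20.
    */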

   if (instr->intrinsic == nir_intrinsic_load_ubo_ir3) {
      const_offset <<= 2;
      const_offset += nir_intrinsic_base(instr);
   } else {
      debug_assert(!(const_offset & 0x3));
      const_offset >>= 2;
   }

   const int range_offset = ((int)range->offset - (int)range->start) / 4;
   const_offset += range_offset;

   /* The range_offset could be negative: if only part of the UBO
    * block is accessed, range->start can be greater than range->offset.
    * But we can't underflow const_offset.  If necessary we need to
    * insert nir instructions to compensate (which can hopefully be
    * optimized away)
    */
   if (const_offset < 0) {
      uniform_offset = nir_iadd_imm(b, uniform_offset, const_offset);
      const_offset = 0;
   }
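
   /* E.g. with range->start = 256 and range->offset = 0, range_offset
    * above is (0 - 256) / 4 = -64 dwords; the -64 is folded into
    * uniform_offset rather than producing a negative load_uniform base.
    */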

   nir_intrinsic_instr *uniform =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
   uniform->num_components = instr->num_components;
   uniform->src[0] = nir_src_for_ssa(uniform_offset);
   nir_intrinsic_set_base(uniform, const_offset);
   nir_ssa_dest_init(&uniform->instr, &uniform->dest,
                     uniform->num_components, instr->dest.ssa.bit_size,
                     instr->dest.ssa.name);
   nir_builder_instr_insert(b, &uniform->instr);
   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                            nir_src_for_ssa(&uniform->dest.ssa));

   nir_instr_remove(&instr->instr);

   state->lower_count++;
}
static bool
instr_is_load_ubo(nir_instr *instr)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_op op = nir_instr_as_intrinsic(instr)->intrinsic;
   return op == nir_intrinsic_load_ubo || op == nir_intrinsic_load_ubo_ir3;
}
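
/* Entry point: first gathers the byte ranges accessed by every UBO load,
 * then packs as many of them as fit (after the reserved user consts) into
 * the constant file, and finally rewrites the qualifying loads in place.
 * Returns true if any load was lowered.
 */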
bool
ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader *shader)
{
   struct ir3_ubo_analysis_state *state = &shader->ubo_state;

   memset(state, 0, sizeof(*state));
   for (int i = 0; i < IR3_MAX_UBO_PUSH_RANGES; i++) {
      state->range[i].start = UINT32_MAX;
   }

   nir_foreach_function (function, nir) {
      if (function->impl) {
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr (instr, block) {
               if (instr_is_load_ubo(instr))
                  gather_ubo_ranges(nir, nir_instr_as_intrinsic(instr), state);
            }
         }
      }
   }

   /* For now, everything we upload is accessed statically and thus will be
    * used by the shader.  Once we can upload dynamically indexed data, we
    * may upload sparsely accessed arrays, at which point we probably want
    * to give priority to smaller UBOs, on the assumption that big UBOs will
    * be accessed dynamically.  Alternatively, we can track statically and
    * dynamically accessed ranges separately and upload static ranges first.
    */
   const uint32_t max_upload = 16 * 1024;
   uint32_t offset = shader->const_state.num_reserved_user_consts * 16;
   state->num_enabled = ARRAY_SIZE(state->range);
   for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
      if (state->range[i].start >= state->range[i].end) {
         state->num_enabled = i;
         break;
      }

      uint32_t range_size = state->range[i].end - state->range[i].start;

      debug_assert(offset <= max_upload);
      state->range[i].offset = offset;
      if (offset + range_size > max_upload) {
         range_size = max_upload - offset;
         state->range[i].end = state->range[i].start + range_size;
      }
      offset += range_size;
   }
   state->size = offset;
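
   /* Now that the ranges are chosen and clamped, rewrite every qualifying
    * load_ubo/load_ubo_ir3 in place.
    */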
   nir_foreach_function (function, nir) {
      if (function->impl) {
         nir_builder builder;
         nir_builder_init(&builder, function->impl);
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr_safe (instr, block) {
               if (instr_is_load_ubo(instr))
                  lower_ubo_load_to_uniform(nir_instr_as_intrinsic(instr), &builder, state);
            }
         }

         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                               nir_metadata_dominance);
      }
   }

   /* If we previously had UBO 0, it's been lowered off of load_ubo and all
    * the others were shifted down.
    */
   if (nir->info.num_ubos >= 1 && nir->info.first_ubo_is_default_ubo)
      nir->info.num_ubos--;

   return state->lower_count > 0;
}