/*
 * Copyright © 2019 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
25 #include "ir3_compiler.h"
26 #include "compiler/nir/nir.h"
27 #include "compiler/nir/nir_builder.h"
28 #include "util/u_math.h"
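
/*
 * Overview: ir3_nir_analyze_ubo_ranges() scans the shader and records, per
 * (bindful or bindless) UBO, the byte ranges that are loaded at constant
 * offsets, then packs as many of those ranges as fit into the spare
 * const-file space.  ir3_nir_lower_ubo_loads() then rewrites the qualifying
 * load_ubo intrinsics into load_uniform intrinsics that read from the
 * pushed copy; any load it can't prove in-range stays a regular (pulled)
 * UBO load.
 */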

static bool
ubo_is_gl_uniforms(const struct ir3_ubo_info *ubo)
{
   return !ubo->bindless && ubo->block == 0;
}
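
/* In GL, default-block uniforms are lowered to loads from (bindful) UBO 0,
 * with shader_info::first_ubo_is_default_ubo set; that is why a non-bindless
 * block 0 identifies the GL default uniform block here and below.
 */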

static inline struct ir3_ubo_range
get_ubo_load_range(nir_shader *nir, nir_intrinsic_instr *instr,
                   uint32_t alignment)
{
   struct ir3_ubo_range r;

   if (nir_src_is_const(instr->src[1])) {
      int offset = nir_src_as_uint(instr->src[1]);
      const int bytes = nir_intrinsic_dest_components(instr) * 4;

      r.start = ROUND_DOWN_TO(offset, alignment * 16);
      r.end = ALIGN(offset + bytes, alignment * 16);
   } else {
      /* The other valid place to call this is on the GL default uniform
       * block.
       */
      assert(nir_src_as_uint(instr->src[0]) == 0);
      r.start = 0;
      r.end = ALIGN(nir->num_uniforms * 16, alignment * 16);
   }

   return r;
}
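
/* Note: `alignment` here is the compiler's const_upload_unit, in units of
 * vec4s (hence the `* 16` to get bytes), so range bounds are snapped to the
 * granularity at which the driver can upload constants.
 */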

static bool
get_ubo_info(nir_intrinsic_instr *instr, struct ir3_ubo_info *ubo)
{
   if (nir_src_is_const(instr->src[0])) {
      ubo->block = nir_src_as_uint(instr->src[0]);
      ubo->bindless_base = 0;
      ubo->bindless = false;
      return true;
   } else {
      nir_intrinsic_instr *rsrc = ir3_bindless_resource(instr->src[0]);
      if (rsrc && nir_src_is_const(rsrc->src[0])) {
         ubo->block = nir_src_as_uint(rsrc->src[0]);
         ubo->bindless_base = nir_intrinsic_desc_set(rsrc);
         ubo->bindless = true;
         return true;
      }
   }
   return false;
}
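
/* The ir3_ubo_info filled in above is compared with memcmp() by the range
 * lookups below, which is why callers zero-initialize the struct: any
 * padding bytes must not produce spurious mismatches.
 */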

/**
 * Get an existing range associated with the ubo, but don't create a new
 * one if it does not already exist.
 */
static const struct ir3_ubo_range *
get_existing_range(nir_intrinsic_instr *instr,
                   const struct ir3_ubo_analysis_state *state)
{
   struct ir3_ubo_info ubo = {};

   if (!get_ubo_info(instr, &ubo))
      return NULL;

   for (int i = 0; i < IR3_MAX_UBO_PUSH_RANGES; i++) {
      const struct ir3_ubo_range *range = &state->range[i];
      if (range->end < range->start) {
         break;
      } else if (!memcmp(&range->ubo, &ubo, sizeof(ubo))) {
         return range;
      }
   }

   return NULL;
}

/**
 * Get an existing range, or create a new one if necessary/possible.
 */
static struct ir3_ubo_range *
get_range(nir_intrinsic_instr *instr, struct ir3_ubo_analysis_state *state)
{
   struct ir3_ubo_info ubo = {};

   if (!get_ubo_info(instr, &ubo))
      return NULL;

   for (int i = 0; i < IR3_MAX_UBO_PUSH_RANGES; i++) {
      struct ir3_ubo_range *range = &state->range[i];
      if (range->end < range->start) {
         /* We don't have a matching range, but there are more available.
          */
         range->ubo = ubo;
         return range;
      } else if (!memcmp(&range->ubo, &ubo, sizeof(ubo))) {
         return range;
      }
   }

   return NULL;
}

static void
gather_ubo_ranges(nir_shader *nir, nir_intrinsic_instr *instr,
                  struct ir3_ubo_analysis_state *state, uint32_t alignment)
{
   if (ir3_shader_debug & IR3_DBG_NOUBOOPT)
      return;

   struct ir3_ubo_range *old_r = get_range(instr, state);
   if (!old_r)
      return;

   /* We don't know how to get the size of UBOs being indirected on, other
    * than on the GL uniforms where we have some other shader_info data.
    */
   if (!nir_src_is_const(instr->src[1]) && !ubo_is_gl_uniforms(&old_r->ubo))
      return;

   const struct ir3_ubo_range r = get_ubo_load_range(nir, instr, alignment);

   if (r.start < old_r->start)
      old_r->start = r.start;
   if (old_r->end < r.end)
      old_r->end = r.end;
}

/* For indirect offsets, it is common to see a pattern of multiple
 * loads with the same base, but different constant offsets, ie:
 *
 *    vec1 32 ssa_33 = iadd ssa_base, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * Detect this, and peel out the const_offset part, to end up with:
 *
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset, 0, 0)
 *
 * Or similarly:
 *
 *    vec1 32 ssa_33 = imad24_ir3 a, b, const_offset
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_33) (base=N, 0, 0)
 *
 * which can be converted to:
 *
 *    vec1 32 ssa_base = imul24 a, b
 *    vec4 32 ssa_34 = intrinsic load_uniform (ssa_base) (base=N+const_offset, 0, 0)
 *
 * This gives the other opt passes something much easier to work
 * with (ie. not requiring value range tracking).
 */
static void
handle_partial_const(nir_builder *b, nir_ssa_def **srcp, int *offp)
{
   if ((*srcp)->parent_instr->type != nir_instr_type_alu)
      return;

   nir_alu_instr *alu = nir_instr_as_alu((*srcp)->parent_instr);

   if (alu->op == nir_op_imad24_ir3) {
      /* This case is slightly more complicated as we need to
       * replace the imad24_ir3 with an imul24:
       */
      if (!nir_src_is_const(alu->src[2].src))
         return;

      *offp += nir_src_as_uint(alu->src[2].src);
      *srcp = nir_imul24(b, nir_ssa_for_alu_src(b, alu, 0),
                         nir_ssa_for_alu_src(b, alu, 1));

      return;
   }

   if (alu->op != nir_op_iadd)
      return;

   if (!(alu->src[0].src.is_ssa && alu->src[1].src.is_ssa))
      return;

   if (nir_src_is_const(alu->src[0].src)) {
      *offp += nir_src_as_uint(alu->src[0].src);
      *srcp = alu->src[1].src.ssa;
   } else if (nir_src_is_const(alu->src[1].src)) {
      *srcp = alu->src[0].src.ssa;
      *offp += nir_src_as_uint(alu->src[1].src);
   }
}
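
/* For example, with *srcp pointing at ssa_33 from the iadd pattern above
 * and *offp == 0, handle_partial_const() leaves *srcp == ssa_base and
 * *offp == const_offset, which the caller then folds into the
 * load_uniform base.
 */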

/* Tracks the maximum bindful UBO accessed so that we reduce the UBO
 * descriptors emitted in the fast path for GL.
 */
static void
track_ubo_use(nir_intrinsic_instr *instr, nir_builder *b, int *num_ubos)
{
   if (ir3_bindless_resource(instr->src[0])) {
      assert(!b->shader->info.first_ubo_is_default_ubo); /* only set for GL */
      return;
   }

   if (nir_src_is_const(instr->src[0])) {
      int block = nir_src_as_uint(instr->src[0]);
      *num_ubos = MAX2(*num_ubos, block + 1);
   } else {
      /* With a dynamic block index we can't tell which UBOs are touched,
       * so assume the worst:
       */
      *num_ubos = b->shader->info.num_ubos;
   }
}

static bool
lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
                          const struct ir3_ubo_analysis_state *state,
                          int *num_ubos, uint32_t alignment)
{
   b->cursor = nir_before_instr(&instr->instr);

   /* We don't lower dynamic block index UBO loads to load_uniform, but we
    * could probably with some effort determine a block stride in number of
    * registers.
    */
   const struct ir3_ubo_range *range = get_existing_range(instr, state);
   if (!range) {
      track_ubo_use(instr, b, num_ubos);
      return false;
   }

   /* We don't have a good way of determining the range of the dynamic
    * access in general, so for now just fall back to pulling.
    */
   if (!nir_src_is_const(instr->src[1]) && !ubo_is_gl_uniforms(&range->ubo)) {
      track_ubo_use(instr, b, num_ubos);
      return false;
   }

   /* After gathering the UBO access ranges, we limit the total
    * upload. Don't lower if this load is outside the range.
    */
   const struct ir3_ubo_range r = get_ubo_load_range(b->shader, instr,
                                                     alignment);
   if (!(range->start <= r.start && r.end <= range->end)) {
      track_ubo_use(instr, b, num_ubos);
      return false;
   }

   nir_ssa_def *ubo_offset = nir_ssa_for_src(b, instr->src[1], 1);
   int const_offset = 0;

   handle_partial_const(b, &ubo_offset, &const_offset);

   /* UBO offset is in bytes, but uniform offset is in units of
    * dwords, so we need to divide by 4 (right-shift by 2). For ldc the
    * offset is in units of 16 bytes, so we need to multiply by 4. And
    * also the same for the constant part of the offset:
    */
   const int shift = -2;
   nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, ubo_offset, -2);
   nir_ssa_def *uniform_offset = NULL;
   if (new_offset) {
      uniform_offset = new_offset;
   } else {
      uniform_offset = shift > 0 ?
         nir_ishl(b, ubo_offset, nir_imm_int(b, shift)) :
         nir_ushr(b, ubo_offset, nir_imm_int(b, -shift));
   }

   debug_assert(!(const_offset & 0x3));
   const_offset >>= 2;

   const int range_offset = ((int)range->offset - (int)range->start) / 4;
   const_offset += range_offset;

   /* The range_offset could be negative: if only part of the UBO
    * block is accessed, range->start can be greater than range->offset.
    * But we can't underflow const_offset. If necessary we need to
    * insert nir instructions to compensate (which can hopefully be
    * optimized away).
    */
   if (const_offset < 0) {
      uniform_offset = nir_iadd_imm(b, uniform_offset, const_offset);
      const_offset = 0;
   }

   nir_intrinsic_instr *uniform =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
   uniform->num_components = instr->num_components;
   uniform->src[0] = nir_src_for_ssa(uniform_offset);
   nir_intrinsic_set_base(uniform, const_offset);
   nir_ssa_dest_init(&uniform->instr, &uniform->dest,
                     uniform->num_components, instr->dest.ssa.bit_size,
                     instr->dest.ssa.name);
   nir_builder_instr_insert(b, &uniform->instr);
   nir_ssa_def_rewrite_uses(&instr->dest.ssa,
                            nir_src_for_ssa(&uniform->dest.ssa));

   nir_instr_remove(&instr->instr);

   return true;
}
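
/* Putting it together: supposing bytes [0, 64) of bindful UBO 0 were pushed
 * to const-file byte offset range->offset, a load like
 *
 *    vec4 32 ssa_2 = intrinsic load_ubo (const 0) (const 32) ...
 *
 * ends up as a load_uniform whose offset source is 32 >> 2 == dword 8, on
 * top of a base of (range->offset - range->start) / 4 dwords.
 */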

static bool
instr_is_load_ubo(nir_instr *instr)
{
   if (instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_op op = nir_instr_as_intrinsic(instr)->intrinsic;

   /* ir3_nir_lower_io_offsets happens after this pass. */
   assert(op != nir_intrinsic_load_ubo_ir3);

   return op == nir_intrinsic_load_ubo;
}

void
ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_const_state *const_state = ir3_const_state(v);
   struct ir3_ubo_analysis_state *state = &const_state->ubo_state;
   struct ir3_compiler *compiler = v->shader->compiler;

   memset(state, 0, sizeof(*state));
   for (int i = 0; i < IR3_MAX_UBO_PUSH_RANGES; i++) {
      state->range[i].start = UINT32_MAX;
   }

   nir_foreach_function (function, nir) {
      if (function->impl) {
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr (instr, block) {
               if (instr_is_load_ubo(instr))
                  gather_ubo_ranges(nir, nir_instr_as_intrinsic(instr),
                                    state, compiler->const_upload_unit);
            }
         }
      }
   }

   /* For now, everything we upload is accessed statically and thus will be
    * used by the shader. Once we can upload dynamically indexed data, we may
    * upload sparsely accessed arrays, at which point we probably want to
    * give priority to smaller UBOs, on the assumption that big UBOs will be
    * accessed dynamically. Alternatively, we can track statically and
    * dynamically accessed ranges separately and upload static ranges
    * first.
    */

   /* Limit our uploads to the amount of constant buffer space available in
    * the hardware, minus what the shader compiler may need for various
    * driver params. We do this UBO-to-push-constant lowering before the
    * real allocation of the driver params' const space, because UBO
    * pointers can be driver params but this pass usually eliminates them.
    */
   struct ir3_const_state worst_case_const_state = { };
   ir3_setup_const_state(nir, v, &worst_case_const_state);
   const uint32_t max_upload = (ir3_max_const(v) -
                                worst_case_const_state.offsets.immediate) * 16;

   uint32_t offset = v->shader->num_reserved_user_consts * 16;
   state->num_enabled = ARRAY_SIZE(state->range);
   for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
      if (state->range[i].start >= state->range[i].end) {
         state->num_enabled = i;
         break;
      }

      uint32_t range_size = state->range[i].end - state->range[i].start;

      debug_assert(offset <= max_upload);
      state->range[i].offset = offset;
      if (offset + range_size > max_upload) {
         range_size = max_upload - offset;
         state->range[i].end = state->range[i].start + range_size;
      }
      offset += range_size;
   }
   state->size = offset;
}

bool
ir3_nir_lower_ubo_loads(nir_shader *nir, struct ir3_shader_variant *v)
{
   struct ir3_compiler *compiler = v->shader->compiler;
   /* For the binning pass variant, we re-use the corresponding draw-pass
    * variant's const_state and ubo state. To make this clear, in this
    * pass it is const (read-only).
    */
   const struct ir3_const_state *const_state = ir3_const_state(v);
   const struct ir3_ubo_analysis_state *state = &const_state->ubo_state;

   int num_ubos = 0;
   bool progress = false;
   nir_foreach_function (function, nir) {
      if (function->impl) {
         nir_builder builder;
         nir_builder_init(&builder, function->impl);
         nir_foreach_block (block, function->impl) {
            nir_foreach_instr_safe (instr, block) {
               if (!instr_is_load_ubo(instr))
                  continue;
               progress |=
                  lower_ubo_load_to_uniform(nir_instr_as_intrinsic(instr),
                                            &builder, state, &num_ubos,
                                            compiler->const_upload_unit);
            }
         }

         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                               nir_metadata_dominance);
      }
   }

   /* Update the num_ubos field for GL (first_ubo_is_default_ubo). With
    * Vulkan's bindless, we don't use the num_ubos field, so we can leave it
    * incorrect.
    */
   if (nir->info.first_ubo_is_default_ubo)
      nir->info.num_ubos = num_ubos;

   return progress;
}