/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
/*
 * This lowering pass converts references to input/output variables with
 * loads/stores to actual input/output intrinsics.
 */
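/* For example (illustrative, NIR printed in abbreviated form), a fragment
 * shader input read such as
 *
 *    vec4 %d = deref_var &color (shader_in vec4)
 *    vec4 %v = intrinsic load_deref (%d)
 *
 * becomes, on drivers using interpolated input intrinsics, roughly
 *
 *    vec2 %bary = intrinsic load_barycentric_pixel
 *    vec4 %v = intrinsic load_interpolated_input (%bary, 0) (base=N, component=0)
 */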
#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"
struct lower_io_state {
   void *dead_ctx;
   nir_builder builder;
   int (*type_size)(const struct glsl_type *type, bool);
   nir_variable_mode modes;
   nir_lower_io_options options;
};
static nir_intrinsic_op
ssbo_atomic_for_deref(nir_intrinsic_op deref_op)
{
   switch (deref_op) {
#define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_ssbo_##O;
   OP(atomic_exchange)
   OP(atomic_comp_swap)
   OP(atomic_add)
   OP(atomic_imin)
   OP(atomic_umin)
   OP(atomic_imax)
   OP(atomic_umax)
   OP(atomic_and)
   OP(atomic_or)
   OP(atomic_xor)
   OP(atomic_fadd)
   OP(atomic_fmin)
   OP(atomic_fmax)
   OP(atomic_fcomp_swap)
#undef OP
   default:
      unreachable("Invalid SSBO atomic");
   }
}
static nir_intrinsic_op
global_atomic_for_deref(nir_intrinsic_op deref_op)
{
   switch (deref_op) {
#define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_global_##O;
   OP(atomic_exchange)
   OP(atomic_comp_swap)
   OP(atomic_add)
   OP(atomic_imin)
   OP(atomic_umin)
   OP(atomic_imax)
   OP(atomic_umax)
   OP(atomic_and)
   OP(atomic_or)
   OP(atomic_xor)
   OP(atomic_fadd)
   OP(atomic_fmin)
   OP(atomic_fmax)
   OP(atomic_fcomp_swap)
#undef OP
   default:
      unreachable("Invalid global atomic");
   }
}
void
nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                         int (*type_size)(const struct glsl_type *, bool))
{
   unsigned location = 0;

   nir_foreach_variable(var, var_list) {
      /*
       * UBOs have their own address spaces, so don't count them towards the
       * number of global uniforms
       */
      if (var->data.mode == nir_var_mem_ubo || var->data.mode == nir_var_mem_ssbo)
         continue;

      var->data.driver_location = location;
      bool bindless_type_size = var->data.mode == nir_var_shader_in ||
                                var->data.mode == nir_var_shader_out ||
                                var->data.bindless;
      location += type_size(var->type, bindless_type_size);
   }

   *size = location;
}
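/* Example usage (illustrative; "driver_type_size" stands in for whatever
 * per-driver type-size callback is in use):
 *
 *    unsigned num_inputs;
 *    nir_assign_var_locations(&shader->inputs, &num_inputs,
 *                             driver_type_size);
 *
 * Afterwards each variable's data.driver_location is a flat index and
 * *size holds the total size in type_size units.
 */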
/**
 * Return true if the given variable is a per-vertex input/output array
 * (such as geometry shader inputs).
 */
bool
nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage)
{
   if (var->data.patch || !glsl_type_is_array(var->type))
      return false;

   if (var->data.mode == nir_var_shader_in)
      return stage == MESA_SHADER_GEOMETRY ||
             stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL;

   if (var->data.mode == nir_var_shader_out)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}
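/* E.g. (illustrative) the geometry shader input "in vec4 color[3];" is
 * per-vertex: the outermost array index selects the input vertex rather
 * than an element of the variable itself.
 */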
static nir_ssa_def *
get_io_offset(nir_builder *b, nir_deref_instr *deref,
              nir_ssa_def **vertex_index,
              int (*type_size)(const struct glsl_type *, bool),
              unsigned *component, bool bts)
{
   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);

   assert(path.path[0]->deref_type == nir_deref_type_var);
   nir_deref_instr **p = &path.path[1];

   /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
    * outermost array index separate.  Process the rest normally.
    */
   if (vertex_index != NULL) {
      assert((*p)->deref_type == nir_deref_type_array);
      *vertex_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
      p++;
   }

   if (path.path[0]->var->data.compact) {
      assert((*p)->deref_type == nir_deref_type_array);
      assert(glsl_type_is_scalar((*p)->type));

      /* We always lower indirect dereferences for "compact" array vars. */
      const unsigned index = nir_src_as_uint((*p)->arr.index);
      const unsigned total_offset = *component + index;
      const unsigned slot_offset = total_offset / 4;
      *component = total_offset % 4;
      return nir_imm_int(b, type_size(glsl_vec4_type(), bts) * slot_offset);
   }

   /* Just emit code and let constant-folding go to town */
   nir_ssa_def *offset = nir_imm_int(b, 0);

   for (; *p; p++) {
      if ((*p)->deref_type == nir_deref_type_array) {
         unsigned size = type_size((*p)->type, bts);

         nir_ssa_def *mul =
            nir_imul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);

         offset = nir_iadd(b, offset, mul);
      } else if ((*p)->deref_type == nir_deref_type_struct) {
         /* p starts at path[1], so this is safe */
         nir_deref_instr *parent = *(p - 1);

         unsigned field_offset = 0;
         for (unsigned i = 0; i < (*p)->strct.index; i++) {
            field_offset += type_size(glsl_get_struct_field(parent->type, i), bts);
         }
         offset = nir_iadd_imm(b, offset, field_offset);
      } else {
         unreachable("Unsupported deref type");
      }
   }

   nir_deref_path_finish(&path);

   return offset;
}
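/* Worked example (illustrative): for a deref chain equivalent to "s[i].f",
 * the loop above emits roughly
 *
 *    offset = 0
 *    offset = iadd(offset, imul_imm(i, type_size(s[0], bts)))
 *    offset = iadd_imm(offset, sum of type_size for fields before f)
 *
 * and constant folding collapses the whole chain when "i" is constant.
 */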
static nir_ssa_def *
lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
           nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
           unsigned component, const struct glsl_type *type)
{
   nir_builder *b = &state->builder;
   const nir_shader *nir = b->shader;
   nir_variable_mode mode = var->data.mode;
   nir_ssa_def *barycentric = NULL;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_shader_in:
      if (nir->info.stage == MESA_SHADER_FRAGMENT &&
          nir->options->use_interpolated_input_intrinsics &&
          var->data.interpolation != INTERP_MODE_FLAT) {
         assert(vertex_index == NULL);

         nir_intrinsic_op bary_op;
         if (var->data.sample ||
             (state->options & nir_lower_io_force_sample_interpolation))
            bary_op = nir_intrinsic_load_barycentric_sample;
         else if (var->data.centroid)
            bary_op = nir_intrinsic_load_barycentric_centroid;
         else
            bary_op = nir_intrinsic_load_barycentric_pixel;

         barycentric = nir_load_barycentric(&state->builder, bary_op,
                                            var->data.interpolation);
         op = nir_intrinsic_load_interpolated_input;
      } else {
         op = vertex_index ? nir_intrinsic_load_per_vertex_input :
                             nir_intrinsic_load_input;
      }
      break;
   case nir_var_shader_out:
      op = vertex_index ? nir_intrinsic_load_per_vertex_output :
                          nir_intrinsic_load_output;
      break;
   case nir_var_uniform:
      op = nir_intrinsic_load_uniform;
      break;
   case nir_var_mem_shared:
      op = nir_intrinsic_load_shared;
      break;
   default:
      unreachable("Unknown variable mode");
   }

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(state->builder.shader, op);
   load->num_components = intrin->num_components;

   nir_intrinsic_set_base(load, var->data.driver_location);
   if (mode == nir_var_shader_in || mode == nir_var_shader_out)
      nir_intrinsic_set_component(load, component);

   if (load->intrinsic == nir_intrinsic_load_uniform)
      nir_intrinsic_set_range(load,
                              state->type_size(var->type, var->data.bindless));

   if (load->intrinsic == nir_intrinsic_load_input ||
       load->intrinsic == nir_intrinsic_load_uniform)
      nir_intrinsic_set_type(load, nir_get_nir_type_for_glsl_type(type));

   if (vertex_index) {
      load->src[0] = nir_src_for_ssa(vertex_index);
      load->src[1] = nir_src_for_ssa(offset);
   } else if (barycentric) {
      load->src[0] = nir_src_for_ssa(barycentric);
      load->src[1] = nir_src_for_ssa(offset);
   } else {
      load->src[0] = nir_src_for_ssa(offset);
   }

   assert(intrin->dest.is_ssa);
   nir_ssa_dest_init(&load->instr, &load->dest,
                     intrin->dest.ssa.num_components,
                     intrin->dest.ssa.bit_size, NULL);
   nir_builder_instr_insert(b, &load->instr);

   return &load->dest.ssa;
}
static void
lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
            nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
            unsigned component, const struct glsl_type *type)
{
   nir_builder *b = &state->builder;
   nir_variable_mode mode = var->data.mode;

   nir_intrinsic_op op;
   if (mode == nir_var_mem_shared) {
      op = nir_intrinsic_store_shared;
   } else {
      assert(mode == nir_var_shader_out);
      op = vertex_index ? nir_intrinsic_store_per_vertex_output :
                          nir_intrinsic_store_output;
   }

   nir_intrinsic_instr *store =
      nir_intrinsic_instr_create(state->builder.shader, op);
   store->num_components = intrin->num_components;

   nir_src_copy(&store->src[0], &intrin->src[1], store);

   nir_intrinsic_set_base(store, var->data.driver_location);

   if (mode == nir_var_shader_out)
      nir_intrinsic_set_component(store, component);

   if (store->intrinsic == nir_intrinsic_store_output)
      nir_intrinsic_set_type(store, nir_get_nir_type_for_glsl_type(type));

   nir_intrinsic_set_write_mask(store, nir_intrinsic_write_mask(intrin));

   if (vertex_index)
      store->src[1] = nir_src_for_ssa(vertex_index);

   store->src[vertex_index ? 2 : 1] = nir_src_for_ssa(offset);

   nir_builder_instr_insert(b, &store->instr);
}
static nir_ssa_def *
lower_atomic(nir_intrinsic_instr *intrin, struct lower_io_state *state,
             nir_variable *var, nir_ssa_def *offset)
{
   nir_builder *b = &state->builder;
   assert(var->data.mode == nir_var_mem_shared);

   nir_intrinsic_op op;
   switch (intrin->intrinsic) {
#define OP(O) case nir_intrinsic_deref_##O: op = nir_intrinsic_shared_##O; break;
   OP(atomic_exchange)
   OP(atomic_comp_swap)
   OP(atomic_add)
   OP(atomic_imin)
   OP(atomic_umin)
   OP(atomic_imax)
   OP(atomic_umax)
   OP(atomic_and)
   OP(atomic_or)
   OP(atomic_xor)
   OP(atomic_fadd)
   OP(atomic_fmin)
   OP(atomic_fmax)
   OP(atomic_fcomp_swap)
#undef OP
   default:
      unreachable("Invalid atomic");
   }

   nir_intrinsic_instr *atomic =
      nir_intrinsic_instr_create(state->builder.shader, op);

   nir_intrinsic_set_base(atomic, var->data.driver_location);

   atomic->src[0] = nir_src_for_ssa(offset);
   assert(nir_intrinsic_infos[intrin->intrinsic].num_srcs ==
          nir_intrinsic_infos[op].num_srcs);
   for (unsigned i = 1; i < nir_intrinsic_infos[op].num_srcs; i++) {
      nir_src_copy(&atomic->src[i], &intrin->src[i], atomic);
   }

   if (nir_intrinsic_infos[op].has_dest) {
      assert(intrin->dest.is_ssa);
      assert(nir_intrinsic_infos[intrin->intrinsic].has_dest);
      nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                        intrin->dest.ssa.num_components,
                        intrin->dest.ssa.bit_size, NULL);
   }

   nir_builder_instr_insert(b, &atomic->instr);

   return nir_intrinsic_infos[op].has_dest ? &atomic->dest.ssa : NULL;
}
static nir_ssa_def *
lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
                     nir_variable *var, nir_ssa_def *offset, unsigned component,
                     const struct glsl_type *type)
{
   nir_builder *b = &state->builder;
   assert(var->data.mode == nir_var_shader_in);

   /* Ignore interpolateAt() for flat variables - flat is flat. */
   if (var->data.interpolation == INTERP_MODE_FLAT)
      return lower_load(intrin, state, NULL, var, offset, component, type);

   nir_intrinsic_op bary_op;
   switch (intrin->intrinsic) {
   case nir_intrinsic_interp_deref_at_centroid:
      bary_op = (state->options & nir_lower_io_force_sample_interpolation) ?
                nir_intrinsic_load_barycentric_sample :
                nir_intrinsic_load_barycentric_centroid;
      break;
   case nir_intrinsic_interp_deref_at_sample:
      bary_op = nir_intrinsic_load_barycentric_at_sample;
      break;
   case nir_intrinsic_interp_deref_at_offset:
      bary_op = nir_intrinsic_load_barycentric_at_offset;
      break;
   default:
      unreachable("Bogus interpolateAt() intrinsic.");
   }

   nir_intrinsic_instr *bary_setup =
      nir_intrinsic_instr_create(state->builder.shader, bary_op);

   nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32, NULL);
   nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);

   if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
       intrin->intrinsic == nir_intrinsic_interp_deref_at_offset)
      nir_src_copy(&bary_setup->src[0], &intrin->src[1], bary_setup);

   nir_builder_instr_insert(b, &bary_setup->instr);

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(state->builder.shader,
                                 nir_intrinsic_load_interpolated_input);
   load->num_components = intrin->num_components;

   nir_intrinsic_set_base(load, var->data.driver_location);
   nir_intrinsic_set_component(load, component);

   load->src[0] = nir_src_for_ssa(&bary_setup->dest.ssa);
   load->src[1] = nir_src_for_ssa(offset);

   assert(intrin->dest.is_ssa);
   nir_ssa_dest_init(&load->instr, &load->dest,
                     intrin->dest.ssa.num_components,
                     intrin->dest.ssa.bit_size, NULL);
   nir_builder_instr_insert(b, &load->instr);

   return &load->dest.ssa;
}
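/* E.g. (illustrative) "interpolateAtOffset(v, off)" arrives here as an
 * interp_deref_at_offset intrinsic and leaves as roughly
 *
 *    vec2 %bary = intrinsic load_barycentric_at_offset (%off)
 *    vec4 %v = intrinsic load_interpolated_input (%bary, 0) (base=N, ...)
 */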
static bool
nir_lower_io_block(nir_block *block,
                   struct lower_io_state *state)
{
   nir_builder *b = &state->builder;
   const nir_shader_compiler_options *options = b->shader->options;
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_deref:
      case nir_intrinsic_store_deref:
      case nir_intrinsic_deref_atomic_add:
      case nir_intrinsic_deref_atomic_imin:
      case nir_intrinsic_deref_atomic_umin:
      case nir_intrinsic_deref_atomic_imax:
      case nir_intrinsic_deref_atomic_umax:
      case nir_intrinsic_deref_atomic_and:
      case nir_intrinsic_deref_atomic_or:
      case nir_intrinsic_deref_atomic_xor:
      case nir_intrinsic_deref_atomic_exchange:
      case nir_intrinsic_deref_atomic_comp_swap:
      case nir_intrinsic_deref_atomic_fadd:
      case nir_intrinsic_deref_atomic_fmin:
      case nir_intrinsic_deref_atomic_fmax:
      case nir_intrinsic_deref_atomic_fcomp_swap:
         /* We can lower the io for this nir intrinsic */
         break;
      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
         /* We can optionally lower these to load_interpolated_input */
         if (options->use_interpolated_input_intrinsics)
            break;
      default:
         /* We can't lower the io for this nir intrinsic, so skip it */
         continue;
      }

      nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);

      nir_variable_mode mode = deref->mode;

      if ((state->modes & mode) == 0)
         continue;

      if (mode != nir_var_shader_in &&
          mode != nir_var_shader_out &&
          mode != nir_var_mem_shared &&
          mode != nir_var_uniform)
         continue;

      nir_variable *var = nir_deref_instr_get_variable(deref);

      b->cursor = nir_before_instr(instr);

      const bool per_vertex = nir_is_per_vertex_io(var, b->shader->info.stage);

      nir_ssa_def *offset;
      nir_ssa_def *vertex_index = NULL;
      unsigned component_offset = var->data.location_frac;
      bool bindless_type_size = mode == nir_var_shader_in ||
                                mode == nir_var_shader_out ||
                                var->data.bindless;

      offset = get_io_offset(b, deref, per_vertex ? &vertex_index : NULL,
                             state->type_size, &component_offset,
                             bindless_type_size);

      nir_ssa_def *replacement = NULL;

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_deref:
         replacement = lower_load(intrin, state, vertex_index, var, offset,
                                  component_offset, deref->type);
         break;

      case nir_intrinsic_store_deref:
         lower_store(intrin, state, vertex_index, var, offset,
                     component_offset, deref->type);
         break;

      case nir_intrinsic_deref_atomic_add:
      case nir_intrinsic_deref_atomic_imin:
      case nir_intrinsic_deref_atomic_umin:
      case nir_intrinsic_deref_atomic_imax:
      case nir_intrinsic_deref_atomic_umax:
      case nir_intrinsic_deref_atomic_and:
      case nir_intrinsic_deref_atomic_or:
      case nir_intrinsic_deref_atomic_xor:
      case nir_intrinsic_deref_atomic_exchange:
      case nir_intrinsic_deref_atomic_comp_swap:
      case nir_intrinsic_deref_atomic_fadd:
      case nir_intrinsic_deref_atomic_fmin:
      case nir_intrinsic_deref_atomic_fmax:
      case nir_intrinsic_deref_atomic_fcomp_swap:
         assert(vertex_index == NULL);
         replacement = lower_atomic(intrin, state, var, offset);
         break;

      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
         assert(vertex_index == NULL);
         replacement = lower_interpolate_at(intrin, state, var, offset,
                                            component_offset, deref->type);
         break;

      default:
         continue;
      }

      if (replacement) {
         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                  nir_src_for_ssa(replacement));
      }
      nir_instr_remove(&intrin->instr);
      progress = true;
   }

   return progress;
}
static bool
nir_lower_io_impl(nir_function_impl *impl,
                  nir_variable_mode modes,
                  int (*type_size)(const struct glsl_type *, bool),
                  nir_lower_io_options options)
{
   struct lower_io_state state;
   bool progress = false;

   nir_builder_init(&state.builder, impl);
   state.dead_ctx = ralloc_context(NULL);
   state.modes = modes;
   state.type_size = type_size;
   state.options = options;

   nir_foreach_block(block, impl) {
      progress |= nir_lower_io_block(block, &state);
   }

   ralloc_free(state.dead_ctx);

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   return progress;
}
bool
nir_lower_io(nir_shader *shader, nir_variable_mode modes,
             int (*type_size)(const struct glsl_type *, bool),
             nir_lower_io_options options)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl) {
         progress |= nir_lower_io_impl(function->impl, modes,
                                       type_size, options);
      }
   }

   return progress;
}
static unsigned
type_scalar_size_bytes(const struct glsl_type *type)
{
   assert(glsl_type_is_vector_or_scalar(type) ||
          glsl_type_is_matrix(type));
   return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
}
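/* Address format layouts assumed by the helpers below (summary inferred
 * from how the channels are used):
 *
 *    32bit_global / 64bit_global / 32bit_offset:
 *       a single scalar address or byte offset
 *    32bit_index_offset:
 *       vec2(buffer_index, byte_offset)
 *    64bit_bounded_global:
 *       vec4(addr_lo, addr_hi, size_bound, byte_offset)
 */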
static nir_ssa_def *
build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
                nir_address_format addr_format, nir_ssa_def *offset)
{
   assert(offset->num_components == 1);
   assert(addr->bit_size == offset->bit_size);

   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
   case nir_address_format_32bit_offset:
      assert(addr->num_components == 1);
      return nir_iadd(b, addr, offset);

   case nir_address_format_64bit_bounded_global:
      assert(addr->num_components == 4);
      return nir_vec4(b, nir_channel(b, addr, 0),
                         nir_channel(b, addr, 1),
                         nir_channel(b, addr, 2),
                         nir_iadd(b, nir_channel(b, addr, 3), offset));

   case nir_address_format_32bit_index_offset:
      assert(addr->num_components == 2);
      return nir_vec2(b, nir_channel(b, addr, 0),
                         nir_iadd(b, nir_channel(b, addr, 1), offset));
   case nir_address_format_logical:
      unreachable("Unsupported address format");
   }
   unreachable("Invalid address format");
}
static nir_ssa_def *
build_addr_iadd_imm(nir_builder *b, nir_ssa_def *addr,
                    nir_address_format addr_format, int64_t offset)
{
   return build_addr_iadd(b, addr, addr_format,
                          nir_imm_intN_t(b, offset, addr->bit_size));
}
static nir_ssa_def *
addr_to_index(nir_builder *b, nir_ssa_def *addr,
              nir_address_format addr_format)
{
   assert(addr_format == nir_address_format_32bit_index_offset);
   assert(addr->num_components == 2);
   return nir_channel(b, addr, 0);
}
static nir_ssa_def *
addr_to_offset(nir_builder *b, nir_ssa_def *addr,
               nir_address_format addr_format)
{
   assert(addr_format == nir_address_format_32bit_index_offset);
   assert(addr->num_components == 2);
   return nir_channel(b, addr, 1);
}
/** Returns true if the given address format resolves to a global address */
static bool
addr_format_is_global(nir_address_format addr_format)
{
   return addr_format == nir_address_format_32bit_global ||
          addr_format == nir_address_format_64bit_global ||
          addr_format == nir_address_format_64bit_bounded_global;
}
static nir_ssa_def *
addr_to_global(nir_builder *b, nir_ssa_def *addr,
               nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
      assert(addr->num_components == 1);
      return addr;

   case nir_address_format_64bit_bounded_global:
      assert(addr->num_components == 4);
      return nir_iadd(b, nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)),
                         nir_u2u64(b, nir_channel(b, addr, 3)));

   case nir_address_format_32bit_index_offset:
   case nir_address_format_32bit_offset:
   case nir_address_format_logical:
      unreachable("Cannot get a 64-bit address with this address format");
   }

   unreachable("Invalid address format");
}
static bool
addr_format_needs_bounds_check(nir_address_format addr_format)
{
   return addr_format == nir_address_format_64bit_bounded_global;
}
static nir_ssa_def *
addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
                  nir_address_format addr_format, unsigned size)
{
   assert(addr_format == nir_address_format_64bit_bounded_global);
   assert(addr->num_components == 4);
   return nir_ige(b, nir_channel(b, addr, 2),
                     nir_iadd_imm(b, nir_channel(b, addr, 3), size));
}
static nir_ssa_def *
build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
                       nir_ssa_def *addr, nir_address_format addr_format,
                       unsigned num_components)
{
   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_mem_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case nir_var_mem_ssbo:
      if (addr_format_is_global(addr_format))
         op = nir_intrinsic_load_global;
      else
         op = nir_intrinsic_load_ssbo;
      break;
   case nir_var_mem_global:
      assert(addr_format_is_global(addr_format));
      op = nir_intrinsic_load_global;
      break;
   case nir_var_shader_in:
      assert(addr_format_is_global(addr_format));
      op = nir_intrinsic_load_kernel_input;
      break;
   default:
      unreachable("Unsupported explicit IO variable mode");
   }

   nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);

   if (addr_format_is_global(addr_format)) {
      load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
   } else {
      load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
      load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   }

   if (mode != nir_var_mem_ubo && mode != nir_var_shader_in)
      nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));

   /* TODO: We should try and provide a better alignment.  For OpenCL, we need
    * to plumb the alignment through from SPIR-V when we have one.
    */
   nir_intrinsic_set_align(load, intrin->dest.ssa.bit_size / 8, 0);

   assert(intrin->dest.is_ssa);
   load->num_components = num_components;
   nir_ssa_dest_init(&load->instr, &load->dest, num_components,
                     intrin->dest.ssa.bit_size, intrin->dest.ssa.name);

   assert(load->dest.ssa.bit_size % 8 == 0);

   if (addr_format_needs_bounds_check(addr_format)) {
      /* The Vulkan spec for robustBufferAccess gives us quite a few options
       * as to what we can do with an OOB read.  Unfortunately, returning
       * undefined values isn't one of them so we return an actual zero.
       */
      nir_ssa_def *zero = nir_imm_zero(b, load->num_components,
                                          load->dest.ssa.bit_size);

      const unsigned load_size =
         (load->dest.ssa.bit_size / 8) * load->num_components;
      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));

      nir_builder_instr_insert(b, &load->instr);

      nir_pop_if(b, NULL);

      return nir_if_phi(b, &load->dest.ssa, zero);
   } else {
      nir_builder_instr_insert(b, &load->instr);
      return &load->dest.ssa;
   }
}
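/* With a bounds-checked address format, the emitted NIR has the shape
 * (illustrative):
 *
 *    if (size_bound >= offset + load_size) {
 *       %val = load_global/load_ssbo ...
 *    }
 *    %result = phi %val, 0     // zero on an out-of-bounds read
 */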
static void
build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
                        nir_ssa_def *addr, nir_address_format addr_format,
                        nir_ssa_def *value, nir_component_mask_t write_mask)
{
   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_mem_ssbo:
      if (addr_format_is_global(addr_format))
         op = nir_intrinsic_store_global;
      else
         op = nir_intrinsic_store_ssbo;
      break;
   case nir_var_mem_global:
      assert(addr_format_is_global(addr_format));
      op = nir_intrinsic_store_global;
      break;
   default:
      unreachable("Unsupported explicit IO variable mode");
   }

   nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);

   store->src[0] = nir_src_for_ssa(value);
   if (addr_format_is_global(addr_format)) {
      store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
   } else {
      store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
      store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   }

   nir_intrinsic_set_write_mask(store, write_mask);

   nir_intrinsic_set_access(store, nir_intrinsic_access(intrin));

   /* TODO: We should try and provide a better alignment.  For OpenCL, we need
    * to plumb the alignment through from SPIR-V when we have one.
    */
   nir_intrinsic_set_align(store, value->bit_size / 8, 0);

   assert(value->num_components == 1 ||
          value->num_components == intrin->num_components);
   store->num_components = value->num_components;

   assert(value->bit_size % 8 == 0);

   if (addr_format_needs_bounds_check(addr_format)) {
      const unsigned store_size = (value->bit_size / 8) * store->num_components;
      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));

      nir_builder_instr_insert(b, &store->instr);

      nir_pop_if(b, NULL);
   } else {
      nir_builder_instr_insert(b, &store->instr);
   }
}
static nir_ssa_def *
build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
                         nir_ssa_def *addr, nir_address_format addr_format)
{
   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
   const unsigned num_data_srcs =
      nir_intrinsic_infos[intrin->intrinsic].num_srcs - 1;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_mem_ssbo:
      if (addr_format_is_global(addr_format))
         op = global_atomic_for_deref(intrin->intrinsic);
      else
         op = ssbo_atomic_for_deref(intrin->intrinsic);
      break;
   case nir_var_mem_global:
      assert(addr_format_is_global(addr_format));
      op = global_atomic_for_deref(intrin->intrinsic);
      break;
   default:
      unreachable("Unsupported explicit IO variable mode");
   }

   nir_intrinsic_instr *atomic = nir_intrinsic_instr_create(b->shader, op);

   unsigned src = 0;
   if (addr_format_is_global(addr_format)) {
      atomic->src[src++] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
   } else {
      atomic->src[src++] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
      atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   }
   for (unsigned i = 0; i < num_data_srcs; i++) {
      atomic->src[src++] = nir_src_for_ssa(intrin->src[1 + i].ssa);
   }

   /* Global atomics don't have access flags because they assume that the
    * address may be non-uniform.
    */
   if (!addr_format_is_global(addr_format))
      nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));

   assert(intrin->dest.ssa.num_components == 1);
   nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                     1, intrin->dest.ssa.bit_size, intrin->dest.ssa.name);

   assert(atomic->dest.ssa.bit_size % 8 == 0);

   if (addr_format_needs_bounds_check(addr_format)) {
      const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));

      nir_builder_instr_insert(b, &atomic->instr);

      nir_pop_if(b, NULL);

      return nir_if_phi(b, &atomic->dest.ssa,
                        nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
   } else {
      nir_builder_instr_insert(b, &atomic->instr);
      return &atomic->dest.ssa;
   }
}
nir_ssa_def *
nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
                                   nir_ssa_def *base_addr,
                                   nir_address_format addr_format)
{
   assert(deref->dest.is_ssa);
   switch (deref->deref_type) {
   case nir_deref_type_var:
      assert(deref->mode == nir_var_shader_in);
      return nir_imm_intN_t(b, deref->var->data.driver_location,
                            deref->dest.ssa.bit_size);

   case nir_deref_type_array: {
      nir_deref_instr *parent = nir_deref_instr_parent(deref);

      unsigned stride = glsl_get_explicit_stride(parent->type);
      if ((glsl_type_is_matrix(parent->type) &&
           glsl_matrix_type_is_row_major(parent->type)) ||
          (glsl_type_is_vector(parent->type) && stride == 0))
         stride = type_scalar_size_bytes(parent->type);

      assert(stride > 0);

      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
      index = nir_i2i(b, index, base_addr->bit_size);
      return build_addr_iadd(b, base_addr, addr_format,
                             nir_imul_imm(b, index, stride));
   }

   case nir_deref_type_ptr_as_array: {
      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
      index = nir_i2i(b, index, base_addr->bit_size);
      unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
      return build_addr_iadd(b, base_addr, addr_format,
                             nir_imul_imm(b, index, stride));
   }

   case nir_deref_type_array_wildcard:
      unreachable("Wildcards should be lowered by now");
      break;

   case nir_deref_type_struct: {
      nir_deref_instr *parent = nir_deref_instr_parent(deref);
      int offset = glsl_get_struct_field_offset(parent->type,
                                                deref->strct.index);

      return build_addr_iadd_imm(b, base_addr, addr_format, offset);
   }

   case nir_deref_type_cast:
      /* Nothing to do here */
      return base_addr;
   }

   unreachable("Invalid NIR deref type");
}
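/* Worked example (illustrative): with nir_address_format_32bit_index_offset,
 * lowering the deref chain for "block.arr[i]" proceeds from a cast deref
 * producing vec2(index, 0); the struct member adds its field offset to the
 * offset channel and the array deref adds i * stride, so the final address
 * is vec2(index, field_offset + i * stride).
 */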
void
nir_lower_explicit_io_instr(nir_builder *b,
                            nir_intrinsic_instr *intrin,
                            nir_ssa_def *addr,
                            nir_address_format addr_format)
{
   b->cursor = nir_after_instr(&intrin->instr);

   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   unsigned vec_stride = glsl_get_explicit_stride(deref->type);
   unsigned scalar_size = type_scalar_size_bytes(deref->type);
   assert(vec_stride == 0 || glsl_type_is_vector(deref->type));
   assert(vec_stride == 0 || vec_stride >= scalar_size);

   if (intrin->intrinsic == nir_intrinsic_load_deref) {
      nir_ssa_def *value;
      if (vec_stride > scalar_size) {
         nir_ssa_def *comps[4] = { NULL, };
         for (unsigned i = 0; i < intrin->num_components; i++) {
            nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
                                                         vec_stride * i);
            comps[i] = build_explicit_io_load(b, intrin, comp_addr,
                                              addr_format, 1);
         }
         value = nir_vec(b, comps, intrin->num_components);
      } else {
         value = build_explicit_io_load(b, intrin, addr, addr_format,
                                        intrin->num_components);
      }
      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
   } else if (intrin->intrinsic == nir_intrinsic_store_deref) {
      assert(intrin->src[1].is_ssa);
      nir_ssa_def *value = intrin->src[1].ssa;
      nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
      if (vec_stride > scalar_size) {
         for (unsigned i = 0; i < intrin->num_components; i++) {
            if (!(write_mask & (1 << i)))
               continue;

            nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
                                                         vec_stride * i);
            build_explicit_io_store(b, intrin, comp_addr, addr_format,
                                    nir_channel(b, value, i), 1);
         }
      } else {
         build_explicit_io_store(b, intrin, addr, addr_format,
                                 value, write_mask);
      }
   } else {
      nir_ssa_def *value =
         build_explicit_io_atomic(b, intrin, addr, addr_format);
      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
   }

   nir_instr_remove(&intrin->instr);
}
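/* Note: when the vector's explicit stride exceeds its scalar size (e.g. a
 * column of a row-major matrix), the load/store above is split into one
 * scalar access per enabled component at addr + i * vec_stride.
 */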
static void
lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
                        nir_address_format addr_format)
{
   /* Just delete the deref if it's not used.  We can't use
    * nir_deref_instr_remove_if_unused here because it may remove more than
    * one deref which could break our list walking since we walk the list
    * backwards.
    */
   assert(list_empty(&deref->dest.ssa.if_uses));
   if (list_empty(&deref->dest.ssa.uses)) {
      nir_instr_remove(&deref->instr);
      return;
   }

   b->cursor = nir_after_instr(&deref->instr);

   nir_ssa_def *base_addr = NULL;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->parent.is_ssa);
      base_addr = deref->parent.ssa;
   }

   nir_ssa_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
                                                          addr_format);

   nir_instr_remove(&deref->instr);
   nir_ssa_def_rewrite_uses(&deref->dest.ssa, nir_src_for_ssa(addr));
}
static void
lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
                         nir_address_format addr_format)
{
   assert(intrin->src[0].is_ssa);
   nir_lower_explicit_io_instr(b, intrin, intrin->src[0].ssa, addr_format);
}
static void
lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
                               nir_address_format addr_format)
{
   b->cursor = nir_after_instr(&intrin->instr);

   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);

   assert(glsl_type_is_array(deref->type));
   assert(glsl_get_length(deref->type) == 0);
   unsigned stride = glsl_get_explicit_stride(deref->type);
   assert(stride > 0);

   assert(addr_format == nir_address_format_32bit_index_offset);
   nir_ssa_def *addr = &deref->dest.ssa;
   nir_ssa_def *index = addr_to_index(b, addr, addr_format);
   nir_ssa_def *offset = addr_to_offset(b, addr, addr_format);

   nir_intrinsic_instr *bsize =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_get_buffer_size);
   bsize->src[0] = nir_src_for_ssa(index);
   nir_ssa_dest_init(&bsize->instr, &bsize->dest, 1, 32, NULL);
   nir_builder_instr_insert(b, &bsize->instr);

   nir_ssa_def *arr_size =
      nir_idiv(b, nir_isub(b, &bsize->dest.ssa, offset),
                  nir_imm_int(b, stride));

   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(arr_size));
   nir_instr_remove(&intrin->instr);
}
static bool
nir_lower_explicit_io_impl(nir_function_impl *impl, nir_variable_mode modes,
                           nir_address_format addr_format)
{
   bool progress = false;

   nir_builder b;
   nir_builder_init(&b, impl);

   /* Walk in reverse order so that we can see the full deref chain when we
    * lower the access operations.  We lower them assuming that the derefs
    * will be turned into address calculations later.
    */
   nir_foreach_block_reverse(block, impl) {
      nir_foreach_instr_reverse_safe(instr, block) {
         switch (instr->type) {
         case nir_instr_type_deref: {
            nir_deref_instr *deref = nir_instr_as_deref(instr);
            if (deref->mode & modes) {
               lower_explicit_io_deref(&b, deref, addr_format);
               progress = true;
            }
            break;
         }

         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            switch (intrin->intrinsic) {
            case nir_intrinsic_load_deref:
            case nir_intrinsic_store_deref:
            case nir_intrinsic_deref_atomic_add:
            case nir_intrinsic_deref_atomic_imin:
            case nir_intrinsic_deref_atomic_umin:
            case nir_intrinsic_deref_atomic_imax:
            case nir_intrinsic_deref_atomic_umax:
            case nir_intrinsic_deref_atomic_and:
            case nir_intrinsic_deref_atomic_or:
            case nir_intrinsic_deref_atomic_xor:
            case nir_intrinsic_deref_atomic_exchange:
            case nir_intrinsic_deref_atomic_comp_swap:
            case nir_intrinsic_deref_atomic_fadd:
            case nir_intrinsic_deref_atomic_fmin:
            case nir_intrinsic_deref_atomic_fmax:
            case nir_intrinsic_deref_atomic_fcomp_swap: {
               nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
               if (deref->mode & modes) {
                  lower_explicit_io_access(&b, intrin, addr_format);
                  progress = true;
               }
               break;
            }

            case nir_intrinsic_deref_buffer_array_length: {
               nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
               if (deref->mode & modes) {
                  lower_explicit_io_array_length(&b, intrin, addr_format);
                  progress = true;
               }
               break;
            }

            default:
               break;
            }
            break;
         }

         default:
            /* Nothing to do */
            break;
         }
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   }

   return progress;
}
bool
nir_lower_explicit_io(nir_shader *shader, nir_variable_mode modes,
                      nir_address_format addr_format)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl &&
          nir_lower_explicit_io_impl(function->impl, modes, addr_format))
         progress = true;
   }

   return progress;
}
/**
 * Return the offset source for a load/store intrinsic.
 */
nir_src *
nir_get_io_offset_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_shared:
   case nir_intrinsic_load_uniform:
   case nir_intrinsic_load_global:
   case nir_intrinsic_load_scratch:
   case nir_intrinsic_load_fs_input_interp_deltas:
      return &instr->src[0];
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_shared:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_scratch:
      return &instr->src[1];
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[2];
   default:
      return NULL;
   }
}
/**
 * Return the vertex index source for a load/store per_vertex intrinsic.
 */
nir_src *
nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
      return &instr->src[0];
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[1];
   default:
      return NULL;
   }
}
/**
 * Return the numeric constant that identifies a NULL pointer for each address
 * format.
 */
const nir_const_value *
nir_address_format_null_value(nir_address_format addr_format)
{
   static const nir_const_value null_values[][NIR_MAX_VEC_COMPONENTS] = {
      [nir_address_format_32bit_global] = {{0}},
      [nir_address_format_64bit_global] = {{0}},
      [nir_address_format_64bit_bounded_global] = {{0}},
      [nir_address_format_32bit_index_offset] = {{.u32 = ~0}, {.u32 = ~0}},
      [nir_address_format_32bit_offset] = {{.u32 = ~0}},
      [nir_address_format_logical] = {{.u32 = ~0}},
   };

   assert(addr_format < ARRAY_SIZE(null_values));
   return null_values[addr_format];
}
nir_ssa_def *
nir_build_addr_ieq(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
                   nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
   case nir_address_format_64bit_bounded_global:
   case nir_address_format_32bit_index_offset:
   case nir_address_format_32bit_offset:
      return nir_ball_iequal(b, addr0, addr1);

   case nir_address_format_logical:
      unreachable("Unsupported address format");
   }

   unreachable("Invalid address format");
}
nir_ssa_def *
nir_build_addr_isub(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
                    nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
   case nir_address_format_32bit_offset:
      assert(addr0->num_components == 1);
      assert(addr1->num_components == 1);
      return nir_isub(b, addr0, addr1);

   case nir_address_format_64bit_bounded_global:
      return nir_isub(b, addr_to_global(b, addr0, addr_format),
                         addr_to_global(b, addr1, addr_format));

   case nir_address_format_32bit_index_offset:
      assert(addr0->num_components == 2);
      assert(addr1->num_components == 2);
      /* Assume the same buffer index. */
      return nir_isub(b, nir_channel(b, addr0, 1), nir_channel(b, addr1, 1));

   case nir_address_format_logical:
      unreachable("Unsupported address format");
   }

   unreachable("Invalid address format");
}
static bool
is_input(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_input ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
          intrin->intrinsic == nir_intrinsic_load_interpolated_input ||
          intrin->intrinsic == nir_intrinsic_load_fs_input_interp_deltas;
}

static bool
is_output(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_output ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
          intrin->intrinsic == nir_intrinsic_store_output ||
          intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
}
/**
 * This pass adds constant offsets to instr->const_index[0] for input/output
 * intrinsics, and resets the offset source to 0.  Non-constant offsets remain
 * unchanged - since we don't know what part of a compound variable is
 * accessed, we allocate storage for the entire thing.  For drivers that use
 * nir_lower_io_to_temporaries() before nir_lower_io(), this guarantees that
 * the offset source will be 0, so that they don't have to add it in manually.
 */
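/* E.g. (illustrative): "store_output (base=16), offset=ssa_5 (const 4)"
 * becomes "store_output (base=20), offset=0" after this pass.
 */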
static bool
add_const_offset_to_base_block(nir_block *block, nir_builder *b,
                               nir_variable_mode mode)
{
   bool progress = false;
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if ((mode == nir_var_shader_in && is_input(intrin)) ||
          (mode == nir_var_shader_out && is_output(intrin))) {
         nir_src *offset = nir_get_io_offset_src(intrin);

         if (nir_src_is_const(*offset)) {
            intrin->const_index[0] += nir_src_as_uint(*offset);
            b->cursor = nir_before_instr(&intrin->instr);
            nir_instr_rewrite_src(&intrin->instr, offset,
                                  nir_src_for_ssa(nir_imm_int(b, 0)));
            progress = true;
         }
      }
   }

   return progress;
}
bool
nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
{
   bool progress = false;

   nir_foreach_function(f, nir) {
      if (f->impl) {
         nir_builder b;
         nir_builder_init(&b, f->impl);
         nir_foreach_block(block, f->impl) {
            progress |= add_const_offset_to_base_block(block, &b, mode);
         }
      }
   }

   return progress;
}