/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
/*
 * This lowering pass converts references to input/output variables with
 * loads/stores to actual input/output intrinsics.
 */
#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"
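/*
 * As a rough sketch (illustrative pseudo-IR, not the exact printed form),
 * a fragment-shader input read such as
 *
 *    vec1 32 ssa_1 = deref_var &in_color (shader_in vec4)
 *    vec4 32 ssa_2 = intrinsic load_deref (ssa_1) ()
 *
 * becomes, after this pass,
 *
 *    vec1 32 ssa_0 = load_const (0x00000000)
 *    vec4 32 ssa_2 = intrinsic load_input (ssa_0) (base=0, component=0)
 *
 * where the offset source is computed from the deref chain by
 * get_io_offset() below and usually constant-folds to an immediate.
 */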
struct lower_io_state {
   nir_builder builder;
   void *dead_ctx;
   int (*type_size)(const struct glsl_type *type, bool);
   nir_variable_mode modes;
   nir_lower_io_options options;
};
static nir_intrinsic_op
ssbo_atomic_for_deref(nir_intrinsic_op deref_op)
{
   switch (deref_op) {
#define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_ssbo_##O;
   OP(atomic_exchange)
   OP(atomic_comp_swap)
   OP(atomic_add)
   OP(atomic_imin)
   OP(atomic_umin)
   OP(atomic_imax)
   OP(atomic_umax)
   OP(atomic_and)
   OP(atomic_or)
   OP(atomic_xor)
   OP(atomic_fadd)
   OP(atomic_fmin)
   OP(atomic_fmax)
   OP(atomic_fcomp_swap)
#undef OP
   default:
      unreachable("Invalid SSBO atomic");
   }
}
static nir_intrinsic_op
global_atomic_for_deref(nir_intrinsic_op deref_op)
{
   switch (deref_op) {
#define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_global_##O;
   OP(atomic_exchange)
   OP(atomic_comp_swap)
   OP(atomic_add)
   OP(atomic_imin)
   OP(atomic_umin)
   OP(atomic_imax)
   OP(atomic_umax)
   OP(atomic_and)
   OP(atomic_or)
   OP(atomic_xor)
   OP(atomic_fadd)
   OP(atomic_fmin)
   OP(atomic_fmax)
   OP(atomic_fcomp_swap)
#undef OP
   default:
      unreachable("Invalid global atomic");
   }
}
static nir_intrinsic_op
shared_atomic_for_deref(nir_intrinsic_op deref_op)
{
   switch (deref_op) {
#define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_shared_##O;
   OP(atomic_exchange)
   OP(atomic_comp_swap)
   OP(atomic_add)
   OP(atomic_imin)
   OP(atomic_umin)
   OP(atomic_imax)
   OP(atomic_umax)
   OP(atomic_and)
   OP(atomic_or)
   OP(atomic_xor)
   OP(atomic_fadd)
   OP(atomic_fmin)
   OP(atomic_fmax)
   OP(atomic_fcomp_swap)
#undef OP
   default:
      unreachable("Invalid shared atomic");
   }
}
void
nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                         int (*type_size)(const struct glsl_type *, bool))
{
   unsigned location = 0;

   nir_foreach_variable(var, var_list) {
      /*
       * UBOs have their own address spaces, so don't count them towards the
       * number of global uniforms
       */
      if (var->data.mode == nir_var_mem_ubo || var->data.mode == nir_var_mem_ssbo)
         continue;

      var->data.driver_location = location;
      bool bindless_type_size = var->data.mode == nir_var_shader_in ||
                                var->data.mode == nir_var_shader_out ||
                                var->data.bindless;
      location += type_size(var->type, bindless_type_size);
   }

   *size = location;
}
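/*
 * Usage sketch (hypothetical driver code, not part of this file): assign
 * driver locations for all inputs, counting one vec4 slot per location.
 *
 *    static int
 *    count_vec4_slots(const struct glsl_type *type, bool bindless)
 *    {
 *       return glsl_count_attribute_slots(type, false);
 *    }
 *
 *    unsigned num_inputs;
 *    nir_assign_var_locations(&shader->inputs, &num_inputs,
 *                             count_vec4_slots);
 */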
/**
 * Return true if the given variable is a per-vertex input/output array
 * (such as geometry shader inputs).
 */
bool
nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage)
{
   if (var->data.patch || !glsl_type_is_array(var->type))
      return false;

   if (var->data.mode == nir_var_shader_in)
      return stage == MESA_SHADER_GEOMETRY ||
             stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL;

   if (var->data.mode == nir_var_shader_out)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}
static nir_ssa_def *
get_io_offset(nir_builder *b, nir_deref_instr *deref,
              nir_ssa_def **vertex_index,
              int (*type_size)(const struct glsl_type *, bool),
              unsigned *component, bool bts)
{
   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);

   assert(path.path[0]->deref_type == nir_deref_type_var);
   nir_deref_instr **p = &path.path[1];

   /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
    * outermost array index separate. Process the rest normally.
    */
   if (vertex_index != NULL) {
      assert((*p)->deref_type == nir_deref_type_array);
      *vertex_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
      p++;
   }

   if (path.path[0]->var->data.compact) {
      assert((*p)->deref_type == nir_deref_type_array);
      assert(glsl_type_is_scalar((*p)->type));

      /* We always lower indirect dereferences for "compact" array vars. */
      const unsigned index = nir_src_as_uint((*p)->arr.index);
      const unsigned total_offset = *component + index;
      const unsigned slot_offset = total_offset / 4;
      *component = total_offset % 4;
      return nir_imm_int(b, type_size(glsl_vec4_type(), bts) * slot_offset);
   }

   /* Just emit code and let constant-folding go to town */
   nir_ssa_def *offset = nir_imm_int(b, 0);

   for (; *p; p++) {
      if ((*p)->deref_type == nir_deref_type_array) {
         unsigned size = type_size((*p)->type, bts);

         nir_ssa_def *mul =
            nir_imul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);

         offset = nir_iadd(b, offset, mul);
      } else if ((*p)->deref_type == nir_deref_type_struct) {
         /* p starts at path[1], so this is safe */
         nir_deref_instr *parent = *(p - 1);

         unsigned field_offset = 0;
         for (unsigned i = 0; i < (*p)->strct.index; i++) {
            field_offset += type_size(glsl_get_struct_field(parent->type, i), bts);
         }
         offset = nir_iadd_imm(b, offset, field_offset);
      } else {
         unreachable("Unsupported deref type");
      }
   }

   nir_deref_path_finish(&path);

   return offset;
}
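/*
 * Worked example (assuming a type_size callback that counts vec4 slots):
 * for "struct { vec4 a; float b[3]; } s" and the deref s.b[i], the struct
 * case contributes field_offset = type_size(vec4) = 1 slot to skip "a",
 * and the array case contributes i * type_size(float) = i slots, so the
 * final offset is 1 + i.  With a constant i this folds to an immediate.
 */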
static nir_ssa_def *
emit_load(struct lower_io_state *state,
          nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
          unsigned component, unsigned num_components, unsigned bit_size,
          nir_alu_type type)
{
   nir_builder *b = &state->builder;
   const nir_shader *nir = b->shader;
   nir_variable_mode mode = var->data.mode;
   nir_ssa_def *barycentric = NULL;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_shader_in:
      if (nir->info.stage == MESA_SHADER_FRAGMENT &&
          nir->options->use_interpolated_input_intrinsics &&
          var->data.interpolation != INTERP_MODE_FLAT) {
         assert(vertex_index == NULL);

         nir_intrinsic_op bary_op;
         if (var->data.sample ||
             (state->options & nir_lower_io_force_sample_interpolation))
            bary_op = nir_intrinsic_load_barycentric_sample;
         else if (var->data.centroid)
            bary_op = nir_intrinsic_load_barycentric_centroid;
         else
            bary_op = nir_intrinsic_load_barycentric_pixel;
         barycentric = nir_load_barycentric(&state->builder, bary_op,
                                            var->data.interpolation);
         op = nir_intrinsic_load_interpolated_input;
      } else {
         op = vertex_index ? nir_intrinsic_load_per_vertex_input :
                             nir_intrinsic_load_input;
      }
      break;
   case nir_var_shader_out:
      op = vertex_index ? nir_intrinsic_load_per_vertex_output :
                          nir_intrinsic_load_output;
      break;
   case nir_var_uniform:
      op = nir_intrinsic_load_uniform;
      break;
   case nir_var_mem_shared:
      op = nir_intrinsic_load_shared;
      break;
   default:
      unreachable("Unknown variable mode");
   }

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(state->builder.shader, op);
   load->num_components = num_components;

   nir_intrinsic_set_base(load, var->data.driver_location);
   if (mode == nir_var_shader_in || mode == nir_var_shader_out)
      nir_intrinsic_set_component(load, component);

   if (load->intrinsic == nir_intrinsic_load_uniform)
      nir_intrinsic_set_range(load,
                              state->type_size(var->type, var->data.bindless));

   if (load->intrinsic == nir_intrinsic_load_input ||
       load->intrinsic == nir_intrinsic_load_uniform)
      nir_intrinsic_set_type(load, type);

   if (vertex_index) {
      load->src[0] = nir_src_for_ssa(vertex_index);
      load->src[1] = nir_src_for_ssa(offset);
   } else if (barycentric) {
      load->src[0] = nir_src_for_ssa(barycentric);
      load->src[1] = nir_src_for_ssa(offset);
   } else {
      load->src[0] = nir_src_for_ssa(offset);
   }

   nir_ssa_dest_init(&load->instr, &load->dest,
                     num_components, bit_size, NULL);
   nir_builder_instr_insert(b, &load->instr);

   return &load->dest.ssa;
}
static nir_ssa_def *
lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
           nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
           unsigned component, const struct glsl_type *type)
{
   assert(intrin->dest.is_ssa);
   if (intrin->dest.ssa.bit_size == 64 &&
       (state->options & nir_lower_io_lower_64bit_to_32)) {
      nir_builder *b = &state->builder;

      const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);

      nir_ssa_def *comp64[4];
      assert(component == 0 || component == 2);
      unsigned dest_comp = 0;
      while (dest_comp < intrin->dest.ssa.num_components) {
         const unsigned num_comps =
            MIN2(intrin->dest.ssa.num_components - dest_comp,
                 (4 - component) / 2);

         nir_ssa_def *data32 =
            emit_load(state, vertex_index, var, offset, component,
                      num_comps * 2, 32, nir_type_uint32);
         for (unsigned i = 0; i < num_comps; i++) {
            comp64[dest_comp + i] =
               nir_pack_64_2x32(b, nir_channels(b, data32, 3 << (i * 2)));
         }

         /* Only the first store has a component offset */
         component = 0;
         dest_comp += num_comps;
         offset = nir_iadd_imm(b, offset, slot_size);
      }

      return nir_vec(b, comp64, intrin->dest.ssa.num_components);
   } else {
      return emit_load(state, vertex_index, var, offset, component,
                       intrin->dest.ssa.num_components,
                       intrin->dest.ssa.bit_size,
                       nir_get_nir_type_for_glsl_type(type));
   }
}
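/*
 * Worked example of the 64-bit splitting above: a dvec3 load starting at
 * component 0 becomes two 32-bit loads.  The first iteration loads four
 * 32-bit components from the first vec4 slot and packs them into two
 * doubles; the second loads two components from the next slot for the
 * third double.  The (4 - component) / 2 term limits each load to what
 * fits in one slot.
 */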
static void
emit_store(struct lower_io_state *state, nir_ssa_def *data,
           nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
           unsigned component, unsigned num_components,
           nir_component_mask_t write_mask, nir_alu_type type)
{
   nir_builder *b = &state->builder;
   nir_variable_mode mode = var->data.mode;

   nir_intrinsic_op op;
   if (mode == nir_var_mem_shared) {
      op = nir_intrinsic_store_shared;
   } else {
      assert(mode == nir_var_shader_out);
      op = vertex_index ? nir_intrinsic_store_per_vertex_output :
                          nir_intrinsic_store_output;
   }

   nir_intrinsic_instr *store =
      nir_intrinsic_instr_create(state->builder.shader, op);
   store->num_components = num_components;

   store->src[0] = nir_src_for_ssa(data);

   nir_intrinsic_set_base(store, var->data.driver_location);

   if (mode == nir_var_shader_out)
      nir_intrinsic_set_component(store, component);

   if (store->intrinsic == nir_intrinsic_store_output)
      nir_intrinsic_set_type(store, type);

   nir_intrinsic_set_write_mask(store, write_mask);

   if (vertex_index)
      store->src[1] = nir_src_for_ssa(vertex_index);

   store->src[vertex_index ? 2 : 1] = nir_src_for_ssa(offset);

   nir_builder_instr_insert(b, &store->instr);
}
static void
lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
            nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
            unsigned component, const struct glsl_type *type)
{
   assert(intrin->src[1].is_ssa);
   if (intrin->src[1].ssa->bit_size == 64 &&
       (state->options & nir_lower_io_lower_64bit_to_32)) {
      nir_builder *b = &state->builder;

      const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);

      assert(component == 0 || component == 2);
      unsigned src_comp = 0;
      nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
      while (src_comp < intrin->num_components) {
         const unsigned num_comps =
            MIN2(intrin->num_components - src_comp,
                 (4 - component) / 2);

         if (write_mask & BITFIELD_MASK(num_comps)) {
            nir_ssa_def *data =
               nir_channels(b, intrin->src[1].ssa,
                            BITFIELD_RANGE(src_comp, num_comps));
            nir_ssa_def *data32 = nir_bitcast_vector(b, data, 32);

            nir_component_mask_t write_mask32 = 0;
            for (unsigned i = 0; i < num_comps; i++) {
               if (write_mask & BITFIELD_MASK(num_comps) & (1 << i))
                  write_mask32 |= 3 << (i * 2);
            }

            emit_store(state, data32, vertex_index, var, offset,
                       component, data32->num_components, write_mask32,
                       nir_type_uint32);
         }

         /* Only the first store has a component offset */
         component = 0;
         src_comp += num_comps;
         write_mask >>= num_comps;
         offset = nir_iadd_imm(b, offset, slot_size);
      }
   } else {
      emit_store(state, intrin->src[1].ssa, vertex_index, var, offset,
                 component, intrin->num_components,
                 nir_intrinsic_write_mask(intrin),
                 nir_get_nir_type_for_glsl_type(type));
   }
}
static nir_ssa_def *
lower_atomic(nir_intrinsic_instr *intrin, struct lower_io_state *state,
             nir_variable *var, nir_ssa_def *offset)
{
   nir_builder *b = &state->builder;
   assert(var->data.mode == nir_var_mem_shared);

   nir_intrinsic_op op = shared_atomic_for_deref(intrin->intrinsic);

   nir_intrinsic_instr *atomic =
      nir_intrinsic_instr_create(state->builder.shader, op);

   nir_intrinsic_set_base(atomic, var->data.driver_location);

   atomic->src[0] = nir_src_for_ssa(offset);
   assert(nir_intrinsic_infos[intrin->intrinsic].num_srcs ==
          nir_intrinsic_infos[op].num_srcs);
   for (unsigned i = 1; i < nir_intrinsic_infos[op].num_srcs; i++) {
      nir_src_copy(&atomic->src[i], &intrin->src[i], atomic);
   }

   if (nir_intrinsic_infos[op].has_dest) {
      assert(intrin->dest.is_ssa);
      assert(nir_intrinsic_infos[intrin->intrinsic].has_dest);
      nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                        intrin->dest.ssa.num_components,
                        intrin->dest.ssa.bit_size, NULL);
   }

   nir_builder_instr_insert(b, &atomic->instr);

   return nir_intrinsic_infos[op].has_dest ? &atomic->dest.ssa : NULL;
}
static nir_ssa_def *
lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
                     nir_variable *var, nir_ssa_def *offset, unsigned component,
                     const struct glsl_type *type)
{
   nir_builder *b = &state->builder;
   assert(var->data.mode == nir_var_shader_in);

   /* Ignore interpolateAt() for flat variables - flat is flat. */
   if (var->data.interpolation == INTERP_MODE_FLAT)
      return lower_load(intrin, state, NULL, var, offset, component, type);

   /* None of the supported APIs allow interpolation on 64-bit things */
   assert(intrin->dest.is_ssa && intrin->dest.ssa.bit_size <= 32);

   nir_intrinsic_op bary_op;
   switch (intrin->intrinsic) {
   case nir_intrinsic_interp_deref_at_centroid:
      bary_op = (state->options & nir_lower_io_force_sample_interpolation) ?
                nir_intrinsic_load_barycentric_sample :
                nir_intrinsic_load_barycentric_centroid;
      break;
   case nir_intrinsic_interp_deref_at_sample:
      bary_op = nir_intrinsic_load_barycentric_at_sample;
      break;
   case nir_intrinsic_interp_deref_at_offset:
      bary_op = nir_intrinsic_load_barycentric_at_offset;
      break;
   default:
      unreachable("Bogus interpolateAt() intrinsic.");
   }

   nir_intrinsic_instr *bary_setup =
      nir_intrinsic_instr_create(state->builder.shader, bary_op);

   nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32, NULL);
   nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);

   if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
       intrin->intrinsic == nir_intrinsic_interp_deref_at_offset)
      nir_src_copy(&bary_setup->src[0], &intrin->src[1], bary_setup);

   nir_builder_instr_insert(b, &bary_setup->instr);

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(state->builder.shader,
                                 nir_intrinsic_load_interpolated_input);
   load->num_components = intrin->num_components;

   nir_intrinsic_set_base(load, var->data.driver_location);
   nir_intrinsic_set_component(load, component);

   load->src[0] = nir_src_for_ssa(&bary_setup->dest.ssa);
   load->src[1] = nir_src_for_ssa(offset);

   assert(intrin->dest.is_ssa);
   nir_ssa_dest_init(&load->instr, &load->dest,
                     intrin->dest.ssa.num_components,
                     intrin->dest.ssa.bit_size, NULL);
   nir_builder_instr_insert(b, &load->instr);

   return &load->dest.ssa;
}
static bool
nir_lower_io_block(nir_block *block,
                   struct lower_io_state *state)
{
   nir_builder *b = &state->builder;
   const nir_shader_compiler_options *options = b->shader->options;
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_deref:
      case nir_intrinsic_store_deref:
      case nir_intrinsic_deref_atomic_add:
      case nir_intrinsic_deref_atomic_imin:
      case nir_intrinsic_deref_atomic_umin:
      case nir_intrinsic_deref_atomic_imax:
      case nir_intrinsic_deref_atomic_umax:
      case nir_intrinsic_deref_atomic_and:
      case nir_intrinsic_deref_atomic_or:
      case nir_intrinsic_deref_atomic_xor:
      case nir_intrinsic_deref_atomic_exchange:
      case nir_intrinsic_deref_atomic_comp_swap:
      case nir_intrinsic_deref_atomic_fadd:
      case nir_intrinsic_deref_atomic_fmin:
      case nir_intrinsic_deref_atomic_fmax:
      case nir_intrinsic_deref_atomic_fcomp_swap:
         /* We can lower the IO for this nir intrinsic */
         break;
      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
         /* We can optionally lower these to load_interpolated_input */
         if (options->use_interpolated_input_intrinsics)
            break;
      default:
         /* We can't lower the IO for this nir intrinsic, so skip it */
         continue;
      }

      nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);

      nir_variable_mode mode = deref->mode;

      if ((state->modes & mode) == 0)
         continue;

      if (mode != nir_var_shader_in &&
          mode != nir_var_shader_out &&
          mode != nir_var_mem_shared &&
          mode != nir_var_uniform)
         continue;

      nir_variable *var = nir_deref_instr_get_variable(deref);

      b->cursor = nir_before_instr(instr);

      const bool per_vertex = nir_is_per_vertex_io(var, b->shader->info.stage);

      nir_ssa_def *offset;
      nir_ssa_def *vertex_index = NULL;
      unsigned component_offset = var->data.location_frac;
      bool bindless_type_size = mode == nir_var_shader_in ||
                                mode == nir_var_shader_out ||
                                var->data.bindless;

      offset = get_io_offset(b, deref, per_vertex ? &vertex_index : NULL,
                             state->type_size, &component_offset,
                             bindless_type_size);

      nir_ssa_def *replacement = NULL;

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_deref:
         replacement = lower_load(intrin, state, vertex_index, var, offset,
                                  component_offset, deref->type);
         break;

      case nir_intrinsic_store_deref:
         lower_store(intrin, state, vertex_index, var, offset,
                     component_offset, deref->type);
         break;

      case nir_intrinsic_deref_atomic_add:
      case nir_intrinsic_deref_atomic_imin:
      case nir_intrinsic_deref_atomic_umin:
      case nir_intrinsic_deref_atomic_imax:
      case nir_intrinsic_deref_atomic_umax:
      case nir_intrinsic_deref_atomic_and:
      case nir_intrinsic_deref_atomic_or:
      case nir_intrinsic_deref_atomic_xor:
      case nir_intrinsic_deref_atomic_exchange:
      case nir_intrinsic_deref_atomic_comp_swap:
      case nir_intrinsic_deref_atomic_fadd:
      case nir_intrinsic_deref_atomic_fmin:
      case nir_intrinsic_deref_atomic_fmax:
      case nir_intrinsic_deref_atomic_fcomp_swap:
         assert(vertex_index == NULL);
         replacement = lower_atomic(intrin, state, var, offset);
         break;

      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
         assert(vertex_index == NULL);
         replacement = lower_interpolate_at(intrin, state, var, offset,
                                            component_offset, deref->type);
         break;

      default:
         continue;
      }

      if (replacement) {
         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                  nir_src_for_ssa(replacement));
      }
      nir_instr_remove(&intrin->instr);
      progress = true;
   }

   return progress;
}
static bool
nir_lower_io_impl(nir_function_impl *impl,
                  nir_variable_mode modes,
                  int (*type_size)(const struct glsl_type *, bool),
                  nir_lower_io_options options)
{
   struct lower_io_state state;
   bool progress = false;

   nir_builder_init(&state.builder, impl);
   state.dead_ctx = ralloc_context(NULL);
   state.modes = modes;
   state.type_size = type_size;
   state.options = options;

   nir_foreach_block(block, impl) {
      progress |= nir_lower_io_block(block, &state);
   }

   ralloc_free(state.dead_ctx);

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   return progress;
}
bool
nir_lower_io(nir_shader *shader, nir_variable_mode modes,
             int (*type_size)(const struct glsl_type *, bool),
             nir_lower_io_options options)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl) {
         progress |= nir_lower_io_impl(function->impl, modes,
                                       type_size, options);
      }
   }

   return progress;
}
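/*
 * Usage sketch (hypothetical driver code; the count_vec4_slots callback is
 * an assumption, not part of this file):
 *
 *    nir_lower_io(shader, nir_var_shader_in | nir_var_shader_out,
 *                 count_vec4_slots, nir_lower_io_lower_64bit_to_32);
 *
 * Afterwards, every shader_in/shader_out deref in the shader has been
 * replaced by a load_input/store_output-style intrinsic.
 */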
static unsigned
type_scalar_size_bytes(const struct glsl_type *type)
{
   assert(glsl_type_is_vector_or_scalar(type) ||
          glsl_type_is_matrix(type));
   return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
}
static nir_ssa_def *
build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
                nir_address_format addr_format, nir_ssa_def *offset)
{
   assert(offset->num_components == 1);
   assert(addr->bit_size == offset->bit_size);

   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
   case nir_address_format_32bit_offset:
      assert(addr->num_components == 1);
      return nir_iadd(b, addr, offset);

   case nir_address_format_64bit_bounded_global:
      assert(addr->num_components == 4);
      return nir_vec4(b, nir_channel(b, addr, 0),
                         nir_channel(b, addr, 1),
                         nir_channel(b, addr, 2),
                         nir_iadd(b, nir_channel(b, addr, 3), offset));

   case nir_address_format_32bit_index_offset:
      assert(addr->num_components == 2);
      return nir_vec2(b, nir_channel(b, addr, 0),
                         nir_iadd(b, nir_channel(b, addr, 1), offset));

   case nir_address_format_logical:
      unreachable("Unsupported address format");
   }

   unreachable("Invalid address format");
}
static nir_ssa_def *
build_addr_iadd_imm(nir_builder *b, nir_ssa_def *addr,
                    nir_address_format addr_format, int64_t offset)
{
   return build_addr_iadd(b, addr, addr_format,
                          nir_imm_intN_t(b, offset, addr->bit_size));
}
static nir_ssa_def *
addr_to_index(nir_builder *b, nir_ssa_def *addr,
              nir_address_format addr_format)
{
   assert(addr_format == nir_address_format_32bit_index_offset);
   assert(addr->num_components == 2);
   return nir_channel(b, addr, 0);
}
static nir_ssa_def *
addr_to_offset(nir_builder *b, nir_ssa_def *addr,
               nir_address_format addr_format)
{
   assert(addr_format == nir_address_format_32bit_index_offset);
   assert(addr->num_components == 2);
   return nir_channel(b, addr, 1);
}
/** Returns true if the given address format resolves to a global address */
static bool
addr_format_is_global(nir_address_format addr_format)
{
   return addr_format == nir_address_format_32bit_global ||
          addr_format == nir_address_format_64bit_global ||
          addr_format == nir_address_format_64bit_bounded_global;
}
static nir_ssa_def *
addr_to_global(nir_builder *b, nir_ssa_def *addr,
               nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
      assert(addr->num_components == 1);
      return addr;

   case nir_address_format_64bit_bounded_global:
      assert(addr->num_components == 4);
      return nir_iadd(b, nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)),
                         nir_u2u64(b, nir_channel(b, addr, 3)));

   case nir_address_format_32bit_index_offset:
   case nir_address_format_32bit_offset:
   case nir_address_format_logical:
      unreachable("Cannot get a 64-bit address with this address format");
   }

   unreachable("Invalid address format");
}
static bool
addr_format_needs_bounds_check(nir_address_format addr_format)
{
   return addr_format == nir_address_format_64bit_bounded_global;
}

static nir_ssa_def *
addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
                  nir_address_format addr_format, unsigned size)
{
   assert(addr_format == nir_address_format_64bit_bounded_global);
   assert(addr->num_components == 4);
   return nir_ige(b, nir_channel(b, addr, 2),
                     nir_iadd_imm(b, nir_channel(b, addr, 3), size));
}
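/*
 * Note on the bounded format, as implied by the helpers above: a
 * nir_address_format_64bit_bounded_global address is a vec4 of
 * {base_lo, base_hi, size, offset}.  addr_to_global() computes
 * pack_64(base_lo, base_hi) + offset, and addr_is_in_bounds() checks
 * size >= offset + access_size.  For example, with size = 16 and
 * offset = 12, a 4-byte access is in bounds (16 >= 12 + 4) while an
 * 8-byte access is not.
 */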
static nir_ssa_def *
build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
                       nir_ssa_def *addr, nir_address_format addr_format,
                       unsigned num_components)
{
   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_mem_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case nir_var_mem_ssbo:
      if (addr_format_is_global(addr_format))
         op = nir_intrinsic_load_global;
      else
         op = nir_intrinsic_load_ssbo;
      break;
   case nir_var_mem_global:
      assert(addr_format_is_global(addr_format));
      op = nir_intrinsic_load_global;
      break;
   case nir_var_shader_in:
      assert(addr_format_is_global(addr_format));
      op = nir_intrinsic_load_kernel_input;
      break;
   case nir_var_mem_shared:
      assert(addr_format == nir_address_format_32bit_offset);
      op = nir_intrinsic_load_shared;
      break;
   default:
      unreachable("Unsupported explicit IO variable mode");
   }

   nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);

   if (addr_format_is_global(addr_format)) {
      load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
   } else if (addr_format == nir_address_format_32bit_offset) {
      assert(addr->num_components == 1);
      load->src[0] = nir_src_for_ssa(addr);
   } else {
      load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
      load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   }

   if (mode != nir_var_mem_ubo && mode != nir_var_shader_in && mode != nir_var_mem_shared)
      nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));

   /* TODO: We should try and provide a better alignment.  For OpenCL, we need
    * to plumb the alignment through from SPIR-V when we have one.
    */
   nir_intrinsic_set_align(load, intrin->dest.ssa.bit_size / 8, 0);

   assert(intrin->dest.is_ssa);
   load->num_components = num_components;
   nir_ssa_dest_init(&load->instr, &load->dest, num_components,
                     intrin->dest.ssa.bit_size, intrin->dest.ssa.name);

   assert(load->dest.ssa.bit_size % 8 == 0);

   if (addr_format_needs_bounds_check(addr_format)) {
      /* The Vulkan spec for robustBufferAccess gives us quite a few options
       * as to what we can do with an OOB read.  Unfortunately, returning
       * undefined values isn't one of them so we return an actual zero.
       */
      nir_ssa_def *zero = nir_imm_zero(b, load->num_components,
                                          load->dest.ssa.bit_size);

      const unsigned load_size =
         (load->dest.ssa.bit_size / 8) * load->num_components;
      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));

      nir_builder_instr_insert(b, &load->instr);

      nir_pop_if(b, NULL);

      return nir_if_phi(b, &load->dest.ssa, zero);
   } else {
      nir_builder_instr_insert(b, &load->instr);
      return &load->dest.ssa;
   }
}
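/*
 * The bounds-checked path above generates control flow of roughly this
 * shape (illustrative):
 *
 *    if (addr.size >= addr.offset + load_size) {
 *       val = load(addr);
 *    }
 *    result = phi(val, zero);
 *
 * so an out-of-bounds read yields zero rather than an undefined value.
 */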
static void
build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
                        nir_ssa_def *addr, nir_address_format addr_format,
                        nir_ssa_def *value, nir_component_mask_t write_mask)
{
   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_mem_ssbo:
      if (addr_format_is_global(addr_format))
         op = nir_intrinsic_store_global;
      else
         op = nir_intrinsic_store_ssbo;
      break;
   case nir_var_mem_global:
      assert(addr_format_is_global(addr_format));
      op = nir_intrinsic_store_global;
      break;
   case nir_var_mem_shared:
      assert(addr_format == nir_address_format_32bit_offset);
      op = nir_intrinsic_store_shared;
      break;
   default:
      unreachable("Unsupported explicit IO variable mode");
   }

   nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);

   store->src[0] = nir_src_for_ssa(value);
   if (addr_format_is_global(addr_format)) {
      store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
   } else if (addr_format == nir_address_format_32bit_offset) {
      assert(addr->num_components == 1);
      store->src[1] = nir_src_for_ssa(addr);
   } else {
      store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
      store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   }

   nir_intrinsic_set_write_mask(store, write_mask);

   if (mode != nir_var_mem_shared)
      nir_intrinsic_set_access(store, nir_intrinsic_access(intrin));

   /* TODO: We should try and provide a better alignment.  For OpenCL, we need
    * to plumb the alignment through from SPIR-V when we have one.
    */
   nir_intrinsic_set_align(store, value->bit_size / 8, 0);

   assert(value->num_components == 1 ||
          value->num_components == intrin->num_components);
   store->num_components = value->num_components;

   assert(value->bit_size % 8 == 0);

   if (addr_format_needs_bounds_check(addr_format)) {
      const unsigned store_size = (value->bit_size / 8) * store->num_components;
      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));

      nir_builder_instr_insert(b, &store->instr);

      nir_pop_if(b, NULL);
   } else {
      nir_builder_instr_insert(b, &store->instr);
   }
}
static nir_ssa_def *
build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
                         nir_ssa_def *addr, nir_address_format addr_format)
{
   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
   const unsigned num_data_srcs =
      nir_intrinsic_infos[intrin->intrinsic].num_srcs - 1;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_mem_ssbo:
      if (addr_format_is_global(addr_format))
         op = global_atomic_for_deref(intrin->intrinsic);
      else
         op = ssbo_atomic_for_deref(intrin->intrinsic);
      break;
   case nir_var_mem_global:
      assert(addr_format_is_global(addr_format));
      op = global_atomic_for_deref(intrin->intrinsic);
      break;
   case nir_var_mem_shared:
      assert(addr_format == nir_address_format_32bit_offset);
      op = shared_atomic_for_deref(intrin->intrinsic);
      break;
   default:
      unreachable("Unsupported explicit IO variable mode");
   }

   nir_intrinsic_instr *atomic = nir_intrinsic_instr_create(b->shader, op);

   unsigned src = 0;
   if (addr_format_is_global(addr_format)) {
      atomic->src[src++] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
   } else if (addr_format == nir_address_format_32bit_offset) {
      assert(addr->num_components == 1);
      atomic->src[src++] = nir_src_for_ssa(addr);
   } else {
      atomic->src[src++] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
      atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   }
   for (unsigned i = 0; i < num_data_srcs; i++) {
      atomic->src[src++] = nir_src_for_ssa(intrin->src[1 + i].ssa);
   }

   /* Global atomics don't have access flags because they assume that the
    * address may be non-uniform.
    */
   if (!addr_format_is_global(addr_format) && mode != nir_var_mem_shared)
      nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));

   assert(intrin->dest.ssa.num_components == 1);
   nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                     1, intrin->dest.ssa.bit_size, intrin->dest.ssa.name);

   assert(atomic->dest.ssa.bit_size % 8 == 0);

   if (addr_format_needs_bounds_check(addr_format)) {
      const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));

      nir_builder_instr_insert(b, &atomic->instr);

      nir_pop_if(b, NULL);
      return nir_if_phi(b, &atomic->dest.ssa,
                        nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
   } else {
      nir_builder_instr_insert(b, &atomic->instr);
      return &atomic->dest.ssa;
   }
}
nir_ssa_def *
nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
                                   nir_ssa_def *base_addr,
                                   nir_address_format addr_format)
{
   assert(deref->dest.is_ssa);
   switch (deref->deref_type) {
   case nir_deref_type_var:
      assert(deref->mode & (nir_var_shader_in | nir_var_mem_shared));
      return nir_imm_intN_t(b, deref->var->data.driver_location,
                            deref->dest.ssa.bit_size);

   case nir_deref_type_array: {
      nir_deref_instr *parent = nir_deref_instr_parent(deref);

      unsigned stride = glsl_get_explicit_stride(parent->type);
      if ((glsl_type_is_matrix(parent->type) &&
           glsl_matrix_type_is_row_major(parent->type)) ||
          (glsl_type_is_vector(parent->type) && stride == 0))
         stride = type_scalar_size_bytes(parent->type);

      assert(stride > 0);

      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
      index = nir_i2i(b, index, base_addr->bit_size);
      return build_addr_iadd(b, base_addr, addr_format,
                             nir_imul_imm(b, index, stride));
   }

   case nir_deref_type_ptr_as_array: {
      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
      index = nir_i2i(b, index, base_addr->bit_size);
      unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
      return build_addr_iadd(b, base_addr, addr_format,
                             nir_imul_imm(b, index, stride));
   }

   case nir_deref_type_array_wildcard:
      unreachable("Wildcards should be lowered by now");
      break;

   case nir_deref_type_struct: {
      nir_deref_instr *parent = nir_deref_instr_parent(deref);
      int offset = glsl_get_struct_field_offset(parent->type,
                                                deref->strct.index);
      assert(offset >= 0);
      return build_addr_iadd_imm(b, base_addr, addr_format, offset);
   }

   case nir_deref_type_cast:
      /* Nothing to do here */
      return base_addr;
   }

   unreachable("Invalid NIR deref type");
}
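/*
 * Worked example (assuming std430-style explicit strides): for an SSBO
 * variable "struct { float pad; vec2 v[8]; } s" and the deref chain
 * s.v[i].y, the address is built up as
 *
 *    base + 8         (struct: byte offset of field "v")
 *         + i * 8     (array: explicit stride of vec2)
 *         + 1 * 4     (vector: scalar size of float)
 *
 * with one call to this function per deref in the chain.
 */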
void
nir_lower_explicit_io_instr(nir_builder *b,
                            nir_intrinsic_instr *intrin,
                            nir_ssa_def *addr,
                            nir_address_format addr_format)
{
   b->cursor = nir_after_instr(&intrin->instr);

   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   unsigned vec_stride = glsl_get_explicit_stride(deref->type);
   unsigned scalar_size = type_scalar_size_bytes(deref->type);
   assert(vec_stride == 0 || glsl_type_is_vector(deref->type));
   assert(vec_stride == 0 || vec_stride >= scalar_size);

   if (intrin->intrinsic == nir_intrinsic_load_deref) {
      nir_ssa_def *value;
      if (vec_stride > scalar_size) {
         nir_ssa_def *comps[4] = { NULL, };
         for (unsigned i = 0; i < intrin->num_components; i++) {
            nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
                                                         vec_stride * i);
            comps[i] = build_explicit_io_load(b, intrin, comp_addr,
                                              addr_format, 1);
         }
         value = nir_vec(b, comps, intrin->num_components);
      } else {
         value = build_explicit_io_load(b, intrin, addr, addr_format,
                                        intrin->num_components);
      }
      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
   } else if (intrin->intrinsic == nir_intrinsic_store_deref) {
      assert(intrin->src[1].is_ssa);
      nir_ssa_def *value = intrin->src[1].ssa;
      nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
      if (vec_stride > scalar_size) {
         for (unsigned i = 0; i < intrin->num_components; i++) {
            if (!(write_mask & (1 << i)))
               continue;

            nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
                                                         vec_stride * i);
            build_explicit_io_store(b, intrin, comp_addr, addr_format,
                                    nir_channel(b, value, i), 1);
         }
      } else {
         build_explicit_io_store(b, intrin, addr, addr_format,
                                 value, write_mask);
      }
   } else {
      nir_ssa_def *value =
         build_explicit_io_atomic(b, intrin, addr, addr_format);
      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
   }

   nir_instr_remove(&intrin->instr);
}
static void
lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
                        nir_address_format addr_format)
{
   /* Just delete the deref if it's not used.  We can't use
    * nir_deref_instr_remove_if_unused here because it may remove more than
    * one deref which could break our list walking since we walk the list
    * backwards.
    */
   assert(list_empty(&deref->dest.ssa.if_uses));
   if (list_empty(&deref->dest.ssa.uses)) {
      nir_instr_remove(&deref->instr);
      return;
   }

   b->cursor = nir_after_instr(&deref->instr);

   nir_ssa_def *base_addr = NULL;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->parent.is_ssa);
      base_addr = deref->parent.ssa;
   }

   nir_ssa_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
                                                          addr_format);

   nir_instr_remove(&deref->instr);
   nir_ssa_def_rewrite_uses(&deref->dest.ssa, nir_src_for_ssa(addr));
}
static void
lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
                         nir_address_format addr_format)
{
   assert(intrin->src[0].is_ssa);
   nir_lower_explicit_io_instr(b, intrin, intrin->src[0].ssa, addr_format);
}
static void
lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
                               nir_address_format addr_format)
{
   b->cursor = nir_after_instr(&intrin->instr);

   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);

   assert(glsl_type_is_array(deref->type));
   assert(glsl_get_length(deref->type) == 0);
   unsigned stride = glsl_get_explicit_stride(deref->type);
   assert(stride > 0);

   assert(addr_format == nir_address_format_32bit_index_offset);
   nir_ssa_def *addr = &deref->dest.ssa;
   nir_ssa_def *index = addr_to_index(b, addr, addr_format);
   nir_ssa_def *offset = addr_to_offset(b, addr, addr_format);

   nir_intrinsic_instr *bsize =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_get_buffer_size);
   bsize->src[0] = nir_src_for_ssa(index);
   nir_ssa_dest_init(&bsize->instr, &bsize->dest, 1, 32, NULL);
   nir_builder_instr_insert(b, &bsize->instr);

   nir_ssa_def *arr_size =
      nir_idiv(b, nir_isub(b, &bsize->dest.ssa, offset),
                  nir_imm_int(b, stride));

   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(arr_size));
   nir_instr_remove(&intrin->instr);
}
static bool
nir_lower_explicit_io_impl(nir_function_impl *impl, nir_variable_mode modes,
                           nir_address_format addr_format)
{
   bool progress = false;

   nir_builder b;
   nir_builder_init(&b, impl);

   /* Walk in reverse order so that we can see the full deref chain when we
    * lower the access operations.  We lower them assuming that the derefs
    * will be turned into address calculations later.
    */
   nir_foreach_block_reverse(block, impl) {
      nir_foreach_instr_reverse_safe(instr, block) {
         switch (instr->type) {
         case nir_instr_type_deref: {
            nir_deref_instr *deref = nir_instr_as_deref(instr);
            if (deref->mode & modes) {
               lower_explicit_io_deref(&b, deref, addr_format);
               progress = true;
            }
            break;
         }

         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            switch (intrin->intrinsic) {
            case nir_intrinsic_load_deref:
            case nir_intrinsic_store_deref:
            case nir_intrinsic_deref_atomic_add:
            case nir_intrinsic_deref_atomic_imin:
            case nir_intrinsic_deref_atomic_umin:
            case nir_intrinsic_deref_atomic_imax:
            case nir_intrinsic_deref_atomic_umax:
            case nir_intrinsic_deref_atomic_and:
            case nir_intrinsic_deref_atomic_or:
            case nir_intrinsic_deref_atomic_xor:
            case nir_intrinsic_deref_atomic_exchange:
            case nir_intrinsic_deref_atomic_comp_swap:
            case nir_intrinsic_deref_atomic_fadd:
            case nir_intrinsic_deref_atomic_fmin:
            case nir_intrinsic_deref_atomic_fmax:
            case nir_intrinsic_deref_atomic_fcomp_swap: {
               nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
               if (deref->mode & modes) {
                  lower_explicit_io_access(&b, intrin, addr_format);
                  progress = true;
               }
               break;
            }

            case nir_intrinsic_deref_buffer_array_length: {
               nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
               if (deref->mode & modes) {
                  lower_explicit_io_array_length(&b, intrin, addr_format);
                  progress = true;
               }
               break;
            }

            default:
               break;
            }
            break;
         }

         default:
            /* Nothing to do */
            break;
         }
      }
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   return progress;
}
bool
nir_lower_explicit_io(nir_shader *shader, nir_variable_mode modes,
                      nir_address_format addr_format)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl &&
          nir_lower_explicit_io_impl(function->impl, modes, addr_format))
         progress = true;
   }

   return progress;
}
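/*
 * Usage sketch (hypothetical; mirrors how a Vulkan-style driver might lower
 * buffer access, not code from this file):
 *
 *    nir_lower_explicit_io(shader, nir_var_mem_ssbo | nir_var_mem_ubo,
 *                          nir_address_format_32bit_index_offset);
 *
 * All derefs in the given modes become vec2 (index, offset) address values
 * and the access intrinsics become load_ssbo/store_ssbo and friends.
 */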
/**
 * Return the offset source for a load/store intrinsic.
 */
nir_src *
nir_get_io_offset_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_shared:
   case nir_intrinsic_load_uniform:
   case nir_intrinsic_load_global:
   case nir_intrinsic_load_scratch:
   case nir_intrinsic_load_fs_input_interp_deltas:
      return &instr->src[0];
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_shared:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_scratch:
      return &instr->src[1];
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[2];
   default:
      return NULL;
   }
}
/**
 * Return the vertex index source for a load/store per_vertex intrinsic.
 */
nir_src *
nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
      return &instr->src[0];
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[1];
   default:
      return NULL;
   }
}
/**
 * Return the numeric constant that identifies a NULL pointer for each address
 * format.
 */
const nir_const_value *
nir_address_format_null_value(nir_address_format addr_format)
{
   static const nir_const_value null_values[][NIR_MAX_VEC_COMPONENTS] = {
      [nir_address_format_32bit_global] = {{0}},
      [nir_address_format_64bit_global] = {{0}},
      [nir_address_format_64bit_bounded_global] = {{0}},
      [nir_address_format_32bit_index_offset] = {{.u32 = ~0}, {.u32 = ~0}},
      [nir_address_format_32bit_offset] = {{.u32 = ~0}},
      [nir_address_format_logical] = {{.u32 = ~0}},
   };

   assert(addr_format < ARRAY_SIZE(null_values));
   return null_values[addr_format];
}
nir_ssa_def *
nir_build_addr_ieq(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
                   nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
   case nir_address_format_64bit_bounded_global:
   case nir_address_format_32bit_index_offset:
   case nir_address_format_32bit_offset:
      return nir_ball_iequal(b, addr0, addr1);

   case nir_address_format_logical:
      unreachable("Unsupported address format");
   }

   unreachable("Invalid address format");
}
nir_ssa_def *
nir_build_addr_isub(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
                    nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
   case nir_address_format_32bit_offset:
      assert(addr0->num_components == 1);
      assert(addr1->num_components == 1);
      return nir_isub(b, addr0, addr1);

   case nir_address_format_64bit_bounded_global:
      return nir_isub(b, addr_to_global(b, addr0, addr_format),
                         addr_to_global(b, addr1, addr_format));

   case nir_address_format_32bit_index_offset:
      assert(addr0->num_components == 2);
      assert(addr1->num_components == 2);
      /* Assume the same buffer index. */
      return nir_isub(b, nir_channel(b, addr0, 1), nir_channel(b, addr1, 1));

   case nir_address_format_logical:
      unreachable("Unsupported address format");
   }

   unreachable("Invalid address format");
}
static bool
is_input(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_input ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
          intrin->intrinsic == nir_intrinsic_load_interpolated_input ||
          intrin->intrinsic == nir_intrinsic_load_fs_input_interp_deltas;
}

static bool
is_output(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_output ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
          intrin->intrinsic == nir_intrinsic_store_output ||
          intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
}
/**
 * This pass adds constant offsets to instr->const_index[0] for input/output
 * intrinsics, and resets the offset source to 0.  Non-constant offsets remain
 * unchanged - since we don't know what part of a compound variable is
 * accessed, we allocate storage for the entire thing.  For drivers that use
 * nir_lower_io_to_temporaries() before nir_lower_io(), this guarantees that
 * the offset source will be 0, so that they don't have to add it in manually.
 */

static bool
add_const_offset_to_base_block(nir_block *block, nir_builder *b,
                               nir_variable_mode mode)
{
   bool progress = false;
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if ((mode == nir_var_shader_in && is_input(intrin)) ||
          (mode == nir_var_shader_out && is_output(intrin))) {
         nir_src *offset = nir_get_io_offset_src(intrin);

         if (nir_src_is_const(*offset)) {
            intrin->const_index[0] += nir_src_as_uint(*offset);
            b->cursor = nir_before_instr(&intrin->instr);
            nir_instr_rewrite_src(&intrin->instr, offset,
                                  nir_src_for_ssa(nir_imm_int(b, 0)));
            progress = true;
         }
      }
   }

   return progress;
}
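/*
 * For example (illustrative): a load_input with base=1 whose offset source
 * is the constant 2 becomes a load_input with base=3 whose offset source is
 * a freshly-built constant zero.
 */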
bool
nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
{
   bool progress = false;

   nir_foreach_function(f, nir) {
      if (f->impl) {
         nir_builder b;
         nir_builder_init(&b, f->impl);
         nir_foreach_block(block, f->impl) {
            progress |= add_const_offset_to_base_block(block, &b, mode);
         }
      }
   }

   return progress;
}