/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *    Jason Ekstrand (jason@jlekstrand.net)
 */

/*
 * This lowering pass converts references to input/output variables (loads and
 * stores through derefs) into the corresponding input/output intrinsics.
 */

#include "nir_builder.h"
#include "nir_deref.h"

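/*
 * Rough usage sketch (illustrative, not part of this file): a driver would
 * typically run nir_lower_io over its per-stage NIR with a type_size callback
 * measured in whatever units its backend expects.  The callback name below is
 * hypothetical; glsl_count_attribute_slots() is just one plausible vec4-slot
 * metric.
 *
 *    static int
 *    count_vec4_slots(const struct glsl_type *type, bool bindless)
 *    {
 *       return glsl_count_attribute_slots(type, false);
 *    }
 *
 *    NIR_PASS_V(nir, nir_lower_io,
 *               nir_var_shader_in | nir_var_shader_out,
 *               count_vec4_slots, (nir_lower_io_options)0);
 */
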
struct lower_io_state {
   nir_builder builder;
   void *dead_ctx;
   int (*type_size)(const struct glsl_type *type, bool);
   nir_variable_mode modes;
   nir_lower_io_options options;
};

static nir_intrinsic_op
ssbo_atomic_for_deref(nir_intrinsic_op deref_op)
{
   switch (deref_op) {
#define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_ssbo_##O;
   OP(atomic_exchange)
   OP(atomic_comp_swap)
   OP(atomic_add)
   OP(atomic_imin)
   OP(atomic_umin)
   OP(atomic_imax)
   OP(atomic_umax)
   OP(atomic_and)
   OP(atomic_or)
   OP(atomic_xor)
   OP(atomic_fadd)
   OP(atomic_fmin)
   OP(atomic_fmax)
   OP(atomic_fcomp_swap)
#undef OP
   default:
      unreachable("Invalid SSBO atomic");
   }
}

static nir_intrinsic_op
global_atomic_for_deref(nir_intrinsic_op deref_op)
{
   switch (deref_op) {
#define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_global_##O;
   OP(atomic_exchange)
   OP(atomic_comp_swap)
   OP(atomic_add)
   OP(atomic_imin)
   OP(atomic_umin)
   OP(atomic_imax)
   OP(atomic_umax)
   OP(atomic_and)
   OP(atomic_or)
   OP(atomic_xor)
   OP(atomic_fadd)
   OP(atomic_fmin)
   OP(atomic_fmax)
   OP(atomic_fcomp_swap)
#undef OP
   default:
      unreachable("Invalid global atomic");
   }
}

void
nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                         int (*type_size)(const struct glsl_type *, bool))
{
   unsigned location = 0;

   nir_foreach_variable(var, var_list) {
      /*
       * UBOs have their own address spaces, so don't count them towards the
       * number of global uniforms
       */
      if (var->data.mode == nir_var_mem_ubo || var->data.mode == nir_var_mem_ssbo)
         continue;

      var->data.driver_location = location;
      bool bindless_type_size = var->data.mode == nir_var_shader_in ||
                                var->data.mode == nir_var_shader_out ||
                                var->data.bindless;
      location += type_size(var->type, bindless_type_size);
   }

   *size = location;
}

/**
 * Return true if the given variable is a per-vertex input/output array
 * (such as geometry shader inputs).
 */
bool
nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage)
{
   if (var->data.patch || !glsl_type_is_array(var->type))
      return false;

   if (var->data.mode == nir_var_shader_in)
      return stage == MESA_SHADER_GEOMETRY ||
             stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL;

   if (var->data.mode == nir_var_shader_out)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}

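/* Builds the offset source for the lowered intrinsic by walking the deref
 * chain from the variable outwards, measuring each step with the caller's
 * type_size callback.  For per-vertex arrays the outermost array index is
 * returned separately through *vertex_index rather than folded into the
 * offset.
 */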
static nir_ssa_def *
get_io_offset(nir_builder *b, nir_deref_instr *deref,
              nir_ssa_def **vertex_index,
              int (*type_size)(const struct glsl_type *, bool),
              unsigned *component, bool bts)
{
   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);

   assert(path.path[0]->deref_type == nir_deref_type_var);
   nir_deref_instr **p = &path.path[1];

   /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
    * outermost array index separate.  Process the rest normally.
    */
   if (vertex_index != NULL) {
      assert((*p)->deref_type == nir_deref_type_array);
      *vertex_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
      p++;
   }

   if (path.path[0]->var->data.compact) {
      assert((*p)->deref_type == nir_deref_type_array);
      assert(glsl_type_is_scalar((*p)->type));

      /* We always lower indirect dereferences for "compact" array vars. */
      const unsigned index = nir_src_as_uint((*p)->arr.index);
      const unsigned total_offset = *component + index;
      const unsigned slot_offset = total_offset / 4;
      *component = total_offset % 4;
      return nir_imm_int(b, type_size(glsl_vec4_type(), bts) * slot_offset);
   }

   /* Just emit code and let constant-folding go to town */
   nir_ssa_def *offset = nir_imm_int(b, 0);

   for (; *p; p++) {
      if ((*p)->deref_type == nir_deref_type_array) {
         unsigned size = type_size((*p)->type, bts);

         nir_ssa_def *mul =
            nir_imul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);

         offset = nir_iadd(b, offset, mul);
      } else if ((*p)->deref_type == nir_deref_type_struct) {
         /* p starts at path[1], so this is safe */
         nir_deref_instr *parent = *(p - 1);

         unsigned field_offset = 0;
         for (unsigned i = 0; i < (*p)->strct.index; i++) {
            field_offset += type_size(glsl_get_struct_field(parent->type, i), bts);
         }
         offset = nir_iadd_imm(b, offset, field_offset);
      } else {
         unreachable("Unsupported deref type");
      }
   }

   nir_deref_path_finish(&path);

   return offset;
}

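/* Builds (but does not insert) the load intrinsic that replaces a load_deref
 * of an input, output, uniform, or shared variable, including any barycentric
 * coordinate setup needed for interpolated fragment-shader inputs.
 */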
static nir_intrinsic_instr *
lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
           nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
           unsigned component)
{
   const nir_shader *nir = state->builder.shader;
   nir_variable_mode mode = var->data.mode;
   nir_ssa_def *barycentric = NULL;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_shader_in:
      if (nir->info.stage == MESA_SHADER_FRAGMENT &&
          nir->options->use_interpolated_input_intrinsics &&
          var->data.interpolation != INTERP_MODE_FLAT) {
         assert(vertex_index == NULL);

         nir_intrinsic_op bary_op;
         if (var->data.sample ||
             (state->options & nir_lower_io_force_sample_interpolation))
            bary_op = nir_intrinsic_load_barycentric_sample;
         else if (var->data.centroid)
            bary_op = nir_intrinsic_load_barycentric_centroid;
         else
            bary_op = nir_intrinsic_load_barycentric_pixel;

         barycentric = nir_load_barycentric(&state->builder, bary_op,
                                            var->data.interpolation);
         op = nir_intrinsic_load_interpolated_input;
      } else {
         op = vertex_index ? nir_intrinsic_load_per_vertex_input :
                             nir_intrinsic_load_input;
      }
      break;
   case nir_var_shader_out:
      op = vertex_index ? nir_intrinsic_load_per_vertex_output :
                          nir_intrinsic_load_output;
      break;
   case nir_var_uniform:
      op = nir_intrinsic_load_uniform;
      break;
   case nir_var_mem_shared:
      op = nir_intrinsic_load_shared;
      break;
   default:
      unreachable("Unknown variable mode");
   }

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(state->builder.shader, op);
   load->num_components = intrin->num_components;

   nir_intrinsic_set_base(load, var->data.driver_location);
   if (mode == nir_var_shader_in || mode == nir_var_shader_out)
      nir_intrinsic_set_component(load, component);

   if (load->intrinsic == nir_intrinsic_load_uniform)
      nir_intrinsic_set_range(load,
                              state->type_size(var->type, var->data.bindless));

   if (vertex_index) {
      load->src[0] = nir_src_for_ssa(vertex_index);
      load->src[1] = nir_src_for_ssa(offset);
   } else if (barycentric) {
      load->src[0] = nir_src_for_ssa(barycentric);
      load->src[1] = nir_src_for_ssa(offset);
   } else {
      load->src[0] = nir_src_for_ssa(offset);
   }

   return load;
}

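/* Builds the store intrinsic that replaces a store_deref of an output or
 * shared-memory variable, preserving the original write mask and value.
 */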
static nir_intrinsic_instr *
lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
            nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
            unsigned component)
{
   nir_variable_mode mode = var->data.mode;

   nir_intrinsic_op op;
   if (mode == nir_var_mem_shared) {
      op = nir_intrinsic_store_shared;
   } else {
      assert(mode == nir_var_shader_out);
      op = vertex_index ? nir_intrinsic_store_per_vertex_output :
                          nir_intrinsic_store_output;
   }

   nir_intrinsic_instr *store =
      nir_intrinsic_instr_create(state->builder.shader, op);
   store->num_components = intrin->num_components;

   nir_src_copy(&store->src[0], &intrin->src[1], store);

   nir_intrinsic_set_base(store, var->data.driver_location);

   if (mode == nir_var_shader_out)
      nir_intrinsic_set_component(store, component);

   nir_intrinsic_set_write_mask(store, nir_intrinsic_write_mask(intrin));

   if (vertex_index)
      store->src[1] = nir_src_for_ssa(vertex_index);

   store->src[vertex_index ? 2 : 1] = nir_src_for_ssa(offset);

   return store;
}

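/* Maps a deref atomic on a shared-memory variable onto the corresponding
 * shared_atomic_* intrinsic, reusing the original data sources.
 */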
static nir_intrinsic_instr *
lower_atomic(nir_intrinsic_instr *intrin, struct lower_io_state *state,
             nir_variable *var, nir_ssa_def *offset)
{
   assert(var->data.mode == nir_var_mem_shared);

   nir_intrinsic_op op;
   switch (intrin->intrinsic) {
#define OP(O) case nir_intrinsic_deref_##O: op = nir_intrinsic_shared_##O; break;
   OP(atomic_exchange)
   OP(atomic_comp_swap)
   OP(atomic_add)
   OP(atomic_imin)
   OP(atomic_umin)
   OP(atomic_imax)
   OP(atomic_umax)
   OP(atomic_and)
   OP(atomic_or)
   OP(atomic_xor)
   OP(atomic_fadd)
   OP(atomic_fmin)
   OP(atomic_fmax)
   OP(atomic_fcomp_swap)
#undef OP
   default:
      unreachable("Invalid atomic");
   }

   nir_intrinsic_instr *atomic =
      nir_intrinsic_instr_create(state->builder.shader, op);

   nir_intrinsic_set_base(atomic, var->data.driver_location);

   atomic->src[0] = nir_src_for_ssa(offset);
   assert(nir_intrinsic_infos[intrin->intrinsic].num_srcs ==
          nir_intrinsic_infos[op].num_srcs);
   for (unsigned i = 1; i < nir_intrinsic_infos[op].num_srcs; i++) {
      nir_src_copy(&atomic->src[i], &intrin->src[i], atomic);
   }

   return atomic;
}

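/* Lowers interpolateAt*() on a fragment-shader input to a barycentric setup
 * intrinsic followed by a load_interpolated_input.
 */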
static nir_intrinsic_instr *
lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
                     nir_variable *var, nir_ssa_def *offset, unsigned component)
{
   assert(var->data.mode == nir_var_shader_in);

   /* Ignore interpolateAt() for flat variables - flat is flat. */
   if (var->data.interpolation == INTERP_MODE_FLAT)
      return lower_load(intrin, state, NULL, var, offset, component);

   nir_intrinsic_op bary_op;
   switch (intrin->intrinsic) {
   case nir_intrinsic_interp_deref_at_centroid:
      bary_op = (state->options & nir_lower_io_force_sample_interpolation) ?
                nir_intrinsic_load_barycentric_sample :
                nir_intrinsic_load_barycentric_centroid;
      break;
   case nir_intrinsic_interp_deref_at_sample:
      bary_op = nir_intrinsic_load_barycentric_at_sample;
      break;
   case nir_intrinsic_interp_deref_at_offset:
      bary_op = nir_intrinsic_load_barycentric_at_offset;
      break;
   default:
      unreachable("Bogus interpolateAt() intrinsic.");
   }

   nir_intrinsic_instr *bary_setup =
      nir_intrinsic_instr_create(state->builder.shader, bary_op);

   nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32, NULL);
   nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);

   if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
       intrin->intrinsic == nir_intrinsic_interp_deref_at_offset)
      nir_src_copy(&bary_setup->src[0], &intrin->src[1], bary_setup);

   nir_builder_instr_insert(&state->builder, &bary_setup->instr);

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(state->builder.shader,
                                 nir_intrinsic_load_interpolated_input);
   load->num_components = intrin->num_components;

   nir_intrinsic_set_base(load, var->data.driver_location);
   nir_intrinsic_set_component(load, component);

   load->src[0] = nir_src_for_ssa(&bary_setup->dest.ssa);
   load->src[1] = nir_src_for_ssa(offset);

   return load;
}

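/* Walks a block and replaces every lowerable deref-based access with the
 * equivalent offset-based intrinsic built by the helpers above.
 */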
static bool
nir_lower_io_block(nir_block *block,
                   struct lower_io_state *state)
{
   nir_builder *b = &state->builder;
   const nir_shader_compiler_options *options = b->shader->options;
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_deref:
      case nir_intrinsic_store_deref:
      case nir_intrinsic_deref_atomic_add:
      case nir_intrinsic_deref_atomic_imin:
      case nir_intrinsic_deref_atomic_umin:
      case nir_intrinsic_deref_atomic_imax:
      case nir_intrinsic_deref_atomic_umax:
      case nir_intrinsic_deref_atomic_and:
      case nir_intrinsic_deref_atomic_or:
      case nir_intrinsic_deref_atomic_xor:
      case nir_intrinsic_deref_atomic_exchange:
      case nir_intrinsic_deref_atomic_comp_swap:
      case nir_intrinsic_deref_atomic_fadd:
      case nir_intrinsic_deref_atomic_fmin:
      case nir_intrinsic_deref_atomic_fmax:
      case nir_intrinsic_deref_atomic_fcomp_swap:
         /* We can lower the IO for this NIR intrinsic */
         break;
      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
         /* We can optionally lower these to load_interpolated_input */
         if (options->use_interpolated_input_intrinsics)
            break;
      default:
         /* We can't lower the IO for this NIR intrinsic, so skip it */
         continue;
      }

      nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);

      nir_variable *var = nir_deref_instr_get_variable(deref);
      nir_variable_mode mode = var->data.mode;

      if ((state->modes & mode) == 0)
         continue;

      if (mode != nir_var_shader_in &&
          mode != nir_var_shader_out &&
          mode != nir_var_mem_shared &&
          mode != nir_var_uniform)
         continue;

      b->cursor = nir_before_instr(instr);

      const bool per_vertex = nir_is_per_vertex_io(var, b->shader->info.stage);

      nir_ssa_def *offset;
      nir_ssa_def *vertex_index = NULL;
      unsigned component_offset = var->data.location_frac;
      bool bindless_type_size = mode == nir_var_shader_in ||
                                mode == nir_var_shader_out ||
                                var->data.bindless;

      offset = get_io_offset(b, deref, per_vertex ? &vertex_index : NULL,
                             state->type_size, &component_offset,
                             bindless_type_size);

      nir_intrinsic_instr *replacement;

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_deref:
         replacement = lower_load(intrin, state, vertex_index, var, offset,
                                  component_offset);
         break;

      case nir_intrinsic_store_deref:
         replacement = lower_store(intrin, state, vertex_index, var, offset,
                                   component_offset);
         break;

      case nir_intrinsic_deref_atomic_add:
      case nir_intrinsic_deref_atomic_imin:
      case nir_intrinsic_deref_atomic_umin:
      case nir_intrinsic_deref_atomic_imax:
      case nir_intrinsic_deref_atomic_umax:
      case nir_intrinsic_deref_atomic_and:
      case nir_intrinsic_deref_atomic_or:
      case nir_intrinsic_deref_atomic_xor:
      case nir_intrinsic_deref_atomic_exchange:
      case nir_intrinsic_deref_atomic_comp_swap:
      case nir_intrinsic_deref_atomic_fadd:
      case nir_intrinsic_deref_atomic_fmin:
      case nir_intrinsic_deref_atomic_fmax:
      case nir_intrinsic_deref_atomic_fcomp_swap:
         assert(vertex_index == NULL);
         replacement = lower_atomic(intrin, state, var, offset);
         break;

      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
         assert(vertex_index == NULL);
         replacement = lower_interpolate_at(intrin, state, var, offset,
                                            component_offset);
         break;

      default:
         unreachable("Invalid intrinsic");
      }

      if (nir_intrinsic_infos[intrin->intrinsic].has_dest) {
         if (intrin->dest.is_ssa) {
            nir_ssa_dest_init(&replacement->instr, &replacement->dest,
                              intrin->dest.ssa.num_components,
                              intrin->dest.ssa.bit_size, NULL);
            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                     nir_src_for_ssa(&replacement->dest.ssa));
         } else {
            nir_dest_copy(&replacement->dest, &intrin->dest, &intrin->instr);
         }
      }

      nir_instr_insert_before(&intrin->instr, &replacement->instr);
      nir_instr_remove(&intrin->instr);
      progress = true;
   }

   return progress;
}

static bool
nir_lower_io_impl(nir_function_impl *impl,
                  nir_variable_mode modes,
                  int (*type_size)(const struct glsl_type *, bool),
                  nir_lower_io_options options)
{
   struct lower_io_state state;
   bool progress = false;

   nir_builder_init(&state.builder, impl);
   state.dead_ctx = ralloc_context(NULL);
   state.modes = modes;
   state.type_size = type_size;
   state.options = options;

   nir_foreach_block(block, impl) {
      progress |= nir_lower_io_block(block, &state);
   }

   ralloc_free(state.dead_ctx);

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   return progress;
}

bool
nir_lower_io(nir_shader *shader, nir_variable_mode modes,
             int (*type_size)(const struct glsl_type *, bool),
             nir_lower_io_options options)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl) {
         progress |= nir_lower_io_impl(function->impl, modes,
                                       type_size, options);
      }
   }

   return progress;
}

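/* The second half of this file lowers UBO, SSBO, global, and OpenCL
 * kernel-input access to explicit address arithmetic: derefs become address
 * calculations in one of the nir_address_format layouts, and the deref-based
 * loads/stores/atomics become load_ubo/load_ssbo/load_global and friends.
 */
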
static unsigned
type_scalar_size_bytes(const struct glsl_type *type)
{
   assert(glsl_type_is_vector_or_scalar(type) ||
          glsl_type_is_matrix(type));
   return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
}

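/* Address layouts assumed by the helpers below (as implied by how the
 * components are used in this file):
 *
 *    32bit_global / 64bit_global:  a single scalar address
 *    64bit_bounded_global:         vec4 of (addr_lo, addr_hi, size, offset)
 *    32bit_index_offset:           vec2 of (buffer index, byte offset)
 */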
static nir_ssa_def *
build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
                nir_address_format addr_format, nir_ssa_def *offset)
{
   assert(offset->num_components == 1);
   assert(addr->bit_size == offset->bit_size);

   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
      assert(addr->num_components == 1);
      return nir_iadd(b, addr, offset);

   case nir_address_format_64bit_bounded_global:
      assert(addr->num_components == 4);
      return nir_vec4(b, nir_channel(b, addr, 0),
                         nir_channel(b, addr, 1),
                         nir_channel(b, addr, 2),
                         nir_iadd(b, nir_channel(b, addr, 3), offset));

   case nir_address_format_32bit_index_offset:
      assert(addr->num_components == 2);
      return nir_vec2(b, nir_channel(b, addr, 0),
                         nir_iadd(b, nir_channel(b, addr, 1), offset));
   }
   unreachable("Invalid address format");
}

static nir_ssa_def *
build_addr_iadd_imm(nir_builder *b, nir_ssa_def *addr,
                    nir_address_format addr_format, int64_t offset)
{
   return build_addr_iadd(b, addr, addr_format,
                          nir_imm_intN_t(b, offset, addr->bit_size));
}

static nir_ssa_def *
addr_to_index(nir_builder *b, nir_ssa_def *addr,
              nir_address_format addr_format)
{
   assert(addr_format == nir_address_format_32bit_index_offset);
   assert(addr->num_components == 2);
   return nir_channel(b, addr, 0);
}

static nir_ssa_def *
addr_to_offset(nir_builder *b, nir_ssa_def *addr,
               nir_address_format addr_format)
{
   assert(addr_format == nir_address_format_32bit_index_offset);
   assert(addr->num_components == 2);
   return nir_channel(b, addr, 1);
}

/** Returns true if the given address format resolves to a global address */
static bool
addr_format_is_global(nir_address_format addr_format)
{
   return addr_format == nir_address_format_32bit_global ||
          addr_format == nir_address_format_64bit_global ||
          addr_format == nir_address_format_64bit_bounded_global;
}

static nir_ssa_def *
addr_to_global(nir_builder *b, nir_ssa_def *addr,
               nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
      assert(addr->num_components == 1);
      return addr;

   case nir_address_format_64bit_bounded_global:
      assert(addr->num_components == 4);
      return nir_iadd(b, nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)),
                         nir_u2u64(b, nir_channel(b, addr, 3)));

   case nir_address_format_32bit_index_offset:
      unreachable("Cannot get a 64-bit address with this address format");
   }

   unreachable("Invalid address format");
}

static bool
addr_format_needs_bounds_check(nir_address_format addr_format)
{
   return addr_format == nir_address_format_64bit_bounded_global;
}

static nir_ssa_def *
addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
                  nir_address_format addr_format, unsigned size)
{
   assert(addr_format == nir_address_format_64bit_bounded_global);
   assert(addr->num_components == 4);
   return nir_ige(b, nir_channel(b, addr, 2),
                     nir_iadd_imm(b, nir_channel(b, addr, 3), size));
}

static nir_ssa_def *
build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
                       nir_ssa_def *addr, nir_address_format addr_format,
                       unsigned num_components)
{
   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_mem_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case nir_var_mem_ssbo:
      if (addr_format_is_global(addr_format))
         op = nir_intrinsic_load_global;
      else
         op = nir_intrinsic_load_ssbo;
      break;
   case nir_var_mem_global:
      assert(addr_format_is_global(addr_format));
      op = nir_intrinsic_load_global;
      break;
   case nir_var_shader_in:
      assert(addr_format_is_global(addr_format));
      op = nir_intrinsic_load_kernel_input;
      break;
   default:
      unreachable("Unsupported explicit IO variable mode");
   }

   nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);

   if (addr_format_is_global(addr_format)) {
      load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
   } else {
      load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
      load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   }

   if (mode != nir_var_mem_ubo && mode != nir_var_shader_in)
      nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));

   /* TODO: We should try and provide a better alignment.  For OpenCL, we need
    * to plumb the alignment through from SPIR-V when we have one.
    */
   nir_intrinsic_set_align(load, intrin->dest.ssa.bit_size / 8, 0);

   assert(intrin->dest.is_ssa);
   load->num_components = num_components;
   nir_ssa_dest_init(&load->instr, &load->dest, num_components,
                     intrin->dest.ssa.bit_size, intrin->dest.ssa.name);

   assert(load->dest.ssa.bit_size % 8 == 0);

   if (addr_format_needs_bounds_check(addr_format)) {
      /* The Vulkan spec for robustBufferAccess gives us quite a few options
       * as to what we can do with an OOB read.  Unfortunately, returning
       * undefined values isn't one of them so we return an actual zero.
       */
      nir_const_value zero_val;
      memset(&zero_val, 0, sizeof(zero_val));
      nir_ssa_def *zero = nir_build_imm(b, load->num_components,
                                        load->dest.ssa.bit_size, zero_val);

      const unsigned load_size =
         (load->dest.ssa.bit_size / 8) * load->num_components;
      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));

      nir_builder_instr_insert(b, &load->instr);

      nir_pop_if(b, NULL);

      return nir_if_phi(b, &load->dest.ssa, zero);
   } else {
      nir_builder_instr_insert(b, &load->instr);
      return &load->dest.ssa;
   }
}

static void
build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
                        nir_ssa_def *addr, nir_address_format addr_format,
                        nir_ssa_def *value, nir_component_mask_t write_mask)
{
   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_mem_ssbo:
      if (addr_format_is_global(addr_format))
         op = nir_intrinsic_store_global;
      else
         op = nir_intrinsic_store_ssbo;
      break;
   case nir_var_mem_global:
      assert(addr_format_is_global(addr_format));
      op = nir_intrinsic_store_global;
      break;
   default:
      unreachable("Unsupported explicit IO variable mode");
   }

   nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);

   store->src[0] = nir_src_for_ssa(value);
   if (addr_format_is_global(addr_format)) {
      store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
   } else {
      store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
      store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   }

   nir_intrinsic_set_write_mask(store, write_mask);

   nir_intrinsic_set_access(store, nir_intrinsic_access(intrin));

   /* TODO: We should try and provide a better alignment.  For OpenCL, we need
    * to plumb the alignment through from SPIR-V when we have one.
    */
   nir_intrinsic_set_align(store, value->bit_size / 8, 0);

   assert(value->num_components == 1 ||
          value->num_components == intrin->num_components);
   store->num_components = value->num_components;

   assert(value->bit_size % 8 == 0);

   if (addr_format_needs_bounds_check(addr_format)) {
      const unsigned store_size = (value->bit_size / 8) * store->num_components;
      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));

      nir_builder_instr_insert(b, &store->instr);

      nir_pop_if(b, NULL);
   } else {
      nir_builder_instr_insert(b, &store->instr);
   }
}

static nir_ssa_def *
build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
                         nir_ssa_def *addr, nir_address_format addr_format)
{
   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
   const unsigned num_data_srcs =
      nir_intrinsic_infos[intrin->intrinsic].num_srcs - 1;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_mem_ssbo:
      if (addr_format_is_global(addr_format))
         op = global_atomic_for_deref(intrin->intrinsic);
      else
         op = ssbo_atomic_for_deref(intrin->intrinsic);
      break;
   case nir_var_mem_global:
      assert(addr_format_is_global(addr_format));
      op = global_atomic_for_deref(intrin->intrinsic);
      break;
   default:
      unreachable("Unsupported explicit IO variable mode");
   }

   nir_intrinsic_instr *atomic = nir_intrinsic_instr_create(b->shader, op);

   unsigned src = 0;
   if (addr_format_is_global(addr_format)) {
      atomic->src[src++] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
   } else {
      atomic->src[src++] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
      atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   }
   for (unsigned i = 0; i < num_data_srcs; i++) {
      atomic->src[src++] = nir_src_for_ssa(intrin->src[1 + i].ssa);
   }

   /* Global atomics don't have access flags because they assume that the
    * address may be non-uniform.
    */
   if (!addr_format_is_global(addr_format))
      nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));

   assert(intrin->dest.ssa.num_components == 1);
   nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                     1, intrin->dest.ssa.bit_size, intrin->dest.ssa.name);

   assert(atomic->dest.ssa.bit_size % 8 == 0);

   if (addr_format_needs_bounds_check(addr_format)) {
      const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));

      nir_builder_instr_insert(b, &atomic->instr);

      nir_pop_if(b, NULL);
      return nir_if_phi(b, &atomic->dest.ssa,
                        nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
   } else {
      nir_builder_instr_insert(b, &atomic->instr);
      return &atomic->dest.ssa;
   }
}

static void
lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
                        nir_address_format addr_format)
{
   /* Just delete the deref if it's not used.  We can't use
    * nir_deref_instr_remove_if_unused here because it may remove more than
    * one deref which could break our list walking since we walk the list
    * backwards.
    */
   assert(list_empty(&deref->dest.ssa.if_uses));
   if (list_empty(&deref->dest.ssa.uses)) {
      nir_instr_remove(&deref->instr);
      return;
   }

   b->cursor = nir_after_instr(&deref->instr);

   nir_ssa_def *parent_addr = NULL;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->parent.is_ssa);
      parent_addr = deref->parent.ssa;
   }

   nir_ssa_def *addr = NULL;
   assert(deref->dest.is_ssa);
   switch (deref->deref_type) {
   case nir_deref_type_var:
      assert(deref->mode == nir_var_shader_in);
      addr = nir_imm_intN_t(b, deref->var->data.driver_location,
                            deref->dest.ssa.bit_size);
      break;

   case nir_deref_type_array: {
      nir_deref_instr *parent = nir_deref_instr_parent(deref);

      unsigned stride = glsl_get_explicit_stride(parent->type);
      if ((glsl_type_is_matrix(parent->type) &&
           glsl_matrix_type_is_row_major(parent->type)) ||
          (glsl_type_is_vector(parent->type) && stride == 0))
         stride = type_scalar_size_bytes(parent->type);

      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
      index = nir_i2i(b, index, parent_addr->bit_size);
      addr = build_addr_iadd(b, parent_addr, addr_format,
                             nir_imul_imm(b, index, stride));
      break;
   }

   case nir_deref_type_ptr_as_array: {
      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
      index = nir_i2i(b, index, parent_addr->bit_size);
      unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
      addr = build_addr_iadd(b, parent_addr, addr_format,
                             nir_imul_imm(b, index, stride));
      break;
   }

   case nir_deref_type_array_wildcard:
      unreachable("Wildcards should be lowered by now");
      break;

   case nir_deref_type_struct: {
      nir_deref_instr *parent = nir_deref_instr_parent(deref);
      int offset = glsl_get_struct_field_offset(parent->type,
                                                deref->strct.index);
      addr = build_addr_iadd_imm(b, parent_addr, addr_format, offset);
      break;
   }

   case nir_deref_type_cast:
      /* Nothing to do here */
      addr = parent_addr;
      break;
   }

   nir_instr_remove(&deref->instr);
   nir_ssa_def_rewrite_uses(&deref->dest.ssa, nir_src_for_ssa(addr));
}

static void
lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
                         nir_address_format addr_format)
{
   b->cursor = nir_after_instr(&intrin->instr);

   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   unsigned vec_stride = glsl_get_explicit_stride(deref->type);
   unsigned scalar_size = type_scalar_size_bytes(deref->type);
   assert(vec_stride == 0 || glsl_type_is_vector(deref->type));
   assert(vec_stride == 0 || vec_stride >= scalar_size);

   nir_ssa_def *addr = &deref->dest.ssa;
   if (intrin->intrinsic == nir_intrinsic_load_deref) {
      nir_ssa_def *value;
      if (vec_stride > scalar_size) {
         nir_ssa_def *comps[4] = { NULL, };
         for (unsigned i = 0; i < intrin->num_components; i++) {
            nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
                                                         vec_stride * i);
            comps[i] = build_explicit_io_load(b, intrin, comp_addr,
                                              addr_format, 1);
         }
         value = nir_vec(b, comps, intrin->num_components);
      } else {
         value = build_explicit_io_load(b, intrin, addr, addr_format,
                                        intrin->num_components);
      }
      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
   } else if (intrin->intrinsic == nir_intrinsic_store_deref) {
      assert(intrin->src[1].is_ssa);
      nir_ssa_def *value = intrin->src[1].ssa;
      nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
      if (vec_stride > scalar_size) {
         for (unsigned i = 0; i < intrin->num_components; i++) {
            if (!(write_mask & (1 << i)))
               continue;

            nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
                                                         vec_stride * i);
            build_explicit_io_store(b, intrin, comp_addr, addr_format,
                                    nir_channel(b, value, i), 1);
         }
      } else {
         build_explicit_io_store(b, intrin, addr, addr_format,
                                 value, write_mask);
      }
   } else {
      nir_ssa_def *value =
         build_explicit_io_atomic(b, intrin, addr, addr_format);
      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
   }

   nir_instr_remove(&intrin->instr);
}

static void
lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
                               nir_address_format addr_format)
{
   b->cursor = nir_after_instr(&intrin->instr);

   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);

   assert(glsl_type_is_array(deref->type));
   assert(glsl_get_length(deref->type) == 0);
   unsigned stride = glsl_get_explicit_stride(deref->type);

   assert(addr_format == nir_address_format_32bit_index_offset);
   nir_ssa_def *addr = &deref->dest.ssa;
   nir_ssa_def *index = addr_to_index(b, addr, addr_format);
   nir_ssa_def *offset = addr_to_offset(b, addr, addr_format);

   nir_intrinsic_instr *bsize =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_get_buffer_size);
   bsize->src[0] = nir_src_for_ssa(index);
   nir_ssa_dest_init(&bsize->instr, &bsize->dest, 1, 32, NULL);
   nir_builder_instr_insert(b, &bsize->instr);

   nir_ssa_def *arr_size =
      nir_idiv(b, nir_isub(b, &bsize->dest.ssa, offset),
                  nir_imm_int(b, stride));

   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(arr_size));
   nir_instr_remove(&intrin->instr);
}

static bool
nir_lower_explicit_io_impl(nir_function_impl *impl, nir_variable_mode modes,
                           nir_address_format addr_format)
{
   bool progress = false;

   nir_builder b;
   nir_builder_init(&b, impl);

   /* Walk in reverse order so that we can see the full deref chain when we
    * lower the access operations.  We lower them assuming that the derefs
    * will be turned into address calculations later.
    */
   nir_foreach_block_reverse(block, impl) {
      nir_foreach_instr_reverse_safe(instr, block) {
         switch (instr->type) {
         case nir_instr_type_deref: {
            nir_deref_instr *deref = nir_instr_as_deref(instr);
            if (deref->mode & modes) {
               lower_explicit_io_deref(&b, deref, addr_format);
               progress = true;
            }
            break;
         }

         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            switch (intrin->intrinsic) {
            case nir_intrinsic_load_deref:
            case nir_intrinsic_store_deref:
            case nir_intrinsic_deref_atomic_add:
            case nir_intrinsic_deref_atomic_imin:
            case nir_intrinsic_deref_atomic_umin:
            case nir_intrinsic_deref_atomic_imax:
            case nir_intrinsic_deref_atomic_umax:
            case nir_intrinsic_deref_atomic_and:
            case nir_intrinsic_deref_atomic_or:
            case nir_intrinsic_deref_atomic_xor:
            case nir_intrinsic_deref_atomic_exchange:
            case nir_intrinsic_deref_atomic_comp_swap:
            case nir_intrinsic_deref_atomic_fadd:
            case nir_intrinsic_deref_atomic_fmin:
            case nir_intrinsic_deref_atomic_fmax:
            case nir_intrinsic_deref_atomic_fcomp_swap: {
               nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
               if (deref->mode & modes) {
                  lower_explicit_io_access(&b, intrin, addr_format);
                  progress = true;
               }
               break;
            }

            case nir_intrinsic_deref_buffer_array_length: {
               nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
               if (deref->mode & modes) {
                  lower_explicit_io_array_length(&b, intrin, addr_format);
                  progress = true;
               }
               break;
            }

            default:
               break;
            }
            break;
         }

         default:
            /* Nothing to do */
            break;
         }
      }
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   return progress;
}

bool
nir_lower_explicit_io(nir_shader *shader, nir_variable_mode modes,
                      nir_address_format addr_format)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl &&
          nir_lower_explicit_io_impl(function->impl, modes, addr_format))
         progress = true;
   }

   return progress;
}

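/*
 * Rough usage sketch (illustrative, not prescriptive): a Vulkan-style driver
 * might lower SSBO access with a bounds-checked format and global memory with
 * plain 64-bit addresses, e.g.
 *
 *    NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_ssbo,
 *               nir_address_format_64bit_bounded_global);
 *    NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
 *               nir_address_format_64bit_global);
 *
 * The exact address formats are a driver choice; these are only examples.
 */
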
/**
 * Return the offset source for a load/store intrinsic.
 */
nir_src *
nir_get_io_offset_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_shared:
   case nir_intrinsic_load_uniform:
   case nir_intrinsic_load_global:
      return &instr->src[0];
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_shared:
   case nir_intrinsic_store_global:
      return &instr->src[1];
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[2];
   default:
      return NULL;
   }
}

/**
 * Return the vertex index source for a load/store per_vertex intrinsic.
 */
nir_src *
nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
      return &instr->src[0];
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[1];