/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
/*
 * This lowering pass converts references to input/output variables with
 * loads/stores to actual input/output intrinsics.
 */
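/* For example (illustrative): a load_deref of a fragment-shader input whose
 * driver_location is 2 becomes a load_input (or load_interpolated_input)
 * intrinsic with base=2, an SSA offset source computed from the deref chain,
 * and the component index taken from the variable's location_frac.
 */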
#include "nir_builder.h"
#include "nir_deref.h"

#include "util/u_math.h"
struct lower_io_state {
   void *dead_ctx;
   nir_builder builder;
   int (*type_size)(const struct glsl_type *type, bool);
   nir_variable_mode modes;
   nir_lower_io_options options;
};
static nir_intrinsic_op
ssbo_atomic_for_deref(nir_intrinsic_op deref_op)
{
   switch (deref_op) {
#define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_ssbo_##O;
   /* ... one OP(atomic_*) entry per supported atomic op ... */
#undef OP

   default:
      unreachable("Invalid SSBO atomic");
   }
}
static nir_intrinsic_op
global_atomic_for_deref(nir_intrinsic_op deref_op)
{
   switch (deref_op) {
#define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_global_##O;
   /* ... one OP(atomic_*) entry per supported atomic op ... */
#undef OP

   default:
      unreachable("Invalid global atomic");
   }
}
static nir_intrinsic_op
shared_atomic_for_deref(nir_intrinsic_op deref_op)
{
   switch (deref_op) {
#define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_shared_##O;
   /* ... */
   OP(atomic_fcomp_swap)
#undef OP

   default:
      unreachable("Invalid shared atomic");
   }
}
void
nir_assign_var_locations(nir_shader *shader, nir_variable_mode mode,
                         unsigned *size,
                         int (*type_size)(const struct glsl_type *, bool))
{
   unsigned location = 0;

   nir_foreach_variable_with_modes(var, shader, mode) {
      var->data.driver_location = location;

      bool bindless_type_size = var->data.mode == nir_var_shader_in ||
                                var->data.mode == nir_var_shader_out ||
                                var->data.bindless;

      location += type_size(var->type, bindless_type_size);
   }

   *size = location;
}
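/* Usage sketch (illustrative, not part of this pass): a driver that packs
 * varyings into vec4 slots might supply a callback built on
 * glsl_count_attribute_slots() and let this helper assign driver_location
 * for every input:
 *
 *    static int
 *    vec4_slot_size(const struct glsl_type *type, bool bindless)
 *    {
 *       return glsl_count_attribute_slots(type, false);
 *    }
 *
 *    nir_assign_var_locations(nir, nir_var_shader_in,
 *                             &nir->num_inputs, vec4_slot_size);
 */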
/**
 * Return true if the given variable is a per-vertex input/output array
 * (such as geometry shader inputs).
 */
static bool
nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage)
{
   if (var->data.patch || !glsl_type_is_array(var->type))
      return false;

   if (var->data.mode == nir_var_shader_in)
      return stage == MESA_SHADER_GEOMETRY ||
             stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL;

   if (var->data.mode == nir_var_shader_out)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}
static nir_ssa_def *
get_io_offset(nir_builder *b, nir_deref_instr *deref,
              nir_ssa_def **vertex_index,
              int (*type_size)(const struct glsl_type *, bool),
              unsigned *component, bool bts)
{
   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);

   assert(path.path[0]->deref_type == nir_deref_type_var);
   nir_deref_instr **p = &path.path[1];

   /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
    * outermost array index separate.  Process the rest normally.
    */
   if (vertex_index != NULL) {
      assert((*p)->deref_type == nir_deref_type_array);
      *vertex_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
      p++;
   }
   if (path.path[0]->var->data.compact) {
      assert((*p)->deref_type == nir_deref_type_array);
      assert(glsl_type_is_scalar((*p)->type));

      /* We always lower indirect dereferences for "compact" array vars. */
      const unsigned index = nir_src_as_uint((*p)->arr.index);
      const unsigned total_offset = *component + index;
      const unsigned slot_offset = total_offset / 4;
      *component = total_offset % 4;
      return nir_imm_int(b, type_size(glsl_vec4_type(), bts) * slot_offset);
   }
   /* Just emit code and let constant-folding go to town */
   nir_ssa_def *offset = nir_imm_int(b, 0);

   for (; *p; p++) {
      if ((*p)->deref_type == nir_deref_type_array) {
         unsigned size = type_size((*p)->type, bts);

         nir_ssa_def *mul =
            nir_amul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);

         offset = nir_iadd(b, offset, mul);
      } else if ((*p)->deref_type == nir_deref_type_struct) {
         /* p starts at path[1], so this is safe */
         nir_deref_instr *parent = *(p - 1);

         unsigned field_offset = 0;
         for (unsigned i = 0; i < (*p)->strct.index; i++) {
            field_offset += type_size(glsl_get_struct_field(parent->type, i), bts);
         }
         offset = nir_iadd_imm(b, offset, field_offset);
      } else {
         unreachable("Unsupported deref type");
      }
   }

   nir_deref_path_finish(&path);

   return offset;
}
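/* Worked example (illustrative): with a vec4-slot type_size callback that
 * returns 1 for a vec4 and 2 for the struct below, a deref chain for s[i].b
 * into
 *
 *    struct { vec4 a; vec4 b; } s[8];
 *
 * yields offset = amul(i, 2) + 1 slots: the array step multiplies the index
 * by the struct's size and the struct member adds the sizes of the fields
 * that precede `b`.
 */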
static nir_ssa_def *
emit_load(struct lower_io_state *state,
          nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
          unsigned component, unsigned num_components, unsigned bit_size,
          nir_alu_type type)
{
   nir_builder *b = &state->builder;
   const nir_shader *nir = b->shader;
   nir_variable_mode mode = var->data.mode;
   nir_ssa_def *barycentric = NULL;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_shader_in:
      if (nir->info.stage == MESA_SHADER_FRAGMENT &&
          nir->options->use_interpolated_input_intrinsics &&
          var->data.interpolation != INTERP_MODE_FLAT) {
         if (var->data.interpolation == INTERP_MODE_EXPLICIT) {
            assert(vertex_index != NULL);
            op = nir_intrinsic_load_input_vertex;
         } else {
            assert(vertex_index == NULL);

            nir_intrinsic_op bary_op;
            if (var->data.sample ||
                (state->options & nir_lower_io_force_sample_interpolation))
               bary_op = nir_intrinsic_load_barycentric_sample;
            else if (var->data.centroid)
               bary_op = nir_intrinsic_load_barycentric_centroid;
            else
               bary_op = nir_intrinsic_load_barycentric_pixel;

            barycentric = nir_load_barycentric(&state->builder, bary_op,
                                               var->data.interpolation);
            op = nir_intrinsic_load_interpolated_input;
         }
      } else {
         op = vertex_index ? nir_intrinsic_load_per_vertex_input :
                             nir_intrinsic_load_input;
      }
      break;
   case nir_var_shader_out:
      op = vertex_index ? nir_intrinsic_load_per_vertex_output :
                          nir_intrinsic_load_output;
      break;
   case nir_var_uniform:
      op = nir_intrinsic_load_uniform;
      break;
   default:
      unreachable("Unknown variable mode");
   }
   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(state->builder.shader, op);
   load->num_components = num_components;

   nir_intrinsic_set_base(load, var->data.driver_location);
   if (mode == nir_var_shader_in || mode == nir_var_shader_out)
      nir_intrinsic_set_component(load, component);

   if (load->intrinsic == nir_intrinsic_load_uniform)
      nir_intrinsic_set_range(load,
                              state->type_size(var->type, var->data.bindless));

   if (load->intrinsic == nir_intrinsic_load_input ||
       load->intrinsic == nir_intrinsic_load_input_vertex ||
       load->intrinsic == nir_intrinsic_load_uniform)
      nir_intrinsic_set_type(load, type);

   if (vertex_index) {
      load->src[0] = nir_src_for_ssa(vertex_index);
      load->src[1] = nir_src_for_ssa(offset);
   } else if (barycentric) {
      load->src[0] = nir_src_for_ssa(barycentric);
      load->src[1] = nir_src_for_ssa(offset);
   } else {
      load->src[0] = nir_src_for_ssa(offset);
   }

   nir_ssa_dest_init(&load->instr, &load->dest,
                     num_components, bit_size, NULL);
   nir_builder_instr_insert(b, &load->instr);

   return &load->dest.ssa;
}
static nir_ssa_def *
lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
           nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
           unsigned component, const struct glsl_type *type)
{
   assert(intrin->dest.is_ssa);
   if (intrin->dest.ssa.bit_size == 64 &&
       (state->options & nir_lower_io_lower_64bit_to_32)) {
      nir_builder *b = &state->builder;

      const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);

      nir_ssa_def *comp64[4];
      assert(component == 0 || component == 2);
      unsigned dest_comp = 0;
      while (dest_comp < intrin->dest.ssa.num_components) {
         const unsigned num_comps =
            MIN2(intrin->dest.ssa.num_components - dest_comp,
                 (4 - component) / 2);

         nir_ssa_def *data32 =
            emit_load(state, vertex_index, var, offset, component,
                      num_comps * 2, 32, nir_type_uint32);
         for (unsigned i = 0; i < num_comps; i++) {
            comp64[dest_comp + i] =
               nir_pack_64_2x32(b, nir_channels(b, data32, 3 << (i * 2)));
         }

         /* Only the first load has a component offset */
         component = 0;
         dest_comp += num_comps;
         offset = nir_iadd_imm(b, offset, slot_size);
      }

      return nir_vec(b, comp64, intrin->dest.ssa.num_components);
   } else if (intrin->dest.ssa.bit_size == 1) {
      /* Booleans are 32-bit */
      assert(glsl_type_is_boolean(type));
      return nir_b2b1(&state->builder,
                      emit_load(state, vertex_index, var, offset, component,
                                intrin->dest.ssa.num_components, 32,
                                nir_type_bool32));
   } else {
      return emit_load(state, vertex_index, var, offset, component,
                       intrin->dest.ssa.num_components,
                       intrin->dest.ssa.bit_size,
                       nir_get_nir_type_for_glsl_type(type));
   }
}
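/* Example (illustrative): with nir_lower_io_lower_64bit_to_32 set, a dvec3
 * load at component 0 is split into a 4-component 32-bit load for the first
 * two doubles plus a 2-component 32-bit load from the next slot for the
 * third; each pair of 32-bit channels is recombined with pack_64_2x32
 * before the final 64-bit vec3 is assembled.
 */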
static void
emit_store(struct lower_io_state *state, nir_ssa_def *data,
           nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
           unsigned component, unsigned num_components,
           nir_component_mask_t write_mask, nir_alu_type type)
{
   nir_builder *b = &state->builder;
   nir_variable_mode mode = var->data.mode;

   assert(mode == nir_var_shader_out);
   nir_intrinsic_op op;
   op = vertex_index ? nir_intrinsic_store_per_vertex_output :
                       nir_intrinsic_store_output;

   nir_intrinsic_instr *store =
      nir_intrinsic_instr_create(state->builder.shader, op);
   store->num_components = num_components;

   store->src[0] = nir_src_for_ssa(data);

   nir_intrinsic_set_base(store, var->data.driver_location);

   if (mode == nir_var_shader_out)
      nir_intrinsic_set_component(store, component);

   if (store->intrinsic == nir_intrinsic_store_output)
      nir_intrinsic_set_type(store, type);

   nir_intrinsic_set_write_mask(store, write_mask);

   if (vertex_index)
      store->src[1] = nir_src_for_ssa(vertex_index);

   store->src[vertex_index ? 2 : 1] = nir_src_for_ssa(offset);

   nir_builder_instr_insert(b, &store->instr);
}
static void
lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
            nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
            unsigned component, const struct glsl_type *type)
{
   assert(intrin->src[1].is_ssa);
   if (intrin->src[1].ssa->bit_size == 64 &&
       (state->options & nir_lower_io_lower_64bit_to_32)) {
      nir_builder *b = &state->builder;

      const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);

      assert(component == 0 || component == 2);
      unsigned src_comp = 0;
      nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
      while (src_comp < intrin->num_components) {
         const unsigned num_comps =
            MIN2(intrin->num_components - src_comp,
                 (4 - component) / 2);

         if (write_mask & BITFIELD_MASK(num_comps)) {
            nir_ssa_def *data =
               nir_channels(b, intrin->src[1].ssa,
                            BITFIELD_RANGE(src_comp, num_comps));
            nir_ssa_def *data32 = nir_bitcast_vector(b, data, 32);

            nir_component_mask_t write_mask32 = 0;
            for (unsigned i = 0; i < num_comps; i++) {
               if (write_mask & BITFIELD_MASK(num_comps) & (1 << i))
                  write_mask32 |= 3 << (i * 2);
            }

            emit_store(state, data32, vertex_index, var, offset,
                       component, data32->num_components, write_mask32,
                       nir_type_uint32);
         }

         /* Only the first store has a component offset */
         component = 0;
         src_comp += num_comps;
         write_mask >>= num_comps;
         offset = nir_iadd_imm(b, offset, slot_size);
      }
   } else if (intrin->src[1].ssa->bit_size == 1) {
      /* Booleans are 32-bit */
      assert(glsl_type_is_boolean(type));
      nir_ssa_def *b32_val = nir_b2b32(&state->builder, intrin->src[1].ssa);
      emit_store(state, b32_val, vertex_index, var, offset,
                 component, intrin->num_components,
                 nir_intrinsic_write_mask(intrin),
                 nir_type_bool32);
   } else {
      emit_store(state, intrin->src[1].ssa, vertex_index, var, offset,
                 component, intrin->num_components,
                 nir_intrinsic_write_mask(intrin),
                 nir_get_nir_type_for_glsl_type(type));
   }
}
static nir_ssa_def *
lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
                     nir_variable *var, nir_ssa_def *offset, unsigned component,
                     const struct glsl_type *type)
{
   nir_builder *b = &state->builder;
   assert(var->data.mode == nir_var_shader_in);

   /* Ignore interpolateAt() for flat variables - flat is flat.  Lower
    * interpolateAtVertex() for explicit variables.
    */
   if (var->data.interpolation == INTERP_MODE_FLAT ||
       var->data.interpolation == INTERP_MODE_EXPLICIT) {
      nir_ssa_def *vertex_index = NULL;

      if (var->data.interpolation == INTERP_MODE_EXPLICIT) {
         assert(intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex);
         vertex_index = intrin->src[1].ssa;
      }

      return lower_load(intrin, state, vertex_index, var, offset, component, type);
   }

   /* None of the supported APIs allow interpolation on 64-bit things */
   assert(intrin->dest.is_ssa && intrin->dest.ssa.bit_size <= 32);

   nir_intrinsic_op bary_op;
   switch (intrin->intrinsic) {
   case nir_intrinsic_interp_deref_at_centroid:
      bary_op = (state->options & nir_lower_io_force_sample_interpolation) ?
                nir_intrinsic_load_barycentric_sample :
                nir_intrinsic_load_barycentric_centroid;
      break;
   case nir_intrinsic_interp_deref_at_sample:
      bary_op = nir_intrinsic_load_barycentric_at_sample;
      break;
   case nir_intrinsic_interp_deref_at_offset:
      bary_op = nir_intrinsic_load_barycentric_at_offset;
      break;
   default:
      unreachable("Bogus interpolateAt() intrinsic.");
   }

   nir_intrinsic_instr *bary_setup =
      nir_intrinsic_instr_create(state->builder.shader, bary_op);

   nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32, NULL);
   nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);

   if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
       intrin->intrinsic == nir_intrinsic_interp_deref_at_offset ||
       intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex)
      nir_src_copy(&bary_setup->src[0], &intrin->src[1], bary_setup);

   nir_builder_instr_insert(b, &bary_setup->instr);

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(state->builder.shader,
                                 nir_intrinsic_load_interpolated_input);
   load->num_components = intrin->num_components;

   nir_intrinsic_set_base(load, var->data.driver_location);
   nir_intrinsic_set_component(load, component);

   load->src[0] = nir_src_for_ssa(&bary_setup->dest.ssa);
   load->src[1] = nir_src_for_ssa(offset);

   assert(intrin->dest.is_ssa);
   nir_ssa_dest_init(&load->instr, &load->dest,
                     intrin->dest.ssa.num_components,
                     intrin->dest.ssa.bit_size, NULL);
   nir_builder_instr_insert(b, &load->instr);

   return &load->dest.ssa;
}
static bool
nir_lower_io_block(nir_block *block,
                   struct lower_io_state *state)
{
   nir_builder *b = &state->builder;
   const nir_shader_compiler_options *options = b->shader->options;
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_deref:
      case nir_intrinsic_store_deref:
         /* We can lower the io for this nir intrinsic */
         break;
      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
      case nir_intrinsic_interp_deref_at_vertex:
         /* We can optionally lower these to load_interpolated_input */
         if (options->use_interpolated_input_intrinsics)
            break;
      default:
         /* We can't lower the io for this nir intrinsic, so skip it */
         continue;
      }

      nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);

      nir_variable_mode mode = deref->mode;
      assert(util_is_power_of_two_nonzero(mode));
      if ((state->modes & mode) == 0)
         continue;

      nir_variable *var = nir_deref_instr_get_variable(deref);

      b->cursor = nir_before_instr(instr);

      const bool per_vertex = nir_is_per_vertex_io(var, b->shader->info.stage);

      nir_ssa_def *offset;
      nir_ssa_def *vertex_index = NULL;
      unsigned component_offset = var->data.location_frac;
      bool bindless_type_size = mode == nir_var_shader_in ||
                                mode == nir_var_shader_out ||
                                var->data.bindless;

      offset = get_io_offset(b, deref, per_vertex ? &vertex_index : NULL,
                             state->type_size, &component_offset,
                             bindless_type_size);

      nir_ssa_def *replacement = NULL;

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_deref:
         replacement = lower_load(intrin, state, vertex_index, var, offset,
                                  component_offset, deref->type);
         break;

      case nir_intrinsic_store_deref:
         lower_store(intrin, state, vertex_index, var, offset,
                     component_offset, deref->type);
         break;

      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
      case nir_intrinsic_interp_deref_at_vertex:
         assert(vertex_index == NULL);
         replacement = lower_interpolate_at(intrin, state, var, offset,
                                            component_offset, deref->type);
         break;

      default:
         continue;
      }

      if (replacement) {
         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                  nir_src_for_ssa(replacement));
      }
      nir_instr_remove(&intrin->instr);
      progress = true;
   }

   return progress;
}
static bool
nir_lower_io_impl(nir_function_impl *impl,
                  nir_variable_mode modes,
                  int (*type_size)(const struct glsl_type *, bool),
                  nir_lower_io_options options)
{
   struct lower_io_state state;
   bool progress = false;

   nir_builder_init(&state.builder, impl);
   state.dead_ctx = ralloc_context(NULL);
   state.modes = modes;
   state.type_size = type_size;
   state.options = options;

   ASSERTED nir_variable_mode supported_modes =
      nir_var_shader_in | nir_var_shader_out | nir_var_uniform;
   assert(!(modes & ~supported_modes));

   nir_foreach_block(block, impl) {
      progress |= nir_lower_io_block(block, &state);
   }

   ralloc_free(state.dead_ctx);

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   return progress;
}
/** Lower load/store_deref intrinsics on I/O variables to offset-based intrinsics
 *
 * This pass is intended to be used for cross-stage shader I/O and driver-
 * managed uniforms to turn deref-based access into a simpler model using
 * locations or offsets.  For fragment shader inputs, it can optionally turn
 * load_deref into an explicit interpolation using barycentrics coming from
 * one of the load_barycentric_* intrinsics.  This pass requires that all
 * deref chains are complete and contain no casts.
 */
bool
nir_lower_io(nir_shader *shader, nir_variable_mode modes,
             int (*type_size)(const struct glsl_type *, bool),
             nir_lower_io_options options)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl) {
         progress |= nir_lower_io_impl(function->impl, modes,
                                       type_size, options);
      }
   }

   return progress;
}
static unsigned
type_scalar_size_bytes(const struct glsl_type *type)
{
   assert(glsl_type_is_vector_or_scalar(type) ||
          glsl_type_is_matrix(type));
   return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
}
static nir_ssa_def *
build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
                nir_address_format addr_format, nir_ssa_def *offset)
{
   assert(offset->num_components == 1);

   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
   case nir_address_format_32bit_offset:
      assert(addr->bit_size == offset->bit_size);
      assert(addr->num_components == 1);
      return nir_iadd(b, addr, offset);

   case nir_address_format_32bit_offset_as_64bit:
      assert(addr->num_components == 1);
      assert(offset->bit_size == 32);
      return nir_u2u64(b, nir_iadd(b, nir_u2u32(b, addr), offset));

   case nir_address_format_64bit_bounded_global:
      assert(addr->num_components == 4);
      assert(addr->bit_size == offset->bit_size);
      return nir_vec4(b, nir_channel(b, addr, 0),
                         nir_channel(b, addr, 1),
                         nir_channel(b, addr, 2),
                         nir_iadd(b, nir_channel(b, addr, 3), offset));

   case nir_address_format_32bit_index_offset:
      assert(addr->num_components == 2);
      assert(addr->bit_size == offset->bit_size);
      return nir_vec2(b, nir_channel(b, addr, 0),
                         nir_iadd(b, nir_channel(b, addr, 1), offset));
   case nir_address_format_vec2_index_32bit_offset:
      assert(addr->num_components == 3);
      assert(offset->bit_size == 32);
      return nir_vec3(b, nir_channel(b, addr, 0), nir_channel(b, addr, 1),
                         nir_iadd(b, nir_channel(b, addr, 2), offset));
   case nir_address_format_logical:
      unreachable("Unsupported address format");
   }

   unreachable("Invalid address format");
}
static unsigned
addr_get_offset_bit_size(nir_ssa_def *addr, nir_address_format addr_format)
{
   if (addr_format == nir_address_format_32bit_offset_as_64bit)
      return 32;
   return addr->bit_size;
}
static nir_ssa_def *
build_addr_iadd_imm(nir_builder *b, nir_ssa_def *addr,
                    nir_address_format addr_format, int64_t offset)
{
   return build_addr_iadd(b, addr, addr_format,
                          nir_imm_intN_t(b, offset,
                                         addr_get_offset_bit_size(addr, addr_format)));
}
static nir_ssa_def *
addr_to_index(nir_builder *b, nir_ssa_def *addr,
              nir_address_format addr_format)
{
   if (addr_format == nir_address_format_32bit_index_offset) {
      assert(addr->num_components == 2);
      return nir_channel(b, addr, 0);
   } else if (addr_format == nir_address_format_vec2_index_32bit_offset) {
      assert(addr->num_components == 3);
      return nir_channels(b, addr, 0x3);
   } else {
      unreachable("bad address format for index");
   }
}
static nir_ssa_def *
addr_to_offset(nir_builder *b, nir_ssa_def *addr,
               nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_index_offset:
      assert(addr->num_components == 2);
      return nir_channel(b, addr, 1);
   case nir_address_format_vec2_index_32bit_offset:
      assert(addr->num_components == 3);
      return nir_channel(b, addr, 2);
   case nir_address_format_32bit_offset:
      return addr;
   case nir_address_format_32bit_offset_as_64bit:
      return nir_u2u32(b, addr);
   default:
      unreachable("Invalid address format");
   }
}
/** Returns true if the given address format resolves to a global address */
static bool
addr_format_is_global(nir_address_format addr_format)
{
   return addr_format == nir_address_format_32bit_global ||
          addr_format == nir_address_format_64bit_global ||
          addr_format == nir_address_format_64bit_bounded_global;
}
static bool
addr_format_is_offset(nir_address_format addr_format)
{
   return addr_format == nir_address_format_32bit_offset ||
          addr_format == nir_address_format_32bit_offset_as_64bit;
}
static nir_ssa_def *
addr_to_global(nir_builder *b, nir_ssa_def *addr,
               nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
      assert(addr->num_components == 1);
      return addr;

   case nir_address_format_64bit_bounded_global:
      assert(addr->num_components == 4);
      return nir_iadd(b, nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)),
                         nir_u2u64(b, nir_channel(b, addr, 3)));

   case nir_address_format_32bit_index_offset:
   case nir_address_format_vec2_index_32bit_offset:
   case nir_address_format_32bit_offset:
   case nir_address_format_32bit_offset_as_64bit:
   case nir_address_format_logical:
      unreachable("Cannot get a 64-bit address with this address format");
   }

   unreachable("Invalid address format");
}
static bool
addr_format_needs_bounds_check(nir_address_format addr_format)
{
   return addr_format == nir_address_format_64bit_bounded_global;
}
static nir_ssa_def *
addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
                  nir_address_format addr_format, unsigned size)
{
   assert(addr_format == nir_address_format_64bit_bounded_global);
   assert(addr->num_components == 4);
   return nir_ige(b, nir_channel(b, addr, 2),
                     nir_iadd_imm(b, nir_channel(b, addr, 3), size));
}
static nir_ssa_def *
build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
                       nir_ssa_def *addr, nir_address_format addr_format,
                       unsigned num_components)
{
   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_mem_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case nir_var_mem_ssbo:
      if (addr_format_is_global(addr_format))
         op = nir_intrinsic_load_global;
      else
         op = nir_intrinsic_load_ssbo;
      break;
   case nir_var_mem_global:
      assert(addr_format_is_global(addr_format));
      op = nir_intrinsic_load_global;
      break;
   case nir_var_shader_in:
      assert(addr_format_is_offset(addr_format));
      op = nir_intrinsic_load_kernel_input;
      break;
   case nir_var_mem_shared:
      assert(addr_format_is_offset(addr_format));
      op = nir_intrinsic_load_shared;
      break;
   case nir_var_shader_temp:
   case nir_var_function_temp:
      if (addr_format_is_offset(addr_format)) {
         op = nir_intrinsic_load_scratch;
      } else {
         assert(addr_format_is_global(addr_format));
         op = nir_intrinsic_load_global;
      }
      break;
   default:
      unreachable("Unsupported explicit IO variable mode");
   }

   nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);

   if (addr_format_is_global(addr_format)) {
      load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
   } else if (addr_format_is_offset(addr_format)) {
      assert(addr->num_components == 1);
      load->src[0] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   } else {
      load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
      load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   }

   if (nir_intrinsic_infos[op].index_map[NIR_INTRINSIC_ACCESS] > 0)
      nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));

   unsigned bit_size = intrin->dest.ssa.bit_size;
   if (bit_size == 1) {
      /* TODO: Make the native bool bit_size an option. */
      bit_size = 32;
   }

   /* TODO: We should try and provide a better alignment.  For OpenCL, we need
    * to plumb the alignment through from SPIR-V when we have one.
    */
   nir_intrinsic_set_align(load, bit_size / 8, 0);

   assert(intrin->dest.is_ssa);
   load->num_components = num_components;
   nir_ssa_dest_init(&load->instr, &load->dest, num_components,
                     bit_size, intrin->dest.ssa.name);

   assert(bit_size % 8 == 0);

   nir_ssa_def *result;
   if (addr_format_needs_bounds_check(addr_format)) {
      /* The Vulkan spec for robustBufferAccess gives us quite a few options
       * as to what we can do with an OOB read.  Unfortunately, returning
       * undefined values isn't one of them so we return an actual zero.
       */
      nir_ssa_def *zero = nir_imm_zero(b, load->num_components, bit_size);

      const unsigned load_size = (bit_size / 8) * load->num_components;
      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));

      nir_builder_instr_insert(b, &load->instr);

      nir_pop_if(b, NULL);

      result = nir_if_phi(b, &load->dest.ssa, zero);
   } else {
      nir_builder_instr_insert(b, &load->instr);
      result = &load->dest.ssa;
   }

   if (intrin->dest.ssa.bit_size == 1) {
      /* For shared, we can go ahead and use NIR's and/or the back-end's
       * standard encoding for booleans rather than forcing a 0/1 boolean.
       * This should save an instruction or two.
       */
      if (mode == nir_var_mem_shared ||
          mode == nir_var_shader_temp ||
          mode == nir_var_function_temp)
         result = nir_b2b1(b, result);
      else
         result = nir_i2b(b, result);
   }

   return result;
}
static void
build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
                        nir_ssa_def *addr, nir_address_format addr_format,
                        nir_ssa_def *value, nir_component_mask_t write_mask)
{
   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_mem_ssbo:
      if (addr_format_is_global(addr_format))
         op = nir_intrinsic_store_global;
      else
         op = nir_intrinsic_store_ssbo;
      break;
   case nir_var_mem_global:
      assert(addr_format_is_global(addr_format));
      op = nir_intrinsic_store_global;
      break;
   case nir_var_mem_shared:
      assert(addr_format_is_offset(addr_format));
      op = nir_intrinsic_store_shared;
      break;
   case nir_var_shader_temp:
   case nir_var_function_temp:
      if (addr_format_is_offset(addr_format)) {
         op = nir_intrinsic_store_scratch;
      } else {
         assert(addr_format_is_global(addr_format));
         op = nir_intrinsic_store_global;
      }
      break;
   default:
      unreachable("Unsupported explicit IO variable mode");
   }

   nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);

   if (value->bit_size == 1) {
      /* For shared, we can go ahead and use NIR's and/or the back-end's
       * standard encoding for booleans rather than forcing a 0/1 boolean.
       * This should save an instruction or two.
       *
       * TODO: Make the native bool bit_size an option.
       */
      if (mode == nir_var_mem_shared ||
          mode == nir_var_shader_temp ||
          mode == nir_var_function_temp)
         value = nir_b2b32(b, value);
      else
         value = nir_b2i(b, value, 32);
   }

   store->src[0] = nir_src_for_ssa(value);
   if (addr_format_is_global(addr_format)) {
      store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
   } else if (addr_format_is_offset(addr_format)) {
      assert(addr->num_components == 1);
      store->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   } else {
      store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
      store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   }

   nir_intrinsic_set_write_mask(store, write_mask);

   if (nir_intrinsic_infos[op].index_map[NIR_INTRINSIC_ACCESS] > 0)
      nir_intrinsic_set_access(store, nir_intrinsic_access(intrin));

   /* TODO: We should try and provide a better alignment.  For OpenCL, we need
    * to plumb the alignment through from SPIR-V when we have one.
    */
   nir_intrinsic_set_align(store, value->bit_size / 8, 0);

   assert(value->num_components == 1 ||
          value->num_components == intrin->num_components);
   store->num_components = value->num_components;

   assert(value->bit_size % 8 == 0);

   if (addr_format_needs_bounds_check(addr_format)) {
      const unsigned store_size = (value->bit_size / 8) * store->num_components;
      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));

      nir_builder_instr_insert(b, &store->instr);

      nir_pop_if(b, NULL);
   } else {
      nir_builder_instr_insert(b, &store->instr);
   }
}
static nir_ssa_def *
build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
                         nir_ssa_def *addr, nir_address_format addr_format)
{
   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
   const unsigned num_data_srcs =
      nir_intrinsic_infos[intrin->intrinsic].num_srcs - 1;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_mem_ssbo:
      if (addr_format_is_global(addr_format))
         op = global_atomic_for_deref(intrin->intrinsic);
      else
         op = ssbo_atomic_for_deref(intrin->intrinsic);
      break;
   case nir_var_mem_global:
      assert(addr_format_is_global(addr_format));
      op = global_atomic_for_deref(intrin->intrinsic);
      break;
   case nir_var_mem_shared:
      assert(addr_format_is_offset(addr_format));
      op = shared_atomic_for_deref(intrin->intrinsic);
      break;
   default:
      unreachable("Unsupported explicit IO variable mode");
   }

   nir_intrinsic_instr *atomic = nir_intrinsic_instr_create(b->shader, op);

   unsigned src = 0;
   if (addr_format_is_global(addr_format)) {
      atomic->src[src++] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
   } else if (addr_format_is_offset(addr_format)) {
      assert(addr->num_components == 1);
      atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   } else {
      atomic->src[src++] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
      atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   }
   for (unsigned i = 0; i < num_data_srcs; i++) {
      atomic->src[src++] = nir_src_for_ssa(intrin->src[1 + i].ssa);
   }

   /* Global atomics don't have access flags because they assume that the
    * address may be non-uniform.
    */
   if (nir_intrinsic_infos[op].index_map[NIR_INTRINSIC_ACCESS] > 0)
      nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));

   assert(intrin->dest.ssa.num_components == 1);
   nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                     1, intrin->dest.ssa.bit_size, intrin->dest.ssa.name);

   assert(atomic->dest.ssa.bit_size % 8 == 0);

   if (addr_format_needs_bounds_check(addr_format)) {
      const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));

      nir_builder_instr_insert(b, &atomic->instr);

      nir_pop_if(b, NULL);
      return nir_if_phi(b, &atomic->dest.ssa,
                        nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
   } else {
      nir_builder_instr_insert(b, &atomic->instr);
      return &atomic->dest.ssa;
   }
}
nir_ssa_def *
nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
                                   nir_ssa_def *base_addr,
                                   nir_address_format addr_format)
{
   assert(deref->dest.is_ssa);
   switch (deref->deref_type) {
   case nir_deref_type_var:
      assert(deref->mode & (nir_var_shader_in | nir_var_mem_shared |
                            nir_var_shader_temp | nir_var_function_temp));
      if (addr_format_is_global(addr_format)) {
         assert(deref->mode & (nir_var_shader_temp | nir_var_function_temp));
         base_addr =
            nir_load_scratch_base_ptr(b, !(deref->mode & nir_var_shader_temp),
                                      nir_address_format_num_components(addr_format),
                                      nir_address_format_bit_size(addr_format));
         return build_addr_iadd_imm(b, base_addr, addr_format,
                                    deref->var->data.driver_location);
      } else {
         assert(deref->var->data.driver_location <= UINT32_MAX);
         return nir_imm_intN_t(b, deref->var->data.driver_location,
                               deref->dest.ssa.bit_size);
      }

   case nir_deref_type_array: {
      nir_deref_instr *parent = nir_deref_instr_parent(deref);

      unsigned stride = glsl_get_explicit_stride(parent->type);
      if ((glsl_type_is_matrix(parent->type) &&
           glsl_matrix_type_is_row_major(parent->type)) ||
          (glsl_type_is_vector(parent->type) && stride == 0))
         stride = type_scalar_size_bytes(parent->type);

      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
      index = nir_i2i(b, index, addr_get_offset_bit_size(base_addr, addr_format));
      return build_addr_iadd(b, base_addr, addr_format,
                                nir_amul_imm(b, index, stride));
   }

   case nir_deref_type_ptr_as_array: {
      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
      index = nir_i2i(b, index, addr_get_offset_bit_size(base_addr, addr_format));
      unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
      return build_addr_iadd(b, base_addr, addr_format,
                                nir_amul_imm(b, index, stride));
   }

   case nir_deref_type_array_wildcard:
      unreachable("Wildcards should be lowered by now");

   case nir_deref_type_struct: {
      nir_deref_instr *parent = nir_deref_instr_parent(deref);
      int offset = glsl_get_struct_field_offset(parent->type,
                                                deref->strct.index);
      assert(offset >= 0);
      return build_addr_iadd_imm(b, base_addr, addr_format, offset);
   }

   case nir_deref_type_cast:
      /* Nothing to do here */
      return base_addr;
   }

   unreachable("Invalid NIR deref type");
}
void
nir_lower_explicit_io_instr(nir_builder *b,
                            nir_intrinsic_instr *intrin,
                            nir_ssa_def *addr,
                            nir_address_format addr_format)
{
   b->cursor = nir_after_instr(&intrin->instr);

   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   unsigned vec_stride = glsl_get_explicit_stride(deref->type);
   unsigned scalar_size = type_scalar_size_bytes(deref->type);
   assert(vec_stride == 0 || glsl_type_is_vector(deref->type));
   assert(vec_stride == 0 || vec_stride >= scalar_size);

   if (intrin->intrinsic == nir_intrinsic_load_deref) {
      nir_ssa_def *value;
      if (vec_stride > scalar_size) {
         nir_ssa_def *comps[4] = { NULL, };
         for (unsigned i = 0; i < intrin->num_components; i++) {
            nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
                                                         vec_stride * i);
            comps[i] = build_explicit_io_load(b, intrin, comp_addr,
                                              addr_format, 1);
         }
         value = nir_vec(b, comps, intrin->num_components);
      } else {
         value = build_explicit_io_load(b, intrin, addr, addr_format,
                                        intrin->num_components);
      }
      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
   } else if (intrin->intrinsic == nir_intrinsic_store_deref) {
      assert(intrin->src[1].is_ssa);
      nir_ssa_def *value = intrin->src[1].ssa;
      nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
      if (vec_stride > scalar_size) {
         for (unsigned i = 0; i < intrin->num_components; i++) {
            if (!(write_mask & (1 << i)))
               continue;

            nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
                                                         vec_stride * i);
            build_explicit_io_store(b, intrin, comp_addr, addr_format,
                                    nir_channel(b, value, i), 1);
         }
      } else {
         build_explicit_io_store(b, intrin, addr, addr_format,
                                 value, write_mask);
      }
   } else {
      nir_ssa_def *value =
         build_explicit_io_atomic(b, intrin, addr, addr_format);
      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
   }

   nir_instr_remove(&intrin->instr);
}
static void
lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
                        nir_address_format addr_format)
{
   /* Just delete the deref if it's not used.  We can't use
    * nir_deref_instr_remove_if_unused here because it may remove more than
    * one deref which could break our list walking since we walk the list
    * backwards.
    */
   assert(list_is_empty(&deref->dest.ssa.if_uses));
   if (list_is_empty(&deref->dest.ssa.uses)) {
      nir_instr_remove(&deref->instr);
      return;
   }

   b->cursor = nir_after_instr(&deref->instr);

   nir_ssa_def *base_addr = NULL;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->parent.is_ssa);
      base_addr = deref->parent.ssa;
   }

   nir_ssa_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
                                                          addr_format);

   nir_instr_remove(&deref->instr);
   nir_ssa_def_rewrite_uses(&deref->dest.ssa, nir_src_for_ssa(addr));
}
static void
lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
                         nir_address_format addr_format)
{
   assert(intrin->src[0].is_ssa);
   nir_lower_explicit_io_instr(b, intrin, intrin->src[0].ssa, addr_format);
}
static void
lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
                               nir_address_format addr_format)
{
   b->cursor = nir_after_instr(&intrin->instr);

   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);

   assert(glsl_type_is_array(deref->type));
   assert(glsl_get_length(deref->type) == 0);
   unsigned stride = glsl_get_explicit_stride(deref->type);

   assert(addr_format == nir_address_format_32bit_index_offset ||
          addr_format == nir_address_format_vec2_index_32bit_offset);
   nir_ssa_def *addr = &deref->dest.ssa;
   nir_ssa_def *index = addr_to_index(b, addr, addr_format);
   nir_ssa_def *offset = addr_to_offset(b, addr, addr_format);

   nir_intrinsic_instr *bsize =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_get_buffer_size);
   bsize->src[0] = nir_src_for_ssa(index);
   nir_ssa_dest_init(&bsize->instr, &bsize->dest, 1, 32, NULL);
   nir_builder_instr_insert(b, &bsize->instr);

   nir_ssa_def *arr_size =
      nir_idiv(b, nir_isub(b, &bsize->dest.ssa, offset),
                  nir_imm_int(b, stride));

   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(arr_size));
   nir_instr_remove(&intrin->instr);
}
static bool
nir_lower_explicit_io_impl(nir_function_impl *impl, nir_variable_mode modes,
                           nir_address_format addr_format)
{
   bool progress = false;

   nir_builder b;
   nir_builder_init(&b, impl);

   /* Walk in reverse order so that we can see the full deref chain when we
    * lower the access operations.  We lower them assuming that the derefs
    * will be turned into address calculations later.
    */
   nir_foreach_block_reverse(block, impl) {
      nir_foreach_instr_reverse_safe(instr, block) {
         switch (instr->type) {
         case nir_instr_type_deref: {
            nir_deref_instr *deref = nir_instr_as_deref(instr);
            if (deref->mode & modes) {
               lower_explicit_io_deref(&b, deref, addr_format);
               progress = true;
            }
            break;
         }

         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            switch (intrin->intrinsic) {
            case nir_intrinsic_load_deref:
            case nir_intrinsic_store_deref:
            case nir_intrinsic_deref_atomic_add:
            case nir_intrinsic_deref_atomic_imin:
            case nir_intrinsic_deref_atomic_umin:
            case nir_intrinsic_deref_atomic_imax:
            case nir_intrinsic_deref_atomic_umax:
            case nir_intrinsic_deref_atomic_and:
            case nir_intrinsic_deref_atomic_or:
            case nir_intrinsic_deref_atomic_xor:
            case nir_intrinsic_deref_atomic_exchange:
            case nir_intrinsic_deref_atomic_comp_swap:
            case nir_intrinsic_deref_atomic_fadd:
            case nir_intrinsic_deref_atomic_fmin:
            case nir_intrinsic_deref_atomic_fmax:
            case nir_intrinsic_deref_atomic_fcomp_swap: {
               nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
               if (deref->mode & modes) {
                  lower_explicit_io_access(&b, intrin, addr_format);
                  progress = true;
               }
               break;
            }

            case nir_intrinsic_deref_buffer_array_length: {
               nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
               if (deref->mode & modes) {
                  lower_explicit_io_array_length(&b, intrin, addr_format);
                  progress = true;
               }
               break;
            }

            default:
               break;
            }
            break;
         }

         default:
            /* Nothing to do */
            break;
         }
      }
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   return progress;
}
/** Lower explicitly laid out I/O access to byte offset/address intrinsics
 *
 * This pass is intended to be used for any I/O which touches memory external
 * to the shader or which is directly visible to the client.  It requires that
 * all data types in the given modes have explicit stride/offset decorations
 * to tell it exactly how to calculate the offset/address for the given load,
 * store, or atomic operation.  If the offset/stride information does not come
 * from the client explicitly (as with shared variables in GL or Vulkan),
 * nir_lower_vars_to_explicit_types() can be used to add them.
 *
 * Unlike nir_lower_io, this pass is fully capable of handling incomplete
 * pointer chains which may contain cast derefs.  It does so by walking the
 * deref chain backwards and simply replacing each deref, one at a time, with
 * the appropriate address calculation.  The pass takes a nir_address_format
 * parameter which describes how the offset or address is to be represented
 * during calculations.  By ensuring that the address is always in a
 * consistent format, pointers can safely be conjured from thin air by the
 * driver, stored to variables, passed through phis, etc.
 *
 * The one exception to the simple algorithm described above is for handling
 * row-major matrices, in which case we may look down one additional level of
 * the deref chain.
 */
bool
nir_lower_explicit_io(nir_shader *shader, nir_variable_mode modes,
                      nir_address_format addr_format)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl &&
          nir_lower_explicit_io_impl(function->impl, modes, addr_format))
         progress = true;
   }

   return progress;
}
*impl
,
1438 nir_variable_mode modes
,
1439 glsl_type_size_align_func type_info
)
1441 bool progress
= false;
1443 nir_foreach_block(block
, impl
) {
1444 nir_foreach_instr(instr
, block
) {
1445 if (instr
->type
!= nir_instr_type_deref
)
1448 nir_deref_instr
*deref
= nir_instr_as_deref(instr
);
1449 if (!(deref
->mode
& modes
))
1452 unsigned size
, alignment
;
1453 const struct glsl_type
*new_type
=
1454 glsl_get_explicit_type_for_size_align(deref
->type
, type_info
, &size
, &alignment
);
1455 if (new_type
!= deref
->type
) {
1457 deref
->type
= new_type
;
1459 if (deref
->deref_type
== nir_deref_type_cast
) {
1460 /* See also glsl_type::get_explicit_type_for_size_align() */
1461 unsigned new_stride
= align(size
, alignment
);
1462 if (new_stride
!= deref
->cast
.ptr_stride
) {
1463 deref
->cast
.ptr_stride
= new_stride
;
1471 nir_metadata_preserve(impl
, nir_metadata_block_index
|
1472 nir_metadata_dominance
|
1473 nir_metadata_live_ssa_defs
|
1474 nir_metadata_loop_analysis
);
static bool
lower_vars_to_explicit(nir_shader *shader,
                       struct exec_list *vars, nir_variable_mode mode,
                       glsl_type_size_align_func type_info)
{
   bool progress = false;
   unsigned offset;
   switch (mode) {
   case nir_var_function_temp:
   case nir_var_shader_temp:
      offset = shader->scratch_size;
      break;
   case nir_var_mem_shared:
      offset = 0;
      break;
   default:
      unreachable("Unsupported mode");
   }

   nir_foreach_variable_in_list(var, vars) {
      if (var->data.mode != mode)
         continue;

      unsigned size, align;
      const struct glsl_type *explicit_type =
         glsl_get_explicit_type_for_size_align(var->type, type_info, &size, &align);

      if (explicit_type != var->type) {
         progress = true;
         var->type = explicit_type;
      }

      var->data.driver_location = ALIGN_POT(offset, align);
      offset = var->data.driver_location + size;
   }

   switch (mode) {
   case nir_var_shader_temp:
   case nir_var_function_temp:
      shader->scratch_size = offset;
      break;
   case nir_var_mem_shared:
      shader->info.cs.shared_size = offset;
      shader->num_shared = offset;
      break;
   default:
      unreachable("Unsupported mode");
   }

   return progress;
}
bool
nir_lower_vars_to_explicit_types(nir_shader *shader,
                                 nir_variable_mode modes,
                                 glsl_type_size_align_func type_info)
{
   /* TODO: Situations which need to be handled to support more modes:
    * - row-major matrices
    * - compact shader inputs/outputs
    */
   ASSERTED nir_variable_mode supported = nir_var_mem_shared |
      nir_var_shader_temp | nir_var_function_temp;
   assert(!(modes & ~supported) && "unsupported");

   bool progress = false;

   if (modes & nir_var_mem_shared)
      progress |= lower_vars_to_explicit(shader, &shader->variables, nir_var_mem_shared, type_info);
   if (modes & nir_var_shader_temp)
      progress |= lower_vars_to_explicit(shader, &shader->variables, nir_var_shader_temp, type_info);

   nir_foreach_function(function, shader) {
      if (function->impl) {
         if (modes & nir_var_function_temp)
            progress |= lower_vars_to_explicit(shader, &function->impl->locals, nir_var_function_temp, type_info);

         progress |= nir_lower_vars_to_explicit_types_impl(function->impl, modes, type_info);
      }
   }

   return progress;
}
/**
 * Return the offset source for a load/store intrinsic.
 */
nir_src *
nir_get_io_offset_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_shared:
   case nir_intrinsic_load_uniform:
   case nir_intrinsic_load_global:
   case nir_intrinsic_load_scratch:
   case nir_intrinsic_load_fs_input_interp_deltas:
      return &instr->src[0];
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_input_vertex:
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_shared:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_scratch:
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_fadd:
   case nir_intrinsic_ssbo_atomic_fmin:
   case nir_intrinsic_ssbo_atomic_fmax:
   case nir_intrinsic_ssbo_atomic_fcomp_swap:
      return &instr->src[1];
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[2];
   default:
      return NULL;
   }
}
/**
 * Return the vertex index source for a load/store per_vertex intrinsic.
 */
nir_src *
nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
      return &instr->src[0];
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[1];
   default:
      return NULL;
   }
}
/**
 * Return the numeric constant that identifies a NULL pointer for each address
 * format.
 */
const nir_const_value *
nir_address_format_null_value(nir_address_format addr_format)
{
   static const nir_const_value null_values[][NIR_MAX_VEC_COMPONENTS] = {
      [nir_address_format_32bit_global] = {{0}},
      [nir_address_format_64bit_global] = {{0}},
      [nir_address_format_64bit_bounded_global] = {{0}},
      [nir_address_format_32bit_index_offset] = {{.u32 = ~0}, {.u32 = ~0}},
      [nir_address_format_vec2_index_32bit_offset] = {{.u32 = ~0}, {.u32 = ~0}, {.u32 = ~0}},
      [nir_address_format_32bit_offset] = {{.u32 = ~0}},
      [nir_address_format_32bit_offset_as_64bit] = {{.u64 = ~0ull}},
      [nir_address_format_logical] = {{.u32 = ~0}},
   };

   assert(addr_format < ARRAY_SIZE(null_values));
   return null_values[addr_format];
}
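/* Example (illustrative): materialize the null pointer as an SSA constant
 * for a given format:
 *
 *    nir_ssa_def *null_ptr =
 *       nir_build_imm(b, nir_address_format_num_components(addr_format),
 *                     nir_address_format_bit_size(addr_format),
 *                     nir_address_format_null_value(addr_format));
 */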
nir_ssa_def *
nir_build_addr_ieq(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
                   nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
   case nir_address_format_64bit_bounded_global:
   case nir_address_format_32bit_index_offset:
   case nir_address_format_vec2_index_32bit_offset:
   case nir_address_format_32bit_offset:
      return nir_ball_iequal(b, addr0, addr1);

   case nir_address_format_32bit_offset_as_64bit:
      assert(addr0->num_components == 1 && addr1->num_components == 1);
      return nir_ieq(b, nir_u2u32(b, addr0), nir_u2u32(b, addr1));

   case nir_address_format_logical:
      unreachable("Unsupported address format");
   }

   unreachable("Invalid address format");
}
nir_ssa_def *
nir_build_addr_isub(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
                    nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
   case nir_address_format_32bit_offset:
      assert(addr0->num_components == 1);
      assert(addr1->num_components == 1);
      return nir_isub(b, addr0, addr1);

   case nir_address_format_32bit_offset_as_64bit:
      assert(addr0->num_components == 1);
      assert(addr1->num_components == 1);
      return nir_u2u64(b, nir_isub(b, nir_u2u32(b, addr0), nir_u2u32(b, addr1)));

   case nir_address_format_64bit_bounded_global:
      return nir_isub(b, addr_to_global(b, addr0, addr_format),
                         addr_to_global(b, addr1, addr_format));

   case nir_address_format_32bit_index_offset:
      assert(addr0->num_components == 2);
      assert(addr1->num_components == 2);
      /* Assume the same buffer index. */
      return nir_isub(b, nir_channel(b, addr0, 1), nir_channel(b, addr1, 1));

   case nir_address_format_vec2_index_32bit_offset:
      assert(addr0->num_components == 3);
      assert(addr1->num_components == 3);
      /* Assume the same buffer index. */
      return nir_isub(b, nir_channel(b, addr0, 2), nir_channel(b, addr1, 2));

   case nir_address_format_logical:
      unreachable("Unsupported address format");
   }

   unreachable("Invalid address format");
}
static bool
is_input(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_input ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
          intrin->intrinsic == nir_intrinsic_load_interpolated_input ||
          intrin->intrinsic == nir_intrinsic_load_fs_input_interp_deltas;
}
static bool
is_output(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_output ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
          intrin->intrinsic == nir_intrinsic_store_output ||
          intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
}
/**
 * This pass adds constant offsets to instr->const_index[0] for input/output
 * intrinsics, and resets the offset source to 0.  Non-constant offsets remain
 * unchanged - since we don't know what part of a compound variable is
 * accessed, we allocate storage for the entire thing.  For drivers that use
 * nir_lower_io_to_temporaries() before nir_lower_io(), this guarantees that
 * the offset source will be 0, so that they don't have to add it in manually.
 */
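/* Example (illustrative): a store_output with base=16 and a constant offset
 * source of 3 becomes, roughly, the same store_output with base=19 and an
 * offset source of 0, while stores with non-constant offsets are left
 * untouched.
 */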
static bool
add_const_offset_to_base_block(nir_block *block, nir_builder *b,
                               nir_variable_mode mode)
{
   bool progress = false;
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if ((mode == nir_var_shader_in && is_input(intrin)) ||
          (mode == nir_var_shader_out && is_output(intrin))) {
         nir_src *offset = nir_get_io_offset_src(intrin);

         if (nir_src_is_const(*offset)) {
            intrin->const_index[0] += nir_src_as_uint(*offset);
            b->cursor = nir_before_instr(&intrin->instr);
            nir_instr_rewrite_src(&intrin->instr, offset,
                                  nir_src_for_ssa(nir_imm_int(b, 0)));
            progress = true;
         }
      }
   }

   return progress;
}
*nir
, nir_variable_mode mode
)
1774 bool progress
= false;
1776 nir_foreach_function(f
, nir
) {
1779 nir_builder_init(&b
, f
->impl
);
1780 nir_foreach_block(block
, f
->impl
) {
1781 progress
|= add_const_offset_to_base_block(block
, &b
, mode
);