/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
/*
 * This lowering pass converts references to input/output variables with
 * loads/stores to actual input/output intrinsics.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

#include "util/u_math.h"
struct lower_io_state {
   void *dead_ctx;
   nir_builder builder;
   int (*type_size)(const struct glsl_type *type, bool);
   nir_variable_mode modes;
   nir_lower_io_options options;
};
static nir_intrinsic_op
ssbo_atomic_for_deref(nir_intrinsic_op deref_op)
{
   switch (deref_op) {
#define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_ssbo_##O;
   OP(atomic_exchange)
   OP(atomic_comp_swap)
   OP(atomic_add)
   OP(atomic_imin)
   OP(atomic_umin)
   OP(atomic_imax)
   OP(atomic_umax)
   OP(atomic_and)
   OP(atomic_or)
   OP(atomic_xor)
   OP(atomic_fadd)
   OP(atomic_fmin)
   OP(atomic_fmax)
   OP(atomic_fcomp_swap)
#undef OP

   default:
      unreachable("Invalid SSBO atomic");
   }
}
static nir_intrinsic_op
global_atomic_for_deref(nir_intrinsic_op deref_op)
{
   switch (deref_op) {
#define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_global_##O;
   OP(atomic_exchange)
   OP(atomic_comp_swap)
   OP(atomic_add)
   OP(atomic_imin)
   OP(atomic_umin)
   OP(atomic_imax)
   OP(atomic_umax)
   OP(atomic_and)
   OP(atomic_or)
   OP(atomic_xor)
   OP(atomic_fadd)
   OP(atomic_fmin)
   OP(atomic_fmax)
   OP(atomic_fcomp_swap)
#undef OP

   default:
      unreachable("Invalid global atomic");
   }
}
static nir_intrinsic_op
shared_atomic_for_deref(nir_intrinsic_op deref_op)
{
   switch (deref_op) {
#define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_shared_##O;
   OP(atomic_exchange)
   OP(atomic_comp_swap)
   OP(atomic_add)
   OP(atomic_imin)
   OP(atomic_umin)
   OP(atomic_imax)
   OP(atomic_umax)
   OP(atomic_and)
   OP(atomic_or)
   OP(atomic_xor)
   OP(atomic_fadd)
   OP(atomic_fmin)
   OP(atomic_fmax)
   OP(atomic_fcomp_swap)
#undef OP

   default:
      unreachable("Invalid shared atomic");
   }
}
void
nir_assign_var_locations(nir_shader *shader, nir_variable_mode mode,
                         unsigned *size,
                         int (*type_size)(const struct glsl_type *, bool))
{
   unsigned location = 0;

   nir_foreach_variable_with_modes(var, shader, mode) {
      var->data.driver_location = location;
      bool bindless_type_size = var->data.mode == nir_var_shader_in ||
                                var->data.mode == nir_var_shader_out ||
                                var->data.bindless;
      location += type_size(var->type, bindless_type_size);
   }

   *size = location;
}
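
/*
 * Illustrative usage (a hypothetical driver-side sketch, not part of this
 * file): a backend would typically assign locations for each mode it
 * consumes and record the total size, e.g.
 *
 *    unsigned num_inputs;
 *    nir_assign_var_locations(nir, nir_var_shader_in, &num_inputs,
 *                             type_size_vec4);
 *
 * where type_size_vec4 is a driver-provided callback returning the number
 * of slots a given type occupies.
 */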
/**
 * Return true if the given variable is a per-vertex input/output array
 * (such as geometry shader inputs).
 */
bool
nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage)
{
   if (var->data.patch || !glsl_type_is_array(var->type))
      return false;

   if (var->data.mode == nir_var_shader_in)
      return stage == MESA_SHADER_GEOMETRY ||
             stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL;

   if (var->data.mode == nir_var_shader_out)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}
static unsigned get_number_of_slots(struct lower_io_state *state,
                                    const nir_variable *var)
{
   const struct glsl_type *type = var->type;

   if (nir_is_per_vertex_io(var, state->builder.shader->info.stage)) {
      assert(glsl_type_is_array(type));
      type = glsl_get_array_element(type);
   }

   return state->type_size(type, var->data.bindless);
}
static nir_ssa_def *
get_io_offset(nir_builder *b, nir_deref_instr *deref,
              nir_ssa_def **vertex_index,
              int (*type_size)(const struct glsl_type *, bool),
              unsigned *component, bool bts)
{
   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);

   assert(path.path[0]->deref_type == nir_deref_type_var);
   nir_deref_instr **p = &path.path[1];

   /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
    * outermost array index separate.  Process the rest normally.
    */
   if (vertex_index != NULL) {
      assert((*p)->deref_type == nir_deref_type_array);
      *vertex_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
      p++;
   }

   if (path.path[0]->var->data.compact) {
      assert((*p)->deref_type == nir_deref_type_array);
      assert(glsl_type_is_scalar((*p)->type));

      /* We always lower indirect dereferences for "compact" array vars. */
      const unsigned index = nir_src_as_uint((*p)->arr.index);
      const unsigned total_offset = *component + index;
      const unsigned slot_offset = total_offset / 4;
      *component = total_offset % 4;
      return nir_imm_int(b, type_size(glsl_vec4_type(), bts) * slot_offset);
   }

   /* Just emit code and let constant-folding go to town */
   nir_ssa_def *offset = nir_imm_int(b, 0);

   for (; *p; p++) {
      if ((*p)->deref_type == nir_deref_type_array) {
         unsigned size = type_size((*p)->type, bts);

         nir_ssa_def *mul =
            nir_amul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);

         offset = nir_iadd(b, offset, mul);
      } else if ((*p)->deref_type == nir_deref_type_struct) {
         /* p starts at path[1], so this is safe */
         nir_deref_instr *parent = *(p - 1);

         unsigned field_offset = 0;
         for (unsigned i = 0; i < (*p)->strct.index; i++) {
            field_offset += type_size(glsl_get_struct_field(parent->type, i), bts);
         }
         offset = nir_iadd_imm(b, offset, field_offset);
      } else {
         unreachable("Unsupported deref type");
      }
   }

   nir_deref_path_finish(&path);

   return offset;
}
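
/*
 * Worked example (illustrative, not from the original source): for a
 * variable "struct { vec4 a; vec4 b; } s[4]" accessed as s[i].b, the loop
 * above emits roughly
 *
 *    offset = 0
 *    offset += i * type_size(struct)   (array deref)
 *    offset += type_size(vec4)         (struct deref, fields before .b)
 *
 * and constant folding collapses the constant parts afterwards.
 */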
static nir_ssa_def *
emit_load(struct lower_io_state *state,
          nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
          unsigned component, unsigned num_components, unsigned bit_size,
          nir_alu_type type)
{
   nir_builder *b = &state->builder;
   const nir_shader *nir = b->shader;
   nir_variable_mode mode = var->data.mode;
   nir_ssa_def *barycentric = NULL;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_shader_in:
      if (nir->info.stage == MESA_SHADER_FRAGMENT &&
          nir->options->use_interpolated_input_intrinsics &&
          var->data.interpolation != INTERP_MODE_FLAT) {
         if (var->data.interpolation == INTERP_MODE_EXPLICIT) {
            assert(vertex_index != NULL);
            op = nir_intrinsic_load_input_vertex;
         } else {
            assert(vertex_index == NULL);

            nir_intrinsic_op bary_op;
            if (var->data.sample ||
                (state->options & nir_lower_io_force_sample_interpolation))
               bary_op = nir_intrinsic_load_barycentric_sample;
            else if (var->data.centroid)
               bary_op = nir_intrinsic_load_barycentric_centroid;
            else
               bary_op = nir_intrinsic_load_barycentric_pixel;

            barycentric = nir_load_barycentric(&state->builder, bary_op,
                                               var->data.interpolation);
            op = nir_intrinsic_load_interpolated_input;
         }
      } else {
         op = vertex_index ? nir_intrinsic_load_per_vertex_input :
                             nir_intrinsic_load_input;
      }
      break;
   case nir_var_shader_out:
      op = vertex_index ? nir_intrinsic_load_per_vertex_output :
                          nir_intrinsic_load_output;
      break;
   case nir_var_uniform:
      op = nir_intrinsic_load_uniform;
      break;
   default:
      unreachable("Unknown variable mode");
   }

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(state->builder.shader, op);
   load->num_components = num_components;

   nir_intrinsic_set_base(load, var->data.driver_location);
   if (mode == nir_var_shader_in || mode == nir_var_shader_out)
      nir_intrinsic_set_component(load, component);

   if (load->intrinsic == nir_intrinsic_load_uniform)
      nir_intrinsic_set_range(load,
                              state->type_size(var->type, var->data.bindless));

   if (load->intrinsic == nir_intrinsic_load_input ||
       load->intrinsic == nir_intrinsic_load_input_vertex ||
       load->intrinsic == nir_intrinsic_load_uniform)
      nir_intrinsic_set_type(load, type);

   if (load->intrinsic != nir_intrinsic_load_uniform) {
      nir_io_semantics semantics = {0};
      semantics.location = var->data.location;
      semantics.num_slots = get_number_of_slots(state, var);
      semantics.fb_fetch_output = var->data.fb_fetch_output;
      nir_intrinsic_set_io_semantics(load, semantics);
   }

   if (vertex_index) {
      load->src[0] = nir_src_for_ssa(vertex_index);
      load->src[1] = nir_src_for_ssa(offset);
   } else if (barycentric) {
      load->src[0] = nir_src_for_ssa(barycentric);
      load->src[1] = nir_src_for_ssa(offset);
   } else {
      load->src[0] = nir_src_for_ssa(offset);
   }

   nir_ssa_dest_init(&load->instr, &load->dest,
                     num_components, bit_size, NULL);
   nir_builder_instr_insert(b, &load->instr);

   return &load->dest.ssa;
}
static nir_ssa_def *
lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
           nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
           unsigned component, const struct glsl_type *type)
{
   assert(intrin->dest.is_ssa);
   if (intrin->dest.ssa.bit_size == 64 &&
       (state->options & nir_lower_io_lower_64bit_to_32)) {
      nir_builder *b = &state->builder;

      const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);

      nir_ssa_def *comp64[4];
      assert(component == 0 || component == 2);
      unsigned dest_comp = 0;
      while (dest_comp < intrin->dest.ssa.num_components) {
         const unsigned num_comps =
            MIN2(intrin->dest.ssa.num_components - dest_comp,
                 (4 - component) / 2);

         nir_ssa_def *data32 =
            emit_load(state, vertex_index, var, offset, component,
                      num_comps * 2, 32, nir_type_uint32);
         for (unsigned i = 0; i < num_comps; i++) {
            comp64[dest_comp + i] =
               nir_pack_64_2x32(b, nir_channels(b, data32, 3 << (i * 2)));
         }

         /* Only the first load has a component offset */
         component = 0;
         dest_comp += num_comps;
         offset = nir_iadd_imm(b, offset, slot_size);
      }

      return nir_vec(b, comp64, intrin->dest.ssa.num_components);
   } else if (intrin->dest.ssa.bit_size == 1) {
      /* Booleans are 32-bit */
      assert(glsl_type_is_boolean(type));
      return nir_b2b1(&state->builder,
                      emit_load(state, vertex_index, var, offset, component,
                                intrin->dest.ssa.num_components, 32,
                                nir_type_bool32));
   } else {
      return emit_load(state, vertex_index, var, offset, component,
                       intrin->dest.ssa.num_components,
                       intrin->dest.ssa.bit_size,
                       nir_get_nir_type_for_glsl_type(type));
   }
}
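
/*
 * A sketch of the 64-bit split performed above (assuming a vec4-slot
 * type_size): loading a dvec3 at component 0 becomes two 32-bit loads,
 *
 *    load #1: 4 x 32-bit components at offset        -> comp64[0..1]
 *    load #2: 2 x 32-bit components at offset + slot -> comp64[2]
 *
 * with each pair of 32-bit channels repacked via nir_pack_64_2x32.
 */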
static void
emit_store(struct lower_io_state *state, nir_ssa_def *data,
           nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
           unsigned component, unsigned num_components,
           nir_component_mask_t write_mask, nir_alu_type type)
{
   nir_builder *b = &state->builder;
   nir_variable_mode mode = var->data.mode;

   assert(mode == nir_var_shader_out);
   nir_intrinsic_op op;
   op = vertex_index ? nir_intrinsic_store_per_vertex_output :
                       nir_intrinsic_store_output;

   nir_intrinsic_instr *store =
      nir_intrinsic_instr_create(state->builder.shader, op);
   store->num_components = num_components;

   store->src[0] = nir_src_for_ssa(data);

   nir_intrinsic_set_base(store, var->data.driver_location);

   if (mode == nir_var_shader_out)
      nir_intrinsic_set_component(store, component);

   if (store->intrinsic == nir_intrinsic_store_output)
      nir_intrinsic_set_type(store, type);

   nir_intrinsic_set_write_mask(store, write_mask);

   if (vertex_index)
      store->src[1] = nir_src_for_ssa(vertex_index);

   store->src[vertex_index ? 2 : 1] = nir_src_for_ssa(offset);

   unsigned gs_streams = 0;
   if (state->builder.shader->info.stage == MESA_SHADER_GEOMETRY) {
      if (var->data.stream & NIR_STREAM_PACKED) {
         gs_streams = var->data.stream & ~NIR_STREAM_PACKED;
      } else {
         assert(var->data.stream < 4);
         for (unsigned i = 0; i < num_components; ++i)
            gs_streams |= var->data.stream << (2 * i);
      }
   }

   nir_io_semantics semantics = {0};
   semantics.location = var->data.location;
   semantics.num_slots = get_number_of_slots(state, var);
   semantics.dual_source_blend_index = var->data.index;
   semantics.gs_streams = gs_streams;
   nir_intrinsic_set_io_semantics(store, semantics);

   nir_builder_instr_insert(b, &store->instr);
}
static void
lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
            nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
            unsigned component, const struct glsl_type *type)
{
   assert(intrin->src[1].is_ssa);
   if (intrin->src[1].ssa->bit_size == 64 &&
       (state->options & nir_lower_io_lower_64bit_to_32)) {
      nir_builder *b = &state->builder;

      const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);

      assert(component == 0 || component == 2);
      unsigned src_comp = 0;
      nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
      while (src_comp < intrin->num_components) {
         const unsigned num_comps =
            MIN2(intrin->num_components - src_comp,
                 (4 - component) / 2);

         if (write_mask & BITFIELD_MASK(num_comps)) {
            nir_ssa_def *data =
               nir_channels(b, intrin->src[1].ssa,
                            BITFIELD_RANGE(src_comp, num_comps));
            nir_ssa_def *data32 = nir_bitcast_vector(b, data, 32);

            nir_component_mask_t write_mask32 = 0;
            for (unsigned i = 0; i < num_comps; i++) {
               if (write_mask & BITFIELD_MASK(num_comps) & (1 << i))
                  write_mask32 |= 3 << (i * 2);
            }

            emit_store(state, data32, vertex_index, var, offset,
                       component, data32->num_components, write_mask32,
                       nir_type_uint32);
         }

         /* Only the first store has a component offset */
         component = 0;
         src_comp += num_comps;
         write_mask >>= num_comps;
         offset = nir_iadd_imm(b, offset, slot_size);
      }
   } else if (intrin->src[1].ssa->bit_size == 1) {
      /* Booleans are 32-bit */
      assert(glsl_type_is_boolean(type));
      nir_ssa_def *b32_val = nir_b2b32(&state->builder, intrin->src[1].ssa);
      emit_store(state, b32_val, vertex_index, var, offset,
                 component, intrin->num_components,
                 nir_intrinsic_write_mask(intrin),
                 nir_type_bool32);
   } else {
      emit_store(state, intrin->src[1].ssa, vertex_index, var, offset,
                 component, intrin->num_components,
                 nir_intrinsic_write_mask(intrin),
                 nir_get_nir_type_for_glsl_type(type));
   }
}
static nir_ssa_def *
lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
                     nir_variable *var, nir_ssa_def *offset, unsigned component,
                     const struct glsl_type *type)
{
   nir_builder *b = &state->builder;
   assert(var->data.mode == nir_var_shader_in);

   /* Ignore interpolateAt() for flat variables - flat is flat.  Lower
    * interpolateAtVertex() for explicit variables.
    */
   if (var->data.interpolation == INTERP_MODE_FLAT ||
       var->data.interpolation == INTERP_MODE_EXPLICIT) {
      nir_ssa_def *vertex_index = NULL;

      if (var->data.interpolation == INTERP_MODE_EXPLICIT) {
         assert(intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex);
         vertex_index = intrin->src[1].ssa;
      }

      return lower_load(intrin, state, vertex_index, var, offset, component, type);
   }

   /* None of the supported APIs allow interpolation on 64-bit things */
   assert(intrin->dest.is_ssa && intrin->dest.ssa.bit_size <= 32);

   nir_intrinsic_op bary_op;
   switch (intrin->intrinsic) {
   case nir_intrinsic_interp_deref_at_centroid:
      bary_op = (state->options & nir_lower_io_force_sample_interpolation) ?
                nir_intrinsic_load_barycentric_sample :
                nir_intrinsic_load_barycentric_centroid;
      break;
   case nir_intrinsic_interp_deref_at_sample:
      bary_op = nir_intrinsic_load_barycentric_at_sample;
      break;
   case nir_intrinsic_interp_deref_at_offset:
      bary_op = nir_intrinsic_load_barycentric_at_offset;
      break;
   default:
      unreachable("Bogus interpolateAt() intrinsic.");
   }

   nir_intrinsic_instr *bary_setup =
      nir_intrinsic_instr_create(state->builder.shader, bary_op);

   nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32, NULL);
   nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);

   if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
       intrin->intrinsic == nir_intrinsic_interp_deref_at_offset ||
       intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex)
      nir_src_copy(&bary_setup->src[0], &intrin->src[1], bary_setup);

   nir_builder_instr_insert(b, &bary_setup->instr);

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(state->builder.shader,
                                 nir_intrinsic_load_interpolated_input);
   load->num_components = intrin->num_components;

   nir_intrinsic_set_base(load, var->data.driver_location);
   nir_intrinsic_set_component(load, component);

   nir_io_semantics semantics = {0};
   semantics.location = var->data.location;
   semantics.num_slots = get_number_of_slots(state, var);
   nir_intrinsic_set_io_semantics(load, semantics);

   load->src[0] = nir_src_for_ssa(&bary_setup->dest.ssa);
   load->src[1] = nir_src_for_ssa(offset);

   assert(intrin->dest.is_ssa);
   nir_ssa_dest_init(&load->instr, &load->dest,
                     intrin->dest.ssa.num_components,
                     intrin->dest.ssa.bit_size, NULL);
   nir_builder_instr_insert(b, &load->instr);

   return &load->dest.ssa;
}
static bool
nir_lower_io_block(nir_block *block,
                   struct lower_io_state *state)
{
   nir_builder *b = &state->builder;
   const nir_shader_compiler_options *options = b->shader->options;
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_deref:
      case nir_intrinsic_store_deref:
         /* We can lower the io for this nir intrinsic */
         break;
      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
      case nir_intrinsic_interp_deref_at_vertex:
         /* We can optionally lower these to load_interpolated_input */
         if (options->use_interpolated_input_intrinsics)
            break;
      default:
         /* We can't lower the io for this nir intrinsic, so skip it */
         continue;
      }

      nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);

      nir_variable_mode mode = deref->mode;
      assert(util_is_power_of_two_nonzero(mode));
      if ((state->modes & mode) == 0)
         continue;

      nir_variable *var = nir_deref_instr_get_variable(deref);

      b->cursor = nir_before_instr(instr);

      const bool per_vertex = nir_is_per_vertex_io(var, b->shader->info.stage);

      nir_ssa_def *offset;
      nir_ssa_def *vertex_index = NULL;
      unsigned component_offset = var->data.location_frac;
      bool bindless_type_size = mode == nir_var_shader_in ||
                                mode == nir_var_shader_out ||
                                var->data.bindless;

      offset = get_io_offset(b, deref, per_vertex ? &vertex_index : NULL,
                             state->type_size, &component_offset,
                             bindless_type_size);

      nir_ssa_def *replacement = NULL;

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_deref:
         replacement = lower_load(intrin, state, vertex_index, var, offset,
                                  component_offset, deref->type);
         break;

      case nir_intrinsic_store_deref:
         lower_store(intrin, state, vertex_index, var, offset,
                     component_offset, deref->type);
         break;

      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
      case nir_intrinsic_interp_deref_at_vertex:
         assert(vertex_index == NULL);
         replacement = lower_interpolate_at(intrin, state, var, offset,
                                            component_offset, deref->type);
         break;

      default:
         continue;
      }

      if (replacement) {
         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                  nir_src_for_ssa(replacement));
      }
      nir_instr_remove(&intrin->instr);
      progress = true;
   }

   return progress;
}
static bool
nir_lower_io_impl(nir_function_impl *impl,
                  nir_variable_mode modes,
                  int (*type_size)(const struct glsl_type *, bool),
                  nir_lower_io_options options)
{
   struct lower_io_state state;
   bool progress = false;

   nir_builder_init(&state.builder, impl);
   state.dead_ctx = ralloc_context(NULL);
   state.modes = modes;
   state.type_size = type_size;
   state.options = options;

   ASSERTED nir_variable_mode supported_modes =
      nir_var_shader_in | nir_var_shader_out | nir_var_uniform;
   assert(!(modes & ~supported_modes));

   nir_foreach_block(block, impl) {
      progress |= nir_lower_io_block(block, &state);
   }

   ralloc_free(state.dead_ctx);

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   return progress;
}
/** Lower load/store_deref intrinsics on I/O variables to offset-based intrinsics
 *
 * This pass is intended to be used for cross-stage shader I/O and driver-
 * managed uniforms to turn deref-based access into a simpler model using
 * locations or offsets.  For fragment shader inputs, it can optionally turn
 * load_deref into an explicit interpolation using barycentrics coming from
 * one of the load_barycentric_* intrinsics.  This pass requires that all
 * deref chains are complete and contain no casts.
 */
bool
nir_lower_io(nir_shader *shader, nir_variable_mode modes,
             int (*type_size)(const struct glsl_type *, bool),
             nir_lower_io_options options)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl) {
         progress |= nir_lower_io_impl(function->impl, modes,
                                       type_size, options);
      }
   }

   return progress;
}
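
/*
 * Typical driver usage (a hypothetical sketch): run this after the
 * deref-based variable passes, e.g.
 *
 *    nir_lower_io(nir, nir_var_shader_in | nir_var_shader_out,
 *                 type_size_vec4, (nir_lower_io_options)0);
 *
 * where type_size_vec4 is a driver-provided callback; intrinsic-based I/O
 * optimizations should run after this point.
 */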
static unsigned
type_scalar_size_bytes(const struct glsl_type *type)
{
   assert(glsl_type_is_vector_or_scalar(type) ||
          glsl_type_is_matrix(type));
   return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
}
static nir_ssa_def *
build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
                nir_address_format addr_format, nir_ssa_def *offset)
{
   assert(offset->num_components == 1);

   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
   case nir_address_format_32bit_offset:
      assert(addr->bit_size == offset->bit_size);
      assert(addr->num_components == 1);
      return nir_iadd(b, addr, offset);

   case nir_address_format_32bit_offset_as_64bit:
      assert(addr->num_components == 1);
      assert(offset->bit_size == 32);
      return nir_u2u64(b, nir_iadd(b, nir_u2u32(b, addr), offset));

   case nir_address_format_64bit_bounded_global:
      assert(addr->num_components == 4);
      assert(addr->bit_size == offset->bit_size);
      return nir_vec4(b, nir_channel(b, addr, 0),
                         nir_channel(b, addr, 1),
                         nir_channel(b, addr, 2),
                         nir_iadd(b, nir_channel(b, addr, 3), offset));

   case nir_address_format_32bit_index_offset:
      assert(addr->num_components == 2);
      assert(addr->bit_size == offset->bit_size);
      return nir_vec2(b, nir_channel(b, addr, 0),
                         nir_iadd(b, nir_channel(b, addr, 1), offset));

   case nir_address_format_32bit_index_offset_pack64:
      assert(addr->num_components == 1);
      assert(offset->bit_size == 32);
      return nir_pack_64_2x32_split(b,
                                    nir_iadd(b, nir_unpack_64_2x32_split_x(b, addr), offset),
                                    nir_unpack_64_2x32_split_y(b, addr));

   case nir_address_format_vec2_index_32bit_offset:
      assert(addr->num_components == 3);
      assert(offset->bit_size == 32);
      return nir_vec3(b, nir_channel(b, addr, 0), nir_channel(b, addr, 1),
                         nir_iadd(b, nir_channel(b, addr, 2), offset));

   case nir_address_format_logical:
      unreachable("Unsupported address format");
   }

   unreachable("Invalid address format");
}
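
/*
 * For reference (derived from the cases above): in the
 * nir_address_format_64bit_bounded_global format an address is a uvec4 of
 *
 *    (base_lo, base_hi, buffer_size, offset)
 *
 * so adding to an address only modifies the offset in component 3.
 */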
static unsigned
addr_get_offset_bit_size(nir_ssa_def *addr, nir_address_format addr_format)
{
   if (addr_format == nir_address_format_32bit_offset_as_64bit ||
       addr_format == nir_address_format_32bit_index_offset_pack64)
      return 32;
   return addr->bit_size;
}
static nir_ssa_def *
build_addr_iadd_imm(nir_builder *b, nir_ssa_def *addr,
                    nir_address_format addr_format, int64_t offset)
{
   return build_addr_iadd(b, addr, addr_format,
                             nir_imm_intN_t(b, offset,
                                            addr_get_offset_bit_size(addr, addr_format)));
}
static nir_ssa_def *
addr_to_index(nir_builder *b, nir_ssa_def *addr,
              nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_index_offset:
      assert(addr->num_components == 2);
      return nir_channel(b, addr, 0);
   case nir_address_format_32bit_index_offset_pack64:
      return nir_unpack_64_2x32_split_y(b, addr);
   case nir_address_format_vec2_index_32bit_offset:
      assert(addr->num_components == 3);
      return nir_channels(b, addr, 0x3);
   default: unreachable("Invalid address format");
   }
}
static nir_ssa_def *
addr_to_offset(nir_builder *b, nir_ssa_def *addr,
               nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_index_offset:
      assert(addr->num_components == 2);
      return nir_channel(b, addr, 1);
   case nir_address_format_32bit_index_offset_pack64:
      return nir_unpack_64_2x32_split_x(b, addr);
   case nir_address_format_vec2_index_32bit_offset:
      assert(addr->num_components == 3);
      return nir_channel(b, addr, 2);
   case nir_address_format_32bit_offset:
      return addr;
   case nir_address_format_32bit_offset_as_64bit:
      return nir_u2u32(b, addr);
   default:
      unreachable("Invalid address format");
   }
}
/** Returns true if the given address format resolves to a global address */
static bool
addr_format_is_global(nir_address_format addr_format)
{
   return addr_format == nir_address_format_32bit_global ||
          addr_format == nir_address_format_64bit_global ||
          addr_format == nir_address_format_64bit_bounded_global;
}

static bool
addr_format_is_offset(nir_address_format addr_format)
{
   return addr_format == nir_address_format_32bit_offset ||
          addr_format == nir_address_format_32bit_offset_as_64bit;
}
static nir_ssa_def *
addr_to_global(nir_builder *b, nir_ssa_def *addr,
               nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
      assert(addr->num_components == 1);
      return addr;

   case nir_address_format_64bit_bounded_global:
      assert(addr->num_components == 4);
      return nir_iadd(b, nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)),
                         nir_u2u64(b, nir_channel(b, addr, 3)));

   case nir_address_format_32bit_index_offset:
   case nir_address_format_32bit_index_offset_pack64:
   case nir_address_format_vec2_index_32bit_offset:
   case nir_address_format_32bit_offset:
   case nir_address_format_32bit_offset_as_64bit:
   case nir_address_format_logical:
      unreachable("Cannot get a 64-bit address with this address format");
   }

   unreachable("Invalid address format");
}
static bool
addr_format_needs_bounds_check(nir_address_format addr_format)
{
   return addr_format == nir_address_format_64bit_bounded_global;
}
static nir_ssa_def *
addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
                  nir_address_format addr_format, unsigned size)
{
   assert(addr_format == nir_address_format_64bit_bounded_global);
   assert(addr->num_components == 4);
   return nir_ige(b, nir_channel(b, addr, 2),
                     nir_iadd_imm(b, nir_channel(b, addr, 3), size));
}
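
/*
 * Reading the check above with the (base_lo, base_hi, buffer_size, offset)
 * layout: an access of `size` bytes is in bounds iff
 *
 *    addr.z >= addr.w + size,  i.e.  offset + size <= buffer_size,
 *
 * which is exactly what the robust-buffer-access paths below need.
 */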
static nir_ssa_def *
build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
                       nir_ssa_def *addr, nir_address_format addr_format,
                       unsigned num_components)
{
   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_mem_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case nir_var_mem_ssbo:
      if (addr_format_is_global(addr_format))
         op = nir_intrinsic_load_global;
      else
         op = nir_intrinsic_load_ssbo;
      break;
   case nir_var_mem_global:
      assert(addr_format_is_global(addr_format));
      op = nir_intrinsic_load_global;
      break;
   case nir_var_shader_in:
      assert(addr_format_is_offset(addr_format));
      op = nir_intrinsic_load_kernel_input;
      break;
   case nir_var_mem_shared:
      assert(addr_format_is_offset(addr_format));
      op = nir_intrinsic_load_shared;
      break;
   case nir_var_shader_temp:
   case nir_var_function_temp:
      if (addr_format_is_offset(addr_format)) {
         op = nir_intrinsic_load_scratch;
      } else {
         assert(addr_format_is_global(addr_format));
         op = nir_intrinsic_load_global;
      }
      break;
   default:
      unreachable("Unsupported explicit IO variable mode");
   }

   nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);

   if (addr_format_is_global(addr_format)) {
      load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
   } else if (addr_format_is_offset(addr_format)) {
      assert(addr->num_components == 1);
      load->src[0] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   } else {
      load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
      load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   }

   if (nir_intrinsic_has_access(load))
      nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));

   unsigned bit_size = intrin->dest.ssa.bit_size;
   if (bit_size == 1) {
      /* TODO: Make the native bool bit_size an option. */
      bit_size = 32;
   }

   /* TODO: We should try and provide a better alignment.  For OpenCL, we need
    * to plumb the alignment through from SPIR-V when we have one.
    */
   nir_intrinsic_set_align(load, bit_size / 8, 0);

   assert(intrin->dest.is_ssa);
   load->num_components = num_components;
   nir_ssa_dest_init(&load->instr, &load->dest, num_components,
                     bit_size, intrin->dest.ssa.name);

   assert(bit_size % 8 == 0);

   nir_ssa_def *result;
   if (addr_format_needs_bounds_check(addr_format)) {
      /* The Vulkan spec for robustBufferAccess gives us quite a few options
       * as to what we can do with an OOB read.  Unfortunately, returning
       * undefined values isn't one of them so we return an actual zero.
       */
      nir_ssa_def *zero = nir_imm_zero(b, load->num_components, bit_size);

      const unsigned load_size = (bit_size / 8) * load->num_components;
      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));

      nir_builder_instr_insert(b, &load->instr);

      nir_pop_if(b, NULL);

      result = nir_if_phi(b, &load->dest.ssa, zero);
   } else {
      nir_builder_instr_insert(b, &load->instr);
      result = &load->dest.ssa;
   }

   if (intrin->dest.ssa.bit_size == 1) {
      /* For shared, we can go ahead and use NIR's and/or the back-end's
       * standard encoding for booleans rather than forcing a 0/1 boolean.
       * This should save an instruction or two.
       */
      if (mode == nir_var_mem_shared ||
          mode == nir_var_shader_temp ||
          mode == nir_var_function_temp)
         result = nir_b2b1(b, result);
      else
         result = nir_i2b(b, result);
   }

   return result;
}
static void
build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
                        nir_ssa_def *addr, nir_address_format addr_format,
                        nir_ssa_def *value, nir_component_mask_t write_mask)
{
   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_mem_ssbo:
      if (addr_format_is_global(addr_format))
         op = nir_intrinsic_store_global;
      else
         op = nir_intrinsic_store_ssbo;
      break;
   case nir_var_mem_global:
      assert(addr_format_is_global(addr_format));
      op = nir_intrinsic_store_global;
      break;
   case nir_var_mem_shared:
      assert(addr_format_is_offset(addr_format));
      op = nir_intrinsic_store_shared;
      break;
   case nir_var_shader_temp:
   case nir_var_function_temp:
      if (addr_format_is_offset(addr_format)) {
         op = nir_intrinsic_store_scratch;
      } else {
         assert(addr_format_is_global(addr_format));
         op = nir_intrinsic_store_global;
      }
      break;
   default:
      unreachable("Unsupported explicit IO variable mode");
   }

   nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);

   if (value->bit_size == 1) {
      /* For shared, we can go ahead and use NIR's and/or the back-end's
       * standard encoding for booleans rather than forcing a 0/1 boolean.
       * This should save an instruction or two.
       *
       * TODO: Make the native bool bit_size an option.
       */
      if (mode == nir_var_mem_shared ||
          mode == nir_var_shader_temp ||
          mode == nir_var_function_temp)
         value = nir_b2b32(b, value);
      else
         value = nir_b2i(b, value, 32);
   }

   store->src[0] = nir_src_for_ssa(value);
   if (addr_format_is_global(addr_format)) {
      store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
   } else if (addr_format_is_offset(addr_format)) {
      assert(addr->num_components == 1);
      store->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   } else {
      store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
      store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   }

   nir_intrinsic_set_write_mask(store, write_mask);

   if (nir_intrinsic_has_access(store))
      nir_intrinsic_set_access(store, nir_intrinsic_access(intrin));

   /* TODO: We should try and provide a better alignment.  For OpenCL, we need
    * to plumb the alignment through from SPIR-V when we have one.
    */
   nir_intrinsic_set_align(store, value->bit_size / 8, 0);

   assert(value->num_components == 1 ||
          value->num_components == intrin->num_components);
   store->num_components = value->num_components;

   assert(value->bit_size % 8 == 0);

   if (addr_format_needs_bounds_check(addr_format)) {
      const unsigned store_size = (value->bit_size / 8) * store->num_components;
      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));

      nir_builder_instr_insert(b, &store->instr);

      nir_pop_if(b, NULL);
   } else {
      nir_builder_instr_insert(b, &store->instr);
   }
}
static nir_ssa_def *
build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
                         nir_ssa_def *addr, nir_address_format addr_format)
{
   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
   const unsigned num_data_srcs =
      nir_intrinsic_infos[intrin->intrinsic].num_srcs - 1;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_mem_ssbo:
      if (addr_format_is_global(addr_format))
         op = global_atomic_for_deref(intrin->intrinsic);
      else
         op = ssbo_atomic_for_deref(intrin->intrinsic);
      break;
   case nir_var_mem_global:
      assert(addr_format_is_global(addr_format));
      op = global_atomic_for_deref(intrin->intrinsic);
      break;
   case nir_var_mem_shared:
      assert(addr_format_is_offset(addr_format));
      op = shared_atomic_for_deref(intrin->intrinsic);
      break;
   default:
      unreachable("Unsupported explicit IO variable mode");
   }

   nir_intrinsic_instr *atomic = nir_intrinsic_instr_create(b->shader, op);

   unsigned src = 0;
   if (addr_format_is_global(addr_format)) {
      atomic->src[src++] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
   } else if (addr_format_is_offset(addr_format)) {
      assert(addr->num_components == 1);
      atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   } else {
      atomic->src[src++] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
      atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   }
   for (unsigned i = 0; i < num_data_srcs; i++) {
      atomic->src[src++] = nir_src_for_ssa(intrin->src[1 + i].ssa);
   }

   /* Global atomics don't have access flags because they assume that the
    * address may be non-uniform.
    */
   if (nir_intrinsic_has_access(atomic))
      nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));

   assert(intrin->dest.ssa.num_components == 1);
   nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                     1, intrin->dest.ssa.bit_size, intrin->dest.ssa.name);

   assert(atomic->dest.ssa.bit_size % 8 == 0);

   if (addr_format_needs_bounds_check(addr_format)) {
      const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));

      nir_builder_instr_insert(b, &atomic->instr);

      nir_pop_if(b, NULL);
      return nir_if_phi(b, &atomic->dest.ssa,
                           nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
   } else {
      nir_builder_instr_insert(b, &atomic->instr);
      return &atomic->dest.ssa;
   }
}
nir_ssa_def *
nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
                                   nir_ssa_def *base_addr,
                                   nir_address_format addr_format)
{
   assert(deref->dest.is_ssa);
   switch (deref->deref_type) {
   case nir_deref_type_var:
      assert(deref->mode & (nir_var_shader_in | nir_var_mem_shared |
                            nir_var_shader_temp | nir_var_function_temp));
      if (addr_format_is_global(addr_format)) {
         assert(deref->mode & (nir_var_shader_temp | nir_var_function_temp));
         base_addr =
            nir_load_scratch_base_ptr(b, !(deref->mode & nir_var_shader_temp),
                                      nir_address_format_num_components(addr_format),
                                      nir_address_format_bit_size(addr_format));
         return build_addr_iadd_imm(b, base_addr, addr_format,
                                       deref->var->data.driver_location);
      } else {
         assert(deref->var->data.driver_location <= UINT32_MAX);
         return nir_imm_intN_t(b, deref->var->data.driver_location,
                               deref->dest.ssa.bit_size);
      }

   case nir_deref_type_array: {
      nir_deref_instr *parent = nir_deref_instr_parent(deref);

      unsigned stride = glsl_get_explicit_stride(parent->type);
      if ((glsl_type_is_matrix(parent->type) &&
           glsl_matrix_type_is_row_major(parent->type)) ||
          (glsl_type_is_vector(parent->type) && stride == 0))
         stride = type_scalar_size_bytes(parent->type);

      assert(stride > 0);

      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
      index = nir_i2i(b, index, addr_get_offset_bit_size(base_addr, addr_format));
      return build_addr_iadd(b, base_addr, addr_format,
                                nir_amul_imm(b, index, stride));
   }

   case nir_deref_type_ptr_as_array: {
      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
      index = nir_i2i(b, index, addr_get_offset_bit_size(base_addr, addr_format));
      unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
      return build_addr_iadd(b, base_addr, addr_format,
                                nir_amul_imm(b, index, stride));
   }

   case nir_deref_type_array_wildcard:
      unreachable("Wildcards should be lowered by now");
      break;

   case nir_deref_type_struct: {
      nir_deref_instr *parent = nir_deref_instr_parent(deref);
      int offset = glsl_get_struct_field_offset(parent->type,
                                                deref->strct.index);
      assert(offset >= 0);
      return build_addr_iadd_imm(b, base_addr, addr_format, offset);
   }

   case nir_deref_type_cast:
      /* Nothing to do here */
      return base_addr;
   }

   unreachable("Invalid NIR deref type");
}
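
/*
 * Illustrative example (not from the original source): for an SSBO deref
 * chain ssbo->member[i] with a struct field offset of 16 bytes and an
 * explicit array stride of 4, walking the chain through this function
 * produces roughly
 *
 *    addr = base;
 *    addr = build_addr_iadd_imm(b, addr, fmt, 16);           (struct deref)
 *    addr = build_addr_iadd(b, addr, fmt, nir_amul_imm(b, i, 4));  (array)
 *
 * in whatever representation addr_format dictates.
 */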
void
nir_lower_explicit_io_instr(nir_builder *b,
                            nir_intrinsic_instr *intrin,
                            nir_ssa_def *addr,
                            nir_address_format addr_format)
{
   b->cursor = nir_after_instr(&intrin->instr);

   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   unsigned vec_stride = glsl_get_explicit_stride(deref->type);
   unsigned scalar_size = type_scalar_size_bytes(deref->type);
   assert(vec_stride == 0 || glsl_type_is_vector(deref->type));
   assert(vec_stride == 0 || vec_stride >= scalar_size);

   if (intrin->intrinsic == nir_intrinsic_load_deref) {
      nir_ssa_def *value;
      if (vec_stride > scalar_size) {
         nir_ssa_def *comps[4] = { NULL, };
         for (unsigned i = 0; i < intrin->num_components; i++) {
            nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
                                                         vec_stride * i);
            comps[i] = build_explicit_io_load(b, intrin, comp_addr,
                                              addr_format, 1);
         }
         value = nir_vec(b, comps, intrin->num_components);
      } else {
         value = build_explicit_io_load(b, intrin, addr, addr_format,
                                        intrin->num_components);
      }
      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
   } else if (intrin->intrinsic == nir_intrinsic_store_deref) {
      assert(intrin->src[1].is_ssa);
      nir_ssa_def *value = intrin->src[1].ssa;
      nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
      if (vec_stride > scalar_size) {
         for (unsigned i = 0; i < intrin->num_components; i++) {
            if (!(write_mask & (1 << i)))
               continue;

            nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
                                                         vec_stride * i);
            build_explicit_io_store(b, intrin, comp_addr, addr_format,
                                    nir_channel(b, value, i), 1);
         }
      } else {
         build_explicit_io_store(b, intrin, addr, addr_format,
                                 value, write_mask);
      }
   } else {
      nir_ssa_def *value =
         build_explicit_io_atomic(b, intrin, addr, addr_format);
      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
   }

   nir_instr_remove(&intrin->instr);
}
static void
lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
                        nir_address_format addr_format)
{
   /* Just delete the deref if it's not used.  We can't use
    * nir_deref_instr_remove_if_unused here because it may remove more than
    * one deref which could break our list walking since we walk the list
    * backwards.
    */
   assert(list_is_empty(&deref->dest.ssa.if_uses));
   if (list_is_empty(&deref->dest.ssa.uses)) {
      nir_instr_remove(&deref->instr);
      return;
   }

   b->cursor = nir_after_instr(&deref->instr);

   nir_ssa_def *base_addr = NULL;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->parent.is_ssa);
      base_addr = deref->parent.ssa;
   }

   nir_ssa_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
                                                          addr_format);

   nir_instr_remove(&deref->instr);
   nir_ssa_def_rewrite_uses(&deref->dest.ssa, nir_src_for_ssa(addr));
}
static void
lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
                         nir_address_format addr_format)
{
   assert(intrin->src[0].is_ssa);
   nir_lower_explicit_io_instr(b, intrin, intrin->src[0].ssa, addr_format);
}
static void
lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
                               nir_address_format addr_format)
{
   b->cursor = nir_after_instr(&intrin->instr);

   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);

   assert(glsl_type_is_array(deref->type));
   assert(glsl_get_length(deref->type) == 0);
   unsigned stride = glsl_get_explicit_stride(deref->type);
   assert(stride > 0);

   nir_ssa_def *addr = &deref->dest.ssa;
   nir_ssa_def *index = addr_to_index(b, addr, addr_format);
   nir_ssa_def *offset = addr_to_offset(b, addr, addr_format);

   nir_intrinsic_instr *bsize =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_get_buffer_size);
   bsize->src[0] = nir_src_for_ssa(index);
   nir_ssa_dest_init(&bsize->instr, &bsize->dest, 1, 32, NULL);
   nir_builder_instr_insert(b, &bsize->instr);

   nir_ssa_def *arr_size =
      nir_idiv(b, nir_isub(b, &bsize->dest.ssa, offset),
                  nir_imm_int(b, stride));

   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(arr_size));
   nir_instr_remove(&intrin->instr);
}
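
/*
 * Worked example (illustrative): for a runtime array of vec4s (stride 16)
 * starting at byte offset 32 in a 512-byte buffer, the code above computes
 *
 *    arr_size = (512 - 32) / 16 = 30
 *
 * matching the unsized-array length rules in GLSL and SPIR-V.
 */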
static bool
nir_lower_explicit_io_impl(nir_function_impl *impl, nir_variable_mode modes,
                           nir_address_format addr_format)
{
   bool progress = false;

   nir_builder b;
   nir_builder_init(&b, impl);

   /* Walk in reverse order so that we can see the full deref chain when we
    * lower the access operations.  We lower them assuming that the derefs
    * will be turned into address calculations later.
    */
   nir_foreach_block_reverse(block, impl) {
      nir_foreach_instr_reverse_safe(instr, block) {
         switch (instr->type) {
         case nir_instr_type_deref: {
            nir_deref_instr *deref = nir_instr_as_deref(instr);
            if (deref->mode & modes) {
               lower_explicit_io_deref(&b, deref, addr_format);
               progress = true;
            }
            break;
         }

         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            switch (intrin->intrinsic) {
            case nir_intrinsic_load_deref:
            case nir_intrinsic_store_deref:
            case nir_intrinsic_deref_atomic_add:
            case nir_intrinsic_deref_atomic_imin:
            case nir_intrinsic_deref_atomic_umin:
            case nir_intrinsic_deref_atomic_imax:
            case nir_intrinsic_deref_atomic_umax:
            case nir_intrinsic_deref_atomic_and:
            case nir_intrinsic_deref_atomic_or:
            case nir_intrinsic_deref_atomic_xor:
            case nir_intrinsic_deref_atomic_exchange:
            case nir_intrinsic_deref_atomic_comp_swap:
            case nir_intrinsic_deref_atomic_fadd:
            case nir_intrinsic_deref_atomic_fmin:
            case nir_intrinsic_deref_atomic_fmax:
            case nir_intrinsic_deref_atomic_fcomp_swap: {
               nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
               if (deref->mode & modes) {
                  lower_explicit_io_access(&b, intrin, addr_format);
                  progress = true;
               }
               break;
            }

            case nir_intrinsic_deref_buffer_array_length: {
               nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
               if (deref->mode & modes) {
                  lower_explicit_io_array_length(&b, intrin, addr_format);
                  progress = true;
               }
               break;
            }

            default:
               break;
            }
            break;
         }

         default:
            /* Nothing to do */
            break;
         }
      }
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   return progress;
}
/** Lower explicitly laid out I/O access to byte offset/address intrinsics
 *
 * This pass is intended to be used for any I/O which touches memory external
 * to the shader or which is directly visible to the client.  It requires that
 * all data types in the given modes have explicit stride/offset decorations
 * to tell it exactly how to calculate the offset/address for the given load,
 * store, or atomic operation.  If the offset/stride information does not come
 * from the client explicitly (as with shared variables in GL or Vulkan),
 * nir_lower_vars_to_explicit_types() can be used to add them.
 *
 * Unlike nir_lower_io, this pass is fully capable of handling incomplete
 * pointer chains which may contain cast derefs.  It does so by walking the
 * deref chain backwards and simply replacing each deref, one at a time, with
 * the appropriate address calculation.  The pass takes a nir_address_format
 * parameter which describes how the offset or address is to be represented
 * during calculations.  By ensuring that the address is always in a
 * consistent format, pointers can safely be conjured from thin air by the
 * driver, stored to variables, passed through phis, etc.
 *
 * The one exception to the simple algorithm described above is for handling
 * row-major matrices in which case we may look down one additional level of
 * the deref chain.
 */
bool
nir_lower_explicit_io(nir_shader *shader, nir_variable_mode modes,
                      nir_address_format addr_format)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl &&
          nir_lower_explicit_io_impl(function->impl, modes, addr_format))
         progress = true;
   }

   return progress;
}
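
/*
 * Typical usage (a hypothetical sketch): a Vulkan driver might lower buffer
 * access with
 *
 *    nir_lower_explicit_io(nir, nir_var_mem_ubo | nir_var_mem_ssbo,
 *                          nir_address_format_64bit_bounded_global);
 *
 * choosing the address format to match its descriptor/pointer model.
 */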
static bool
nir_lower_vars_to_explicit_types_impl(nir_function_impl *impl,
                                      nir_variable_mode modes,
                                      glsl_type_size_align_func type_info)
{
   bool progress = false;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_deref)
            continue;

         nir_deref_instr *deref = nir_instr_as_deref(instr);
         if (!(deref->mode & modes))
            continue;

         unsigned size, alignment;
         const struct glsl_type *new_type =
            glsl_get_explicit_type_for_size_align(deref->type, type_info, &size, &alignment);
         if (new_type != deref->type) {
            progress = true;
            deref->type = new_type;
         }
         if (deref->deref_type == nir_deref_type_cast) {
            /* See also glsl_type::get_explicit_type_for_size_align() */
            unsigned new_stride = align(size, alignment);
            if (new_stride != deref->cast.ptr_stride) {
               deref->cast.ptr_stride = new_stride;
               progress = true;
            }
         }
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance |
                                  nir_metadata_live_ssa_defs |
                                  nir_metadata_loop_analysis);
   }

   return progress;
}
static bool
lower_vars_to_explicit(nir_shader *shader,
                       struct exec_list *vars, nir_variable_mode mode,
                       glsl_type_size_align_func type_info)
{
   bool progress = false;
   unsigned offset;
   switch (mode) {
   case nir_var_function_temp:
   case nir_var_shader_temp:
      offset = shader->scratch_size;
      break;
   case nir_var_mem_shared:
      offset = 0;
      break;
   default:
      unreachable("Unsupported mode");
   }
   nir_foreach_variable_in_list(var, vars) {
      if (var->data.mode != mode)
         continue;

      unsigned size, align;
      const struct glsl_type *explicit_type =
         glsl_get_explicit_type_for_size_align(var->type, type_info, &size, &align);

      if (explicit_type != var->type) {
         progress = true;
         var->type = explicit_type;
      }

      var->data.driver_location = ALIGN_POT(offset, align);
      offset = var->data.driver_location + size;
   }

   switch (mode) {
   case nir_var_shader_temp:
   case nir_var_function_temp:
      shader->scratch_size = offset;
      break;
   case nir_var_mem_shared:
      shader->info.cs.shared_size = offset;
      shader->num_shared = offset;
      break;
   default:
      unreachable("Unsupported mode");
   }

   return progress;
}
bool
nir_lower_vars_to_explicit_types(nir_shader *shader,
                                 nir_variable_mode modes,
                                 glsl_type_size_align_func type_info)
{
   /* TODO: Situations which need to be handled to support more modes:
    * - row-major matrices
    * - compact shader inputs/outputs
    */
   ASSERTED nir_variable_mode supported = nir_var_mem_shared |
      nir_var_shader_temp | nir_var_function_temp;
   assert(!(modes & ~supported) && "unsupported");

   bool progress = false;

   if (modes & nir_var_mem_shared)
      progress |= lower_vars_to_explicit(shader, &shader->variables, nir_var_mem_shared, type_info);
   if (modes & nir_var_shader_temp)
      progress |= lower_vars_to_explicit(shader, &shader->variables, nir_var_shader_temp, type_info);

   nir_foreach_function(function, shader) {
      if (function->impl) {
         if (modes & nir_var_function_temp)
            progress |= lower_vars_to_explicit(shader, &function->impl->locals, nir_var_function_temp, type_info);

         progress |= nir_lower_vars_to_explicit_types_impl(function->impl, modes, type_info);
      }
   }

   return progress;
}
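
/*
 * Typical usage (a hypothetical sketch): lay out shared variables before
 * lowering their access, e.g.
 *
 *    nir_lower_vars_to_explicit_types(nir, nir_var_mem_shared,
 *                                     glsl_get_natural_size_align_bytes);
 *    nir_lower_explicit_io(nir, nir_var_mem_shared,
 *                          nir_address_format_32bit_offset);
 */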
/**
 * Return the offset source for a load/store intrinsic.
 */
nir_src *
nir_get_io_offset_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_shared:
   case nir_intrinsic_load_uniform:
   case nir_intrinsic_load_global:
   case nir_intrinsic_load_scratch:
   case nir_intrinsic_load_fs_input_interp_deltas:
      return &instr->src[0];
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_input_vertex:
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_shared:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_scratch:
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_fadd:
   case nir_intrinsic_ssbo_atomic_fmin:
   case nir_intrinsic_ssbo_atomic_fmax:
   case nir_intrinsic_ssbo_atomic_fcomp_swap:
      return &instr->src[1];
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[2];
   default:
      return NULL;
   }
}
/**
 * Return the vertex index source for a load/store per_vertex intrinsic.
 */
nir_src *
nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
      return &instr->src[0];
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[1];
   default:
      return NULL;
   }
}
/**
 * Return the numeric constant that identifies a NULL pointer for each address
 * format.
 */
const nir_const_value *
nir_address_format_null_value(nir_address_format addr_format)
{
   const static nir_const_value null_values[][NIR_MAX_VEC_COMPONENTS] = {
      [nir_address_format_32bit_global] = {{0}},
      [nir_address_format_64bit_global] = {{0}},
      [nir_address_format_64bit_bounded_global] = {{0}},
      [nir_address_format_32bit_index_offset] = {{.u32 = ~0}, {.u32 = ~0}},
      [nir_address_format_32bit_index_offset_pack64] = {{.u64 = ~0ull}},
      [nir_address_format_vec2_index_32bit_offset] = {{.u32 = ~0}, {.u32 = ~0}, {.u32 = ~0}},
      [nir_address_format_32bit_offset] = {{.u32 = ~0}},
      [nir_address_format_32bit_offset_as_64bit] = {{.u64 = ~0ull}},
      [nir_address_format_logical] = {{.u32 = ~0}},
   };

   assert(addr_format < ARRAY_SIZE(null_values));
   return null_values[addr_format];
}
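
/*
 * Usage sketch (illustrative): a null pointer constant can be materialized
 * from this table, e.g.
 *
 *    const nir_const_value *null = nir_address_format_null_value(fmt);
 *    nir_ssa_def *ptr =
 *       nir_build_imm(b, nir_address_format_num_components(fmt),
 *                     nir_address_format_bit_size(fmt), null);
 *
 * and compared against other pointers with nir_build_addr_ieq() below.
 */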
nir_ssa_def *
nir_build_addr_ieq(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
                   nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
   case nir_address_format_64bit_bounded_global:
   case nir_address_format_32bit_index_offset:
   case nir_address_format_vec2_index_32bit_offset:
   case nir_address_format_32bit_offset:
      return nir_ball_iequal(b, addr0, addr1);

   case nir_address_format_32bit_offset_as_64bit:
      assert(addr0->num_components == 1 && addr1->num_components == 1);
      return nir_ieq(b, nir_u2u32(b, addr0), nir_u2u32(b, addr1));

   case nir_address_format_32bit_index_offset_pack64:
      assert(addr0->num_components == 1 && addr1->num_components == 1);
      return nir_ball_iequal(b, nir_unpack_64_2x32(b, addr0),
                                nir_unpack_64_2x32(b, addr1));

   case nir_address_format_logical:
      unreachable("Unsupported address format");
   }

   unreachable("Invalid address format");
}
nir_ssa_def *
nir_build_addr_isub(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
                    nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
   case nir_address_format_32bit_offset:
   case nir_address_format_32bit_index_offset_pack64:
      assert(addr0->num_components == 1);
      assert(addr1->num_components == 1);
      return nir_isub(b, addr0, addr1);

   case nir_address_format_32bit_offset_as_64bit:
      assert(addr0->num_components == 1);
      assert(addr1->num_components == 1);
      return nir_u2u64(b, nir_isub(b, nir_u2u32(b, addr0), nir_u2u32(b, addr1)));

   case nir_address_format_64bit_bounded_global:
      return nir_isub(b, addr_to_global(b, addr0, addr_format),
                         addr_to_global(b, addr1, addr_format));

   case nir_address_format_32bit_index_offset:
      assert(addr0->num_components == 2);
      assert(addr1->num_components == 2);
      /* Assume the same buffer index. */
      return nir_isub(b, nir_channel(b, addr0, 1), nir_channel(b, addr1, 1));

   case nir_address_format_vec2_index_32bit_offset:
      assert(addr0->num_components == 3);
      assert(addr1->num_components == 3);
      /* Assume the same buffer index. */
      return nir_isub(b, nir_channel(b, addr0, 2), nir_channel(b, addr1, 2));

   case nir_address_format_logical:
      unreachable("Unsupported address format");
   }

   unreachable("Invalid address format");
}
static bool
is_input(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_input ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
          intrin->intrinsic == nir_intrinsic_load_interpolated_input ||
          intrin->intrinsic == nir_intrinsic_load_fs_input_interp_deltas;
}

static bool
is_output(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_output ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
          intrin->intrinsic == nir_intrinsic_store_output ||
          intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
}
/**
 * This pass adds constant offsets to instr->const_index[0] for input/output
 * intrinsics, and resets the offset source to 0.  Non-constant offsets remain
 * unchanged - since we don't know what part of a compound variable is
 * accessed, we allocate storage for the entire thing.  For drivers that use
 * nir_lower_io_to_temporaries() before nir_lower_io(), this guarantees that
 * the offset source will be 0, so that they don't have to add it in manually.
 */
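
/*
 * For example (illustrative, not from the original source), with a constant
 * offset the transformation is
 *
 *    before: load_input (ssa_n), base=16, ...   where ssa_n = imm 8
 *    after:  load_input (ssa_0), base=24, ...   where ssa_0 = imm 0
 */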
static bool
add_const_offset_to_base_block(nir_block *block, nir_builder *b,
                               nir_variable_mode mode)
{
   bool progress = false;
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if ((mode == nir_var_shader_in && is_input(intrin)) ||
          (mode == nir_var_shader_out && is_output(intrin))) {
         nir_src *offset = nir_get_io_offset_src(intrin);

         if (nir_src_is_const(*offset)) {
            intrin->const_index[0] += nir_src_as_uint(*offset);
            b->cursor = nir_before_instr(&intrin->instr);
            nir_instr_rewrite_src(&intrin->instr, offset,
                                  nir_src_for_ssa(nir_imm_int(b, 0)));
            progress = true;
         }
      }
   }

   return progress;
}

bool
nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
{
   bool progress = false;

   nir_foreach_function(f, nir) {
      if (f->impl) {
         nir_builder b;
         nir_builder_init(&b, f->impl);
         nir_foreach_block(block, f->impl) {
            progress |= add_const_offset_to_base_block(block, &b, mode);
         }
      }
   }

   return progress;
}