/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
/*
 * This lowering pass converts load/store references to input/output
 * variables into actual input/output intrinsics.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_deref.h"

#include "util/u_math.h"
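/*
 * A minimal usage sketch (hypothetical driver code, not part of this file):
 * drivers typically run nir_lower_io_to_temporaries() first and then invoke
 * this pass with their own slot-counting callback, e.g.
 *
 *    nir_lower_io(shader, nir_var_shader_in | nir_var_shader_out,
 *                 driver_type_size_vec4, (nir_lower_io_options)0);
 *
 * where driver_type_size_vec4 is an assumed driver-provided function that
 * returns how many slots a glsl_type occupies in the driver's I/O layout.
 */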
struct lower_io_state {
   void *dead_ctx;
   nir_builder builder;
   int (*type_size)(const struct glsl_type *type, bool);
   nir_variable_mode modes;
   nir_lower_io_options options;
};
static nir_intrinsic_op
ssbo_atomic_for_deref(nir_intrinsic_op deref_op)
{
   switch (deref_op) {
#define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_ssbo_##O;
   OP(atomic_exchange)
   OP(atomic_comp_swap)
   OP(atomic_add)
   OP(atomic_imin)
   OP(atomic_umin)
   OP(atomic_imax)
   OP(atomic_umax)
   OP(atomic_and)
   OP(atomic_or)
   OP(atomic_xor)
   OP(atomic_fadd)
   OP(atomic_fmin)
   OP(atomic_fmax)
   OP(atomic_fcomp_swap)
#undef OP
   default:
      unreachable("Invalid SSBO atomic");
   }
}
static nir_intrinsic_op
global_atomic_for_deref(nir_intrinsic_op deref_op)
{
   switch (deref_op) {
#define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_global_##O;
   OP(atomic_exchange)
   OP(atomic_comp_swap)
   OP(atomic_add)
   OP(atomic_imin)
   OP(atomic_umin)
   OP(atomic_imax)
   OP(atomic_umax)
   OP(atomic_and)
   OP(atomic_or)
   OP(atomic_xor)
   OP(atomic_fadd)
   OP(atomic_fmin)
   OP(atomic_fmax)
   OP(atomic_fcomp_swap)
#undef OP
   default:
      unreachable("Invalid global atomic");
   }
}
static nir_intrinsic_op
shared_atomic_for_deref(nir_intrinsic_op deref_op)
{
   switch (deref_op) {
#define OP(O) case nir_intrinsic_deref_##O: return nir_intrinsic_shared_##O;
   OP(atomic_exchange)
   OP(atomic_comp_swap)
   OP(atomic_add)
   OP(atomic_imin)
   OP(atomic_umin)
   OP(atomic_imax)
   OP(atomic_umax)
   OP(atomic_and)
   OP(atomic_or)
   OP(atomic_xor)
   OP(atomic_fadd)
   OP(atomic_fmin)
   OP(atomic_fmax)
   OP(atomic_fcomp_swap)
#undef OP
   default:
      unreachable("Invalid shared atomic");
   }
}
void
nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                         int (*type_size)(const struct glsl_type *, bool))
{
   unsigned location = 0;

   nir_foreach_variable(var, var_list) {
      /*
       * UBOs have their own address spaces, so don't count them towards the
       * number of global uniforms
       */
      if (var->data.mode == nir_var_mem_ubo || var->data.mode == nir_var_mem_ssbo)
         continue;

      var->data.driver_location = location;
      bool bindless_type_size = var->data.mode == nir_var_shader_in ||
                                var->data.mode == nir_var_shader_out ||
                                var->data.bindless;
      location += type_size(var->type, bindless_type_size);
   }

   *size = location;
}
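/*
 * A sketch of the type_size callback expected above (illustrative only; the
 * callback name is hypothetical): a vec4-slot-based layout could simply
 * count attribute slots.
 *
 *    static int
 *    type_size_vec4_slots(const struct glsl_type *type, bool bindless)
 *    {
 *       return glsl_count_attribute_slots(type, false);
 *    }
 */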
/**
 * Return true if the given variable is a per-vertex input/output array
 * (such as geometry shader inputs).
 */
bool
nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage)
{
   if (var->data.patch || !glsl_type_is_array(var->type))
      return false;

   if (var->data.mode == nir_var_shader_in)
      return stage == MESA_SHADER_GEOMETRY ||
             stage == MESA_SHADER_TESS_CTRL ||
             stage == MESA_SHADER_TESS_EVAL;

   if (var->data.mode == nir_var_shader_out)
      return stage == MESA_SHADER_TESS_CTRL;

   return false;
}
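/*
 * For example, a geometry shader input declared as "in vec4 color[3];" is
 * per-vertex: the outermost array index selects the vertex, which is why
 * get_io_offset() below can split it out as a separate vertex_index.
 */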
static nir_ssa_def *
get_io_offset(nir_builder *b, nir_deref_instr *deref,
              nir_ssa_def **vertex_index,
              int (*type_size)(const struct glsl_type *, bool),
              unsigned *component, bool bts)
{
   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);

   assert(path.path[0]->deref_type == nir_deref_type_var);
   nir_deref_instr **p = &path.path[1];

   /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
    * outermost array index separate.  Process the rest normally.
    */
   if (vertex_index != NULL) {
      assert((*p)->deref_type == nir_deref_type_array);
      *vertex_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
      p++;
   }

   if (path.path[0]->var->data.compact) {
      assert((*p)->deref_type == nir_deref_type_array);
      assert(glsl_type_is_scalar((*p)->type));

      /* We always lower indirect dereferences for "compact" array vars. */
      const unsigned index = nir_src_as_uint((*p)->arr.index);
      const unsigned total_offset = *component + index;
      const unsigned slot_offset = total_offset / 4;
      *component = total_offset % 4;
      return nir_imm_int(b, type_size(glsl_vec4_type(), bts) * slot_offset);
   }

   /* Just emit code and let constant-folding go to town */
   nir_ssa_def *offset = nir_imm_int(b, 0);

   for (; *p; p++) {
      if ((*p)->deref_type == nir_deref_type_array) {
         unsigned size = type_size((*p)->type, bts);

         nir_ssa_def *mul =
            nir_amul_imm(b, nir_ssa_for_src(b, (*p)->arr.index, 1), size);

         offset = nir_iadd(b, offset, mul);
      } else if ((*p)->deref_type == nir_deref_type_struct) {
         /* p starts at path[1], so this is safe */
         nir_deref_instr *parent = *(p - 1);

         unsigned field_offset = 0;
         for (unsigned i = 0; i < (*p)->strct.index; i++) {
            field_offset += type_size(glsl_get_struct_field(parent->type, i), bts);
         }
         offset = nir_iadd_imm(b, offset, field_offset);
      } else {
         unreachable("Unsupported deref type");
      }
   }

   nir_deref_path_finish(&path);

   return offset;
}
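/*
 * Worked example (illustrative): for a deref chain v[i].f, where v is an
 * array of structs, the loop above builds roughly
 *
 *    offset = 0
 *    offset += i * type_size(element_type, bts)           (array step)
 *    offset += sum of type_size(field_j, bts) for j < f   (struct step)
 *
 * and when i is an immediate, constant folding collapses the whole chain
 * down to a single nir_imm_int.
 */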
static nir_ssa_def *
emit_load(struct lower_io_state *state,
          nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
          unsigned component, unsigned num_components, unsigned bit_size,
          nir_alu_type type)
{
   nir_builder *b = &state->builder;
   const nir_shader *nir = b->shader;
   nir_variable_mode mode = var->data.mode;
   nir_ssa_def *barycentric = NULL;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_shader_in:
      if (nir->info.stage == MESA_SHADER_FRAGMENT &&
          nir->options->use_interpolated_input_intrinsics &&
          var->data.interpolation != INTERP_MODE_FLAT) {
         if (var->data.interpolation == INTERP_MODE_EXPLICIT) {
            assert(vertex_index != NULL);
            op = nir_intrinsic_load_input_vertex;
         } else {
            assert(vertex_index == NULL);

            nir_intrinsic_op bary_op;
            if (var->data.sample ||
                (state->options & nir_lower_io_force_sample_interpolation))
               bary_op = nir_intrinsic_load_barycentric_sample;
            else if (var->data.centroid)
               bary_op = nir_intrinsic_load_barycentric_centroid;
            else
               bary_op = nir_intrinsic_load_barycentric_pixel;

            barycentric = nir_load_barycentric(&state->builder, bary_op,
                                               var->data.interpolation);
            op = nir_intrinsic_load_interpolated_input;
         }
      } else {
         op = vertex_index ? nir_intrinsic_load_per_vertex_input :
                             nir_intrinsic_load_input;
      }
      break;
   case nir_var_shader_out:
      op = vertex_index ? nir_intrinsic_load_per_vertex_output :
                          nir_intrinsic_load_output;
      break;
   case nir_var_uniform:
      op = nir_intrinsic_load_uniform;
      break;
   case nir_var_mem_shared:
      op = nir_intrinsic_load_shared;
      break;
   default:
      unreachable("Unknown variable mode");
   }

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(state->builder.shader, op);
   load->num_components = num_components;

   nir_intrinsic_set_base(load, var->data.driver_location);
   if (mode == nir_var_shader_in || mode == nir_var_shader_out)
      nir_intrinsic_set_component(load, component);

   if (load->intrinsic == nir_intrinsic_load_uniform)
      nir_intrinsic_set_range(load,
                              state->type_size(var->type, var->data.bindless));

   if (load->intrinsic == nir_intrinsic_load_input ||
       load->intrinsic == nir_intrinsic_load_input_vertex ||
       load->intrinsic == nir_intrinsic_load_uniform)
      nir_intrinsic_set_type(load, type);

   if (vertex_index) {
      load->src[0] = nir_src_for_ssa(vertex_index);
      load->src[1] = nir_src_for_ssa(offset);
   } else if (barycentric) {
      load->src[0] = nir_src_for_ssa(barycentric);
      load->src[1] = nir_src_for_ssa(offset);
   } else {
      load->src[0] = nir_src_for_ssa(offset);
   }

   nir_ssa_dest_init(&load->instr, &load->dest,
                     num_components, bit_size, NULL);
   nir_builder_instr_insert(b, &load->instr);

   return &load->dest.ssa;
}
static nir_ssa_def *
lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
           nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
           unsigned component, const struct glsl_type *type)
{
   assert(intrin->dest.is_ssa);
   if (intrin->dest.ssa.bit_size == 64 &&
       (state->options & nir_lower_io_lower_64bit_to_32)) {
      nir_builder *b = &state->builder;

      const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);

      nir_ssa_def *comp64[4];
      assert(component == 0 || component == 2);
      unsigned dest_comp = 0;
      while (dest_comp < intrin->dest.ssa.num_components) {
         const unsigned num_comps =
            MIN2(intrin->dest.ssa.num_components - dest_comp,
                 (4 - component) / 2);

         nir_ssa_def *data32 =
            emit_load(state, vertex_index, var, offset, component,
                      num_comps * 2, 32, nir_type_uint32);
         for (unsigned i = 0; i < num_comps; i++) {
            comp64[dest_comp + i] =
               nir_pack_64_2x32(b, nir_channels(b, data32, 3 << (i * 2)));
         }

         /* Only the first load has a component offset */
         component = 0;
         dest_comp += num_comps;
         offset = nir_iadd_imm(b, offset, slot_size);
      }

      return nir_vec(b, comp64, intrin->dest.ssa.num_components);
   } else if (intrin->dest.ssa.bit_size == 1) {
      /* Booleans are 32-bit */
      assert(glsl_type_is_boolean(type));
      return nir_b2b1(&state->builder,
                      emit_load(state, vertex_index, var, offset, component,
                                intrin->dest.ssa.num_components, 32,
                                nir_type_bool32));
   } else {
      return emit_load(state, vertex_index, var, offset, component,
                       intrin->dest.ssa.num_components,
                       intrin->dest.ssa.bit_size,
                       nir_get_nir_type_for_glsl_type(type));
   }
}
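/*
 * Example of the 64-bit splitting above (illustrative): a dvec3 load at
 * component 0 becomes a 4-component 32-bit load covering the first two
 * doubles plus a 2-component 32-bit load at offset + slot_size covering the
 * third, and each pair of 32-bit channels is repacked with nir_pack_64_2x32.
 */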
static void
emit_store(struct lower_io_state *state, nir_ssa_def *data,
           nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
           unsigned component, unsigned num_components,
           nir_component_mask_t write_mask, nir_alu_type type)
{
   nir_builder *b = &state->builder;
   nir_variable_mode mode = var->data.mode;

   nir_intrinsic_op op;
   if (mode == nir_var_mem_shared) {
      op = nir_intrinsic_store_shared;
   } else {
      assert(mode == nir_var_shader_out);
      op = vertex_index ? nir_intrinsic_store_per_vertex_output :
                          nir_intrinsic_store_output;
   }

   nir_intrinsic_instr *store =
      nir_intrinsic_instr_create(state->builder.shader, op);
   store->num_components = num_components;

   store->src[0] = nir_src_for_ssa(data);

   nir_intrinsic_set_base(store, var->data.driver_location);

   if (mode == nir_var_shader_out)
      nir_intrinsic_set_component(store, component);

   if (store->intrinsic == nir_intrinsic_store_output)
      nir_intrinsic_set_type(store, type);

   nir_intrinsic_set_write_mask(store, write_mask);

   if (vertex_index)
      store->src[1] = nir_src_for_ssa(vertex_index);

   store->src[vertex_index ? 2 : 1] = nir_src_for_ssa(offset);

   nir_builder_instr_insert(b, &store->instr);
}
static void
lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
            nir_ssa_def *vertex_index, nir_variable *var, nir_ssa_def *offset,
            unsigned component, const struct glsl_type *type)
{
   assert(intrin->src[1].is_ssa);
   if (intrin->src[1].ssa->bit_size == 64 &&
       (state->options & nir_lower_io_lower_64bit_to_32)) {
      nir_builder *b = &state->builder;

      const unsigned slot_size = state->type_size(glsl_dvec_type(2), false);

      assert(component == 0 || component == 2);
      unsigned src_comp = 0;
      nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
      while (src_comp < intrin->num_components) {
         const unsigned num_comps =
            MIN2(intrin->num_components - src_comp,
                 (4 - component) / 2);

         if (write_mask & BITFIELD_MASK(num_comps)) {
            nir_ssa_def *data =
               nir_channels(b, intrin->src[1].ssa,
                            BITFIELD_RANGE(src_comp, num_comps));
            nir_ssa_def *data32 = nir_bitcast_vector(b, data, 32);

            nir_component_mask_t write_mask32 = 0;
            for (unsigned i = 0; i < num_comps; i++) {
               if (write_mask & BITFIELD_MASK(num_comps) & (1 << i))
                  write_mask32 |= 3 << (i * 2);
            }

            emit_store(state, data32, vertex_index, var, offset,
                       component, data32->num_components, write_mask32,
                       nir_type_uint32);
         }

         /* Only the first store has a component offset */
         component = 0;
         src_comp += num_comps;
         write_mask >>= num_comps;
         offset = nir_iadd_imm(b, offset, slot_size);
      }
   } else if (intrin->src[1].ssa->bit_size == 1) {
      /* Booleans are 32-bit */
      assert(glsl_type_is_boolean(type));
      nir_ssa_def *b32_val = nir_b2b32(&state->builder, intrin->src[1].ssa);
      emit_store(state, b32_val, vertex_index, var, offset,
                 component, intrin->num_components,
                 nir_intrinsic_write_mask(intrin),
                 nir_type_bool32);
   } else {
      emit_store(state, intrin->src[1].ssa, vertex_index, var, offset,
                 component, intrin->num_components,
                 nir_intrinsic_write_mask(intrin),
                 nir_get_nir_type_for_glsl_type(type));
   }
}
static nir_ssa_def *
lower_atomic(nir_intrinsic_instr *intrin, struct lower_io_state *state,
             nir_variable *var, nir_ssa_def *offset)
{
   nir_builder *b = &state->builder;
   assert(var->data.mode == nir_var_mem_shared);

   nir_intrinsic_op op = shared_atomic_for_deref(intrin->intrinsic);

   nir_intrinsic_instr *atomic =
      nir_intrinsic_instr_create(state->builder.shader, op);

   nir_intrinsic_set_base(atomic, var->data.driver_location);

   atomic->src[0] = nir_src_for_ssa(offset);
   assert(nir_intrinsic_infos[intrin->intrinsic].num_srcs ==
          nir_intrinsic_infos[op].num_srcs);
   for (unsigned i = 1; i < nir_intrinsic_infos[op].num_srcs; i++) {
      nir_src_copy(&atomic->src[i], &intrin->src[i], atomic);
   }

   if (nir_intrinsic_infos[op].has_dest) {
      assert(intrin->dest.is_ssa);
      assert(nir_intrinsic_infos[intrin->intrinsic].has_dest);
      nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                        intrin->dest.ssa.num_components,
                        intrin->dest.ssa.bit_size, NULL);
   }

   nir_builder_instr_insert(b, &atomic->instr);

   return nir_intrinsic_infos[op].has_dest ? &atomic->dest.ssa : NULL;
}
static nir_ssa_def *
lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
                     nir_variable *var, nir_ssa_def *offset, unsigned component,
                     const struct glsl_type *type)
{
   nir_builder *b = &state->builder;
   assert(var->data.mode == nir_var_shader_in);

   /* Ignore interpolateAt() for flat variables - flat is flat.  Lower
    * interpolateAtVertex() for explicit variables.
    */
   if (var->data.interpolation == INTERP_MODE_FLAT ||
       var->data.interpolation == INTERP_MODE_EXPLICIT) {
      nir_ssa_def *vertex_index = NULL;

      if (var->data.interpolation == INTERP_MODE_EXPLICIT) {
         assert(intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex);
         vertex_index = intrin->src[1].ssa;
      }

      return lower_load(intrin, state, vertex_index, var, offset, component, type);
   }

   /* None of the supported APIs allow interpolation on 64-bit things */
   assert(intrin->dest.is_ssa && intrin->dest.ssa.bit_size <= 32);

   nir_intrinsic_op bary_op;
   switch (intrin->intrinsic) {
   case nir_intrinsic_interp_deref_at_centroid:
      bary_op = (state->options & nir_lower_io_force_sample_interpolation) ?
                nir_intrinsic_load_barycentric_sample :
                nir_intrinsic_load_barycentric_centroid;
      break;
   case nir_intrinsic_interp_deref_at_sample:
      bary_op = nir_intrinsic_load_barycentric_at_sample;
      break;
   case nir_intrinsic_interp_deref_at_offset:
      bary_op = nir_intrinsic_load_barycentric_at_offset;
      break;
   default:
      unreachable("Bogus interpolateAt() intrinsic.");
   }

   nir_intrinsic_instr *bary_setup =
      nir_intrinsic_instr_create(state->builder.shader, bary_op);

   nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32, NULL);
   nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);

   if (intrin->intrinsic == nir_intrinsic_interp_deref_at_sample ||
       intrin->intrinsic == nir_intrinsic_interp_deref_at_offset ||
       intrin->intrinsic == nir_intrinsic_interp_deref_at_vertex)
      nir_src_copy(&bary_setup->src[0], &intrin->src[1], bary_setup);

   nir_builder_instr_insert(b, &bary_setup->instr);

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(state->builder.shader,
                                 nir_intrinsic_load_interpolated_input);
   load->num_components = intrin->num_components;

   nir_intrinsic_set_base(load, var->data.driver_location);
   nir_intrinsic_set_component(load, component);

   load->src[0] = nir_src_for_ssa(&bary_setup->dest.ssa);
   load->src[1] = nir_src_for_ssa(offset);

   assert(intrin->dest.is_ssa);
   nir_ssa_dest_init(&load->instr, &load->dest,
                     intrin->dest.ssa.num_components,
                     intrin->dest.ssa.bit_size, NULL);
   nir_builder_instr_insert(b, &load->instr);

   return &load->dest.ssa;
}
static bool
nir_lower_io_block(nir_block *block,
                   struct lower_io_state *state)
{
   nir_builder *b = &state->builder;
   const nir_shader_compiler_options *options = b->shader->options;
   bool progress = false;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_deref:
      case nir_intrinsic_store_deref:
      case nir_intrinsic_deref_atomic_add:
      case nir_intrinsic_deref_atomic_imin:
      case nir_intrinsic_deref_atomic_umin:
      case nir_intrinsic_deref_atomic_imax:
      case nir_intrinsic_deref_atomic_umax:
      case nir_intrinsic_deref_atomic_and:
      case nir_intrinsic_deref_atomic_or:
      case nir_intrinsic_deref_atomic_xor:
      case nir_intrinsic_deref_atomic_exchange:
      case nir_intrinsic_deref_atomic_comp_swap:
      case nir_intrinsic_deref_atomic_fadd:
      case nir_intrinsic_deref_atomic_fmin:
      case nir_intrinsic_deref_atomic_fmax:
      case nir_intrinsic_deref_atomic_fcomp_swap:
         /* We can lower the io for this nir intrinsic */
         break;
      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
      case nir_intrinsic_interp_deref_at_vertex:
         /* We can optionally lower these to load_interpolated_input */
         if (options->use_interpolated_input_intrinsics)
            break;
      default:
         /* We can't lower the io for this nir intrinsic, so skip it */
         continue;
      }

      nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);

      nir_variable_mode mode = deref->mode;
      assert(util_is_power_of_two_nonzero(mode));
      if ((state->modes & mode) == 0)
         continue;

      nir_variable *var = nir_deref_instr_get_variable(deref);

      b->cursor = nir_before_instr(instr);

      const bool per_vertex = nir_is_per_vertex_io(var, b->shader->info.stage);

      nir_ssa_def *offset;
      nir_ssa_def *vertex_index = NULL;
      unsigned component_offset = var->data.location_frac;
      bool bindless_type_size = mode == nir_var_shader_in ||
                                mode == nir_var_shader_out ||
                                var->data.bindless;

      offset = get_io_offset(b, deref, per_vertex ? &vertex_index : NULL,
                             state->type_size, &component_offset,
                             bindless_type_size);

      nir_ssa_def *replacement = NULL;

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_deref:
         replacement = lower_load(intrin, state, vertex_index, var, offset,
                                  component_offset, deref->type);
         break;

      case nir_intrinsic_store_deref:
         lower_store(intrin, state, vertex_index, var, offset,
                     component_offset, deref->type);
         break;

      case nir_intrinsic_deref_atomic_add:
      case nir_intrinsic_deref_atomic_imin:
      case nir_intrinsic_deref_atomic_umin:
      case nir_intrinsic_deref_atomic_imax:
      case nir_intrinsic_deref_atomic_umax:
      case nir_intrinsic_deref_atomic_and:
      case nir_intrinsic_deref_atomic_or:
      case nir_intrinsic_deref_atomic_xor:
      case nir_intrinsic_deref_atomic_exchange:
      case nir_intrinsic_deref_atomic_comp_swap:
      case nir_intrinsic_deref_atomic_fadd:
      case nir_intrinsic_deref_atomic_fmin:
      case nir_intrinsic_deref_atomic_fmax:
      case nir_intrinsic_deref_atomic_fcomp_swap:
         assert(vertex_index == NULL);
         replacement = lower_atomic(intrin, state, var, offset);
         break;

      case nir_intrinsic_interp_deref_at_centroid:
      case nir_intrinsic_interp_deref_at_sample:
      case nir_intrinsic_interp_deref_at_offset:
      case nir_intrinsic_interp_deref_at_vertex:
         assert(vertex_index == NULL);
         replacement = lower_interpolate_at(intrin, state, var, offset,
                                            component_offset, deref->type);
         break;

      default:
         continue;
      }

      if (replacement) {
         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                  nir_src_for_ssa(replacement));
      }
      nir_instr_remove(&intrin->instr);
      progress = true;
   }

   return progress;
}
static bool
nir_lower_io_impl(nir_function_impl *impl,
                  nir_variable_mode modes,
                  int (*type_size)(const struct glsl_type *, bool),
                  nir_lower_io_options options)
{
   struct lower_io_state state;
   bool progress = false;

   nir_builder_init(&state.builder, impl);
   state.dead_ctx = ralloc_context(NULL);
   state.modes = modes;
   state.type_size = type_size;
   state.options = options;

   ASSERTED nir_variable_mode supported_modes =
      nir_var_shader_in | nir_var_shader_out |
      nir_var_mem_shared | nir_var_uniform;
   assert(!(modes & ~supported_modes));

   nir_foreach_block(block, impl) {
      progress |= nir_lower_io_block(block, &state);
   }

   ralloc_free(state.dead_ctx);

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   return progress;
}
bool
nir_lower_io(nir_shader *shader, nir_variable_mode modes,
             int (*type_size)(const struct glsl_type *, bool),
             nir_lower_io_options options)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl) {
         progress |= nir_lower_io_impl(function->impl, modes,
                                       type_size, options);
      }
   }

   return progress;
}
static unsigned
type_scalar_size_bytes(const struct glsl_type *type)
{
   assert(glsl_type_is_vector_or_scalar(type) ||
          glsl_type_is_matrix(type));
   return glsl_type_is_boolean(type) ? 4 : glsl_get_bit_size(type) / 8;
}
static nir_ssa_def *
build_addr_iadd(nir_builder *b, nir_ssa_def *addr,
                nir_address_format addr_format, nir_ssa_def *offset)
{
   assert(offset->num_components == 1);
   assert(addr->bit_size == offset->bit_size);

   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
   case nir_address_format_32bit_offset:
      assert(addr->num_components == 1);
      return nir_iadd(b, addr, offset);

   case nir_address_format_64bit_bounded_global:
      assert(addr->num_components == 4);
      return nir_vec4(b, nir_channel(b, addr, 0),
                         nir_channel(b, addr, 1),
                         nir_channel(b, addr, 2),
                         nir_iadd(b, nir_channel(b, addr, 3), offset));

   case nir_address_format_32bit_index_offset:
      assert(addr->num_components == 2);
      return nir_vec2(b, nir_channel(b, addr, 0),
                         nir_iadd(b, nir_channel(b, addr, 1), offset));

   case nir_address_format_vec2_index_32bit_offset:
      assert(addr->num_components == 3);
      return nir_vec3(b, nir_channel(b, addr, 0), nir_channel(b, addr, 1),
                         nir_iadd(b, nir_channel(b, addr, 2), offset));

   case nir_address_format_logical:
      unreachable("Unsupported address format");
   }

   unreachable("Invalid address format");
}
static nir_ssa_def *
build_addr_iadd_imm(nir_builder *b, nir_ssa_def *addr,
                    nir_address_format addr_format, int64_t offset)
{
   return build_addr_iadd(b, addr, addr_format,
                          nir_imm_intN_t(b, offset, addr->bit_size));
}
static nir_ssa_def *
addr_to_index(nir_builder *b, nir_ssa_def *addr,
              nir_address_format addr_format)
{
   if (addr_format == nir_address_format_32bit_index_offset) {
      assert(addr->num_components == 2);
      return nir_channel(b, addr, 0);
   } else if (addr_format == nir_address_format_vec2_index_32bit_offset) {
      assert(addr->num_components == 3);
      return nir_channels(b, addr, 0x3);
   } else {
      unreachable("bad address format for index");
   }
}
static nir_ssa_def *
addr_to_offset(nir_builder *b, nir_ssa_def *addr,
               nir_address_format addr_format)
{
   if (addr_format == nir_address_format_32bit_index_offset) {
      assert(addr->num_components == 2);
      return nir_channel(b, addr, 1);
   } else if (addr_format == nir_address_format_vec2_index_32bit_offset) {
      assert(addr->num_components == 3);
      return nir_channel(b, addr, 2);
   } else {
      unreachable("bad address format for offset");
   }
}
/** Returns true if the given address format resolves to a global address */
static bool
addr_format_is_global(nir_address_format addr_format)
{
   return addr_format == nir_address_format_32bit_global ||
          addr_format == nir_address_format_64bit_global ||
          addr_format == nir_address_format_64bit_bounded_global;
}
static nir_ssa_def *
addr_to_global(nir_builder *b, nir_ssa_def *addr,
               nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
      assert(addr->num_components == 1);
      return addr;

   case nir_address_format_64bit_bounded_global:
      assert(addr->num_components == 4);
      return nir_iadd(b, nir_pack_64_2x32(b, nir_channels(b, addr, 0x3)),
                         nir_u2u64(b, nir_channel(b, addr, 3)));

   case nir_address_format_32bit_index_offset:
   case nir_address_format_vec2_index_32bit_offset:
   case nir_address_format_32bit_offset:
   case nir_address_format_logical:
      unreachable("Cannot get a 64-bit address with this address format");
   }

   unreachable("Invalid address format");
}
static bool
addr_format_needs_bounds_check(nir_address_format addr_format)
{
   return addr_format == nir_address_format_64bit_bounded_global;
}

static nir_ssa_def *
addr_is_in_bounds(nir_builder *b, nir_ssa_def *addr,
                  nir_address_format addr_format, unsigned size)
{
   assert(addr_format == nir_address_format_64bit_bounded_global);
   assert(addr->num_components == 4);
   return nir_ige(b, nir_channel(b, addr, 2),
                  nir_iadd_imm(b, nir_channel(b, addr, 3), size));
}
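/*
 * In the vec4 (addr.xy, size, offset) layout this computes
 * size >= offset + access_size, i.e. the access is in bounds exactly when
 * it ends at or before the end of the bound buffer range.
 */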
static nir_ssa_def *
build_explicit_io_load(nir_builder *b, nir_intrinsic_instr *intrin,
                       nir_ssa_def *addr, nir_address_format addr_format,
                       unsigned num_components)
{
   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_mem_ubo:
      op = nir_intrinsic_load_ubo;
      break;
   case nir_var_mem_ssbo:
      if (addr_format_is_global(addr_format))
         op = nir_intrinsic_load_global;
      else
         op = nir_intrinsic_load_ssbo;
      break;
   case nir_var_mem_global:
      assert(addr_format_is_global(addr_format));
      op = nir_intrinsic_load_global;
      break;
   case nir_var_shader_in:
      assert(addr_format_is_global(addr_format));
      op = nir_intrinsic_load_kernel_input;
      break;
   case nir_var_mem_shared:
      assert(addr_format == nir_address_format_32bit_offset);
      op = nir_intrinsic_load_shared;
      break;
   default:
      unreachable("Unsupported explicit IO variable mode");
   }

   nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);

   if (addr_format_is_global(addr_format)) {
      load->src[0] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
   } else if (addr_format == nir_address_format_32bit_offset) {
      assert(addr->num_components == 1);
      load->src[0] = nir_src_for_ssa(addr);
   } else {
      load->src[0] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
      load->src[1] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   }

   if (mode != nir_var_shader_in && mode != nir_var_mem_shared)
      nir_intrinsic_set_access(load, nir_intrinsic_access(intrin));

   unsigned bit_size = intrin->dest.ssa.bit_size;
   if (bit_size == 1) {
      /* TODO: Make the native bool bit_size an option. */
      bit_size = 32;
   }

   /* TODO: We should try and provide a better alignment.  For OpenCL, we need
    * to plumb the alignment through from SPIR-V when we have one.
    */
   nir_intrinsic_set_align(load, bit_size / 8, 0);

   assert(intrin->dest.is_ssa);
   load->num_components = num_components;
   nir_ssa_dest_init(&load->instr, &load->dest, num_components,
                     bit_size, intrin->dest.ssa.name);

   assert(bit_size % 8 == 0);

   nir_ssa_def *result;
   if (addr_format_needs_bounds_check(addr_format)) {
      /* The Vulkan spec for robustBufferAccess gives us quite a few options
       * as to what we can do with an OOB read.  Unfortunately, returning
       * undefined values isn't one of them so we return an actual zero.
       */
      nir_ssa_def *zero = nir_imm_zero(b, load->num_components, bit_size);

      const unsigned load_size = (bit_size / 8) * load->num_components;
      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, load_size));

      nir_builder_instr_insert(b, &load->instr);

      nir_pop_if(b, NULL);

      result = nir_if_phi(b, &load->dest.ssa, zero);
   } else {
      nir_builder_instr_insert(b, &load->instr);
      result = &load->dest.ssa;
   }

   if (intrin->dest.ssa.bit_size == 1) {
      /* For shared, we can go ahead and use NIR's and/or the back-end's
       * standard encoding for booleans rather than forcing a 0/1 boolean.
       * This should save an instruction or two.
       */
      if (mode == nir_var_mem_shared)
         result = nir_b2b1(b, result);
      else
         result = nir_i2b(b, result);
   }

   return result;
}
static void
build_explicit_io_store(nir_builder *b, nir_intrinsic_instr *intrin,
                        nir_ssa_def *addr, nir_address_format addr_format,
                        nir_ssa_def *value, nir_component_mask_t write_mask)
{
   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_mem_ssbo:
      if (addr_format_is_global(addr_format))
         op = nir_intrinsic_store_global;
      else
         op = nir_intrinsic_store_ssbo;
      break;
   case nir_var_mem_global:
      assert(addr_format_is_global(addr_format));
      op = nir_intrinsic_store_global;
      break;
   case nir_var_mem_shared:
      assert(addr_format == nir_address_format_32bit_offset);
      op = nir_intrinsic_store_shared;
      break;
   default:
      unreachable("Unsupported explicit IO variable mode");
   }

   nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);

   if (value->bit_size == 1) {
      /* For shared, we can go ahead and use NIR's and/or the back-end's
       * standard encoding for booleans rather than forcing a 0/1 boolean.
       * This should save an instruction or two.
       *
       * TODO: Make the native bool bit_size an option.
       */
      if (mode == nir_var_mem_shared)
         value = nir_b2b32(b, value);
      else
         value = nir_b2i(b, value, 32);
   }

   store->src[0] = nir_src_for_ssa(value);
   if (addr_format_is_global(addr_format)) {
      store->src[1] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
   } else if (addr_format == nir_address_format_32bit_offset) {
      assert(addr->num_components == 1);
      store->src[1] = nir_src_for_ssa(addr);
   } else {
      store->src[1] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
      store->src[2] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   }

   nir_intrinsic_set_write_mask(store, write_mask);

   if (mode != nir_var_mem_shared)
      nir_intrinsic_set_access(store, nir_intrinsic_access(intrin));

   /* TODO: We should try and provide a better alignment.  For OpenCL, we need
    * to plumb the alignment through from SPIR-V when we have one.
    */
   nir_intrinsic_set_align(store, value->bit_size / 8, 0);

   assert(value->num_components == 1 ||
          value->num_components == intrin->num_components);
   store->num_components = value->num_components;

   assert(value->bit_size % 8 == 0);

   if (addr_format_needs_bounds_check(addr_format)) {
      const unsigned store_size = (value->bit_size / 8) * store->num_components;
      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, store_size));

      nir_builder_instr_insert(b, &store->instr);

      nir_pop_if(b, NULL);
   } else {
      nir_builder_instr_insert(b, &store->instr);
   }
}
static nir_ssa_def *
build_explicit_io_atomic(nir_builder *b, nir_intrinsic_instr *intrin,
                         nir_ssa_def *addr, nir_address_format addr_format)
{
   nir_variable_mode mode = nir_src_as_deref(intrin->src[0])->mode;
   const unsigned num_data_srcs =
      nir_intrinsic_infos[intrin->intrinsic].num_srcs - 1;

   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_mem_ssbo:
      if (addr_format_is_global(addr_format))
         op = global_atomic_for_deref(intrin->intrinsic);
      else
         op = ssbo_atomic_for_deref(intrin->intrinsic);
      break;
   case nir_var_mem_global:
      assert(addr_format_is_global(addr_format));
      op = global_atomic_for_deref(intrin->intrinsic);
      break;
   case nir_var_mem_shared:
      assert(addr_format == nir_address_format_32bit_offset);
      op = shared_atomic_for_deref(intrin->intrinsic);
      break;
   default:
      unreachable("Unsupported explicit IO variable mode");
   }

   nir_intrinsic_instr *atomic = nir_intrinsic_instr_create(b->shader, op);

   unsigned src = 0;
   if (addr_format_is_global(addr_format)) {
      atomic->src[src++] = nir_src_for_ssa(addr_to_global(b, addr, addr_format));
   } else if (addr_format == nir_address_format_32bit_offset) {
      assert(addr->num_components == 1);
      atomic->src[src++] = nir_src_for_ssa(addr);
   } else {
      atomic->src[src++] = nir_src_for_ssa(addr_to_index(b, addr, addr_format));
      atomic->src[src++] = nir_src_for_ssa(addr_to_offset(b, addr, addr_format));
   }
   for (unsigned i = 0; i < num_data_srcs; i++) {
      atomic->src[src++] = nir_src_for_ssa(intrin->src[1 + i].ssa);
   }

   /* Global atomics don't have access flags because they assume that the
    * address may be non-uniform.
    */
   if (!addr_format_is_global(addr_format) && mode != nir_var_mem_shared)
      nir_intrinsic_set_access(atomic, nir_intrinsic_access(intrin));

   assert(intrin->dest.ssa.num_components == 1);
   nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                     1, intrin->dest.ssa.bit_size, intrin->dest.ssa.name);

   assert(atomic->dest.ssa.bit_size % 8 == 0);

   if (addr_format_needs_bounds_check(addr_format)) {
      const unsigned atomic_size = atomic->dest.ssa.bit_size / 8;
      nir_push_if(b, addr_is_in_bounds(b, addr, addr_format, atomic_size));

      nir_builder_instr_insert(b, &atomic->instr);

      nir_pop_if(b, NULL);
      return nir_if_phi(b, &atomic->dest.ssa,
                        nir_ssa_undef(b, 1, atomic->dest.ssa.bit_size));
   } else {
      nir_builder_instr_insert(b, &atomic->instr);
      return &atomic->dest.ssa;
   }
}
nir_ssa_def *
nir_explicit_io_address_from_deref(nir_builder *b, nir_deref_instr *deref,
                                   nir_ssa_def *base_addr,
                                   nir_address_format addr_format)
{
   assert(deref->dest.is_ssa);
   switch (deref->deref_type) {
   case nir_deref_type_var:
      assert(deref->mode & (nir_var_shader_in | nir_var_mem_shared));
      return nir_imm_intN_t(b, deref->var->data.driver_location,
                            deref->dest.ssa.bit_size);

   case nir_deref_type_array: {
      nir_deref_instr *parent = nir_deref_instr_parent(deref);

      unsigned stride = glsl_get_explicit_stride(parent->type);
      if ((glsl_type_is_matrix(parent->type) &&
           glsl_matrix_type_is_row_major(parent->type)) ||
          (glsl_type_is_vector(parent->type) && stride == 0))
         stride = type_scalar_size_bytes(parent->type);

      assert(stride > 0);

      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
      index = nir_i2i(b, index, base_addr->bit_size);
      return build_addr_iadd(b, base_addr, addr_format,
                             nir_amul_imm(b, index, stride));
   }

   case nir_deref_type_ptr_as_array: {
      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
      index = nir_i2i(b, index, base_addr->bit_size);
      unsigned stride = nir_deref_instr_ptr_as_array_stride(deref);
      return build_addr_iadd(b, base_addr, addr_format,
                             nir_amul_imm(b, index, stride));
   }

   case nir_deref_type_array_wildcard:
      unreachable("Wildcards should be lowered by now");
      break;

   case nir_deref_type_struct: {
      nir_deref_instr *parent = nir_deref_instr_parent(deref);
      int offset = glsl_get_struct_field_offset(parent->type,
                                                deref->strct.index);
      assert(offset >= 0);
      return build_addr_iadd_imm(b, base_addr, addr_format, offset);
   }

   case nir_deref_type_cast:
      /* Nothing to do here */
      return base_addr;
   }

   unreachable("Invalid NIR deref type");
}
void
nir_lower_explicit_io_instr(nir_builder *b,
                            nir_intrinsic_instr *intrin,
                            nir_ssa_def *addr,
                            nir_address_format addr_format)
{
   b->cursor = nir_after_instr(&intrin->instr);

   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
   unsigned vec_stride = glsl_get_explicit_stride(deref->type);
   unsigned scalar_size = type_scalar_size_bytes(deref->type);
   assert(vec_stride == 0 || glsl_type_is_vector(deref->type));
   assert(vec_stride == 0 || vec_stride >= scalar_size);

   if (intrin->intrinsic == nir_intrinsic_load_deref) {
      nir_ssa_def *value;
      if (vec_stride > scalar_size) {
         nir_ssa_def *comps[4] = { NULL, };
         for (unsigned i = 0; i < intrin->num_components; i++) {
            nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
                                                         vec_stride * i);
            comps[i] = build_explicit_io_load(b, intrin, comp_addr,
                                              addr_format, 1);
         }
         value = nir_vec(b, comps, intrin->num_components);
      } else {
         value = build_explicit_io_load(b, intrin, addr, addr_format,
                                        intrin->num_components);
      }
      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
   } else if (intrin->intrinsic == nir_intrinsic_store_deref) {
      assert(intrin->src[1].is_ssa);
      nir_ssa_def *value = intrin->src[1].ssa;
      nir_component_mask_t write_mask = nir_intrinsic_write_mask(intrin);
      if (vec_stride > scalar_size) {
         for (unsigned i = 0; i < intrin->num_components; i++) {
            if (!(write_mask & (1 << i)))
               continue;

            nir_ssa_def *comp_addr = build_addr_iadd_imm(b, addr, addr_format,
                                                         vec_stride * i);
            build_explicit_io_store(b, intrin, comp_addr, addr_format,
                                    nir_channel(b, value, i), 1);
         }
      } else {
         build_explicit_io_store(b, intrin, addr, addr_format,
                                 value, write_mask);
      }
   } else {
      nir_ssa_def *value =
         build_explicit_io_atomic(b, intrin, addr, addr_format);
      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(value));
   }

   nir_instr_remove(&intrin->instr);
}
static void
lower_explicit_io_deref(nir_builder *b, nir_deref_instr *deref,
                        nir_address_format addr_format)
{
   /* Just delete the deref if it's not used.  We can't use
    * nir_deref_instr_remove_if_unused here because it may remove more than
    * one deref which could break our list walking since we walk the list
    * backwards.
    */
   assert(list_is_empty(&deref->dest.ssa.if_uses));
   if (list_is_empty(&deref->dest.ssa.uses)) {
      nir_instr_remove(&deref->instr);
      return;
   }

   b->cursor = nir_after_instr(&deref->instr);

   nir_ssa_def *base_addr = NULL;
   if (deref->deref_type != nir_deref_type_var) {
      assert(deref->parent.is_ssa);
      base_addr = deref->parent.ssa;
   }

   nir_ssa_def *addr = nir_explicit_io_address_from_deref(b, deref, base_addr,
                                                          addr_format);

   nir_instr_remove(&deref->instr);
   nir_ssa_def_rewrite_uses(&deref->dest.ssa, nir_src_for_ssa(addr));
}
static void
lower_explicit_io_access(nir_builder *b, nir_intrinsic_instr *intrin,
                         nir_address_format addr_format)
{
   assert(intrin->src[0].is_ssa);
   nir_lower_explicit_io_instr(b, intrin, intrin->src[0].ssa, addr_format);
}
static void
lower_explicit_io_array_length(nir_builder *b, nir_intrinsic_instr *intrin,
                               nir_address_format addr_format)
{
   b->cursor = nir_after_instr(&intrin->instr);

   nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);

   assert(glsl_type_is_array(deref->type));
   assert(glsl_get_length(deref->type) == 0);
   unsigned stride = glsl_get_explicit_stride(deref->type);
   assert(stride > 0);

   assert(addr_format == nir_address_format_32bit_index_offset ||
          addr_format == nir_address_format_vec2_index_32bit_offset);
   nir_ssa_def *addr = &deref->dest.ssa;
   nir_ssa_def *index = addr_to_index(b, addr, addr_format);
   nir_ssa_def *offset = addr_to_offset(b, addr, addr_format);

   nir_intrinsic_instr *bsize =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_get_buffer_size);
   bsize->src[0] = nir_src_for_ssa(index);
   nir_ssa_dest_init(&bsize->instr, &bsize->dest, 1, 32, NULL);
   nir_builder_instr_insert(b, &bsize->instr);

   nir_ssa_def *arr_size =
      nir_idiv(b, nir_isub(b, &bsize->dest.ssa, offset),
                  nir_imm_int(b, stride));

   nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(arr_size));
   nir_instr_remove(&intrin->instr);
}
static bool
nir_lower_explicit_io_impl(nir_function_impl *impl, nir_variable_mode modes,
                           nir_address_format addr_format)
{
   bool progress = false;

   nir_builder b;
   nir_builder_init(&b, impl);

   /* Walk in reverse order so that we can see the full deref chain when we
    * lower the access operations.  We lower them assuming that the derefs
    * will be turned into address calculations later.
    */
   nir_foreach_block_reverse(block, impl) {
      nir_foreach_instr_reverse_safe(instr, block) {
         switch (instr->type) {
         case nir_instr_type_deref: {
            nir_deref_instr *deref = nir_instr_as_deref(instr);
            if (deref->mode & modes) {
               lower_explicit_io_deref(&b, deref, addr_format);
               progress = true;
            }
            break;
         }

         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            switch (intrin->intrinsic) {
            case nir_intrinsic_load_deref:
            case nir_intrinsic_store_deref:
            case nir_intrinsic_deref_atomic_add:
            case nir_intrinsic_deref_atomic_imin:
            case nir_intrinsic_deref_atomic_umin:
            case nir_intrinsic_deref_atomic_imax:
            case nir_intrinsic_deref_atomic_umax:
            case nir_intrinsic_deref_atomic_and:
            case nir_intrinsic_deref_atomic_or:
            case nir_intrinsic_deref_atomic_xor:
            case nir_intrinsic_deref_atomic_exchange:
            case nir_intrinsic_deref_atomic_comp_swap:
            case nir_intrinsic_deref_atomic_fadd:
            case nir_intrinsic_deref_atomic_fmin:
            case nir_intrinsic_deref_atomic_fmax:
            case nir_intrinsic_deref_atomic_fcomp_swap: {
               nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
               if (deref->mode & modes) {
                  lower_explicit_io_access(&b, intrin, addr_format);
                  progress = true;
               }
               break;
            }

            case nir_intrinsic_deref_buffer_array_length: {
               nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
               if (deref->mode & modes) {
                  lower_explicit_io_array_length(&b, intrin, addr_format);
                  progress = true;
               }
               break;
            }

            default:
               break;
            }
            break;
         }

         default:
            /* Nothing to do */
            break;
         }
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   }

   return progress;
}
bool
nir_lower_explicit_io(nir_shader *shader, nir_variable_mode modes,
                      nir_address_format addr_format)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl &&
          nir_lower_explicit_io_impl(function->impl, modes, addr_format))
         progress = true;
   }

   return progress;
}
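/*
 * Usage sketch (hypothetical driver code): a Vulkan-style driver that has
 * already assigned explicit offsets/strides might lower buffer access with
 *
 *    nir_lower_explicit_io(shader, nir_var_mem_ubo | nir_var_mem_ssbo,
 *                          nir_address_format_32bit_index_offset);
 *
 * choosing whichever nir_address_format matches its binding model.
 */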
static bool
nir_lower_vars_to_explicit_types_impl(nir_function_impl *impl,
                                      nir_variable_mode modes,
                                      glsl_type_size_align_func type_info)
{
   bool progress = false;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_deref)
            continue;

         nir_deref_instr *deref = nir_instr_as_deref(instr);
         if (!(deref->mode & modes))
            continue;

         unsigned size, alignment;
         const struct glsl_type *new_type =
            glsl_get_explicit_type_for_size_align(deref->type, type_info, &size, &alignment);
         if (new_type != deref->type) {
            progress = true;
            deref->type = new_type;
         }
         if (deref->deref_type == nir_deref_type_cast) {
            /* See also glsl_type::get_explicit_type_for_size_align() */
            unsigned new_stride = align(size, alignment);
            if (new_stride != deref->cast.ptr_stride) {
               deref->cast.ptr_stride = new_stride;
               progress = true;
            }
         }
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance |
                                  nir_metadata_live_ssa_defs |
                                  nir_metadata_loop_analysis);
   }

   return progress;
}
static bool
lower_vars_to_explicit(nir_shader *shader,
                       struct exec_list *vars, nir_variable_mode mode,
                       glsl_type_size_align_func type_info)
{
   bool progress = false;
   unsigned offset = 0;
   nir_foreach_variable(var, vars) {
      unsigned size, align;
      const struct glsl_type *explicit_type =
         glsl_get_explicit_type_for_size_align(var->type, type_info, &size, &align);

      if (explicit_type != var->type) {
         progress = true;
         var->type = explicit_type;
      }

      var->data.driver_location = ALIGN_POT(offset, align);
      offset = var->data.driver_location + size;
   }

   if (mode == nir_var_mem_shared) {
      shader->info.cs.shared_size = offset;
      shader->num_shared = offset;
   }

   return progress;
}
bool
nir_lower_vars_to_explicit_types(nir_shader *shader,
                                 nir_variable_mode modes,
                                 glsl_type_size_align_func type_info)
{
   /* TODO: Situations which need to be handled to support more modes:
    * - row-major matrices
    * - compact shader inputs/outputs
    */
   ASSERTED nir_variable_mode supported = nir_var_mem_shared |
      nir_var_shader_temp | nir_var_function_temp;
   assert(!(modes & ~supported) && "unsupported");

   bool progress = false;

   if (modes & nir_var_mem_shared)
      progress |= lower_vars_to_explicit(shader, &shader->shared, nir_var_mem_shared, type_info);
   if (modes & nir_var_shader_temp)
      progress |= lower_vars_to_explicit(shader, &shader->globals, nir_var_shader_temp, type_info);

   nir_foreach_function(function, shader) {
      if (function->impl) {
         if (modes & nir_var_function_temp)
            progress |= lower_vars_to_explicit(shader, &function->impl->locals, nir_var_function_temp, type_info);

         progress |= nir_lower_vars_to_explicit_types_impl(function->impl, modes, type_info);
      }
   }

   return progress;
}
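/*
 * Usage sketch (hypothetical): for compute shared memory, a driver might run
 *
 *    nir_lower_vars_to_explicit_types(shader, nir_var_mem_shared,
 *                                     glsl_get_natural_size_align_bytes);
 *    nir_lower_explicit_io(shader, nir_var_mem_shared,
 *                          nir_address_format_32bit_offset);
 *
 * so that derefs carry explicit types before being turned into addresses.
 */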
/**
 * Return the offset source for a load/store intrinsic.
 */
nir_src *
nir_get_io_offset_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_shared:
   case nir_intrinsic_load_uniform:
   case nir_intrinsic_load_global:
   case nir_intrinsic_load_scratch:
   case nir_intrinsic_load_fs_input_interp_deltas:
      return &instr->src[0];
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_store_output:
   case nir_intrinsic_store_shared:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_scratch:
   case nir_intrinsic_ssbo_atomic_add:
   case nir_intrinsic_ssbo_atomic_imin:
   case nir_intrinsic_ssbo_atomic_umin:
   case nir_intrinsic_ssbo_atomic_imax:
   case nir_intrinsic_ssbo_atomic_umax:
   case nir_intrinsic_ssbo_atomic_and:
   case nir_intrinsic_ssbo_atomic_or:
   case nir_intrinsic_ssbo_atomic_xor:
   case nir_intrinsic_ssbo_atomic_exchange:
   case nir_intrinsic_ssbo_atomic_comp_swap:
   case nir_intrinsic_ssbo_atomic_fadd:
   case nir_intrinsic_ssbo_atomic_fmin:
   case nir_intrinsic_ssbo_atomic_fmax:
   case nir_intrinsic_ssbo_atomic_fcomp_swap:
      return &instr->src[1];
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[2];
   default:
      return NULL;
   }
}
/**
 * Return the vertex index source for a load/store per_vertex intrinsic.
 */
nir_src *
nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
      return &instr->src[0];
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[1];
   default:
      return NULL;
   }
}
/**
 * Return the numeric constant that identifies a NULL pointer for each address
 * format.
 */
const nir_const_value *
nir_address_format_null_value(nir_address_format addr_format)
{
   static const nir_const_value null_values[][NIR_MAX_VEC_COMPONENTS] = {
      [nir_address_format_32bit_global] = {{0}},
      [nir_address_format_64bit_global] = {{0}},
      [nir_address_format_64bit_bounded_global] = {{0}},
      [nir_address_format_32bit_index_offset] = {{.u32 = ~0}, {.u32 = ~0}},
      [nir_address_format_vec2_index_32bit_offset] = {{.u32 = ~0}, {.u32 = ~0}, {.u32 = ~0}},
      [nir_address_format_32bit_offset] = {{.u32 = ~0}},
      [nir_address_format_logical] = {{.u32 = ~0}},
   };

   assert(addr_format < ARRAY_SIZE(null_values));
   return null_values[addr_format];
}
nir_ssa_def *
nir_build_addr_ieq(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
                   nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
   case nir_address_format_64bit_bounded_global:
   case nir_address_format_32bit_index_offset:
   case nir_address_format_vec2_index_32bit_offset:
   case nir_address_format_32bit_offset:
      return nir_ball_iequal(b, addr0, addr1);

   case nir_address_format_logical:
      unreachable("Unsupported address format");
   }

   unreachable("Invalid address format");
}
nir_ssa_def *
nir_build_addr_isub(nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
                    nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_global:
   case nir_address_format_64bit_global:
   case nir_address_format_32bit_offset:
      assert(addr0->num_components == 1);
      assert(addr1->num_components == 1);
      return nir_isub(b, addr0, addr1);

   case nir_address_format_64bit_bounded_global:
      return nir_isub(b, addr_to_global(b, addr0, addr_format),
                         addr_to_global(b, addr1, addr_format));

   case nir_address_format_32bit_index_offset:
      assert(addr0->num_components == 2);
      assert(addr1->num_components == 2);
      /* Assume the same buffer index. */
      return nir_isub(b, nir_channel(b, addr0, 1), nir_channel(b, addr1, 1));

   case nir_address_format_vec2_index_32bit_offset:
      assert(addr0->num_components == 3);
      assert(addr1->num_components == 3);
      /* Assume the same buffer index. */
      return nir_isub(b, nir_channel(b, addr0, 2), nir_channel(b, addr1, 2));

   case nir_address_format_logical:
      unreachable("Unsupported address format");
   }

   unreachable("Invalid address format");
}
static bool
is_input(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_input ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_input ||
          intrin->intrinsic == nir_intrinsic_load_interpolated_input ||
          intrin->intrinsic == nir_intrinsic_load_fs_input_interp_deltas;
}
static bool
is_output(nir_intrinsic_instr *intrin)
{
   return intrin->intrinsic == nir_intrinsic_load_output ||
          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
          intrin->intrinsic == nir_intrinsic_store_output ||
          intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
}
/**
 * This pass adds constant offsets to instr->const_index[0] for input/output
 * intrinsics, and resets the offset source to 0.  Non-constant offsets remain
 * unchanged - since we don't know what part of a compound variable is
 * accessed, we allocate storage for the entire thing.  For drivers that use
 * nir_lower_io_to_temporaries() before nir_lower_io(), this guarantees that
 * the offset source will be 0, so that they don't have to add it in manually.
 */
static bool
add_const_offset_to_base_block(nir_block *block, nir_builder *b,
                               nir_variable_mode mode)
{
   bool progress = false;
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      if ((mode == nir_var_shader_in && is_input(intrin)) ||
          (mode == nir_var_shader_out && is_output(intrin))) {
         nir_src *offset = nir_get_io_offset_src(intrin);

         if (nir_src_is_const(*offset)) {
            intrin->const_index[0] += nir_src_as_uint(*offset);
            b->cursor = nir_before_instr(&intrin->instr);
            nir_instr_rewrite_src(&intrin->instr, offset,
                                  nir_src_for_ssa(nir_imm_int(b, 0)));
            progress = true;
         }
      }
   }

   return progress;
}
bool
nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
{
   bool progress = false;

   nir_foreach_function(f, nir) {
      if (f->impl) {
         nir_builder b;
         nir_builder_init(&b, f->impl);
         nir_foreach_block(block, f->impl) {
            progress |= add_const_offset_to_base_block(block, &b, mode);
         }
      }
   }

   return progress;
}