/*
 * Copyright © 2019 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_builder.h"
#include "nir_deref.h"

/** @file nir_lower_io_to_vector.c
 *
 * Merges compatible input/output variables residing in different components
 * of the same location.  It's expected that further passes such as
 * nir_lower_io_to_temporaries will combine loads and stores of the merged
 * variables, producing vector nir_load_input/nir_store_output instructions
 * when all is said and done.
 */

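/* As an illustrative sketch (not literal output of this pass): two
 * fragment-shader inputs packed into different components of one location,
 *
 *    layout(location = 0, component = 0) in vec2 a;
 *    layout(location = 0, component = 2) in vec2 b;
 *
 * become a single vec4 variable at location 0, and each old load is
 * rewritten to a vec4 load plus a swizzle selecting the components the old
 * variable occupied.
 */
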
/* FRAG_RESULT_MAX+1 instead of just FRAG_RESULT_MAX because of how this pass
 * handles dual source blending */
#define MAX_SLOTS MAX2(VARYING_SLOT_TESS_MAX, FRAG_RESULT_MAX+1)

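/* Returns the slot used to index the old_vars/new_vars tables below.
 * data.index is added so that the second dual-source-blending output gets a
 * slot of its own, which is also why MAX_SLOTS is FRAG_RESULT_MAX+1 above.
 */
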
static unsigned
get_slot(const nir_variable *var)
{
   /* This handling of dual-source blending might not be correct when more
    * than one render target is supported, but it seems no driver supports
    * more than one. */
   return var->data.location + var->data.index;
}

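/* For per-vertex I/O (e.g. geometry or tessellation stages), the outermost
 * array dimension indexes the vertex.  This returns the type with that
 * dimension stripped and optionally reports the vertex count; for ordinary
 * I/O the type is returned unchanged.
 */
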
static const struct glsl_type *
get_per_vertex_type(const nir_shader *shader, const nir_variable *var,
                    unsigned *num_vertices)
{
   if (nir_is_per_vertex_io(var, shader->info.stage)) {
      assert(glsl_type_is_array(var->type));
      if (num_vertices)
         *num_vertices = glsl_get_length(var->type);
      return glsl_get_array_element(var->type);
   } else {
      if (num_vertices)
         *num_vertices = 0;
      return var->type;
   }
}

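/* Resizes the innermost vector type to num_components, preserving the array
 * structure around it: e.g. resizing vec2[6] to 4 components yields vec4[6].
 */
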
static const struct glsl_type *
resize_array_vec_type(const struct glsl_type *type, unsigned num_components)
{
   if (glsl_type_is_array(type)) {
      const struct glsl_type *arr_elem =
         resize_array_vec_type(glsl_get_array_element(type), num_components);
      return glsl_array_type(arr_elem, glsl_get_length(type), 0);
   }

   assert(glsl_type_is_vector_or_scalar(type));
   return glsl_vector_type(glsl_get_base_type(type), num_components);
}

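/* Checks whether two variables may be merged into one vector: they must
 * agree on base type, bit size and per-vertex-ness, fragment inputs must
 * agree on interpolation, fragment outputs on the dual-source index, and
 * with same_array_structure they must have identical array shapes.
 */
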
static bool
variables_can_merge(const nir_shader *shader,
                    const nir_variable *a, const nir_variable *b,
                    bool same_array_structure)
{
   if (a->data.compact || b->data.compact)
      return false;

   if (a->data.per_view || b->data.per_view)
      return false;

   const struct glsl_type *a_type_tail = a->type;
   const struct glsl_type *b_type_tail = b->type;

   if (nir_is_per_vertex_io(a, shader->info.stage) !=
       nir_is_per_vertex_io(b, shader->info.stage))
      return false;

   /* They must have the same array structure */
   if (same_array_structure) {
      while (glsl_type_is_array(a_type_tail)) {
         if (!glsl_type_is_array(b_type_tail))
            return false;

         if (glsl_get_length(a_type_tail) != glsl_get_length(b_type_tail))
            return false;

         a_type_tail = glsl_get_array_element(a_type_tail);
         b_type_tail = glsl_get_array_element(b_type_tail);
      }
      if (glsl_type_is_array(b_type_tail))
         return false;
   } else {
      a_type_tail = glsl_without_array(a_type_tail);
      b_type_tail = glsl_without_array(b_type_tail);
   }

   if (!glsl_type_is_vector_or_scalar(a_type_tail) ||
       !glsl_type_is_vector_or_scalar(b_type_tail))
      return false;

   if (glsl_get_base_type(a_type_tail) != glsl_get_base_type(b_type_tail))
      return false;

   /* TODO: add 64/16bit support ? */
   if (glsl_get_bit_size(a_type_tail) != 32)
      return false;

   assert(a->data.mode == b->data.mode);
   if (shader->info.stage == MESA_SHADER_FRAGMENT &&
       a->data.mode == nir_var_shader_in &&
       a->data.interpolation != b->data.interpolation)
      return false;

   if (shader->info.stage == MESA_SHADER_FRAGMENT &&
       a->data.mode == nir_var_shader_out &&
       a->data.index != b->data.index)
      return false;

   /* It's tricky to merge XFB-outputs correctly, because we need there
    * to not be any overlaps when we get to
    * nir_gather_xfb_info_with_varyings later on.  We'll end up
    * triggering an assert there if we merge here.
    */
   if ((shader->info.stage == MESA_SHADER_VERTEX ||
        shader->info.stage == MESA_SHADER_TESS_EVAL ||
        shader->info.stage == MESA_SHADER_GEOMETRY) &&
       a->data.mode == nir_var_shader_out &&
       (a->data.explicit_xfb_buffer || b->data.explicit_xfb_buffer))
      return false;

   return true;
}

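/* Scans forward from *loc for variables that can all be merged into a
 * single vec4 (or an array of vec4s spanning several slots).  Advances *loc
 * past the scanned slots; returns NULL if nothing mergeable was found there.
 */
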
static const struct glsl_type *
get_flat_type(const nir_shader *shader, nir_variable *old_vars[MAX_SLOTS][4],
              unsigned *loc, nir_variable **first_var, unsigned *num_vertices)
{
   unsigned todo = 1;
   unsigned slots = 0;
   unsigned num_vars = 0;
   enum glsl_base_type base;
   *num_vertices = 0;
   *first_var = NULL;

   while (todo) {
      assert(*loc < MAX_SLOTS);
      for (unsigned frac = 0; frac < 4; frac++) {
         nir_variable *var = old_vars[*loc][frac];
         if (!var)
            continue;
         if ((*first_var &&
              !variables_can_merge(shader, var, *first_var, false)) ||
             var->data.compact) {
            (*loc)++;
            return NULL;
         }

         if (!*first_var) {
            if (!glsl_type_is_vector_or_scalar(glsl_without_array(var->type))) {
               (*loc)++;
               return NULL;
            }
            *first_var = var;
            base = glsl_get_base_type(
               glsl_without_array(get_per_vertex_type(shader, var, NULL)));
         }

         bool vs_in = shader->info.stage == MESA_SHADER_VERTEX &&
                      var->data.mode == nir_var_shader_in;
         unsigned var_slots = glsl_count_attribute_slots(
            get_per_vertex_type(shader, var, num_vertices), vs_in);
         todo = MAX2(todo, var_slots);
         num_vars++;
      }

      todo--;
      slots++;
      (*loc)++;
   }

   if (num_vars <= 1)
      return NULL;

   if (slots == 1)
      return glsl_vector_type(base, 4);
   else
      return glsl_array_type(glsl_vector_type(base, 4), slots, 0);
}

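/* Builds the new_vars/flat_vars tables for one mode (inputs or outputs):
 * first merges vector/scalar variables sharing a location into wider
 * vectors, then applies the "flat" vec4 mode above to whatever remains.
 */
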
static bool
create_new_io_vars(nir_shader *shader, struct exec_list *io_list,
                   nir_variable *new_vars[MAX_SLOTS][4],
                   bool flat_vars[MAX_SLOTS])
{
   if (exec_list_is_empty(io_list))
      return false;

   nir_variable *old_vars[MAX_SLOTS][4] = {{0}};

   nir_foreach_variable(var, io_list) {
      unsigned frac = var->data.location_frac;
      old_vars[get_slot(var)][frac] = var;
   }

   bool merged_any_vars = false;

   for (unsigned loc = 0; loc < MAX_SLOTS; loc++) {
      unsigned frac = 0;
      while (frac < 4) {
         nir_variable *first_var = old_vars[loc][frac];
         if (!first_var) {
            frac++;
            continue;
         }

         unsigned first = frac;
         bool found_merge = false;

         while (frac < 4) {
            nir_variable *var = old_vars[loc][frac];
            if (!var)
               break;

            if (var != first_var) {
               if (!variables_can_merge(shader, first_var, var, true))
                  break;

               found_merge = true;
            }

            const unsigned num_components =
               glsl_get_components(glsl_without_array(var->type));
            if (!num_components) {
               assert(frac == 0);
               frac++; /* Avoid an infinite loop */
               break; /* The type was a struct. */
            }

            /* We had better not have any overlapping vars */
            for (unsigned i = 1; i < num_components; i++)
               assert(old_vars[loc][frac + i] == NULL);

            frac += num_components;
         }

         if (!found_merge)
            continue;

         merged_any_vars = true;

         nir_variable *var = nir_variable_clone(old_vars[loc][first], shader);
         var->data.location_frac = first;
         var->type = resize_array_vec_type(var->type, frac - first);

         nir_shader_add_variable(shader, var);
         for (unsigned i = first; i < frac; i++) {
            new_vars[loc][i] = var;
            old_vars[loc][i] = NULL;
         }

         old_vars[loc][first] = var;
      }
   }

283 /* "flat" mode: tries to ensure there is at most one variable per slot by
284 * merging variables into vec4s
286 for (unsigned loc
= 0; loc
< MAX_SLOTS
;) {
287 nir_variable
*first_var
;
288 unsigned num_vertices
;
289 unsigned new_loc
= loc
;
290 const struct glsl_type
*flat_type
=
291 get_flat_type(shader
, old_vars
, &new_loc
, &first_var
, &num_vertices
);
293 merged_any_vars
= true;
295 nir_variable
*var
= nir_variable_clone(first_var
, shader
);
296 var
->data
.location_frac
= 0;
298 var
->type
= glsl_array_type(flat_type
, num_vertices
, 0);
300 var
->type
= flat_type
;
302 nir_shader_add_variable(shader
, var
);
303 for (unsigned i
= 0; i
< glsl_get_length(flat_type
); i
++) {
304 for (unsigned j
= 0; j
< 4; j
++)
305 new_vars
[loc
+ i
][j
] = var
;
306 flat_vars
[loc
+ i
] = true;
312 return merged_any_vars
;
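/* Rebuilds the leader's deref chain on top of new_var, so a deref of
 * old_var[i][j] becomes a deref of new_var[i][j] (via
 * nir_build_deref_follower).
 */
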
static nir_deref_instr *
build_array_deref_of_new_var(nir_builder *b, nir_variable *new_var,
                             nir_deref_instr *leader)
{
   if (leader->deref_type == nir_deref_type_var)
      return nir_build_deref_var(b, new_var);

   nir_deref_instr *parent =
      build_array_deref_of_new_var(b, new_var, nir_deref_instr_parent(leader));

   return nir_build_deref_follower(b, parent, leader);
}

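/* Flattens an array deref chain into a single slot index: for a deref like
 * var[i][j] this builds base + i*slots(var[i]) + j*slots(var[i][j]), using
 * glsl_count_attribute_slots() for the per-element slot counts.
 */
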
static nir_ssa_def *
build_array_index(nir_builder *b, nir_deref_instr *deref, nir_ssa_def *base,
                  bool vs_in)
{
   switch (deref->deref_type) {
   case nir_deref_type_var:
      return base;
   case nir_deref_type_array: {
      nir_ssa_def *index = nir_i2i(b, deref->arr.index.ssa,
                                   deref->dest.ssa.bit_size);
      return nir_iadd(
         b, build_array_index(b, nir_deref_instr_parent(deref), base, vs_in),
         nir_amul_imm(b, index, glsl_count_attribute_slots(deref->type, vs_in)));
   }
   default:
      unreachable("Invalid deref instruction type");
   }
}

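/* Builds a deref into a merged "flat" variable: the per-vertex index is
 * carried over first (if any), then the remaining array indices are folded
 * into one index into the vec4 array, offset by base (the distance from the
 * merged variable's first slot).
 */
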
static nir_deref_instr *
build_array_deref_of_new_var_flat(nir_shader *shader,
                                  nir_builder *b, nir_variable *new_var,
                                  nir_deref_instr *leader, unsigned base)
{
   nir_deref_instr *deref = nir_build_deref_var(b, new_var);

   if (nir_is_per_vertex_io(new_var, shader->info.stage)) {
      assert(leader->deref_type == nir_deref_type_array);
      nir_ssa_def *index = leader->arr.index.ssa;
      leader = nir_deref_instr_parent(leader);
      deref = nir_build_deref_array(b, deref, index);
   }

   if (!glsl_type_is_array(deref->type))
      return deref;

   bool vs_in = shader->info.stage == MESA_SHADER_VERTEX &&
                new_var->data.mode == nir_var_shader_in;
   return nir_build_deref_array(
      b, deref, build_array_index(b, leader, nir_imm_int(b, base), vs_in));
}

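/* Per-function body of the pass: computes the merged-variable tables, then
 * rewrites every load/store/interpolation intrinsic that touches a merged
 * variable.
 */
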
static bool
nir_lower_io_to_vector_impl(nir_function_impl *impl, nir_variable_mode modes)
{
   assert(!(modes & ~(nir_var_shader_in | nir_var_shader_out)));

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_metadata_require(impl, nir_metadata_dominance);

   nir_shader *shader = impl->function->shader;
   nir_variable *new_inputs[MAX_SLOTS][4] = {{0}};
   nir_variable *new_outputs[MAX_SLOTS][4] = {{0}};
   bool flat_inputs[MAX_SLOTS] = {0};
   bool flat_outputs[MAX_SLOTS] = {0};

   if (modes & nir_var_shader_in) {
      /* Vertex shaders support overlapping inputs.  We don't do those */
      assert(b.shader->info.stage != MESA_SHADER_VERTEX);

      /* If we don't actually merge any variables, remove that bit from modes
       * so we don't bother doing extra non-work.
       */
      if (!create_new_io_vars(shader, &shader->inputs,
                              new_inputs, flat_inputs))
         modes &= ~nir_var_shader_in;
   }

   if (modes & nir_var_shader_out) {
      /* If we don't actually merge any variables, remove that bit from modes
       * so we don't bother doing extra non-work.
       */
      if (!create_new_io_vars(shader, &shader->outputs,
                              new_outputs, flat_outputs))
         modes &= ~nir_var_shader_out;
   }

   if (!modes)
      return false;

   bool progress = false;

   /* Actually lower all the IO load/store intrinsics.  Load instructions are
    * lowered to a vector load and an ALU instruction to grab the channels we
    * want.  Outputs are lowered to a write-masked store of the vector output.
    * For non-TCS outputs, we then run nir_lower_io_to_temporaries at the end
    * to clean up the partial writes.
    */
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_deref:
         case nir_intrinsic_interp_deref_at_centroid:
         case nir_intrinsic_interp_deref_at_sample:
         case nir_intrinsic_interp_deref_at_offset:
         case nir_intrinsic_interp_deref_at_vertex: {
            nir_deref_instr *old_deref = nir_src_as_deref(intrin->src[0]);
            if (!(old_deref->mode & modes))
               break;

            if (old_deref->mode == nir_var_shader_out)
               assert(b.shader->info.stage == MESA_SHADER_TESS_CTRL ||
                      b.shader->info.stage == MESA_SHADER_FRAGMENT);

            nir_variable *old_var = nir_deref_instr_get_variable(old_deref);

            const unsigned loc = get_slot(old_var);
            const unsigned old_frac = old_var->data.location_frac;
            nir_variable *new_var = old_deref->mode == nir_var_shader_in ?
                                    new_inputs[loc][old_frac] :
                                    new_outputs[loc][old_frac];
            bool flat = old_deref->mode == nir_var_shader_in ?
                        flat_inputs[loc] : flat_outputs[loc];
            if (!new_var)
               break;

            const unsigned new_frac = new_var->data.location_frac;

            nir_component_mask_t vec4_comp_mask =
               ((1 << intrin->num_components) - 1) << old_frac;

            b.cursor = nir_before_instr(&intrin->instr);

            /* Rewrite the load to use the new variable and only select a
             * portion of the result.
             */
            nir_deref_instr *new_deref;
            if (flat) {
               new_deref = build_array_deref_of_new_var_flat(
                  shader, &b, new_var, old_deref, loc - get_slot(new_var));
            } else {
               assert(get_slot(new_var) == loc);
               new_deref = build_array_deref_of_new_var(&b, new_var, old_deref);
               assert(glsl_type_is_vector(new_deref->type));
            }
            nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                                  nir_src_for_ssa(&new_deref->dest.ssa));

            intrin->num_components =
               glsl_get_components(new_deref->type);
            intrin->dest.ssa.num_components = intrin->num_components;

            b.cursor = nir_after_instr(&intrin->instr);

            nir_ssa_def *new_vec = nir_channels(&b, &intrin->dest.ssa,
                                                vec4_comp_mask >> new_frac);
            nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
                                           nir_src_for_ssa(new_vec),
                                           new_vec->parent_instr);

            progress = true;
            break;
         }

         case nir_intrinsic_store_deref: {
            nir_deref_instr *old_deref = nir_src_as_deref(intrin->src[0]);
            if (old_deref->mode != nir_var_shader_out)
               break;

            nir_variable *old_var = nir_deref_instr_get_variable(old_deref);

            const unsigned loc = get_slot(old_var);
            const unsigned old_frac = old_var->data.location_frac;
            nir_variable *new_var = new_outputs[loc][old_frac];
            bool flat = flat_outputs[loc];
            if (!new_var)
               break;

            const unsigned new_frac = new_var->data.location_frac;

            b.cursor = nir_before_instr(&intrin->instr);

            /* Rewrite the store to be a masked store to the new variable */
            nir_deref_instr *new_deref;
            if (flat) {
               new_deref = build_array_deref_of_new_var_flat(
                  shader, &b, new_var, old_deref, loc - get_slot(new_var));
            } else {
               assert(get_slot(new_var) == loc);
               new_deref = build_array_deref_of_new_var(&b, new_var, old_deref);
               assert(glsl_type_is_vector(new_deref->type));
            }
            nir_instr_rewrite_src(&intrin->instr, &intrin->src[0],
                                  nir_src_for_ssa(&new_deref->dest.ssa));

            intrin->num_components =
               glsl_get_components(new_deref->type);

            nir_component_mask_t old_wrmask = nir_intrinsic_write_mask(intrin);

            assert(intrin->src[1].is_ssa);
            nir_ssa_def *old_value = intrin->src[1].ssa;
            nir_ssa_def *comps[4];
            for (unsigned c = 0; c < intrin->num_components; c++) {
               if (new_frac + c >= old_frac &&
                   (old_wrmask & 1 << (new_frac + c - old_frac))) {
                  comps[c] = nir_channel(&b, old_value,
                                         new_frac + c - old_frac);
               } else {
                  comps[c] = nir_ssa_undef(&b, old_value->num_components,
                                           old_value->bit_size);
               }
            }
            nir_ssa_def *new_value = nir_vec(&b, comps, intrin->num_components);
            nir_instr_rewrite_src(&intrin->instr, &intrin->src[1],
                                  nir_src_for_ssa(new_value));

            nir_intrinsic_set_write_mask(intrin,
                                         old_wrmask << (old_frac - new_frac));

            progress = true;
            break;
         }

         default:
            break;
         }
      }
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   return progress;
}

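/* A driver would typically run this pass followed by
 * nir_lower_io_to_temporaries, which turns the masked partial stores created
 * above into whole-vector stores.  Roughly (an illustrative sketch; exact
 * call sites and pass ordering vary by driver):
 *
 *    NIR_PASS(progress, nir, nir_lower_io_to_vector, nir_var_shader_out);
 *    NIR_PASS_V(nir, nir_lower_io_to_temporaries,
 *               nir_shader_get_entrypoint(nir), true, false);
 */
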
bool
nir_lower_io_to_vector(nir_shader *shader, nir_variable_mode modes)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_lower_io_to_vector_impl(function->impl, modes);
   }

   return progress;
}