/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
/*
 * This lowering pass converts loads and stores that reference input/output
 * variables into actual input/output intrinsics.
 */
#include "nir.h"
#include "nir_builder.h"
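
/* A rough before/after sketch of the lowering (illustrative pseudo-IR, not
 * exact NIR print syntax): a direct access
 *
 *    vec4 ssa_2 = load_var &in_color
 *
 * becomes, once in_color has been assigned driver_location N,
 *
 *    vec4 ssa_2 = load_input () (N)
 *
 * while an indirect access such as load_var &in_array[i] becomes
 * load_input_indirect, with the SSA-valued index as an extra source and
 * the constant part of the offset in const_index[0].
 */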
struct lower_io_state {
   nir_builder builder;
   void *mem_ctx;
   bool is_scalar;
};
/* Returns the number of vec4 slots occupied by "type". */
static unsigned
type_size_vec4(const struct glsl_type *type)
{
   unsigned int i;
   int size;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      if (glsl_type_is_matrix(type)) {
         return glsl_get_matrix_columns(type);
      } else {
         return 1;
      }
   case GLSL_TYPE_ARRAY:
      return type_size_vec4(glsl_get_array_element(type)) *
             glsl_get_length(type);
   case GLSL_TYPE_STRUCT:
      size = 0;
      for (i = 0; i < glsl_get_length(type); i++) {
         size += type_size_vec4(glsl_get_struct_field(type, i));
      }
      return size;
   case GLSL_TYPE_SUBROUTINE:
      return 1;
   case GLSL_TYPE_SAMPLER:
      return 0;
   case GLSL_TYPE_ATOMIC_UINT:
      return 0;
   case GLSL_TYPE_IMAGE:
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_ERROR:
   case GLSL_TYPE_INTERFACE:
      unreachable("not reached");
   }

   return 0;
}
/* Returns the number of scalar components occupied by "type". */
static unsigned
type_size_scalar(const struct glsl_type *type)
{
   unsigned int size, i;

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_BOOL:
      return glsl_get_components(type);
   case GLSL_TYPE_ARRAY:
      return type_size_scalar(glsl_get_array_element(type)) *
             glsl_get_length(type);
   case GLSL_TYPE_STRUCT:
      size = 0;
      for (i = 0; i < glsl_get_length(type); i++) {
         size += type_size_scalar(glsl_get_struct_field(type, i));
      }
      return size;
   case GLSL_TYPE_SUBROUTINE:
      return 1;
   case GLSL_TYPE_SAMPLER:
      return 0;
   case GLSL_TYPE_ATOMIC_UINT:
      return 0;
   case GLSL_TYPE_INTERFACE:
      return 0;
   case GLSL_TYPE_IMAGE:
      return 0;
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_ERROR:
   case GLSL_TYPE_DOUBLE:
      unreachable("not reached");
   }

   return 0;
}
static unsigned
type_size(const struct glsl_type *type, bool is_scalar)
{
   if (is_scalar)
      return type_size_scalar(type);
   else
      return type_size_vec4(type);
}
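
/* Worked example of the two counting schemes: for a GLSL
 * `struct { vec3 a; float b; mat4 c; }`, type_size_vec4() yields
 * 1 + 1 + 4 = 6 slots (every non-matrix vector takes one full vec4 slot),
 * while type_size_scalar() yields 3 + 1 + 16 = 20 components.
 */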
void
nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                         bool is_scalar)
{
   unsigned location = 0;

   foreach_list_typed(nir_variable, var, node, var_list) {
      /*
       * UBOs and SSBOs have their own address spaces, so don't count them
       * towards the number of global uniforms.
       */
      if ((var->data.mode == nir_var_uniform ||
           var->data.mode == nir_var_shader_storage) &&
          var->interface_type != NULL)
         continue;

      var->data.driver_location = location;
      location += type_size(var->type, is_scalar);
   }

   *size = location;
}
static bool
deref_has_indirect(nir_deref_var *deref)
{
   for (nir_deref *tail = deref->deref.child; tail; tail = tail->child) {
      if (tail->deref_type == nir_deref_type_array) {
         nir_deref_array *arr = nir_deref_as_array(tail);
         if (arr->deref_array_type == nir_deref_array_type_indirect)
            return true;
      }
   }

   return false;
}
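
/* For example, a deref chain for `arr[2].f` is fully direct (the index is
 * the compile-time constant base_offset 2), whereas `arr[i].f` contains an
 * indirect array deref whose index is the SSA value `i`.
 */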
static bool
mark_indirect_uses_block(nir_block *block, void *void_state)
{
   struct set *indirect_set = void_state;

   nir_foreach_instr(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      for (unsigned i = 0;
           i < nir_intrinsic_infos[intrin->intrinsic].num_variables; i++) {
         if (deref_has_indirect(intrin->variables[i]))
            _mesa_set_add(indirect_set, intrin->variables[i]->var);
      }
   }

   return true;
}
/* Identical to nir_assign_var_locations except that it assigns locations
 * to the variables that are used 100% directly first and then assigns
 * locations to the variables that are used indirectly.
 */
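/* Splitting the file at *direct_size presumably lets a backend treat the
 * two halves differently, e.g. promote the directly-addressed uniforms at
 * the front to registers or push constants while leaving the
 * indirectly-addressed tail in addressable memory.
 */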
void
nir_assign_var_locations_direct_first(nir_shader *shader,
                                      struct exec_list *var_list,
                                      unsigned *direct_size,
                                      unsigned *size,
                                      bool is_scalar)
{
   struct set *indirect_set = _mesa_set_create(NULL, _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);

   /* First, find every variable that is ever accessed indirectly. */
   nir_foreach_overload(shader, overload) {
      if (overload->impl)
         nir_foreach_block(overload->impl, mark_indirect_uses_block,
                           indirect_set);
   }

   unsigned location = 0;

   foreach_list_typed(nir_variable, var, node, var_list) {
      if ((var->data.mode == nir_var_uniform ||
           var->data.mode == nir_var_shader_storage) &&
          var->interface_type != NULL)
         continue;

      if (_mesa_set_search(indirect_set, var))
         continue;

      var->data.driver_location = location;
      location += type_size(var->type, is_scalar);
   }

   *direct_size = location;

   foreach_list_typed(nir_variable, var, node, var_list) {
      if ((var->data.mode == nir_var_uniform ||
           var->data.mode == nir_var_shader_storage) &&
          var->interface_type != NULL)
         continue;

      if (!_mesa_set_search(indirect_set, var))
         continue;

      var->data.driver_location = location;
      location += type_size(var->type, is_scalar);
   }

   *size = location;

   _mesa_set_destroy(indirect_set, NULL);
}
/* Walks the deref chain, accumulating the constant part of the offset in
 * the return value and building SSA arithmetic for any indirect parts,
 * which it stores in *indirect.
 */
static unsigned
get_io_offset(nir_deref_var *deref, nir_instr *instr, nir_src *indirect,
              struct lower_io_state *state)
{
   bool found_indirect = false;
   unsigned base_offset = 0;

   nir_builder *b = &state->builder;
   nir_builder_insert_before_instr(b, instr);

   nir_deref *tail = &deref->deref;
   while (tail->child != NULL) {
      const struct glsl_type *parent_type = tail->type;
      tail = tail->child;

      if (tail->deref_type == nir_deref_type_array) {
         nir_deref_array *deref_array = nir_deref_as_array(tail);
         unsigned size = type_size(tail->type, state->is_scalar);

         base_offset += size * deref_array->base_offset;

         if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
            nir_ssa_def *mul =
               nir_imul(b, nir_imm_int(b, size),
                        nir_ssa_for_src(b, deref_array->indirect, 1));

            if (found_indirect) {
               indirect->ssa =
                  nir_iadd(b, nir_ssa_for_src(b, *indirect, 1), mul);
            } else {
               indirect->ssa = mul;
            }
            indirect->is_ssa = true;
            found_indirect = true;
         }
      } else if (tail->deref_type == nir_deref_type_struct) {
         nir_deref_struct *deref_struct = nir_deref_as_struct(tail);

         for (unsigned i = 0; i < deref_struct->index; i++)
            base_offset += type_size(glsl_get_struct_field(parent_type, i),
                                     state->is_scalar);
      }
   }

   return base_offset;
}
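
/* For example, lowering `v[1][i]` where each element of v takes 4 slots
 * and each element of v[.] takes 1 slot yields base_offset = 4 * 1 = 4
 * plus an SSA *indirect of imul(1, i); any further indirect index would
 * be folded in with iadd.
 */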
static nir_intrinsic_op
load_op(nir_variable_mode mode, bool has_indirect)
{
   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_shader_in:
      op = has_indirect ? nir_intrinsic_load_input_indirect :
                          nir_intrinsic_load_input;
      break;
   case nir_var_uniform:
      op = has_indirect ? nir_intrinsic_load_uniform_indirect :
                          nir_intrinsic_load_uniform;
      break;
   default:
      unreachable("Unknown variable mode");
   }
   return op;
}
static bool
nir_lower_io_block(nir_block *block, void *void_state)
{
   struct lower_io_state *state = void_state;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_var: {
         nir_variable_mode mode = intrin->variables[0]->var->data.mode;
         if (mode != nir_var_shader_in && mode != nir_var_uniform)
            continue;

         bool has_indirect = deref_has_indirect(intrin->variables[0]);

         /* Replace the load_var with a load_input/load_uniform intrinsic
          * at the variable's assigned location.
          */
         nir_intrinsic_instr *load =
            nir_intrinsic_instr_create(state->mem_ctx,
                                       load_op(mode, has_indirect));
         load->num_components = intrin->num_components;

         nir_src indirect;
         unsigned offset = get_io_offset(intrin->variables[0],
                                         &intrin->instr, &indirect, state);
         offset += intrin->variables[0]->var->data.driver_location;

         load->const_index[0] = offset;

         if (has_indirect)
            load->src[0] = indirect;

         if (intrin->dest.is_ssa) {
            nir_ssa_dest_init(&load->instr, &load->dest,
                              intrin->num_components, NULL);
            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                     nir_src_for_ssa(&load->dest.ssa),
                                     state->mem_ctx);
         } else {
            nir_dest_copy(&load->dest, &intrin->dest, state->mem_ctx);
         }

         nir_instr_insert_before(&intrin->instr, &load->instr);
         nir_instr_remove(&intrin->instr);
         break;
      }

      case nir_intrinsic_store_var: {
         if (intrin->variables[0]->var->data.mode != nir_var_shader_out)
            continue;

         bool has_indirect = deref_has_indirect(intrin->variables[0]);

         nir_intrinsic_op store_op;
         if (has_indirect)
            store_op = nir_intrinsic_store_output_indirect;
         else
            store_op = nir_intrinsic_store_output;

         /* Replace the store_var with a store_output intrinsic at the
          * variable's assigned location.
          */
         nir_intrinsic_instr *store =
            nir_intrinsic_instr_create(state->mem_ctx, store_op);
         store->num_components = intrin->num_components;

         nir_src indirect;
         unsigned offset = get_io_offset(intrin->variables[0],
                                         &intrin->instr, &indirect, state);
         offset += intrin->variables[0]->var->data.driver_location;

         store->const_index[0] = offset;

         nir_src_copy(&store->src[0], &intrin->src[0], state->mem_ctx);

         if (has_indirect)
            store->src[1] = indirect;

         nir_instr_insert_before(&intrin->instr, &store->instr);
         nir_instr_remove(&intrin->instr);
         break;
      }

      default:
         break;
      }
   }

   return true;
}
static void
nir_lower_io_impl(nir_function_impl *impl, bool is_scalar)
{
   struct lower_io_state state;

   nir_builder_init(&state.builder, impl);
   state.mem_ctx = ralloc_parent(impl);
   state.is_scalar = is_scalar;

   nir_foreach_block(impl, nir_lower_io_block, &state);

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);
}
void
nir_lower_io(nir_shader *shader, bool is_scalar)
{
   nir_foreach_overload(shader, overload) {
      if (overload->impl)
         nir_lower_io_impl(overload->impl, is_scalar);
   }
}
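
/* Typical driver usage might look like the following sketch (the exec_list
 * names match nir_shader's variable lists; the unsigned counters are
 * hypothetical locals):
 *
 *    unsigned num_uniforms, num_inputs, num_outputs;
 *    nir_assign_var_locations(&shader->uniforms, &num_uniforms, is_scalar);
 *    nir_assign_var_locations(&shader->inputs, &num_inputs, is_scalar);
 *    nir_assign_var_locations(&shader->outputs, &num_outputs, is_scalar);
 *    nir_lower_io(shader, is_scalar);
 */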