/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *    Jason Ekstrand (jason@jlekstrand.net)
 */

/*
 * This lowering pass converts load/store references to input/output
 * variables into actual input/output intrinsics.
 */
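
/*
 * Roughly speaking (an illustrative sketch, not literal nir_print output),
 * a variable load such as
 *
 *    vec4 ssa_2 = intrinsic load_var (color) ()
 *
 * becomes, after this pass, an offset-based intrinsic along the lines of
 *
 *    vec4 ssa_2 = intrinsic load_input (ssa_1) (base, component)
 *
 * where base comes from var->data.driver_location, component from
 * var->data.location_frac, and ssa_1 is the offset computed from any
 * array/struct indexing in the original deref chain.
 */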

#include "nir.h"
#include "nir_builder.h"

struct lower_io_state {
   nir_builder builder;
   void *mem_ctx;
   int (*type_size)(const struct glsl_type *type);
   nir_variable_mode modes;
};

void
nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                         unsigned base_offset,
                         int (*type_size)(const struct glsl_type *))
{
   unsigned location = 0;

   /* There are 32 regular and 32 patch varyings allowed */
   int locations[64][2];
   for (unsigned i = 0; i < 64; i++) {
      for (unsigned j = 0; j < 2; j++)
         locations[i][j] = -1;
   }

   nir_foreach_variable(var, var_list) {
      /*
       * UBOs have their own address spaces, so don't count them towards the
       * number of global uniforms
       */
      if ((var->data.mode == nir_var_uniform ||
           var->data.mode == nir_var_shader_storage) &&
          var->interface_type != NULL)
         continue;

      /* Make sure we give the same location to varyings packed with
       * ARB_enhanced_layouts.
       */
      int idx = var->data.location - base_offset;
      if (base_offset && idx >= 0) {
         assert(idx < ARRAY_SIZE(locations));

         if (locations[idx][var->data.index] == -1) {
            var->data.driver_location = location;
            locations[idx][var->data.index] = location;
            location += type_size(var->type);
         } else {
            var->data.driver_location = locations[idx][var->data.index];
         }
      } else {
         var->data.driver_location = location;
         location += type_size(var->type);
      }
   }

   *size = location;
}
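
/*
 * A minimal usage sketch.  The type_size_vec4 callback below is
 * hypothetical; real drivers supply their own slot-counting function:
 *
 *    static int
 *    type_size_vec4(const struct glsl_type *type)
 *    {
 *       return glsl_count_attribute_slots(type, false);
 *    }
 *
 *    nir_assign_var_locations(&shader->inputs, &shader->num_inputs,
 *                             0, type_size_vec4);
 */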

/**
 * Returns true if we're processing a stage whose inputs are arrays indexed
 * by a vertex number (such as geometry shader inputs).
 */
static bool
is_per_vertex_input(struct lower_io_state *state, nir_variable *var)
{
   gl_shader_stage stage = state->builder.shader->stage;

   return var->data.mode == nir_var_shader_in && !var->data.patch &&
          (stage == MESA_SHADER_TESS_CTRL ||
           stage == MESA_SHADER_TESS_EVAL ||
           stage == MESA_SHADER_GEOMETRY);
}
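
/* For example, a geometry shader input declared in GLSL as
 *
 *    in vec4 color[];
 *
 * is addressed as color[vertex][...], and get_io_offset() below splits that
 * outer vertex index out for the per-vertex intrinsics.
 */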

static bool
is_per_vertex_output(struct lower_io_state *state, nir_variable *var)
{
   gl_shader_stage stage = state->builder.shader->stage;
   return var->data.mode == nir_var_shader_out && !var->data.patch &&
          stage == MESA_SHADER_TESS_CTRL;
}

static nir_ssa_def *
get_io_offset(nir_builder *b, nir_deref_var *deref,
              nir_ssa_def **vertex_index,
              int (*type_size)(const struct glsl_type *))
{
   nir_deref *tail = &deref->deref;

   /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
    * outermost array index separate.  Process the rest normally.
    */
   if (vertex_index != NULL) {
      tail = tail->child;
      assert(tail->deref_type == nir_deref_type_array);
      nir_deref_array *deref_array = nir_deref_as_array(tail);

      nir_ssa_def *vtx = nir_imm_int(b, deref_array->base_offset);
      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         vtx = nir_iadd(b, vtx, nir_ssa_for_src(b, deref_array->indirect, 1));
      }
      *vertex_index = vtx;
   }

   /* Just emit code and let constant-folding go to town */
   nir_ssa_def *offset = nir_imm_int(b, 0);

   while (tail->child != NULL) {
      const struct glsl_type *parent_type = tail->type;
      tail = tail->child;

      if (tail->deref_type == nir_deref_type_array) {
         nir_deref_array *deref_array = nir_deref_as_array(tail);
         unsigned size = type_size(tail->type);

         offset = nir_iadd(b, offset,
                           nir_imm_int(b, size * deref_array->base_offset));

         if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
            nir_ssa_def *mul =
               nir_imul(b, nir_imm_int(b, size),
                        nir_ssa_for_src(b, deref_array->indirect, 1));

            offset = nir_iadd(b, offset, mul);
         }
      } else if (tail->deref_type == nir_deref_type_struct) {
         nir_deref_struct *deref_struct = nir_deref_as_struct(tail);

         unsigned field_offset = 0;
         for (unsigned i = 0; i < deref_struct->index; i++) {
            field_offset += type_size(glsl_get_struct_field(parent_type, i));
         }
         offset = nir_iadd(b, offset, nir_imm_int(b, field_offset));
      }
   }

   return offset;
}
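
/*
 * A worked example (slot counts depend entirely on the driver's type_size
 * callback; the types here are hypothetical): given
 *
 *    struct S { vec4 a; vec4 b[2]; };
 *    uniform S s[4];
 *
 * and the deref chain s[i].b[1], with type_size() counting vec4 slots
 * (so type_size(S) == 3), the loop above accumulates roughly
 *
 *    offset = 3 * i     (array s, indirect index i)
 *           + 1         (struct member b, after the one-slot field a)
 *           + 1 * 1     (array b, constant index 1)
 *
 * emitted as iadd/imul chains that later constant folding cleans up.
 */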

static nir_intrinsic_op
load_op(nir_variable_mode mode, bool per_vertex)
{
   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_shader_in:
      op = per_vertex ? nir_intrinsic_load_per_vertex_input :
                        nir_intrinsic_load_input;
      break;
   case nir_var_shader_out:
      op = per_vertex ? nir_intrinsic_load_per_vertex_output :
                        nir_intrinsic_load_output;
      break;
   case nir_var_uniform:
      op = nir_intrinsic_load_uniform;
      break;
   case nir_var_shared:
      op = nir_intrinsic_load_shared;
      break;
   default:
      unreachable("Unknown variable mode");
   }
   return op;
}

static nir_intrinsic_op
store_op(struct lower_io_state *state,
         nir_variable_mode mode, bool per_vertex)
{
   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_shader_out:
      op = per_vertex ? nir_intrinsic_store_per_vertex_output :
                        nir_intrinsic_store_output;
      break;
   case nir_var_shared:
      op = nir_intrinsic_store_shared;
      break;
   default:
      unreachable("Unknown variable mode");
   }
   return op;
}

static nir_intrinsic_op
atomic_op(nir_intrinsic_op opcode)
{
   switch (opcode) {
#define OP(O) case nir_intrinsic_var_##O: return nir_intrinsic_shared_##O;
   OP(atomic_exchange)
   OP(atomic_comp_swap)
   OP(atomic_add)
   OP(atomic_imin)
   OP(atomic_umin)
   OP(atomic_imax)
   OP(atomic_umax)
   OP(atomic_and)
   OP(atomic_or)
   OP(atomic_xor)
#undef OP
   default:
      unreachable("Invalid atomic");
   }
}
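
/* Each OP(...) line above expands to a mapping case, e.g. OP(atomic_add)
 * becomes:
 *
 *    case nir_intrinsic_var_atomic_add:
 *       return nir_intrinsic_shared_atomic_add;
 */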

static bool
nir_lower_io_block(nir_block *block,
                   struct lower_io_state *state)
{
   nir_builder *b = &state->builder;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_var:
      case nir_intrinsic_store_var:
      case nir_intrinsic_var_atomic_add:
      case nir_intrinsic_var_atomic_imin:
      case nir_intrinsic_var_atomic_umin:
      case nir_intrinsic_var_atomic_imax:
      case nir_intrinsic_var_atomic_umax:
      case nir_intrinsic_var_atomic_and:
      case nir_intrinsic_var_atomic_or:
      case nir_intrinsic_var_atomic_xor:
      case nir_intrinsic_var_atomic_exchange:
      case nir_intrinsic_var_atomic_comp_swap:
         /* We can lower the I/O for this NIR intrinsic */
         break;
      default:
         /* We can't lower the I/O for this NIR intrinsic, so skip it */
         continue;
      }

      nir_variable *var = intrin->variables[0]->var;
      nir_variable_mode mode = var->data.mode;

      if ((state->modes & mode) == 0)
         continue;

      if (mode != nir_var_shader_in &&
          mode != nir_var_shader_out &&
          mode != nir_var_shared &&
          mode != nir_var_uniform)
         continue;

      b->cursor = nir_before_instr(instr);

      const bool per_vertex =
         is_per_vertex_input(state, var) || is_per_vertex_output(state, var);

      nir_ssa_def *offset;
      nir_ssa_def *vertex_index;

      offset = get_io_offset(b, intrin->variables[0],
                             per_vertex ? &vertex_index : NULL,
                             state->type_size);

      nir_intrinsic_instr *replacement;

      switch (intrin->intrinsic) {
      case nir_intrinsic_load_var: {
         nir_intrinsic_instr *load =
            nir_intrinsic_instr_create(state->mem_ctx,
                                       load_op(mode, per_vertex));
         load->num_components = intrin->num_components;

         nir_intrinsic_set_base(load, var->data.driver_location);
         if (mode == nir_var_shader_in || mode == nir_var_shader_out) {
            nir_intrinsic_set_component(load, var->data.location_frac);
         }

         if (load->intrinsic == nir_intrinsic_load_uniform) {
            nir_intrinsic_set_range(load, state->type_size(var->type));
         }

         if (per_vertex)
            load->src[0] = nir_src_for_ssa(vertex_index);

         load->src[per_vertex ? 1 : 0] = nir_src_for_ssa(offset);

         replacement = load;
         break;
      }
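
      /* Note the resulting source layout: per-vertex loads get
       *
       *    src[0] = vertex_index, src[1] = offset
       *
       * while ordinary loads have just src[0] = offset.  Stores put the
       * value first, so the offset lands in src[1] or src[2].  This is the
       * ordering nir_get_io_offset_src() and nir_get_io_vertex_index_src()
       * rely on at the end of this file.
       */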

      case nir_intrinsic_store_var: {
         assert(mode == nir_var_shader_out || mode == nir_var_shared);

         nir_intrinsic_instr *store =
            nir_intrinsic_instr_create(state->mem_ctx,
                                       store_op(state, mode, per_vertex));
         store->num_components = intrin->num_components;

         nir_src_copy(&store->src[0], &intrin->src[0], store);

         nir_intrinsic_set_base(store, var->data.driver_location);
         if (mode == nir_var_shader_out) {
            nir_intrinsic_set_component(store, var->data.location_frac);
         }
         nir_intrinsic_set_write_mask(store, nir_intrinsic_write_mask(intrin));

         if (per_vertex)
            store->src[1] = nir_src_for_ssa(vertex_index);

         store->src[per_vertex ? 2 : 1] = nir_src_for_ssa(offset);

         replacement = store;
         break;
      }

      case nir_intrinsic_var_atomic_add:
      case nir_intrinsic_var_atomic_imin:
      case nir_intrinsic_var_atomic_umin:
      case nir_intrinsic_var_atomic_imax:
      case nir_intrinsic_var_atomic_umax:
      case nir_intrinsic_var_atomic_and:
      case nir_intrinsic_var_atomic_or:
      case nir_intrinsic_var_atomic_xor:
      case nir_intrinsic_var_atomic_exchange:
      case nir_intrinsic_var_atomic_comp_swap: {
         assert(mode == nir_var_shared);

         nir_intrinsic_instr *atomic =
            nir_intrinsic_instr_create(state->mem_ctx,
                                       atomic_op(intrin->intrinsic));

         atomic->src[0] = nir_src_for_ssa(offset);

         atomic->const_index[0] = var->data.driver_location;

         for (unsigned i = 0;
              i < nir_intrinsic_infos[intrin->intrinsic].num_srcs;
              i++) {
            nir_src_copy(&atomic->src[i + 1], &intrin->src[i], atomic);
         }

         replacement = atomic;
         break;
      }

      default:
         break;
      }

      if (nir_intrinsic_infos[intrin->intrinsic].has_dest) {
         if (intrin->dest.is_ssa) {
            nir_ssa_dest_init(&replacement->instr, &replacement->dest,
                              intrin->num_components,
                              intrin->dest.ssa.bit_size, NULL);
            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                     nir_src_for_ssa(&replacement->dest.ssa));
         } else {
            nir_dest_copy(&replacement->dest, &intrin->dest, state->mem_ctx);
         }
      }

      nir_instr_insert_before(&intrin->instr, &replacement->instr);
      nir_instr_remove(&intrin->instr);
   }

   return true;
}

static void
nir_lower_io_impl(nir_function_impl *impl,
                  nir_variable_mode modes,
                  int (*type_size)(const struct glsl_type *))
{
   struct lower_io_state state;

   nir_builder_init(&state.builder, impl);
   state.mem_ctx = ralloc_parent(impl);
   state.modes = modes;
   state.type_size = type_size;

   nir_foreach_block(block, impl) {
      nir_lower_io_block(block, &state);
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);
}

void
nir_lower_io(nir_shader *shader, nir_variable_mode modes,
             int (*type_size)(const struct glsl_type *))
{
   nir_foreach_function(function, shader) {
      if (function->impl)
         nir_lower_io_impl(function->impl, modes, type_size);
   }
}
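
/*
 * Typical driver usage (a sketch; type_size_vec4 is the same hypothetical
 * callback from the nir_assign_var_locations sketch above):
 *
 *    nir_lower_io(shader, nir_var_shader_in | nir_var_shader_out,
 *                 type_size_vec4);
 */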

/**
 * Return the offset source for a load/store intrinsic.
 */
nir_src *
nir_get_io_offset_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_uniform:
      return &instr->src[0];
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_store_output:
      return &instr->src[1];
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[2];
   default:
      return NULL;
   }
}

/**
 * Return the vertex index source for a load/store per_vertex intrinsic.
 */
nir_src *
nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
      return &instr->src[0];
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[1];
   default:
      return NULL;
   }
}