/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
/*
 * This lowering pass converts load/store references to input/output
 * variables into the corresponding input/output intrinsics.
 */
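
/*
 * For example (an illustrative sketch, not exact NIR print syntax; the
 * variable name is made up):
 *
 *    vec4 ssa_2 = intrinsic load_var (color_in) ()
 *
 * becomes, once color_in has a driver_location and a driver-supplied
 * type_size callback has been used to compute its offset ssa_1:
 *
 *    vec4 ssa_2 = intrinsic load_input (ssa_1) (base)
 */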
#include "nir.h"
#include "nir_builder.h"
struct lower_io_state {
   nir_builder builder;
   void *mem_ctx;
   int (*type_size)(const struct glsl_type *type);
   nir_variable_mode modes;
};
void
nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                         int (*type_size)(const struct glsl_type *))
{
   unsigned location = 0;

   nir_foreach_variable(var, var_list) {
      /*
       * UBOs have their own address spaces, so don't count them towards the
       * number of global uniforms.
       */
      if ((var->data.mode == nir_var_uniform ||
           var->data.mode == nir_var_shader_storage) &&
          var->interface_type != NULL)
         continue;

      var->data.driver_location = location;
      location += type_size(var->type);
   }

   *size = location;
}
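
/*
 * A minimal usage sketch (hypothetical driver code; the driver_type_size
 * callback named here is assumed, not part of this file): assign contiguous
 * driver_locations to every non-interface uniform and get the total size
 * back through the second argument:
 *
 *    unsigned num_uniforms;
 *    nir_assign_var_locations(&shader->uniforms, &num_uniforms,
 *                             driver_type_size);
 *
 * The units of num_uniforms (scalars, vec4 slots, bytes, ...) are whatever
 * driver_type_size counts in.
 */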
/**
 * Returns true if we're processing a stage whose inputs are arrays indexed
 * by a vertex number (such as geometry shader inputs).
 */
static bool
is_per_vertex_input(struct lower_io_state *state, nir_variable *var)
{
   gl_shader_stage stage = state->builder.shader->stage;

   return var->data.mode == nir_var_shader_in && !var->data.patch &&
          (stage == MESA_SHADER_TESS_CTRL ||
           stage == MESA_SHADER_TESS_EVAL ||
           stage == MESA_SHADER_GEOMETRY);
}
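
/*
 * For example, a GLSL geometry shader input "in vec4 color[];" is read as
 * color[vertex]; that outer [vertex] index is the per-vertex index that
 * get_io_offset() below splits out into a separate source.
 */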
static bool
is_per_vertex_output(struct lower_io_state *state, nir_variable *var)
{
   gl_shader_stage stage = state->builder.shader->stage;

   return var->data.mode == nir_var_shader_out && !var->data.patch &&
          stage == MESA_SHADER_TESS_CTRL;
}
static nir_ssa_def *
get_io_offset(nir_builder *b, nir_deref_var *deref,
              nir_ssa_def **vertex_index,
              int (*type_size)(const struct glsl_type *))
{
   nir_deref *tail = &deref->deref;

   /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
    * outermost array index separate.  Process the rest normally.
    */
   if (vertex_index != NULL) {
      tail = tail->child;
      assert(tail->deref_type == nir_deref_type_array);
      nir_deref_array *deref_array = nir_deref_as_array(tail);

      nir_ssa_def *vtx = nir_imm_int(b, deref_array->base_offset);
      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         vtx = nir_iadd(b, vtx, nir_ssa_for_src(b, deref_array->indirect, 1));
      }
      *vertex_index = vtx;
   }

   /* Just emit code and let constant-folding go to town */
   nir_ssa_def *offset = nir_imm_int(b, 0);

   while (tail->child != NULL) {
      const struct glsl_type *parent_type = tail->type;
      tail = tail->child;

      if (tail->deref_type == nir_deref_type_array) {
         nir_deref_array *deref_array = nir_deref_as_array(tail);
         unsigned size = type_size(tail->type);

         offset = nir_iadd(b, offset,
                           nir_imm_int(b, size * deref_array->base_offset));

         if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
            nir_ssa_def *mul =
               nir_imul(b, nir_imm_int(b, size),
                        nir_ssa_for_src(b, deref_array->indirect, 1));

            offset = nir_iadd(b, offset, mul);
         }
      } else if (tail->deref_type == nir_deref_type_struct) {
         nir_deref_struct *deref_struct = nir_deref_as_struct(tail);

         unsigned field_offset = 0;
         for (unsigned i = 0; i < deref_struct->index; i++) {
            field_offset += type_size(glsl_get_struct_field(parent_type, i));
         }
         offset = nir_iadd(b, offset, nir_imm_int(b, field_offset));
      }
   }

   return offset;
}
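
/*
 * As a worked sketch (hypothetical shader types), a deref chain like
 * s.arr[i].f produces roughly:
 *
 *    offset  = 0
 *    offset += field offsets inside s preceding arr      (constant)
 *    offset += type_size(element) * (base_offset + i)    (array step,
 *                                                         partly indirect)
 *    offset += field offsets inside element preceding f  (constant)
 *
 * The constant parts are emitted as immediates and collapsed by later
 * constant folding.
 */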
static nir_intrinsic_op
load_op(struct lower_io_state *state,
        nir_variable_mode mode, bool per_vertex)
{
   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_shader_in:
      op = per_vertex ? nir_intrinsic_load_per_vertex_input :
                        nir_intrinsic_load_input;
      break;
   case nir_var_shader_out:
      op = per_vertex ? nir_intrinsic_load_per_vertex_output :
                        nir_intrinsic_load_output;
      break;
   case nir_var_uniform:
      op = nir_intrinsic_load_uniform;
      break;
   case nir_var_shared:
      op = nir_intrinsic_load_shared;
      break;
   default:
      unreachable("Unknown variable mode");
   }
   return op;
}
static nir_intrinsic_op
store_op(struct lower_io_state *state,
         nir_variable_mode mode, bool per_vertex)
{
   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_shader_in:
   case nir_var_shader_out:
      op = per_vertex ? nir_intrinsic_store_per_vertex_output :
                        nir_intrinsic_store_output;
      break;
   case nir_var_shared:
      op = nir_intrinsic_store_shared;
      break;
   default:
      unreachable("Unknown variable mode");
   }
   return op;
}
static nir_intrinsic_op
atomic_op(nir_intrinsic_op opcode)
{
   switch (opcode) {
#define OP(O) case nir_intrinsic_var_##O: return nir_intrinsic_shared_##O;
   OP(atomic_exchange)
   OP(atomic_comp_swap)
   OP(atomic_add)
   OP(atomic_imin)
   OP(atomic_umin)
   OP(atomic_imax)
   OP(atomic_umax)
   OP(atomic_and)
   OP(atomic_or)
   OP(atomic_xor)
#undef OP
   default:
      unreachable("Invalid atomic");
   }
}
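
/*
 * Each OP(...) line expands to a case that maps a variable-based atomic to
 * its shared-memory counterpart; e.g. OP(atomic_add) expands to:
 *
 *    case nir_intrinsic_var_atomic_add:
 *       return nir_intrinsic_shared_atomic_add;
 */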
static bool
nir_lower_io_block(nir_block *block,
                   struct lower_io_state *state)
{
   nir_builder *b = &state->builder;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_var:
      case nir_intrinsic_store_var:
      case nir_intrinsic_var_atomic_add:
      case nir_intrinsic_var_atomic_imin:
      case nir_intrinsic_var_atomic_umin:
      case nir_intrinsic_var_atomic_imax:
      case nir_intrinsic_var_atomic_umax:
      case nir_intrinsic_var_atomic_and:
      case nir_intrinsic_var_atomic_or:
      case nir_intrinsic_var_atomic_xor:
      case nir_intrinsic_var_atomic_exchange:
      case nir_intrinsic_var_atomic_comp_swap:
         /* We can lower the IO for this NIR intrinsic */
         break;
      default:
         /* We can't lower the IO for this NIR intrinsic, so skip it */
         continue;
      }
      nir_variable_mode mode = intrin->variables[0]->var->data.mode;

      if ((state->modes & mode) == 0)
         continue;

      if (mode != nir_var_shader_in &&
          mode != nir_var_shader_out &&
          mode != nir_var_shared &&
          mode != nir_var_uniform)
         continue;

      b->cursor = nir_before_instr(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_var: {
         bool per_vertex =
            is_per_vertex_input(state, intrin->variables[0]->var) ||
            is_per_vertex_output(state, intrin->variables[0]->var);

         nir_ssa_def *offset;
         nir_ssa_def *vertex_index;

         offset = get_io_offset(b, intrin->variables[0],
                                per_vertex ? &vertex_index : NULL,
                                state->type_size);

         nir_intrinsic_instr *load =
            nir_intrinsic_instr_create(state->mem_ctx,
                                       load_op(state, mode, per_vertex));
         load->num_components = intrin->num_components;

         nir_intrinsic_set_base(load,
            intrin->variables[0]->var->data.driver_location);

         if (load->intrinsic == nir_intrinsic_load_uniform) {
            nir_intrinsic_set_range(load,
               state->type_size(intrin->variables[0]->var->type));
         }

         if (per_vertex)
            load->src[0] = nir_src_for_ssa(vertex_index);

         load->src[per_vertex ? 1 : 0] = nir_src_for_ssa(offset);

         if (intrin->dest.is_ssa) {
            nir_ssa_dest_init(&load->instr, &load->dest,
                              intrin->num_components,
                              intrin->dest.ssa.bit_size, NULL);
            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                     nir_src_for_ssa(&load->dest.ssa));
         } else {
            nir_dest_copy(&load->dest, &intrin->dest, state->mem_ctx);
         }

         nir_instr_insert_before(&intrin->instr, &load->instr);
         nir_instr_remove(&intrin->instr);
         break;
      }
      case nir_intrinsic_store_var: {
         assert(mode == nir_var_shader_out || mode == nir_var_shared);

         nir_ssa_def *offset;
         nir_ssa_def *vertex_index;

         bool per_vertex =
            is_per_vertex_output(state, intrin->variables[0]->var);

         offset = get_io_offset(b, intrin->variables[0],
                                per_vertex ? &vertex_index : NULL,
                                state->type_size);

         nir_intrinsic_instr *store =
            nir_intrinsic_instr_create(state->mem_ctx,
                                       store_op(state, mode, per_vertex));
         store->num_components = intrin->num_components;

         nir_src_copy(&store->src[0], &intrin->src[0], store);

         nir_intrinsic_set_base(store,
            intrin->variables[0]->var->data.driver_location);
         nir_intrinsic_set_write_mask(store, nir_intrinsic_write_mask(intrin));

         if (per_vertex)
            store->src[1] = nir_src_for_ssa(vertex_index);

         store->src[per_vertex ? 2 : 1] = nir_src_for_ssa(offset);

         nir_instr_insert_before(&intrin->instr, &store->instr);
         nir_instr_remove(&intrin->instr);
         break;
      }
      case nir_intrinsic_var_atomic_add:
      case nir_intrinsic_var_atomic_imin:
      case nir_intrinsic_var_atomic_umin:
      case nir_intrinsic_var_atomic_imax:
      case nir_intrinsic_var_atomic_umax:
      case nir_intrinsic_var_atomic_and:
      case nir_intrinsic_var_atomic_or:
      case nir_intrinsic_var_atomic_xor:
      case nir_intrinsic_var_atomic_exchange:
      case nir_intrinsic_var_atomic_comp_swap: {
         assert(mode == nir_var_shared);

         nir_ssa_def *offset;

         offset = get_io_offset(b, intrin->variables[0],
                                NULL, state->type_size);

         nir_intrinsic_instr *atomic =
            nir_intrinsic_instr_create(state->mem_ctx,
                                       atomic_op(intrin->intrinsic));

         atomic->src[0] = nir_src_for_ssa(offset);

         atomic->const_index[0] =
            intrin->variables[0]->var->data.driver_location;

         /* Copy the data sources; use nir_intrinsic_infos (not nir_op_infos,
          * which is indexed by ALU opcodes) to get the source count.
          */
         for (unsigned i = 0;
              i < nir_intrinsic_infos[intrin->intrinsic].num_srcs;
              i++) {
            nir_src_copy(&atomic->src[i + 1], &intrin->src[i], atomic);
         }

         if (intrin->dest.is_ssa) {
            nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                              intrin->dest.ssa.num_components,
                              intrin->dest.ssa.bit_size, NULL);
            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                     nir_src_for_ssa(&atomic->dest.ssa));
         } else {
            nir_dest_copy(&atomic->dest, &intrin->dest, state->mem_ctx);
         }

         nir_instr_insert_before(&intrin->instr, &atomic->instr);
         nir_instr_remove(&intrin->instr);
         break;
      }

      default:
         break;
      }
   }

   return true;
}
static void
nir_lower_io_impl(nir_function_impl *impl,
                  nir_variable_mode modes,
                  int (*type_size)(const struct glsl_type *))
{
   struct lower_io_state state;

   nir_builder_init(&state.builder, impl);
   state.mem_ctx = ralloc_parent(impl);
   state.modes = modes;
   state.type_size = type_size;

   nir_foreach_block(block, impl) {
      nir_lower_io_block(block, &state);
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);
}
void
nir_lower_io(nir_shader *shader, nir_variable_mode modes,
             int (*type_size)(const struct glsl_type *))
{
   nir_foreach_function(function, shader) {
      if (function->impl)
         nir_lower_io_impl(function->impl, modes, type_size);
   }
}
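
/*
 * A minimal usage sketch (hypothetical driver code; driver_type_size_vec4
 * is an assumed callback, not part of this file): lower all vertex inputs
 * and outputs, counting locations in vec4 slots:
 *
 *    nir_lower_io(shader, nir_var_shader_in | nir_var_shader_out,
 *                 driver_type_size_vec4);
 */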
/**
 * Return the offset source for a load/store intrinsic.
 */
nir_src *
nir_get_io_offset_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_uniform:
      return &instr->src[0];
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_store_output:
      return &instr->src[1];
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[2];
   default:
      return NULL;
   }
}
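
/*
 * Backends typically use this to detect compile-time-constant offsets,
 * e.g. (a sketch; error handling omitted):
 *
 *    nir_src *offset_src = nir_get_io_offset_src(intrin);
 *    nir_const_value *const_offset = nir_src_as_const_value(*offset_src);
 *    if (const_offset) {
 *       use direct addressing with const_offset->u32[0]
 *    }
 */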
/**
 * Return the vertex index source for a load/store per_vertex intrinsic.
 */
nir_src *
nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
      return &instr->src[0];
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[1];
   default:
      return NULL;
   }
}