/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vc4_qir.h"
#include "tgsi/tgsi_info.h"
#include "glsl/nir/nir_builder.h"

/**
 * Walks the NIR generated by TGSI-to-NIR to lower its io intrinsics into
 * something amenable to the VC4 architecture.
 *
 * Currently, it splits inputs, outputs, and uniforms into scalars, drops any
 * non-position outputs in coordinate shaders, and fixes up the addressing on
 * indirect uniform loads.
 */
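
/*
 * For example (illustrative NIR, not actual pass output), a vec4 load such
 * as
 *
 *         vec4 ssa_1 = intrinsic load_input () () (0)
 *
 * becomes four single-component loads at scalar locations 0..3, batched
 * back into a vec4 so existing users are unaffected:
 *
 *         vec1 ssa_2 = intrinsic load_input () () (0)
 *         vec1 ssa_3 = intrinsic load_input () () (1)
 *         vec1 ssa_4 = intrinsic load_input () () (2)
 *         vec1 ssa_5 = intrinsic load_input () () (3)
 *         vec4 ssa_6 = vec4 ssa_2, ssa_3, ssa_4, ssa_5
 */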

static void
replace_intrinsic_with_vec4(nir_builder *b, nir_intrinsic_instr *intr,
                            nir_ssa_def **comps)
{
        /* Batch things back together into a vec4.  This will get split by
         * the later ALU scalarization pass.
         */
        nir_ssa_def *vec = nir_vec4(b, comps[0], comps[1], comps[2], comps[3]);

        /* Replace the old intrinsic with a reference to our reconstructed
         * vec4.
         */
        nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec),
                                 ralloc_parent(b->impl));
        nir_instr_remove(&intr->instr);
}

static void
vc4_nir_lower_input(struct vc4_compile *c, nir_builder *b,
                    nir_intrinsic_instr *intr)
{
        /* All TGSI-to-NIR inputs are vec4. */
        assert(intr->num_components == 4);

        nir_builder_insert_before_instr(b, &intr->instr);

        /* Find the input variable for this slot so we can recover its TGSI
         * semantics.
         */
        nir_variable *input_var = NULL;
        foreach_list_typed(nir_variable, var, node, &c->s->inputs) {
                if (var->data.driver_location == intr->const_index[0]) {
                        input_var = var;
                        break;
                }
        }
        assert(input_var);

        int semantic_name = input_var->data.location;
        int semantic_index = input_var->data.index;

        /* Generate scalar loads equivalent to the original VEC4. */
        nir_ssa_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, nir_intrinsic_load_input);
                intr_comp->num_components = 1;
                intr_comp->const_index[0] = intr->const_index[0] * 4 + i;
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);
                nir_builder_instr_insert(b, &intr_comp->instr);

                dests[i] = &intr_comp->dest.ssa;
        }
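
        /* Fix up some inputs to match what TGSI expects.  The raw FACE
         * value is presumably 0.0 for front-facing and 1.0 for back-facing,
         * so the 1.0 - 2.0 * x below remaps it to TGSI's +1.0 (front) /
         * -1.0 (back) convention.  Point-sprite varyings get replaced with
         * the point coordinate, flipped vertically when the sprite origin
         * is the upper left.
         */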
        switch (semantic_name) {
        case TGSI_SEMANTIC_FACE:
                dests[0] = nir_fsub(b,
                                    nir_imm_float(b, 1.0),
                                    nir_fmul(b,
                                             dests[0],
                                             nir_imm_float(b, 2.0)));
                dests[1] = nir_imm_float(b, 0.0);
                dests[2] = nir_imm_float(b, 0.0);
                dests[3] = nir_imm_float(b, 1.0);
                break;
        case TGSI_SEMANTIC_GENERIC:
                if (c->fs_key->point_sprite_mask &
                    (1 << semantic_index)) {
                        if (!c->fs_key->is_points) {
                                dests[0] = nir_imm_float(b, 0.0);
                                dests[1] = nir_imm_float(b, 0.0);
                        }
                        if (c->fs_key->point_coord_upper_left) {
                                dests[1] = nir_fsub(b,
                                                    nir_imm_float(b, 1.0),
                                                    dests[1]);
                        }
                        dests[2] = nir_imm_float(b, 0.0);
                        dests[3] = nir_imm_float(b, 1.0);
                }
                break;
        }

        replace_intrinsic_with_vec4(b, intr, dests);
}

static void
vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
                     nir_intrinsic_instr *intr)
{
        nir_variable *output_var = NULL;
        foreach_list_typed(nir_variable, var, node, &c->s->outputs) {
                if (var->data.driver_location == intr->const_index[0]) {
                        output_var = var;
                        break;
                }
        }
        assert(output_var);

        unsigned semantic_name = output_var->data.location;
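
        /* In a coordinate shader (the binning-pass variant of the vertex
         * shader), only position and point size feed the tiler, so stores
         * to any other output can simply be dropped.
         */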
        if (c->stage == QSTAGE_COORD &&
            (semantic_name != TGSI_SEMANTIC_POSITION &&
             semantic_name != TGSI_SEMANTIC_PSIZE)) {
                nir_instr_remove(&intr->instr);
                return;
        }

        /* All TGSI-to-NIR outputs are VEC4. */
        assert(intr->num_components == 4);

        nir_builder_insert_before_instr(b, &intr->instr);

        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, nir_intrinsic_store_output);
                intr_comp->num_components = 1;
                intr_comp->const_index[0] = intr->const_index[0] * 4 + i;

                assert(intr->src[0].is_ssa);
                intr_comp->src[0] = nir_src_for_ssa(nir_swizzle(b,
                                                                intr->src[0].ssa,
                                                                &i, 1, false));
                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        nir_instr_remove(&intr->instr);
}

static void
vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
                      nir_intrinsic_instr *intr)
{
        /* All TGSI-to-NIR uniform loads are vec4. */
        assert(intr->num_components == 4);

        nir_builder_insert_before_instr(b, &intr->instr);

        /* Generate scalar loads equivalent to the original VEC4. */
        nir_ssa_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, intr->intrinsic);
                intr_comp->num_components = 1;
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);
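
                /* Note on the addressing below: indirect loads take a
                 * byte-based offset, so the variable vec4 register index
                 * gets shifted left by 4 (multiplied by 16, the size of a
                 * vec4) and the constant offset becomes 16 * vec4 index +
                 * 4 * component.  Direct loads instead use a plain dword
                 * (scalar slot) index.
                 */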
                if (intr->intrinsic == nir_intrinsic_load_uniform_indirect) {
                        /* Convert the variable TGSI register index to a byte
                         * offset.
                         */
                        intr_comp->src[0] =
                                nir_src_for_ssa(nir_ishl(b,
                                                         intr->src[0].ssa,
                                                         nir_imm_int(b, 4)));

                        /* Convert the offset to be a byte index, too. */
                        intr_comp->const_index[0] = (intr->const_index[0] * 16 +
                                                     i * 4);
                } else {
                        /* We want a dword index for non-indirect uniform
                         * loads.
                         */
                        intr_comp->const_index[0] = (intr->const_index[0] * 4 +
                                                     i);
                }

                dests[i] = &intr_comp->dest.ssa;

                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        replace_intrinsic_with_vec4(b, intr, dests);
}

static void
vc4_nir_lower_io_instr(struct vc4_compile *c, nir_builder *b,
                       struct nir_instr *instr)
{
        if (instr->type != nir_instr_type_intrinsic)
                return;
        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

        switch (intr->intrinsic) {
        case nir_intrinsic_load_input:
                vc4_nir_lower_input(c, b, intr);
                break;

        case nir_intrinsic_store_output:
                vc4_nir_lower_output(c, b, intr);
                break;

        case nir_intrinsic_load_uniform:
        case nir_intrinsic_load_uniform_indirect:
                vc4_nir_lower_uniform(c, b, intr);
                break;

        default:
                break;
        }
}

static bool
vc4_nir_lower_io_block(nir_block *block, void *arg)
{
        struct vc4_compile *c = arg;
        nir_function_impl *impl =
                nir_cf_node_get_function(&block->cf_node);

        nir_builder b;
        nir_builder_init(&b, impl);

        /* The _safe iterator is required here: lowering removes the
         * intrinsic currently being visited.
         */
        nir_foreach_instr_safe(block, instr)
                vc4_nir_lower_io_instr(c, &b, instr);

        return true;
}

static void
vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        nir_foreach_block(impl, vc4_nir_lower_io_block, c);

        nir_metadata_preserve(impl, nir_metadata_block_index |
                              nir_metadata_dominance);
}

void
vc4_nir_lower_io(struct vc4_compile *c)
{
        nir_foreach_overload(c->s, overload) {
                if (overload->impl)
                        vc4_nir_lower_io_impl(c, overload->impl);
        }
}
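
/* A sketch of how a caller might drive this pass (illustrative only; the
 * surrounding driver code is assumed, not part of this file):
 *
 *         c->s = tgsi_to_nir(tokens, &options);
 *         vc4_nir_lower_io(c);
 *
 * i.e. it runs on the freshly translated shader, before the rest of the
 * backend consumes the NIR.
 */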