/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vc4_qir.h"
#include "tgsi/tgsi_info.h"
#include "glsl/nir/nir_builder.h"

/**
 * Walks the NIR generated by TGSI-to-NIR to lower its I/O intrinsics into
 * something amenable to the VC4 architecture.
 *
 * Currently, it splits inputs, outputs, and uniforms into scalars, drops any
 * non-position outputs in coordinate shaders, and fixes up the addressing on
 * indirect uniform loads.
 */
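
/*
 * As a rough illustration (a sketch only, not exact NIR print syntax): a
 * single vec4 load_uniform intrinsic at vec4 slot N becomes four scalar
 * load_uniform intrinsics at dword slots N*4 .. N*4+3, followed by a vec4
 * ALU op gathering the results back together, which the later ALU
 * scalarization pass then splits apart again.
 */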

static void
replace_intrinsic_with_vec4(nir_builder *b, nir_intrinsic_instr *intr,
                            nir_ssa_def **comps)
{
        /* Batch things back together into a vec4. This will get split by the
         * later ALU scalarization pass.
         */
        nir_ssa_def *vec = nir_vec4(b, comps[0], comps[1], comps[2], comps[3]);

        /* Replace the old intrinsic with a reference to our reconstructed
         * vec4.
         */
        nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec),
                                 ralloc_parent(b->impl));
        nir_instr_remove(&intr->instr);
}

static void
vc4_nir_lower_input(struct vc4_compile *c, nir_builder *b,
                    nir_intrinsic_instr *intr)
{
        nir_builder_insert_before_instr(b, &intr->instr);

        if (c->stage == QSTAGE_FRAG && intr->const_index[0] ==
            VC4_NIR_TLB_COLOR_READ_INPUT) {
                /* This doesn't need any lowering. */
                return;
        }

        nir_variable *input_var = NULL;
        foreach_list_typed(nir_variable, var, node, &c->s->inputs) {
                if (var->data.driver_location == intr->const_index[0]) {
                        input_var = var;
                        break;
                }
        }
        assert(input_var);
        int semantic_name = input_var->data.location;
        int semantic_index = input_var->data.index;

        /* All TGSI-to-NIR inputs are vec4. */
        assert(intr->num_components == 4);

        /* Generate scalar loads equivalent to the original VEC4. */
        nir_ssa_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, nir_intrinsic_load_input);
                intr_comp->num_components = 1;
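                /* Each vec4 input slot expands to four scalar slots, so
                 * component i of slot N lives at scalar slot N * 4 + i.
                 */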
                intr_comp->const_index[0] = intr->const_index[0] * 4 + i;
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);
                nir_builder_instr_insert(b, &intr_comp->instr);

                dests[i] = &intr_comp->dest.ssa;
        }

        switch (c->stage) {
        case QSTAGE_FRAG:
                switch (semantic_name) {
                case TGSI_SEMANTIC_FACE:
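                        /* Map the integer front-facing flag loaded above to
                         * TGSI's FACE convention of +1.0 for front-facing and
                         * -1.0 for back-facing (so the raw flag is presumably
                         * 0 for front faces and 1 for back faces).
                         */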
                        dests[0] = nir_fsub(b,
                                            nir_imm_float(b, 1.0),
                                            nir_fmul(b,
                                                     nir_i2f(b, dests[0]),
                                                     nir_imm_float(b, 2.0)));
                        dests[1] = nir_imm_float(b, 0.0);
                        dests[2] = nir_imm_float(b, 0.0);
                        dests[3] = nir_imm_float(b, 1.0);
                        break;
                case TGSI_SEMANTIC_GENERIC:
                        if (c->fs_key->point_sprite_mask &
                            (1 << semantic_index)) {
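                                /* Point-sprite varyings: keep the loaded .xy
                                 * (the point coordinate when drawing points,
                                 * zeroed otherwise), optionally flip .y for an
                                 * upper-left origin, and force .zw to (0, 1).
                                 */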
                                if (!c->fs_key->is_points) {
                                        dests[0] = nir_imm_float(b, 0.0);
                                        dests[1] = nir_imm_float(b, 0.0);
                                }
                                if (c->fs_key->point_coord_upper_left) {
                                        dests[1] = nir_fsub(b,
                                                            nir_imm_float(b, 1.0),
                                                            dests[1]);
                                }
                                dests[2] = nir_imm_float(b, 0.0);
                                dests[3] = nir_imm_float(b, 1.0);
                        }
                        break;
                }
                break;
        case QSTAGE_COORD:
        case QSTAGE_VERT:
                break;
        }

        replace_intrinsic_with_vec4(b, intr, dests);
}

static void
vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
                     nir_intrinsic_instr *intr)
{
        nir_variable *output_var = NULL;
        foreach_list_typed(nir_variable, var, node, &c->s->outputs) {
                if (var->data.driver_location == intr->const_index[0]) {
                        output_var = var;
                        break;
                }
        }
        assert(output_var);
        unsigned semantic_name = output_var->data.location;

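        /* Coordinate shaders only need to produce position and point size;
         * drop every other output.
         */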
        if (c->stage == QSTAGE_COORD &&
            (semantic_name != TGSI_SEMANTIC_POSITION &&
             semantic_name != TGSI_SEMANTIC_PSIZE)) {
                nir_instr_remove(&intr->instr);
                return;
        }

        /* Color output is lowered by vc4_nir_lower_blend(). */
        if (c->stage == QSTAGE_FRAG && semantic_name == TGSI_SEMANTIC_COLOR) {
                intr->const_index[0] *= 4;
                return;
        }

        /* All TGSI-to-NIR outputs are VEC4. */
        assert(intr->num_components == 4);

        nir_builder_insert_before_instr(b, &intr->instr);

        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, nir_intrinsic_store_output);
                intr_comp->num_components = 1;
                intr_comp->const_index[0] = intr->const_index[0] * 4 + i;

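                /* Extract component i of the original vec4 value as the
                 * source of this scalar store.
                 */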
                assert(intr->src[0].is_ssa);
                intr_comp->src[0] = nir_src_for_ssa(nir_swizzle(b,
                                                                intr->src[0].ssa,
                                                                &i, 1, false));
                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        nir_instr_remove(&intr->instr);
}

static void
vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
                      nir_intrinsic_instr *intr)
{
        /* All TGSI-to-NIR uniform loads are vec4, but we may create dword
         * loads in our lowering passes.
         */
        if (intr->num_components == 1)
                return;
        assert(intr->num_components == 4);

        nir_builder_insert_before_instr(b, &intr->instr);

        /* Generate scalar loads equivalent to the original VEC4. */
        nir_ssa_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, intr->intrinsic);
                intr_comp->num_components = 1;
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);

                if (intr->intrinsic == nir_intrinsic_load_uniform_indirect) {
                        /* Convert the variable TGSI register index to a byte
                         * offset (each TGSI register is a 16-byte vec4,
                         * hence the shift by 4).
                         */
                        intr_comp->src[0] =
                                nir_src_for_ssa(nir_ishl(b,
                                                         intr->src[0].ssa,
                                                         nir_imm_int(b, 4)));

                        /* Convert the constant offset to a byte index, too. */
                        intr_comp->const_index[0] = (intr->const_index[0] * 16 +
                                                     i * 4);
                } else {
                        /* We want a dword index for non-indirect uniform
                         * loads.
                         */
                        intr_comp->const_index[0] = (intr->const_index[0] * 4 +
                                                     i);
                }

                dests[i] = &intr_comp->dest.ssa;

                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        replace_intrinsic_with_vec4(b, intr, dests);
}

static void
vc4_nir_lower_io_instr(struct vc4_compile *c, nir_builder *b,
                       struct nir_instr *instr)
{
        if (instr->type != nir_instr_type_intrinsic)
                return;
        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

        switch (intr->intrinsic) {
        case nir_intrinsic_load_input:
                vc4_nir_lower_input(c, b, intr);
                break;

        case nir_intrinsic_store_output:
                vc4_nir_lower_output(c, b, intr);
                break;

        case nir_intrinsic_load_uniform:
        case nir_intrinsic_load_uniform_indirect:
                vc4_nir_lower_uniform(c, b, intr);
                break;

        default:
                break;
        }
}

static bool
vc4_nir_lower_io_block(nir_block *block, void *arg)
{
        struct vc4_compile *c = arg;
        nir_function_impl *impl =
                nir_cf_node_get_function(&block->cf_node);

        nir_builder b;
        nir_builder_init(&b, impl);

        nir_foreach_instr_safe(block, instr)
                vc4_nir_lower_io_instr(c, &b, instr);

        return true;
}

static bool
vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        nir_foreach_block(impl, vc4_nir_lower_io_block, c);

        nir_metadata_preserve(impl, nir_metadata_block_index |
                              nir_metadata_dominance);

        return true;
}

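/*
 * Pass entry point: lowers the I/O intrinsics in every function
 * implementation in the shader.
 */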
void
vc4_nir_lower_io(struct vc4_compile *c)
{
        nir_foreach_overload(c->s, overload) {
                if (overload->impl)
                        vc4_nir_lower_io_impl(c, overload->impl);
        }
}