vc4: Lower uniform loads to scalar in NIR.
[mesa.git] src/gallium/drivers/vc4/vc4_nir_lower_io.c
/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vc4_qir.h"
#include "tgsi/tgsi_info.h"
#include "glsl/nir/nir_builder.h"

/**
 * Walks the NIR generated by TGSI-to-NIR to lower its I/O intrinsics into
 * something amenable to the VC4 architecture.
 *
 * Currently, it splits inputs, outputs, and uniforms into scalars, drops
 * any non-position outputs in coordinate shaders, and fixes up the
 * addressing on indirect uniform loads.
 */
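
/* Illustrative sketch (pseudo-NIR, not exact syntax): a vec4 load such as
 *
 *         vec4 ssa_1 = load_input [base 1]
 *
 * is rewritten into four scalar loads plus a gather that the original
 * users are pointed at:
 *
 *         ssa_2 = load_input [base 4]
 *         ssa_3 = load_input [base 5]
 *         ssa_4 = load_input [base 6]
 *         ssa_5 = load_input [base 7]
 *         vec4 ssa_6 = vec ssa_2, ssa_3, ssa_4, ssa_5
 *
 * The vec4 gather is then cleaned up by the later ALU scalarization pass.
 */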

static void
replace_intrinsic_with_vec4(nir_builder *b, nir_intrinsic_instr *intr,
                            nir_ssa_def **comps)
{
        /* Batch things back together into a vec4.  This will get split by
         * the later ALU scalarization pass.
         */
        nir_ssa_def *vec = nir_vec4(b, comps[0], comps[1], comps[2], comps[3]);

        /* Replace the old intrinsic with a reference to our reconstructed
         * vec4.
         */
        nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec),
                                 ralloc_parent(b->impl));
        nir_instr_remove(&intr->instr);
}

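/* Splits a vec4 load_input intrinsic into four scalar loads, applying
 * per-semantic fixups (front-facing, point sprite coordinates) before
 * gathering the components back into a vec4.
 */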
static void
vc4_nir_lower_input(struct vc4_compile *c, nir_builder *b,
                    nir_intrinsic_instr *intr)
{
        /* All TGSI-to-NIR inputs are vec4. */
        assert(intr->num_components == 4);

        nir_builder_insert_before_instr(b, &intr->instr);

        nir_variable *input_var = NULL;
        foreach_list_typed(nir_variable, var, node, &c->s->inputs) {
                if (var->data.driver_location == intr->const_index[0]) {
                        input_var = var;
                        break;
                }
        }
        assert(input_var);
        int semantic_name = input_var->data.location;
        int semantic_index = input_var->data.index;

        /* Generate scalar loads equivalent to the original VEC4. */
        nir_ssa_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, nir_intrinsic_load_input);
                intr_comp->num_components = 1;
                intr_comp->const_index[0] = intr->const_index[0] * 4 + i;
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);
                nir_builder_instr_insert(b, &intr_comp->instr);

                dests[i] = &intr_comp->dest.ssa;
        }

        switch (c->stage) {
        case QSTAGE_FRAG:
                switch (semantic_name) {
                case TGSI_SEMANTIC_FACE:
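                        /* The input is 0 or 1, so 1.0 - 2.0 * x maps it to
                         * 1.0 or -1.0, matching TGSI's convention that
                         * FACE is positive for front-facing.
                         */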
                        dests[0] = nir_fsub(b,
                                            nir_imm_float(b, 1.0),
                                            nir_fmul(b,
                                                     nir_i2f(b, dests[0]),
                                                     nir_imm_float(b, 2.0)));
                        dests[1] = nir_imm_float(b, 0.0);
                        dests[2] = nir_imm_float(b, 0.0);
                        dests[3] = nir_imm_float(b, 1.0);
                        break;
                case TGSI_SEMANTIC_GENERIC:
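                        /* Substitute the point sprite coordinate for the
                         * varying: s and t are zeroed when not drawing
                         * points, t is flipped for an upper-left origin,
                         * and z/w are forced to 0.0 and 1.0.
                         */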
                        if (c->fs_key->point_sprite_mask &
                            (1 << semantic_index)) {
                                if (!c->fs_key->is_points) {
                                        dests[0] = nir_imm_float(b, 0.0);
                                        dests[1] = nir_imm_float(b, 0.0);
                                }
                                if (c->fs_key->point_coord_upper_left) {
                                        dests[1] = nir_fsub(b,
                                                            nir_imm_float(b, 1.0),
                                                            dests[1]);
                                }
                                dests[2] = nir_imm_float(b, 0.0);
                                dests[3] = nir_imm_float(b, 1.0);
                        }
                        break;
                }
                break;
        case QSTAGE_COORD:
        case QSTAGE_VERT:
                break;
        }

        replace_intrinsic_with_vec4(b, intr, dests);
}

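/* Splits a vec4 store_output intrinsic into four scalar stores.  In
 * coordinate shaders, any output other than position or point size is
 * simply removed.
 */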
static void
vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
                     nir_intrinsic_instr *intr)
{
        nir_variable *output_var = NULL;
        foreach_list_typed(nir_variable, var, node, &c->s->outputs) {
                if (var->data.driver_location == intr->const_index[0]) {
                        output_var = var;
                        break;
                }
        }
        assert(output_var);
        unsigned semantic_name = output_var->data.location;

        if (c->stage == QSTAGE_COORD &&
            (semantic_name != TGSI_SEMANTIC_POSITION &&
             semantic_name != TGSI_SEMANTIC_PSIZE)) {
                nir_instr_remove(&intr->instr);
                return;
        }

        /* All TGSI-to-NIR outputs are VEC4. */
        assert(intr->num_components == 4);

        nir_builder_insert_before_instr(b, &intr->instr);

        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, nir_intrinsic_store_output);
                intr_comp->num_components = 1;
                intr_comp->const_index[0] = intr->const_index[0] * 4 + i;

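                /* Take component i of the stored vec4 as the scalar
                 * source.
                 */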
                assert(intr->src[0].is_ssa);
                intr_comp->src[0] = nir_src_for_ssa(nir_swizzle(b,
                                                                intr->src[0].ssa,
                                                                &i, 1, false));
                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        nir_instr_remove(&intr->instr);
}

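/* Splits a vec4 uniform load into four scalar loads, converting the
 * vec4-granular addressing into the byte (indirect) or dword (direct)
 * offsets the scalar loads use.
 */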
static void
vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
                      nir_intrinsic_instr *intr)
{
        /* All TGSI-to-NIR uniform loads are vec4. */
        assert(intr->num_components == 4);

        nir_builder_insert_before_instr(b, &intr->instr);

        /* Generate scalar loads equivalent to the original VEC4. */
        nir_ssa_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, intr->intrinsic);
                intr_comp->num_components = 1;
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);

                if (intr->intrinsic == nir_intrinsic_load_uniform_indirect) {
                        /* Convert the variable TGSI register index to a byte
                         * offset.
                         */
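                        /* A vec4 of 32-bit uniforms occupies 16 bytes, so
                         * shifting the register index left by 4 multiplies
                         * it by 16; i * 4 below then selects the 4-byte
                         * component within the vec4.
                         */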
                        intr_comp->src[0] =
                                nir_src_for_ssa(nir_ishl(b,
                                                         intr->src[0].ssa,
                                                         nir_imm_int(b, 4)));

                        /* Convert the offset to be a byte index, too. */
                        intr_comp->const_index[0] = (intr->const_index[0] * 16 +
                                                     i * 4);
                } else {
                        /* We want a dword index for non-indirect uniform
                         * loads.
                         */
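                        /* For example, uniform register 2, component 1
                         * becomes dword index 2 * 4 + 1 = 9.
                         */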
                        intr_comp->const_index[0] = (intr->const_index[0] * 4 +
                                                     i);
                }

                dests[i] = &intr_comp->dest.ssa;

                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        replace_intrinsic_with_vec4(b, intr, dests);
}

static void
vc4_nir_lower_io_instr(struct vc4_compile *c, nir_builder *b,
                       struct nir_instr *instr)
{
        if (instr->type != nir_instr_type_intrinsic)
                return;
        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

        switch (intr->intrinsic) {
        case nir_intrinsic_load_input:
                vc4_nir_lower_input(c, b, intr);
                break;

        case nir_intrinsic_store_output:
                vc4_nir_lower_output(c, b, intr);
                break;

        case nir_intrinsic_load_uniform:
        case nir_intrinsic_load_uniform_indirect:
                vc4_nir_lower_uniform(c, b, intr);
                break;

        default:
                break;
        }
}

static bool
vc4_nir_lower_io_block(nir_block *block, void *arg)
{
        struct vc4_compile *c = arg;
        nir_function_impl *impl =
                nir_cf_node_get_function(&block->cf_node);

        nir_builder b;
        nir_builder_init(&b, impl);

        nir_foreach_instr_safe(block, instr)
                vc4_nir_lower_io_instr(c, &b, instr);

        return true;
}

static bool
vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        nir_foreach_block(impl, vc4_nir_lower_io_block, c);

        nir_metadata_preserve(impl, nir_metadata_block_index |
                              nir_metadata_dominance);

        return true;
}

void
vc4_nir_lower_io(struct vc4_compile *c)
{
        nir_foreach_overload(c->s, overload) {
                if (overload->impl)
                        vc4_nir_lower_io_impl(c, overload->impl);
        }
}