/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vc4_qir.h"
#include "glsl/nir/nir_builder.h"
#include "util/u_format.h"

/**
 * Walks the NIR generated by TGSI-to-NIR to lower its I/O intrinsics into
 * something amenable to the VC4 architecture.
 *
 * Currently, it splits inputs, outputs, and uniforms into scalars, drops any
 * non-position outputs in coordinate shaders, and fixes up the addressing on
 * indirect uniform loads.
 */

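/* For example, a vec4 "load_input" at driver_location N becomes four scalar
 * load_input intrinsics at locations N * 4 .. N * 4 + 3, recombined into a
 * vec4 that the later ALU scalarization pass splits back up.
 */
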
static void
replace_intrinsic_with_vec4(nir_builder *b, nir_intrinsic_instr *intr,
                            nir_ssa_def **comps)
{
        /* Batch things back together into a vec4.  This will get split by
         * the later ALU scalarization pass.
         */
        nir_ssa_def *vec = nir_vec4(b, comps[0], comps[1], comps[2], comps[3]);

        /* Replace the old intrinsic with a reference to our reconstructed
         * vec4.
         */
        nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec));
        nir_instr_remove(&intr->instr);
}

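/** Returns the 8-bit field as an unsigned 32-bit value. */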
static nir_ssa_def *
vc4_nir_unpack_8i(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        return nir_ubitfield_extract(b,
                                     src,
                                     nir_imm_int(b, 8 * chan),
                                     nir_imm_int(b, 8));
}

/** Returns the 16-bit field as a sign-extended 32-bit value. */
static nir_ssa_def *
vc4_nir_unpack_16i(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        return nir_ibitfield_extract(b,
                                     src,
                                     nir_imm_int(b, 16 * chan),
                                     nir_imm_int(b, 16));
}

/** Returns the 16-bit field as an unsigned 32-bit value. */
static nir_ssa_def *
vc4_nir_unpack_16u(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        if (chan == 0) {
                return nir_iand(b, src, nir_imm_int(b, 0xffff));
        } else {
                return nir_ushr(b, src, nir_imm_int(b, 16));
        }
}

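/** Returns the 8-bit unorm field as a float in [0.0, 1.0]. */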
static nir_ssa_def *
vc4_nir_unpack_8f(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        return nir_channel(b, nir_unpack_unorm_4x8(b, src), chan);
}

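/**
 * Emits the NIR to turn raw VPM dword reads into one format-converted
 * channel of a vertex attribute, or returns NULL if the format is
 * unsupported.
 */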
static nir_ssa_def *
vc4_nir_get_vattr_channel_vpm(struct vc4_compile *c,
                              nir_builder *b,
                              nir_ssa_def **vpm_reads,
                              uint8_t swiz,
                              const struct util_format_description *desc)
{
        const struct util_format_channel_description *chan =
                &desc->channel[swiz];
        nir_ssa_def *temp;

        if (swiz > UTIL_FORMAT_SWIZZLE_W) {
                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_FLOAT) {
                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                if (chan->normalized) {
                        return nir_fmul(b,
                                        nir_i2f(b, vpm_reads[swiz]),
                                        nir_imm_float(b,
                                                      1.0 / 0x7fffffff));
                } else {
                        return nir_i2f(b, vpm_reads[swiz]);
                }
        } else if (chan->size == 8 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                nir_ssa_def *vpm = vpm_reads[0];
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = nir_ixor(b, vpm, nir_imm_int(b, 0x80808080));
                        if (chan->normalized) {
                                return nir_fsub(b, nir_fmul(b,
                                                            vc4_nir_unpack_8f(b, temp, swiz),
                                                            nir_imm_float(b, 2.0)),
                                                nir_imm_float(b, 1.0));
                        } else {
                                return nir_fadd(b,
                                                nir_i2f(b,
                                                        vc4_nir_unpack_8i(b, temp,
                                                                          swiz)),
                                                nir_imm_float(b, -128.0));
                        }
                } else {
                        if (chan->normalized) {
                                return vc4_nir_unpack_8f(b, vpm, swiz);
                        } else {
                                return nir_i2f(b, vc4_nir_unpack_8i(b, vpm, swiz));
                        }
                }
        } else if (chan->size == 16 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                nir_ssa_def *vpm = vpm_reads[swiz / 2];

                /* Note that UNPACK_16F eats a half float, not ints, so we
                 * use UNPACK_16_I for all of these.
                 */
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = nir_i2f(b, vc4_nir_unpack_16i(b, vpm, swiz & 1));
                        if (chan->normalized) {
                                return nir_fmul(b, temp,
                                                nir_imm_float(b, 1 / 32768.0f));
                        } else {
                                return temp;
                        }
                } else {
                        temp = nir_i2f(b, vc4_nir_unpack_16u(b, vpm, swiz & 1));
                        if (chan->normalized) {
                                return nir_fmul(b, temp,
                                                nir_imm_float(b, 1 / 65535.0));
                        } else {
                                return temp;
                        }
                }
        } else {
                return NULL;
        }
}

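/**
 * Lowers a vec4 vertex attribute load to scalar VPM dword loads plus the
 * format conversion math for each channel.
 */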
static void
vc4_nir_lower_vertex_attr(struct vc4_compile *c, nir_builder *b,
                          nir_intrinsic_instr *intr)
{
        b->cursor = nir_before_instr(&intr->instr);

        int attr = intr->const_index[0];
        enum pipe_format format = c->vs_key->attr_formats[attr];
        uint32_t attr_size = util_format_get_blocksize(format);

        /* All TGSI-to-NIR inputs are vec4. */
        assert(intr->num_components == 4);

        /* Generate dword loads for the VPM values.  (Since these intrinsics
         * may be reordered, the actual reads will be generated at the top
         * of the shader by ntq_setup_inputs().)
         */
        nir_ssa_def *vpm_reads[4];
        for (int i = 0; i < align(attr_size, 4) / 4; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s,
                                                   nir_intrinsic_load_input);
                intr_comp->num_components = 1;
                intr_comp->const_index[0] = intr->const_index[0] * 4 + i;
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);
                nir_builder_instr_insert(b, &intr_comp->instr);

                vpm_reads[i] = &intr_comp->dest.ssa;
        }

        bool format_warned = false;
        const struct util_format_description *desc =
                util_format_description(format);

        nir_ssa_def *dests[4];
        for (int i = 0; i < 4; i++) {
                uint8_t swiz = desc->swizzle[i];
                dests[i] = vc4_nir_get_vattr_channel_vpm(c, b, vpm_reads, swiz,
                                                         desc);

                if (!dests[i]) {
                        if (!format_warned) {
                                fprintf(stderr,
                                        "vtx element %d unsupported type: %s\n",
                                        attr, util_format_name(format));
                                format_warned = true;
                        }
                        dests[i] = nir_imm_float(b, 0.0);
                }
        }

        replace_intrinsic_with_vec4(b, intr, dests);
}

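/**
 * Lowers a vec4 fragment shader input load to scalar loads, applying the
 * fixups needed for the front-facing flag and point sprite coordinates.
 */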
static void
vc4_nir_lower_fs_input(struct vc4_compile *c, nir_builder *b,
                       nir_intrinsic_instr *intr)
{
        b->cursor = nir_before_instr(&intr->instr);

        if (intr->const_index[0] == VC4_NIR_TLB_COLOR_READ_INPUT) {
                /* This doesn't need any lowering. */
                return;
        }

        nir_variable *input_var = NULL;
        nir_foreach_variable(var, &c->s->inputs) {
                if (var->data.driver_location == intr->const_index[0]) {
                        input_var = var;
                        break;
                }
        }
        assert(input_var);

        /* All TGSI-to-NIR inputs are vec4. */
        assert(intr->num_components == 4);

        /* Generate scalar loads equivalent to the original vec4. */
        nir_ssa_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, nir_intrinsic_load_input);
                intr_comp->num_components = 1;
                intr_comp->const_index[0] = intr->const_index[0] * 4 + i;
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);
                nir_builder_instr_insert(b, &intr_comp->instr);

                dests[i] = &intr_comp->dest.ssa;
        }

        if (input_var->data.location == VARYING_SLOT_FACE) {
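                /* The face value loads as 0 for front-facing and 1 for
                 * back-facing; remap it to the +1.0/-1.0 TGSI convention.
                 */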
                dests[0] = nir_fsub(b,
                                    nir_imm_float(b, 1.0),
                                    nir_fmul(b,
                                             nir_i2f(b, dests[0]),
                                             nir_imm_float(b, 2.0)));
                dests[1] = nir_imm_float(b, 0.0);
                dests[2] = nir_imm_float(b, 0.0);
                dests[3] = nir_imm_float(b, 1.0);
        } else if (input_var->data.location >= VARYING_SLOT_VAR0) {
                if (c->fs_key->point_sprite_mask &
                    (1 << (input_var->data.location -
                           VARYING_SLOT_VAR0))) {
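                        /* Replace the varying with the point coordinate:
                         * .xy is the sprite coordinate (flipped vertically
                         * for an upper-left origin) and .zw is (0, 1).  Off
                         * of point primitives the coordinate is undefined,
                         * so (0, 0) is loaded.
                         */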
                        if (!c->fs_key->is_points) {
                                dests[0] = nir_imm_float(b, 0.0);
                                dests[1] = nir_imm_float(b, 0.0);
                        }
                        if (c->fs_key->point_coord_upper_left) {
                                dests[1] = nir_fsub(b,
                                                    nir_imm_float(b, 1.0),
                                                    dests[1]);
                        }
                        dests[2] = nir_imm_float(b, 0.0);
                        dests[3] = nir_imm_float(b, 1.0);
                }
        }

        replace_intrinsic_with_vec4(b, intr, dests);
}

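/**
 * Lowers a vec4 output store to scalar stores.  Non-position outputs are
 * dropped in coordinate shaders, and FS color outputs are left for
 * vc4_nir_lower_blend() to handle.
 */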
static void
vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
                     nir_intrinsic_instr *intr)
{
        nir_variable *output_var = NULL;
        nir_foreach_variable(var, &c->s->outputs) {
                if (var->data.driver_location == intr->const_index[0]) {
                        output_var = var;
                        break;
                }
        }
        assert(output_var);

        if (c->stage == QSTAGE_COORD &&
            output_var->data.location != VARYING_SLOT_POS &&
            output_var->data.location != VARYING_SLOT_PSIZ) {
                nir_instr_remove(&intr->instr);
                return;
        }

        /* Color output is lowered by vc4_nir_lower_blend(). */
        if (c->stage == QSTAGE_FRAG &&
            (output_var->data.location == FRAG_RESULT_COLOR ||
             output_var->data.location == FRAG_RESULT_DATA0)) {
                intr->const_index[0] *= 4;
                return;
        }

        /* All TGSI-to-NIR outputs are vec4. */
        assert(intr->num_components == 4);

        b->cursor = nir_before_instr(&intr->instr);

        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, nir_intrinsic_store_output);
                intr_comp->num_components = 1;
                intr_comp->const_index[0] = intr->const_index[0] * 4 + i;

                assert(intr->src[0].is_ssa);
                intr_comp->src[0] =
                        nir_src_for_ssa(nir_channel(b, intr->src[0].ssa, i));
                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        nir_instr_remove(&intr->instr);
}

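/**
 * Lowers a vec4 uniform load to scalar loads, rewriting the vec4 register
 * indices as dword (or, for indirect loads, byte) offsets.
 */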
static void
vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
                      nir_intrinsic_instr *intr)
{
        /* All TGSI-to-NIR uniform loads are vec4, but we may create dword
         * loads in our lowering passes.
         */
        if (intr->num_components == 1)
                return;
        assert(intr->num_components == 4);

        b->cursor = nir_before_instr(&intr->instr);

        /* Generate scalar loads equivalent to the original vec4. */
        nir_ssa_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, intr->intrinsic);
                intr_comp->num_components = 1;
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);

                if (intr->intrinsic == nir_intrinsic_load_uniform_indirect) {
                        /* Convert the variable TGSI register index to a byte
                         * offset.
                         */
                        intr_comp->src[0] =
                                nir_src_for_ssa(nir_ishl(b,
                                                         intr->src[0].ssa,
                                                         nir_imm_int(b, 4)));

                        /* Convert the offset to be a byte index, too. */
                        intr_comp->const_index[0] = (intr->const_index[0] * 16 +
                                                     i * 4);
                } else {
                        /* We want a dword index for non-indirect uniform
                         * loads.
                         */
                        intr_comp->const_index[0] = (intr->const_index[0] * 4 +
                                                     i);
                }

                dests[i] = &intr_comp->dest.ssa;

                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        replace_intrinsic_with_vec4(b, intr, dests);
}

static void
vc4_nir_lower_io_instr(struct vc4_compile *c, nir_builder *b,
                       struct nir_instr *instr)
{
        if (instr->type != nir_instr_type_intrinsic)
                return;
        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

        switch (intr->intrinsic) {
        case nir_intrinsic_load_input:
                if (c->stage == QSTAGE_FRAG)
                        vc4_nir_lower_fs_input(c, b, intr);
                else
                        vc4_nir_lower_vertex_attr(c, b, intr);
                break;

        case nir_intrinsic_store_output:
                vc4_nir_lower_output(c, b, intr);
                break;

        case nir_intrinsic_load_uniform:
        case nir_intrinsic_load_uniform_indirect:
        case nir_intrinsic_load_user_clip_plane:
                vc4_nir_lower_uniform(c, b, intr);
                break;

        default:
                break;
        }
}

static bool
vc4_nir_lower_io_block(nir_block *block, void *arg)
{
        struct vc4_compile *c = arg;
        nir_function_impl *impl =
                nir_cf_node_get_function(&block->cf_node);

        nir_builder b;
        nir_builder_init(&b, impl);

        nir_foreach_instr_safe(block, instr)
                vc4_nir_lower_io_instr(c, &b, instr);

        return true;
}

static bool
vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        nir_foreach_block(impl, vc4_nir_lower_io_block, c);

        nir_metadata_preserve(impl, nir_metadata_block_index |
                              nir_metadata_dominance);

        return true;
}

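/** Entry point: runs the lowering across every function in the shader. */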
void
vc4_nir_lower_io(struct vc4_compile *c)
{
        nir_foreach_overload(c->s, overload) {
                if (overload->impl)
                        vc4_nir_lower_io_impl(c, overload->impl);
        }
}