[mesa.git] src/gallium/drivers/vc4/vc4_nir_lower_io.c
/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vc4_qir.h"
#include "glsl/nir/nir_builder.h"
#include "util/u_format.h"

/**
 * Walks the NIR generated by TGSI-to-NIR to lower its io intrinsics into
 * something amenable to the VC4 architecture.
 *
 * Currently, it splits inputs, outputs, and uniforms into scalars, drops any
 * non-position outputs in coordinate shaders, and fixes up the addressing on
 * indirect uniform loads.
 */
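
/*
 * For example, a four-component load_input at driver_location N is rewritten
 * into four single-component load_input intrinsics at locations N*4 .. N*4+3,
 * whose results are recombined into a vec4 that replaces all uses of the
 * original intrinsic.
 */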

static void
replace_intrinsic_with_vec4(nir_builder *b, nir_intrinsic_instr *intr,
                            nir_ssa_def **comps)
{

        /* Batch things back together into a vec4. This will get split by the
         * later ALU scalarization pass.
         */
        nir_ssa_def *vec = nir_vec4(b, comps[0], comps[1], comps[2], comps[3]);

        /* Replace the old intrinsic with a reference to our reconstructed
         * vec4.
         */
        nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec));
        nir_instr_remove(&intr->instr);
}

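/** Returns the 8 bit field as an unsigned (zero-extended) 32-bit value. */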
static nir_ssa_def *
vc4_nir_unpack_8i(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        return nir_ubitfield_extract(b,
                                     src,
                                     nir_imm_int(b, 8 * chan),
                                     nir_imm_int(b, 8));
}

/** Returns the 16 bit field as a sign-extended 32-bit value. */
static nir_ssa_def *
vc4_nir_unpack_16i(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        return nir_ibitfield_extract(b,
                                     src,
                                     nir_imm_int(b, 16 * chan),
                                     nir_imm_int(b, 16));
}

/** Returns the 16 bit field as an unsigned 32 bit value. */
static nir_ssa_def *
vc4_nir_unpack_16u(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        if (chan == 0) {
                return nir_iand(b, src, nir_imm_int(b, 0xffff));
        } else {
                return nir_ushr(b, src, nir_imm_int(b, 16));
        }
}

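/** Returns the 8 bit unorm field as a 0.0-1.0 float value. */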
static nir_ssa_def *
vc4_nir_unpack_8f(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        return nir_channel(b, nir_unpack_unorm_4x8(b, src), chan);
}

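/**
 * Returns the format-converted value of one channel of a vertex attribute,
 * given the raw 32-bit VPM reads for the attribute, or NULL if the vertex
 * format isn't supported here.
 */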
static nir_ssa_def *
vc4_nir_get_vattr_channel_vpm(struct vc4_compile *c,
                              nir_builder *b,
                              nir_ssa_def **vpm_reads,
                              uint8_t swiz,
                              const struct util_format_description *desc)
{
        const struct util_format_channel_description *chan =
                &desc->channel[swiz];
        nir_ssa_def *temp;

        if (swiz > UTIL_FORMAT_SWIZZLE_W) {
                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_FLOAT) {
                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                if (chan->normalized) {
                        return nir_fmul(b,
                                        nir_i2f(b, vpm_reads[swiz]),
                                        nir_imm_float(b,
                                                      1.0 / 0x7fffffff));
                } else {
                        return nir_i2f(b, vpm_reads[swiz]);
                }
        } else if (chan->size == 8 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                nir_ssa_def *vpm = vpm_reads[0];
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = nir_ixor(b, vpm, nir_imm_int(b, 0x80808080));
                        if (chan->normalized) {
                                return nir_fsub(b, nir_fmul(b,
                                                            vc4_nir_unpack_8f(b, temp, swiz),
                                                            nir_imm_float(b, 2.0)),
                                                nir_imm_float(b, 1.0));
                        } else {
                                return nir_fadd(b,
                                                nir_i2f(b,
                                                        vc4_nir_unpack_8i(b, temp,
                                                                          swiz)),
                                                nir_imm_float(b, -128.0));
                        }
                } else {
                        if (chan->normalized) {
                                return vc4_nir_unpack_8f(b, vpm, swiz);
                        } else {
                                return nir_i2f(b, vc4_nir_unpack_8i(b, vpm, swiz));
                        }
                }
        } else if (chan->size == 16 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                nir_ssa_def *vpm = vpm_reads[swiz / 2];

                /* Note that UNPACK_16F eats a half float, not ints, so we use
                 * UNPACK_16_I for all of these.
                 */
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = nir_i2f(b, vc4_nir_unpack_16i(b, vpm, swiz & 1));
                        if (chan->normalized) {
                                return nir_fmul(b, temp,
                                                nir_imm_float(b, 1/32768.0f));
                        } else {
                                return temp;
                        }
                } else {
                        temp = nir_i2f(b, vc4_nir_unpack_16u(b, vpm, swiz & 1));
                        if (chan->normalized) {
                                return nir_fmul(b, temp,
                                                nir_imm_float(b, 1 / 65535.0));
                        } else {
                                return temp;
                        }
                }
        } else {
                return NULL;
        }
}

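/**
 * Lowers a vec4 vertex attribute load into scalar VPM reads, with the
 * per-channel conversion for the attribute's vertex format applied.
 */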
static void
vc4_nir_lower_vertex_attr(struct vc4_compile *c, nir_builder *b,
                          nir_intrinsic_instr *intr)
{
        b->cursor = nir_before_instr(&intr->instr);

        int attr = intr->const_index[0];
        enum pipe_format format = c->vs_key->attr_formats[attr];
        uint32_t attr_size = util_format_get_blocksize(format);

        /* All TGSI-to-NIR inputs are vec4. */
        assert(intr->num_components == 4);

        /* We only accept direct inputs and TGSI only ever gives them to us
         * with an offset value of 0.
         */
        assert(nir_src_as_const_value(intr->src[0]) &&
               nir_src_as_const_value(intr->src[0])->u[0] == 0);

        /* Generate dword loads for the VPM values (since these intrinsics may
         * be reordered, the actual reads will be generated at the top of the
         * shader by ntq_setup_inputs()).
         */
        nir_ssa_def *vpm_reads[4];
        for (int i = 0; i < align(attr_size, 4) / 4; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s,
                                                   nir_intrinsic_load_input);
                intr_comp->num_components = 1;
                intr_comp->const_index[0] = intr->const_index[0] * 4 + i;
                intr_comp->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);
                nir_builder_instr_insert(b, &intr_comp->instr);

                vpm_reads[i] = &intr_comp->dest.ssa;
        }

        bool format_warned = false;
        const struct util_format_description *desc =
                util_format_description(format);

        nir_ssa_def *dests[4];
        for (int i = 0; i < 4; i++) {
                uint8_t swiz = desc->swizzle[i];
                dests[i] = vc4_nir_get_vattr_channel_vpm(c, b, vpm_reads, swiz,
                                                         desc);

                if (!dests[i]) {
                        if (!format_warned) {
                                fprintf(stderr,
                                        "vtx element %d unsupported type: %s\n",
                                        attr, util_format_name(format));
                                format_warned = true;
                        }
                        dests[i] = nir_imm_float(b, 0.0);
                }
        }

        replace_intrinsic_with_vec4(b, intr, dests);
}

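/**
 * Lowers a vec4 fragment shader input load into scalar loads, applying the
 * front-face and point sprite coordinate fixups where needed.
 */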
static void
vc4_nir_lower_fs_input(struct vc4_compile *c, nir_builder *b,
                       nir_intrinsic_instr *intr)
{
        b->cursor = nir_before_instr(&intr->instr);

        if (intr->const_index[0] >= VC4_NIR_TLB_COLOR_READ_INPUT &&
            intr->const_index[0] < (VC4_NIR_TLB_COLOR_READ_INPUT +
                                    VC4_MAX_SAMPLES)) {
                /* This doesn't need any lowering. */
                return;
        }

        nir_variable *input_var = NULL;
        nir_foreach_variable(var, &c->s->inputs) {
                if (var->data.driver_location == intr->const_index[0]) {
                        input_var = var;
                        break;
                }
        }
        assert(input_var);

        /* All TGSI-to-NIR inputs are vec4. */
        assert(intr->num_components == 4);

        /* We only accept direct inputs and TGSI only ever gives them to us
         * with an offset value of 0.
         */
        assert(nir_src_as_const_value(intr->src[0]) &&
               nir_src_as_const_value(intr->src[0])->u[0] == 0);

        /* Generate scalar loads equivalent to the original VEC4. */
        nir_ssa_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, nir_intrinsic_load_input);
                intr_comp->num_components = 1;
                intr_comp->const_index[0] = intr->const_index[0] * 4 + i;
                intr_comp->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));

                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);
                nir_builder_instr_insert(b, &intr_comp->instr);

                dests[i] = &intr_comp->dest.ssa;
        }

        if (input_var->data.location == VARYING_SLOT_FACE) {
                dests[0] = nir_fsub(b,
                                    nir_imm_float(b, 1.0),
                                    nir_fmul(b,
                                             nir_i2f(b, dests[0]),
                                             nir_imm_float(b, 2.0)));
                dests[1] = nir_imm_float(b, 0.0);
                dests[2] = nir_imm_float(b, 0.0);
                dests[3] = nir_imm_float(b, 1.0);
        } else if (input_var->data.location >= VARYING_SLOT_VAR0) {
                if (c->fs_key->point_sprite_mask &
                    (1 << (input_var->data.location -
                           VARYING_SLOT_VAR0))) {
                        if (!c->fs_key->is_points) {
                                dests[0] = nir_imm_float(b, 0.0);
                                dests[1] = nir_imm_float(b, 0.0);
                        }
                        if (c->fs_key->point_coord_upper_left) {
                                dests[1] = nir_fsub(b,
                                                    nir_imm_float(b, 1.0),
                                                    dests[1]);
                        }
                        dests[2] = nir_imm_float(b, 0.0);
                        dests[3] = nir_imm_float(b, 1.0);
                }
        }

        replace_intrinsic_with_vec4(b, intr, dests);
}

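/**
 * Lowers a vec4 store_output into scalar stores.  Outputs other than
 * position and point size are dropped in coordinate shaders, and color
 * outputs are left for vc4_nir_lower_blend() to handle.
 */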
static void
vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
                     nir_intrinsic_instr *intr)
{
        nir_variable *output_var = NULL;
        nir_foreach_variable(var, &c->s->outputs) {
                if (var->data.driver_location == intr->const_index[0]) {
                        output_var = var;
                        break;
                }
        }
        assert(output_var);

        if (c->stage == QSTAGE_COORD &&
            output_var->data.location != VARYING_SLOT_POS &&
            output_var->data.location != VARYING_SLOT_PSIZ) {
                nir_instr_remove(&intr->instr);
                return;
        }

        /* Color output is lowered by vc4_nir_lower_blend(). */
        if (c->stage == QSTAGE_FRAG &&
            (output_var->data.location == FRAG_RESULT_COLOR ||
             output_var->data.location == FRAG_RESULT_DATA0 ||
             output_var->data.location == FRAG_RESULT_SAMPLE_MASK)) {
                intr->const_index[0] *= 4;
                return;
        }

        /* All TGSI-to-NIR outputs are VEC4. */
        assert(intr->num_components == 4);

        /* We only accept direct outputs and TGSI only ever gives them to us
         * with an offset value of 0.
         */
        assert(nir_src_as_const_value(intr->src[1]) &&
               nir_src_as_const_value(intr->src[1])->u[0] == 0);

        b->cursor = nir_before_instr(&intr->instr);

        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, nir_intrinsic_store_output);
                intr_comp->num_components = 1;
                intr_comp->const_index[0] = intr->const_index[0] * 4 + i;

                assert(intr->src[0].is_ssa);
                intr_comp->src[0] =
                        nir_src_for_ssa(nir_channel(b, intr->src[0].ssa, i));
                intr_comp->src[1] = nir_src_for_ssa(nir_imm_int(b, 0));
                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        nir_instr_remove(&intr->instr);
}

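/**
 * Lowers a vec4 uniform or user clip plane load into scalar loads,
 * converting load_uniform offsets from vec4 slots to the byte offsets the
 * backend expects.
 */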
static void
vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
                      nir_intrinsic_instr *intr)
{
        /* All TGSI-to-NIR uniform loads are vec4, but we need byte offsets
         * in the backend.
         */
        if (intr->num_components == 1)
                return;
        assert(intr->num_components == 4);

        b->cursor = nir_before_instr(&intr->instr);

        /* Generate scalar loads equivalent to the original VEC4. */
        nir_ssa_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, intr->intrinsic);
                intr_comp->num_components = 1;
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);

                /* Convert the uniform (not user_clip_plane) offset to bytes.
                 * If it happens to be a constant, constant-folding will clean
                 * up the shift for us.
                 */
                if (intr->intrinsic == nir_intrinsic_load_uniform) {
                        /* Convert the base offset to bytes and add the
                         * component
                         */
                        intr_comp->const_index[0] = (intr->const_index[0] * 16 + i * 4);

                        intr_comp->src[0] =
                                nir_src_for_ssa(nir_ishl(b, intr->src[0].ssa,
                                                         nir_imm_int(b, 4)));
                } else {
                        assert(intr->intrinsic ==
                               nir_intrinsic_load_user_clip_plane);
                        intr_comp->const_index[0] = intr->const_index[0] * 4 + i;
                }

                dests[i] = &intr_comp->dest.ssa;

                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        replace_intrinsic_with_vec4(b, intr, dests);
}

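/* Dispatches an intrinsic to the appropriate lowering above (load_input
 * lowering depends on the shader stage).
 */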
static void
vc4_nir_lower_io_instr(struct vc4_compile *c, nir_builder *b,
                       struct nir_instr *instr)
{
        if (instr->type != nir_instr_type_intrinsic)
                return;
        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

        switch (intr->intrinsic) {
        case nir_intrinsic_load_input:
                if (c->stage == QSTAGE_FRAG)
                        vc4_nir_lower_fs_input(c, b, intr);
                else
                        vc4_nir_lower_vertex_attr(c, b, intr);
                break;

        case nir_intrinsic_store_output:
                vc4_nir_lower_output(c, b, intr);
                break;

        case nir_intrinsic_load_uniform:
        case nir_intrinsic_load_user_clip_plane:
                vc4_nir_lower_uniform(c, b, intr);
                break;

        default:
                break;
        }
}

static bool
vc4_nir_lower_io_block(nir_block *block, void *arg)
{
        struct vc4_compile *c = arg;
        nir_function_impl *impl =
                nir_cf_node_get_function(&block->cf_node);

        nir_builder b;
        nir_builder_init(&b, impl);

        nir_foreach_instr_safe(block, instr)
                vc4_nir_lower_io_instr(c, &b, instr);

        return true;
}

static bool
vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        nir_foreach_block(impl, vc4_nir_lower_io_block, c);

        nir_metadata_preserve(impl, nir_metadata_block_index |
                              nir_metadata_dominance);

        return true;
}

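/* Pass entry point: lowers every function implementation in the shader. */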
void
vc4_nir_lower_io(struct vc4_compile *c)
{
        nir_foreach_overload(c->s, overload) {
                if (overload->impl)
                        vc4_nir_lower_io_impl(c, overload->impl);
        }
}