nir: Define system values for vc4's blending-lowering arguments.
[mesa.git] / src/gallium/drivers/vc4/vc4_nir_lower_io.c
/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vc4_qir.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_format.h"

/**
 * Walks the NIR generated by TGSI-to-NIR to lower its io intrinsics into
 * something amenable to the VC4 architecture.
 *
 * Currently, it splits VS inputs and uniforms into scalars, drops any
 * non-position outputs in coordinate shaders, and fixes up the addressing on
 * indirect uniform loads.  FS input and VS output scalarization is handled by
 * nir_lower_io_to_scalar().
 */

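/* Note: in this driver the pass is run from the shader compile path in
 * vc4_program.c, after TGSI-to-NIR translation and before the NIR is
 * translated to QIR.
 */
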
static void
replace_intrinsic_with_vec4(nir_builder *b, nir_intrinsic_instr *intr,
                            nir_ssa_def **comps)
{
        /* Batch things back together into a vec4.  This will get split by
         * the later ALU scalarization pass.
         */
        nir_ssa_def *vec = nir_vec4(b, comps[0], comps[1], comps[2], comps[3]);

        /* Replace the old intrinsic with a reference to our reconstructed
         * vec4.
         */
        nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec));
        nir_instr_remove(&intr->instr);
}

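/** Returns byte number "chan" of the 32-bit VPM word, zero-extended to 32 bits. */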
static nir_ssa_def *
vc4_nir_unpack_8i(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        return nir_ubitfield_extract(b,
                                     src,
                                     nir_imm_int(b, 8 * chan),
                                     nir_imm_int(b, 8));
}

/** Returns the 16 bit field as a sign-extended 32-bit value. */
static nir_ssa_def *
vc4_nir_unpack_16i(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        return nir_ibitfield_extract(b,
                                     src,
                                     nir_imm_int(b, 16 * chan),
                                     nir_imm_int(b, 16));
}

/** Returns the 16 bit field as an unsigned 32 bit value. */
static nir_ssa_def *
vc4_nir_unpack_16u(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        if (chan == 0) {
                return nir_iand(b, src, nir_imm_int(b, 0xffff));
        } else {
                return nir_ushr(b, src, nir_imm_int(b, 16));
        }
}

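/** Returns byte number "chan" of the 32-bit VPM word as a unorm float in [0, 1]. */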
static nir_ssa_def *
vc4_nir_unpack_8f(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        return nir_channel(b, nir_unpack_unorm_4x8(b, src), chan);
}

static nir_ssa_def *
vc4_nir_get_vattr_channel_vpm(struct vc4_compile *c,
                              nir_builder *b,
                              nir_ssa_def **vpm_reads,
                              uint8_t swiz,
                              const struct util_format_description *desc)
{
        const struct util_format_channel_description *chan =
                &desc->channel[swiz];
        nir_ssa_def *temp;

        if (swiz > PIPE_SWIZZLE_W) {
                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_FLOAT) {
                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                if (chan->normalized) {
                        return nir_fmul(b,
                                        nir_i2f(b, vpm_reads[swiz]),
                                        nir_imm_float(b,
                                                      1.0 / 0x7fffffff));
                } else {
                        return nir_i2f(b, vpm_reads[swiz]);
                }
        } else if (chan->size == 8 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                nir_ssa_def *vpm = vpm_reads[0];
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
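                        /* Flipping the sign bit of each byte (x ^ 0x80 is
                         * x + 128 mod 256) rebases the signed bytes to an
                         * unsigned 0..255 range so the unsigned unpack
                         * helpers can be reused.  The float result is then
                         * mapped back: rescaled to [-1, 1] for normalized
                         * formats, or biased by -128.0 otherwise.
                         */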
                        temp = nir_ixor(b, vpm, nir_imm_int(b, 0x80808080));
                        if (chan->normalized) {
                                return nir_fsub(b, nir_fmul(b,
                                                            vc4_nir_unpack_8f(b, temp, swiz),
                                                            nir_imm_float(b, 2.0)),
                                                nir_imm_float(b, 1.0));
                        } else {
                                return nir_fadd(b,
                                                nir_i2f(b,
                                                        vc4_nir_unpack_8i(b, temp,
                                                                          swiz)),
                                                nir_imm_float(b, -128.0));
                        }
                } else {
                        if (chan->normalized) {
                                return vc4_nir_unpack_8f(b, vpm, swiz);
                        } else {
                                return nir_i2f(b, vc4_nir_unpack_8i(b, vpm, swiz));
                        }
                }
        } else if (chan->size == 16 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
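                /* Two 16-bit channels are packed into each 32-bit VPM word,
                 * so channel "swiz" lives in word swiz / 2, half-word
                 * swiz & 1.
                 */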
                nir_ssa_def *vpm = vpm_reads[swiz / 2];

                /* Note that UNPACK_16F eats a half float, not ints, so we use
                 * UNPACK_16_I for all of these.
                 */
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = nir_i2f(b, vc4_nir_unpack_16i(b, vpm, swiz & 1));
                        if (chan->normalized) {
                                return nir_fmul(b, temp,
                                                nir_imm_float(b, 1 / 32768.0f));
                        } else {
                                return temp;
                        }
                } else {
                        temp = nir_i2f(b, vc4_nir_unpack_16u(b, vpm, swiz & 1));
                        if (chan->normalized) {
                                return nir_fmul(b, temp,
                                                nir_imm_float(b, 1 / 65535.0));
                        } else {
                                return temp;
                        }
                }
        } else {
                return NULL;
        }
}

static void
vc4_nir_lower_vertex_attr(struct vc4_compile *c, nir_builder *b,
                          nir_intrinsic_instr *intr)
{
        b->cursor = nir_before_instr(&intr->instr);

        int attr = nir_intrinsic_base(intr);
        enum pipe_format format = c->vs_key->attr_formats[attr];
        uint32_t attr_size = util_format_get_blocksize(format);

        /* All TGSI-to-NIR inputs are vec4. */
        assert(intr->num_components == 4);

        /* We only accept direct inputs, and TGSI only ever gives them to us
         * with an offset value of 0.
         */
        assert(nir_src_as_const_value(intr->src[0]) &&
               nir_src_as_const_value(intr->src[0])->u32[0] == 0);

        /* Generate dword loads for the VPM values (since these intrinsics may
         * be reordered, the actual reads will be generated at the top of the
         * shader by ntq_setup_inputs()).
         */
        nir_ssa_def *vpm_reads[4];
        for (int i = 0; i < align(attr_size, 4) / 4; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s,
                                                   nir_intrinsic_load_input);
                intr_comp->num_components = 1;
                nir_intrinsic_set_base(intr_comp, nir_intrinsic_base(intr));
                nir_intrinsic_set_component(intr_comp, i);
                intr_comp->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);
                nir_builder_instr_insert(b, &intr_comp->instr);

                vpm_reads[i] = &intr_comp->dest.ssa;
        }

        bool format_warned = false;
        const struct util_format_description *desc =
                util_format_description(format);

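        /* Apply the format's swizzle to pick out each component from the VPM
         * words and convert it to a float.  Unsupported formats fall back to
         * 0.0 with a one-time warning.
         */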
        nir_ssa_def *dests[4];
        for (int i = 0; i < 4; i++) {
                uint8_t swiz = desc->swizzle[i];
                dests[i] = vc4_nir_get_vattr_channel_vpm(c, b, vpm_reads, swiz,
                                                         desc);

                if (!dests[i]) {
                        if (!format_warned) {
                                fprintf(stderr,
                                        "vtx element %d unsupported type: %s\n",
                                        attr, util_format_name(format));
                                format_warned = true;
                        }
                        dests[i] = nir_imm_float(b, 0.0);
                }
        }

        replace_intrinsic_with_vec4(b, intr, dests);
}

static bool
is_point_sprite(struct vc4_compile *c, nir_variable *var)
{
        if (var->data.location < VARYING_SLOT_VAR0 ||
            var->data.location > VARYING_SLOT_VAR31)
                return false;

        return (c->fs_key->point_sprite_mask &
                (1 << (var->data.location - VARYING_SLOT_VAR0)));
}

static void
vc4_nir_lower_fs_input(struct vc4_compile *c, nir_builder *b,
                       nir_intrinsic_instr *intr)
{
        b->cursor = nir_after_instr(&intr->instr);

        if (nir_intrinsic_base(intr) >= VC4_NIR_TLB_COLOR_READ_INPUT &&
            nir_intrinsic_base(intr) < (VC4_NIR_TLB_COLOR_READ_INPUT +
                                        VC4_MAX_SAMPLES)) {
                /* This doesn't need any lowering. */
                return;
        }

        nir_variable *input_var = NULL;
        nir_foreach_variable(var, &c->s->inputs) {
                if (var->data.driver_location == nir_intrinsic_base(intr)) {
                        input_var = var;
                        break;
                }
        }
        assert(input_var);

        int comp = nir_intrinsic_component(intr);

        /* Lower away point coordinates, and fix up PNTC. */
        if (is_point_sprite(c, input_var) ||
            input_var->data.location == VARYING_SLOT_PNTC) {
                assert(intr->num_components == 1);

                nir_ssa_def *result = &intr->dest.ssa;

                switch (comp) {
                case 0:
                case 1:
                        /* If we're not rendering points, we need to set a
                         * defined value for the input that would come from
                         * PNTC.
                         */
                        if (!c->fs_key->is_points)
                                result = nir_imm_float(b, 0.0);
                        break;
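                /* The point coordinate only supplies .xy, so the .z and .w
                 * of the replaced varying become constants.
                 */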
                case 2:
                        result = nir_imm_float(b, 0.0);
                        break;
                case 3:
                        result = nir_imm_float(b, 1.0);
                        break;
                }

                if (c->fs_key->point_coord_upper_left && comp == 1)
                        result = nir_fsub(b, nir_imm_float(b, 1.0), result);

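                /* Rewrite the load's users to use the fixed-up value, but
                 * leave the fix-up instructions themselves (which read the
                 * original load) untouched.
                 */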
                if (result != &intr->dest.ssa) {
                        nir_ssa_def_rewrite_uses_after(&intr->dest.ssa,
                                                       nir_src_for_ssa(result),
                                                       result->parent_instr);
                }
        }
}

static void
vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
                     nir_intrinsic_instr *intr)
{
        nir_variable *output_var = NULL;
        nir_foreach_variable(var, &c->s->outputs) {
                if (var->data.driver_location == nir_intrinsic_base(intr)) {
                        output_var = var;
                        break;
                }
        }
        assert(output_var);

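        /* In coordinate (binning-mode) shaders only the position and point
         * size outputs are consumed, so stores to any other varying can be
         * dropped.
         */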
        if (c->stage == QSTAGE_COORD &&
            output_var->data.location != VARYING_SLOT_POS &&
            output_var->data.location != VARYING_SLOT_PSIZ) {
                nir_instr_remove(&intr->instr);
                return;
        }
}

static void
vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
                      nir_intrinsic_instr *intr)
{
        /* All TGSI-to-NIR uniform loads are vec4, but we need byte offsets
         * in the backend.
         */
        if (intr->num_components == 1)
                return;
        assert(intr->num_components == 4);

        b->cursor = nir_before_instr(&intr->instr);

        /* Generate scalar loads equivalent to the original VEC4. */
        nir_ssa_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, intr->intrinsic);
                intr_comp->num_components = 1;
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);

                /* Convert the uniform offset to bytes.  If it happens
                 * to be a constant, constant-folding will clean up
                 * the shift for us.
                 */
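                /* The base and the indirect offset are both in vec4 units,
                 * so a slot becomes 16 bytes and component i adds i * 4.
                 */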
                nir_intrinsic_set_base(intr_comp,
                                       nir_intrinsic_base(intr) * 16 +
                                       i * 4);

                intr_comp->src[0] =
                        nir_src_for_ssa(nir_ishl(b, intr->src[0].ssa,
                                                 nir_imm_int(b, 4)));

                dests[i] = &intr_comp->dest.ssa;

                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        replace_intrinsic_with_vec4(b, intr, dests);
}

static void
vc4_nir_lower_io_instr(struct vc4_compile *c, nir_builder *b,
                       struct nir_instr *instr)
{
        if (instr->type != nir_instr_type_intrinsic)
                return;
        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

        switch (intr->intrinsic) {
        case nir_intrinsic_load_input:
                if (c->stage == QSTAGE_FRAG)
                        vc4_nir_lower_fs_input(c, b, intr);
                else
                        vc4_nir_lower_vertex_attr(c, b, intr);
                break;

        case nir_intrinsic_store_output:
                vc4_nir_lower_output(c, b, intr);
                break;

        case nir_intrinsic_load_uniform:
                vc4_nir_lower_uniform(c, b, intr);
                break;

        case nir_intrinsic_load_user_clip_plane:
        default:
                break;
        }
}

static bool
vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        nir_builder b;
        nir_builder_init(&b, impl);

        nir_foreach_block(block, impl) {
                nir_foreach_instr_safe(instr, block)
                        vc4_nir_lower_io_instr(c, &b, instr);
        }

        nir_metadata_preserve(impl, nir_metadata_block_index |
                              nir_metadata_dominance);

        return true;
}

void
vc4_nir_lower_io(nir_shader *s, struct vc4_compile *c)
{
        nir_foreach_function(function, s) {
                if (function->impl)
                        vc4_nir_lower_io_impl(c, function->impl);
        }
}