src/gallium/drivers/vc4/vc4_nir_lower_io.c
/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vc4_qir.h"
#include "compiler/nir/nir_builder.h"
#include "util/format/u_format.h"
#include "util/u_helpers.h"

/**
 * Walks the NIR generated by TGSI-to-NIR or GLSL-to-NIR to lower its io
 * intrinsics into something amenable to the VC4 architecture.
 *
 * Currently, it splits VS inputs and uniforms into scalars, drops any
 * non-position outputs in coordinate shaders, and fixes up the addressing on
 * indirect uniform loads.  FS input and VS output scalarization is handled by
 * nir_lower_io_to_scalar().
 */

static void
replace_intrinsic_with_vec(nir_builder *b, nir_intrinsic_instr *intr,
                           nir_ssa_def **comps)
{
        /* Batch things back together into a vector.  This will get split by
         * the later ALU scalarization pass.
         */
        nir_ssa_def *vec = nir_vec(b, comps, intr->num_components);

        /* Replace the old intrinsic with a reference to our reconstructed
         * vector.
         */
        nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec));
        nir_instr_remove(&intr->instr);
}

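/** Returns the 8 bit field at byte "chan" as an unsigned 32 bit value. */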
static nir_ssa_def *
vc4_nir_unpack_8i(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        return nir_ubitfield_extract(b,
                                     src,
                                     nir_imm_int(b, 8 * chan),
                                     nir_imm_int(b, 8));
}

/** Returns the 16 bit field as a sign-extended 32-bit value. */
static nir_ssa_def *
vc4_nir_unpack_16i(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        return nir_ibitfield_extract(b,
                                     src,
                                     nir_imm_int(b, 16 * chan),
                                     nir_imm_int(b, 16));
}

/** Returns the 16 bit field as an unsigned 32 bit value. */
static nir_ssa_def *
vc4_nir_unpack_16u(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        if (chan == 0) {
                return nir_iand(b, src, nir_imm_int(b, 0xffff));
        } else {
                return nir_ushr(b, src, nir_imm_int(b, 16));
        }
}

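/** Returns the 8 bit field unpacked as an unsigned-normalized float. */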
static nir_ssa_def *
vc4_nir_unpack_8f(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        return nir_channel(b, nir_unpack_unorm_4x8(b, src), chan);
}

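/**
 * Converts one channel of a vertex attribute, fetched as raw 32-bit VPM
 * reads, according to the attribute's format description.  Returns NULL if
 * the channel's type/size combination isn't handled.
 */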
static nir_ssa_def *
vc4_nir_get_vattr_channel_vpm(struct vc4_compile *c,
                              nir_builder *b,
                              nir_ssa_def **vpm_reads,
                              uint8_t swiz,
                              const struct util_format_description *desc)
{
        const struct util_format_channel_description *chan =
                &desc->channel[swiz];
        nir_ssa_def *temp;

        if (swiz > PIPE_SWIZZLE_W) {
                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_FLOAT) {
                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                if (chan->normalized) {
                        return nir_fmul(b,
                                        nir_i2f32(b, vpm_reads[swiz]),
                                        nir_imm_float(b,
                                                      1.0 / 0x7fffffff));
                } else {
                        return nir_i2f32(b, vpm_reads[swiz]);
                }
        } else if (chan->size == 8 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                nir_ssa_def *vpm = vpm_reads[0];
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
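                        /* Flipping the per-byte sign bits rebases the signed
                         * bytes into unsigned range so the unsigned/unorm
                         * unpack helpers below can be reused; the bias is
                         * undone by the *2 - 1 rescale (normalized) or the
                         * -128.0 add (non-normalized).
                         */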
                        temp = nir_ixor(b, vpm, nir_imm_int(b, 0x80808080));
                        if (chan->normalized) {
                                return nir_fsub(b, nir_fmul(b,
                                                            vc4_nir_unpack_8f(b, temp, swiz),
                                                            nir_imm_float(b, 2.0)),
                                                nir_imm_float(b, 1.0));
                        } else {
                                return nir_fadd(b,
                                                nir_i2f32(b,
                                                          vc4_nir_unpack_8i(b, temp,
                                                                            swiz)),
                                                nir_imm_float(b, -128.0));
                        }
                } else {
                        if (chan->normalized) {
                                return vc4_nir_unpack_8f(b, vpm, swiz);
                        } else {
                                return nir_i2f32(b, vc4_nir_unpack_8i(b, vpm, swiz));
                        }
                }
        } else if (chan->size == 16 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                nir_ssa_def *vpm = vpm_reads[swiz / 2];

                /* Note that UNPACK_16F eats a half float, not ints, so we use
                 * UNPACK_16_I for all of these.
                 */
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = nir_i2f32(b, vc4_nir_unpack_16i(b, vpm, swiz & 1));
                        if (chan->normalized) {
                                return nir_fmul(b, temp,
                                                nir_imm_float(b, 1 / 32768.0f));
                        } else {
                                return temp;
                        }
                } else {
                        temp = nir_i2f32(b, vc4_nir_unpack_16u(b, vpm, swiz & 1));
                        if (chan->normalized) {
                                return nir_fmul(b, temp,
                                                nir_imm_float(b, 1 / 65535.0));
                        } else {
                                return temp;
                        }
                }
        } else {
                return NULL;
        }
}

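/**
 * Lowers a VS load_input to scalar VPM dword loads, then converts each
 * channel according to the bound vertex attribute format.
 */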
static void
vc4_nir_lower_vertex_attr(struct vc4_compile *c, nir_builder *b,
                          nir_intrinsic_instr *intr)
{
        b->cursor = nir_before_instr(&intr->instr);

        int attr = nir_intrinsic_base(intr);
        enum pipe_format format = c->vs_key->attr_formats[attr];
        uint32_t attr_size = util_format_get_blocksize(format);

        /* We only accept direct inputs, and TGSI only ever gives them to us
         * with an offset value of 0.
         */
        assert(nir_src_as_uint(intr->src[0]) == 0);

        /* Generate dword loads for the VPM values (since these intrinsics may
         * be reordered, the actual reads will be generated at the top of the
         * shader by ntq_setup_inputs()).
         */
        nir_ssa_def *vpm_reads[4];
        for (int i = 0; i < align(attr_size, 4) / 4; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s,
                                                   nir_intrinsic_load_input);
                intr_comp->num_components = 1;
                nir_intrinsic_set_base(intr_comp, nir_intrinsic_base(intr));
                nir_intrinsic_set_component(intr_comp, i);
                intr_comp->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);
                nir_builder_instr_insert(b, &intr_comp->instr);

                vpm_reads[i] = &intr_comp->dest.ssa;
        }

        bool format_warned = false;
        const struct util_format_description *desc =
                util_format_description(format);

        nir_ssa_def *dests[4];
        for (int i = 0; i < intr->num_components; i++) {
                uint8_t swiz = desc->swizzle[i];
                dests[i] = vc4_nir_get_vattr_channel_vpm(c, b, vpm_reads, swiz,
                                                         desc);

                if (!dests[i]) {
                        if (!format_warned) {
                                fprintf(stderr,
                                        "vtx element %d unsupported type: %s\n",
                                        attr, util_format_name(format));
                                format_warned = true;
                        }
                        dests[i] = nir_imm_float(b, 0.0);
                }
        }

        replace_intrinsic_with_vec(b, intr, dests);
}

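/**
 * Fixes up FS point-coordinate inputs: gives otherwise-undefined components
 * defined values and flips Y for upper-left point coordinates.  TLB color
 * reads and ordinary varyings are left alone.
 */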
static void
vc4_nir_lower_fs_input(struct vc4_compile *c, nir_builder *b,
                       nir_intrinsic_instr *intr)
{
        b->cursor = nir_after_instr(&intr->instr);

        if (nir_intrinsic_base(intr) >= VC4_NIR_TLB_COLOR_READ_INPUT &&
            nir_intrinsic_base(intr) < (VC4_NIR_TLB_COLOR_READ_INPUT +
                                        VC4_MAX_SAMPLES)) {
                /* This doesn't need any lowering. */
                return;
        }

        nir_variable *input_var =
                nir_find_variable_with_driver_location(c->s, nir_var_shader_in,
                                                       nir_intrinsic_base(intr));
        assert(input_var);

        int comp = nir_intrinsic_component(intr);

        /* Lower away point coordinates, and fix up PNTC. */
        if (util_varying_is_point_coord(input_var->data.location,
                                        c->fs_key->point_sprite_mask)) {
                assert(intr->num_components == 1);

                nir_ssa_def *result = &intr->dest.ssa;

                switch (comp) {
                case 0:
                case 1:
                        /* If we're not rendering points, we need to set a
                         * defined value for the input that would come from
                         * PNTC.
                         */
                        if (!c->fs_key->is_points)
                                result = nir_imm_float(b, 0.0);
                        break;
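                /* gl_PointCoord is only a vec2, so give .z and .w the
                 * standard (0.0, 1.0) fill values.
                 */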
                case 2:
                        result = nir_imm_float(b, 0.0);
                        break;
                case 3:
                        result = nir_imm_float(b, 1.0);
                        break;
                }

                if (c->fs_key->point_coord_upper_left && comp == 1)
                        result = nir_fsub(b, nir_imm_float(b, 1.0), result);

                if (result != &intr->dest.ssa) {
                        nir_ssa_def_rewrite_uses_after(&intr->dest.ssa,
                                                       nir_src_for_ssa(result),
                                                       result->parent_instr);
                }
        }
}

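/**
 * Coordinate shaders only need to produce position (and point size), so any
 * other output store is dropped.
 */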
static void
vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
                     nir_intrinsic_instr *intr)
{
        nir_variable *output_var =
                nir_find_variable_with_driver_location(c->s, nir_var_shader_out,
                                                       nir_intrinsic_base(intr));
        assert(output_var);

        if (c->stage == QSTAGE_COORD &&
            output_var->data.location != VARYING_SLOT_POS &&
            output_var->data.location != VARYING_SLOT_PSIZ) {
                nir_instr_remove(&intr->instr);
                return;
        }
}

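/**
 * Splits a vector load_uniform into scalar loads, converting the vec4-slot
 * offsets into byte offsets.
 */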
static void
vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
                      nir_intrinsic_instr *intr)
{
        b->cursor = nir_before_instr(&intr->instr);

        /* Generate scalar loads equivalent to the original vector. */
        nir_ssa_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, intr->intrinsic);
                intr_comp->num_components = 1;
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1,
                                  intr->dest.ssa.bit_size, NULL);

                /* Convert the uniform offset to bytes.  If it happens
                 * to be a constant, constant-folding will clean up
                 * the shift for us.
                 */
                nir_intrinsic_set_base(intr_comp,
                                       nir_intrinsic_base(intr) * 16 +
                                       i * 4);
                nir_intrinsic_set_range(intr_comp,
                                        nir_intrinsic_range(intr) * 16 - i * 4);

                intr_comp->src[0] =
                        nir_src_for_ssa(nir_ishl(b, intr->src[0].ssa,
                                                 nir_imm_int(b, 4)));

                dests[i] = &intr_comp->dest.ssa;

                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        replace_intrinsic_with_vec(b, intr, dests);
}

static void
vc4_nir_lower_io_instr(struct vc4_compile *c, nir_builder *b,
                       struct nir_instr *instr)
{
        if (instr->type != nir_instr_type_intrinsic)
                return;
        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

        switch (intr->intrinsic) {
        case nir_intrinsic_load_input:
                if (c->stage == QSTAGE_FRAG)
                        vc4_nir_lower_fs_input(c, b, intr);
                else
                        vc4_nir_lower_vertex_attr(c, b, intr);
                break;

        case nir_intrinsic_store_output:
                vc4_nir_lower_output(c, b, intr);
                break;

        case nir_intrinsic_load_uniform:
                vc4_nir_lower_uniform(c, b, intr);
                break;

        case nir_intrinsic_load_user_clip_plane:
        default:
                break;
        }
}

static bool
vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        nir_builder b;
        nir_builder_init(&b, impl);

        nir_foreach_block(block, impl) {
                nir_foreach_instr_safe(instr, block)
                        vc4_nir_lower_io_instr(c, &b, instr);
        }

        nir_metadata_preserve(impl, nir_metadata_block_index |
                              nir_metadata_dominance);

        return true;
}

void
vc4_nir_lower_io(nir_shader *s, struct vc4_compile *c)
{
        nir_foreach_function(function, s) {
                if (function->impl)
                        vc4_nir_lower_io_impl(c, function->impl);
        }
}