nir: Support sysval tess levels in SPIR-V to NIR
[mesa.git] src/compiler/spirv/vtn_glsl450.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include <math.h>

#include "nir/nir_builtin_builder.h"

#include "vtn_private.h"
#include "GLSL.std.450.h"

#define M_PIf   ((float) M_PI)
#define M_PI_2f ((float) M_PI_2)
#define M_PI_4f ((float) M_PI_4)

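/* Determinant of a 2x2 matrix given as two column vectors:
 * det = col0.x * col1.y - col0.y * col1.x.  The swizzle lets a single
 * vector multiply produce both products before the final subtract.
 */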
static nir_ssa_def *
build_mat2_det(nir_builder *b, nir_ssa_def *col[2])
{
   unsigned swiz[2] = {1, 0};
   nir_ssa_def *p = nir_fmul(b, col[0], nir_swizzle(b, col[1], swiz, 2));
   return nir_fsub(b, nir_channel(b, p, 0), nir_channel(b, p, 1));
}

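/* Determinant of a 3x3 matrix given as three column vectors, using the
 * scalar triple product form dot(col0, cross(col1, col2)), expanded with
 * cyclic (yzx/zxy) swizzles.
 */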
static nir_ssa_def *
build_mat3_det(nir_builder *b, nir_ssa_def *col[3])
{
   unsigned yzx[3] = {1, 2, 0};
   unsigned zxy[3] = {2, 0, 1};

   nir_ssa_def *prod0 =
      nir_fmul(b, col[0],
               nir_fmul(b, nir_swizzle(b, col[1], yzx, 3),
                           nir_swizzle(b, col[2], zxy, 3)));
   nir_ssa_def *prod1 =
      nir_fmul(b, col[0],
               nir_fmul(b, nir_swizzle(b, col[1], zxy, 3),
                           nir_swizzle(b, col[2], yzx, 3)));

   nir_ssa_def *diff = nir_fsub(b, prod0, prod1);

   return nir_fadd(b, nir_channel(b, diff, 0),
                      nir_fadd(b, nir_channel(b, diff, 1),
                                  nir_channel(b, diff, 2)));
}

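/* Determinant of a 4x4 matrix by Laplace expansion along the first column:
 * each 3x3 subdeterminant drops row i of columns 1-3, and the four terms
 * are combined with alternating signs.
 */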
static nir_ssa_def *
build_mat4_det(nir_builder *b, nir_ssa_def **col)
{
   nir_ssa_def *subdet[4];
   for (unsigned i = 0; i < 4; i++) {
      unsigned swiz[3];
      for (unsigned j = 0; j < 3; j++)
         swiz[j] = j + (j >= i);

      nir_ssa_def *subcol[3];
      subcol[0] = nir_swizzle(b, col[1], swiz, 3);
      subcol[1] = nir_swizzle(b, col[2], swiz, 3);
      subcol[2] = nir_swizzle(b, col[3], swiz, 3);

      subdet[i] = build_mat3_det(b, subcol);
   }

   nir_ssa_def *prod = nir_fmul(b, col[0], nir_vec(b, subdet, 4));

   return nir_fadd(b, nir_fsub(b, nir_channel(b, prod, 0),
                               nir_channel(b, prod, 1)),
                   nir_fsub(b, nir_channel(b, prod, 2),
                            nir_channel(b, prod, 3)));
}

static nir_ssa_def *
build_mat_det(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   unsigned size = glsl_get_vector_elements(src->type);

   nir_ssa_def *cols[4];
   for (unsigned i = 0; i < size; i++)
      cols[i] = src->elems[i]->def;

   switch (size) {
   case 2: return build_mat2_det(&b->nb, cols);
   case 3: return build_mat3_det(&b->nb, cols);
   case 4: return build_mat4_det(&b->nb, cols);
   default:
      vtn_fail("Invalid matrix size");
   }
}

/* Computes the determinant of the submatrix given by taking src and
 * removing the specified row and column.
 */
static nir_ssa_def *
build_mat_subdet(struct nir_builder *b, struct vtn_ssa_value *src,
                 unsigned size, unsigned row, unsigned col)
{
   assert(row < size && col < size);
   if (size == 2) {
      return nir_channel(b, src->elems[1 - col]->def, 1 - row);
   } else {
      /* Swizzle to get all but the specified row */
      unsigned swiz[NIR_MAX_VEC_COMPONENTS] = {0};
      for (unsigned j = 0; j < 3; j++)
         swiz[j] = j + (j >= row);

      /* Grab all but the specified column */
      nir_ssa_def *subcol[3];
      for (unsigned j = 0; j < size; j++) {
         if (j != col) {
            subcol[j - (j > col)] = nir_swizzle(b, src->elems[j]->def,
                                                swiz, size - 1);
         }
      }

      if (size == 3) {
         return build_mat2_det(b, subcol);
      } else {
         assert(size == 4);
         return build_mat3_det(b, subcol);
      }
   }
}

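/* Invert an NxN matrix via the adjugate: inverse = adjugate / determinant,
 * where each adjugate entry is the correspondingly signed cofactor (the
 * subdeterminant with one row and one column removed).
 */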
static struct vtn_ssa_value *
matrix_inverse(struct vtn_builder *b, struct vtn_ssa_value *src)
{
   nir_ssa_def *adj_col[4];
   unsigned size = glsl_get_vector_elements(src->type);

   /* Build up an adjugate matrix */
   for (unsigned c = 0; c < size; c++) {
      nir_ssa_def *elem[4];
      for (unsigned r = 0; r < size; r++) {
         elem[r] = build_mat_subdet(&b->nb, src, size, c, r);

         if ((r + c) % 2)
            elem[r] = nir_fneg(&b->nb, elem[r]);
      }

      adj_col[c] = nir_vec(&b->nb, elem, size);
   }

   nir_ssa_def *det_inv = nir_frcp(&b->nb, build_mat_det(b, src));

   struct vtn_ssa_value *val = vtn_create_ssa_value(b, src->type);
   for (unsigned i = 0; i < size; i++)
      val->elems[i]->def = nir_fmul(&b->nb, adj_col[i], det_inv);

   return val;
}

/**
 * Approximate asin(x) by the piecewise formula:
 * for |x| < 0.5, asin~(x) = x * (1 + x²(pS0 + x²(pS1 + x²*pS2)) / (1 + x²*qS1))
 * for |x| ≥ 0.5, asin~(x) = sign(x) * (π/2 - sqrt(1 - |x|) * (π/2 + |x|(π/4 - 1 + |x|(p0 + |x|p1))))
 *
 * The latter is correct to first order at x=0 and x=±1 regardless of the p
 * coefficients but can be made second-order correct at both ends by selecting
 * the fit coefficients appropriately. Different p coefficients can be used
 * in the asin and acos implementation to minimize some relative error metric
 * in each case.
 */
static nir_ssa_def *
build_asin(nir_builder *b, nir_ssa_def *x, float p0, float p1, bool piecewise)
{
   if (x->bit_size == 16) {
      /* The polynomial approximation isn't precise enough to meet half-float
       * precision requirements. Alternatively, we could implement this using
       * the formula:
       *
       *    asin(x) = atan2(x, sqrt(1 - x*x))
       *
       * But that is very expensive, so instead we just do the polynomial
       * approximation in 32-bit math and then we convert the result back to
       * 16-bit.
       */
      return nir_f2f16(b, build_asin(b, nir_f2f32(b, x), p0, p1, piecewise));
   }
   nir_ssa_def *one = nir_imm_floatN_t(b, 1.0f, x->bit_size);
   nir_ssa_def *half = nir_imm_floatN_t(b, 0.5f, x->bit_size);
   nir_ssa_def *abs_x = nir_fabs(b, x);

   nir_ssa_def *p0_plus_xp1 = nir_fadd_imm(b, nir_fmul_imm(b, abs_x, p1), p0);

   nir_ssa_def *expr_tail =
      nir_fadd_imm(b, nir_fmul(b, abs_x,
                               nir_fadd_imm(b, nir_fmul(b, abs_x,
                                                        p0_plus_xp1),
                                            M_PI_4f - 1.0f)),
                   M_PI_2f);

   nir_ssa_def *result0 = nir_fmul(b, nir_fsign(b, x),
                      nir_fsub(b, nir_imm_floatN_t(b, M_PI_2f, x->bit_size),
                               nir_fmul(b, nir_fsqrt(b, nir_fsub(b, one, abs_x)),
                                        expr_tail)));
   if (piecewise) {
      /* approximation for |x| < 0.5 */
      const float pS0 =  1.6666586697e-01f;
      const float pS1 = -4.2743422091e-02f;
      const float pS2 = -8.6563630030e-03f;
      const float qS1 = -7.0662963390e-01f;

      nir_ssa_def *x2 = nir_fmul(b, x, x);
      nir_ssa_def *p = nir_fmul(b,
                                x2,
                                nir_fadd_imm(b,
                                             nir_fmul(b,
                                                      x2,
                                                      nir_fadd_imm(b, nir_fmul_imm(b, x2, pS2),
                                                                   pS1)),
                                             pS0));

      nir_ssa_def *q = nir_fadd(b, one, nir_fmul_imm(b, x2, qS1));
      nir_ssa_def *result1 = nir_fadd(b, x, nir_fmul(b, x, nir_fdiv(b, p, q)));
      return nir_bcsel(b, nir_flt(b, abs_x, half), result1, result0);
   } else {
      return result0;
   }
}

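/* Map a GLSL.std.450 opcode to the single NIR ALU opcode that implements it.
 * *exact is set for NMin/NMax so later optimizations cannot break their
 * NaN-handling guarantees.  Opcodes with no one-to-one NIR equivalent are
 * handled case by case in handle_glsl450_alu() instead.
 */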
static nir_op
vtn_nir_alu_op_for_spirv_glsl_opcode(struct vtn_builder *b,
                                     enum GLSLstd450 opcode,
                                     unsigned execution_mode,
                                     bool *exact)
{
   *exact = false;
   switch (opcode) {
   case GLSLstd450Round:         return nir_op_fround_even;
   case GLSLstd450RoundEven:     return nir_op_fround_even;
   case GLSLstd450Trunc:         return nir_op_ftrunc;
   case GLSLstd450FAbs:          return nir_op_fabs;
   case GLSLstd450SAbs:          return nir_op_iabs;
   case GLSLstd450FSign:         return nir_op_fsign;
   case GLSLstd450SSign:         return nir_op_isign;
   case GLSLstd450Floor:         return nir_op_ffloor;
   case GLSLstd450Ceil:          return nir_op_fceil;
   case GLSLstd450Fract:         return nir_op_ffract;
   case GLSLstd450Sin:           return nir_op_fsin;
   case GLSLstd450Cos:           return nir_op_fcos;
   case GLSLstd450Pow:           return nir_op_fpow;
   case GLSLstd450Exp2:          return nir_op_fexp2;
   case GLSLstd450Log2:          return nir_op_flog2;
   case GLSLstd450Sqrt:          return nir_op_fsqrt;
   case GLSLstd450InverseSqrt:   return nir_op_frsq;
   case GLSLstd450NMin:          *exact = true; return nir_op_fmin;
   case GLSLstd450FMin:          return nir_op_fmin;
   case GLSLstd450UMin:          return nir_op_umin;
   case GLSLstd450SMin:          return nir_op_imin;
   case GLSLstd450NMax:          *exact = true; return nir_op_fmax;
   case GLSLstd450FMax:          return nir_op_fmax;
   case GLSLstd450UMax:          return nir_op_umax;
   case GLSLstd450SMax:          return nir_op_imax;
   case GLSLstd450FMix:          return nir_op_flrp;
   case GLSLstd450Fma:           return nir_op_ffma;
   case GLSLstd450Ldexp:         return nir_op_ldexp;
   case GLSLstd450FindILsb:      return nir_op_find_lsb;
   case GLSLstd450FindSMsb:      return nir_op_ifind_msb;
   case GLSLstd450FindUMsb:      return nir_op_ufind_msb;

   /* Packing/Unpacking functions */
   case GLSLstd450PackSnorm4x8:     return nir_op_pack_snorm_4x8;
   case GLSLstd450PackUnorm4x8:     return nir_op_pack_unorm_4x8;
   case GLSLstd450PackSnorm2x16:    return nir_op_pack_snorm_2x16;
   case GLSLstd450PackUnorm2x16:    return nir_op_pack_unorm_2x16;
   case GLSLstd450PackHalf2x16:     return nir_op_pack_half_2x16;
   case GLSLstd450PackDouble2x32:   return nir_op_pack_64_2x32;
   case GLSLstd450UnpackSnorm4x8:   return nir_op_unpack_snorm_4x8;
   case GLSLstd450UnpackUnorm4x8:   return nir_op_unpack_unorm_4x8;
   case GLSLstd450UnpackSnorm2x16:  return nir_op_unpack_snorm_2x16;
   case GLSLstd450UnpackUnorm2x16:  return nir_op_unpack_unorm_2x16;
   case GLSLstd450UnpackHalf2x16:
      if (execution_mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16)
         return nir_op_unpack_half_2x16_flush_to_zero;
      else
         return nir_op_unpack_half_2x16;
   case GLSLstd450UnpackDouble2x32: return nir_op_unpack_64_2x32;

   default:
      vtn_fail("No NIR equivalent");
   }
}

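/* Build an immediate float constant with the same bit size as src[0]. */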
#define NIR_IMM_FP(n, v) (nir_imm_floatN_t(n, v, src[0]->bit_size))

static void
handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
                   const uint32_t *w, unsigned count)
{
   struct nir_builder *nb = &b->nb;
   const struct glsl_type *dest_type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;

   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   val->ssa = vtn_create_ssa_value(b, dest_type);

   /* Collect the various SSA sources */
   unsigned num_inputs = count - 5;
   nir_ssa_def *src[3] = { NULL, };
   for (unsigned i = 0; i < num_inputs; i++) {
      /* These are handled specially below */
      if (vtn_untyped_value(b, w[i + 5])->value_type == vtn_value_type_pointer)
         continue;

      src[i] = vtn_ssa_value(b, w[i + 5])->def;
   }

   switch (entrypoint) {
   case GLSLstd450Radians:
      val->ssa->def = nir_radians(nb, src[0]);
      return;
   case GLSLstd450Degrees:
      val->ssa->def = nir_degrees(nb, src[0]);
      return;
   case GLSLstd450Tan:
      val->ssa->def = nir_ftan(nb, src[0]);
      return;

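   /* Modf returns the fractional part and writes the whole-number part
    * through the pointer operand (w[6]); both parts take the sign of x.
    */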
   case GLSLstd450Modf: {
      nir_ssa_def *sign = nir_fsign(nb, src[0]);
      nir_ssa_def *abs = nir_fabs(nb, src[0]);
      val->ssa->def = nir_fmul(nb, sign, nir_ffract(nb, abs));
      nir_store_deref(nb, vtn_nir_deref(b, w[6]),
                      nir_fmul(nb, sign, nir_ffloor(nb, abs)), 0xf);
      return;
   }

   case GLSLstd450ModfStruct: {
      nir_ssa_def *sign = nir_fsign(nb, src[0]);
      nir_ssa_def *abs = nir_fabs(nb, src[0]);
      vtn_assert(glsl_type_is_struct_or_ifc(val->ssa->type));
      val->ssa->elems[0]->def = nir_fmul(nb, sign, nir_ffract(nb, abs));
      val->ssa->elems[1]->def = nir_fmul(nb, sign, nir_ffloor(nb, abs));
      return;
   }

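   /* step(edge, x): 0.0 if x < edge, 1.0 otherwise, i.e. sge(x, edge). */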
   case GLSLstd450Step:
      val->ssa->def = nir_sge(nb, src[1], src[0]);
      return;

   case GLSLstd450Length:
      val->ssa->def = nir_fast_length(nb, src[0]);
      return;
   case GLSLstd450Distance:
      val->ssa->def = nir_fast_distance(nb, src[0], src[1]);
      return;
   case GLSLstd450Normalize:
      val->ssa->def = nir_fast_normalize(nb, src[0]);
      return;

   case GLSLstd450Exp:
      val->ssa->def = nir_fexp(nb, src[0]);
      return;

   case GLSLstd450Log:
      val->ssa->def = nir_flog(nb, src[0]);
      return;

   case GLSLstd450FClamp:
      val->ssa->def = nir_fclamp(nb, src[0], src[1], src[2]);
      return;
   case GLSLstd450NClamp:
      nb->exact = true;
      val->ssa->def = nir_fclamp(nb, src[0], src[1], src[2]);
      nb->exact = false;
      return;
   case GLSLstd450UClamp:
      val->ssa->def = nir_uclamp(nb, src[0], src[1], src[2]);
      return;
   case GLSLstd450SClamp:
      val->ssa->def = nir_iclamp(nb, src[0], src[1], src[2]);
      return;

   case GLSLstd450Cross: {
      val->ssa->def = nir_cross3(nb, src[0], src[1]);
      return;
   }

   case GLSLstd450SmoothStep: {
      val->ssa->def = nir_smoothstep(nb, src[0], src[1], src[2]);
      return;
   }

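   /* faceforward(N, I, Nref) = dot(Nref, I) < 0 ? N : -N, with
    * src[0] = N, src[1] = I, src[2] = Nref.
    */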
   case GLSLstd450FaceForward:
      val->ssa->def =
         nir_bcsel(nb, nir_flt(nb, nir_fdot(nb, src[2], src[1]),
                                   NIR_IMM_FP(nb, 0.0)),
                       src[0], nir_fneg(nb, src[0]));
      return;

   case GLSLstd450Reflect:
      /* I - 2 * dot(N, I) * N */
      val->ssa->def =
         nir_fsub(nb, src[0], nir_fmul(nb, NIR_IMM_FP(nb, 2.0),
                              nir_fmul(nb, nir_fdot(nb, src[0], src[1]),
                                           src[1])));
      return;

   case GLSLstd450Refract: {
      nir_ssa_def *I = src[0];
      nir_ssa_def *N = src[1];
      nir_ssa_def *eta = src[2];
      nir_ssa_def *n_dot_i = nir_fdot(nb, N, I);
      nir_ssa_def *one = NIR_IMM_FP(nb, 1.0);
      nir_ssa_def *zero = NIR_IMM_FP(nb, 0.0);
      /* According to the SPIR-V and GLSL specs, eta is always a float
       * regardless of the type of the other operands. However in practice it
       * seems that if you try to pass it a float then glslang will just
       * promote it to a double and generate invalid SPIR-V. In order to
       * support a hypothetical fixed version of glslang we’ll promote eta to
       * double if the other operands are double also.
       */
      if (I->bit_size != eta->bit_size) {
         nir_op conversion_op =
            nir_type_conversion_op(nir_type_float | eta->bit_size,
                                   nir_type_float | I->bit_size,
                                   nir_rounding_mode_undef);
         eta = nir_build_alu(nb, conversion_op, eta, NULL, NULL, NULL);
      }
      /* k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I)) */
      nir_ssa_def *k =
         nir_fsub(nb, one, nir_fmul(nb, eta, nir_fmul(nb, eta,
                  nir_fsub(nb, one, nir_fmul(nb, n_dot_i, n_dot_i)))));
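      /* refract(I, N, eta) = eta*I - (eta*dot(N, I) + sqrt(k))*N,
       * or 0.0 when k < 0 (total internal reflection).
       */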
      nir_ssa_def *result =
         nir_fsub(nb, nir_fmul(nb, eta, I),
                  nir_fmul(nb, nir_fadd(nb, nir_fmul(nb, eta, n_dot_i),
                                        nir_fsqrt(nb, k)), N));
      /* XXX: bcsel, or if statement? */
      val->ssa->def = nir_bcsel(nb, nir_flt(nb, k, zero), zero, result);
      return;
   }

   case GLSLstd450Sinh:
      /* 0.5 * (e^x - e^(-x)) */
      val->ssa->def =
         nir_fmul_imm(nb, nir_fsub(nb, nir_fexp(nb, src[0]),
                                       nir_fexp(nb, nir_fneg(nb, src[0]))),
                      0.5f);
      return;

   case GLSLstd450Cosh:
      /* 0.5 * (e^x + e^(-x)) */
      val->ssa->def =
         nir_fmul_imm(nb, nir_fadd(nb, nir_fexp(nb, src[0]),
                                       nir_fexp(nb, nir_fneg(nb, src[0]))),
                      0.5f);
      return;

   case GLSLstd450Tanh: {
      /* tanh(x) := (e^x - e^(-x)) / (e^x + e^(-x))
       *
       * We clamp x to [-10, +10] to avoid precision problems.  When x > 10,
       * e^x dominates the sum, e^(-x) is lost and tanh(x) is 1.0 for 32 bit
       * floating point.
       *
       * For 16-bit precision we clamp x to [-4.2, +4.2].
       */
      const uint32_t bit_size = src[0]->bit_size;
      const double clamped_x = bit_size > 16 ? 10.0 : 4.2;
      nir_ssa_def *x = nir_fclamp(nb, src[0],
                                  nir_imm_floatN_t(nb, -clamped_x, bit_size),
                                  nir_imm_floatN_t(nb, clamped_x, bit_size));
      val->ssa->def =
         nir_fdiv(nb, nir_fsub(nb, nir_fexp(nb, x),
                               nir_fexp(nb, nir_fneg(nb, x))),
                  nir_fadd(nb, nir_fexp(nb, x),
                           nir_fexp(nb, nir_fneg(nb, x))));
      return;
   }

   case GLSLstd450Asinh:
      val->ssa->def = nir_fmul(nb, nir_fsign(nb, src[0]),
         nir_flog(nb, nir_fadd(nb, nir_fabs(nb, src[0]),
                      nir_fsqrt(nb, nir_fadd_imm(nb, nir_fmul(nb, src[0], src[0]),
                                                 1.0f)))));
      return;
   case GLSLstd450Acosh:
      val->ssa->def = nir_flog(nb, nir_fadd(nb, src[0],
         nir_fsqrt(nb, nir_fadd_imm(nb, nir_fmul(nb, src[0], src[0]),
                                    -1.0f))));
      return;
   case GLSLstd450Atanh: {
      nir_ssa_def *one = nir_imm_floatN_t(nb, 1.0, src[0]->bit_size);
      val->ssa->def =
         nir_fmul_imm(nb, nir_flog(nb, nir_fdiv(nb, nir_fadd(nb, src[0], one),
                                                nir_fsub(nb, one, src[0]))),
                      0.5f);
      return;
   }

   case GLSLstd450Asin:
      val->ssa->def = build_asin(nb, src[0], 0.086566724, -0.03102955, true);
      return;

   case GLSLstd450Acos:
      val->ssa->def =
         nir_fsub(nb, nir_imm_floatN_t(nb, M_PI_2f, src[0]->bit_size),
                  build_asin(nb, src[0], 0.08132463, -0.02363318, false));
      return;

   case GLSLstd450Atan:
      val->ssa->def = nir_atan(nb, src[0]);
      return;

   case GLSLstd450Atan2:
      val->ssa->def = nir_atan2(nb, src[0], src[1]);
      return;

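   /* Frexp returns the significand and writes the exponent through the
    * pointer operand (w[6]); FrexpStruct returns both as struct members.
    */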
   case GLSLstd450Frexp: {
      nir_ssa_def *exponent = nir_frexp_exp(nb, src[0]);
      val->ssa->def = nir_frexp_sig(nb, src[0]);
      nir_store_deref(nb, vtn_nir_deref(b, w[6]), exponent, 0xf);
      return;
   }

   case GLSLstd450FrexpStruct: {
      vtn_assert(glsl_type_is_struct_or_ifc(val->ssa->type));
      val->ssa->elems[0]->def = nir_frexp_sig(nb, src[0]);
      val->ssa->elems[1]->def = nir_frexp_exp(nb, src[0]);
      return;
   }

   default: {
      unsigned execution_mode =
         b->shader->info.float_controls_execution_mode;
      bool exact;
      nir_op op = vtn_nir_alu_op_for_spirv_glsl_opcode(b, entrypoint, execution_mode, &exact);
      b->nb.exact = exact;
      val->ssa->def = nir_build_alu(&b->nb, op, src[0], src[1], src[2], NULL);
      b->nb.exact = false;
      return;
   }
   }
}

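/* Lower the GLSL.std.450 InterpolateAt* instructions to the corresponding
 * nir_intrinsic_interp_deref_at_* intrinsics on the input-variable deref.
 */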
static void
handle_glsl450_interpolation(struct vtn_builder *b, enum GLSLstd450 opcode,
                             const uint32_t *w, unsigned count)
{
   const struct glsl_type *dest_type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;

   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   val->ssa = vtn_create_ssa_value(b, dest_type);

   nir_intrinsic_op op;
   switch (opcode) {
   case GLSLstd450InterpolateAtCentroid:
      op = nir_intrinsic_interp_deref_at_centroid;
      break;
   case GLSLstd450InterpolateAtSample:
      op = nir_intrinsic_interp_deref_at_sample;
      break;
   case GLSLstd450InterpolateAtOffset:
      op = nir_intrinsic_interp_deref_at_offset;
      break;
   default:
      vtn_fail("Invalid opcode");
   }

   nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->nb.shader, op);

   struct vtn_pointer *ptr =
      vtn_value(b, w[5], vtn_value_type_pointer)->pointer;
   nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);

   /* If the value we are interpolating has an index into a vector then
    * interpolate the vector and index the result of that instead. This is
    * necessary because the index will get generated as a series of nir_bcsel
    * instructions so it would no longer be an input variable.
    */
   const bool vec_array_deref = deref->deref_type == nir_deref_type_array &&
      glsl_type_is_vector(nir_deref_instr_parent(deref)->type);

   nir_deref_instr *vec_deref = NULL;
   if (vec_array_deref) {
      vec_deref = deref;
      deref = nir_deref_instr_parent(deref);
   }
   intrin->src[0] = nir_src_for_ssa(&deref->dest.ssa);

   switch (opcode) {
   case GLSLstd450InterpolateAtCentroid:
      break;
   case GLSLstd450InterpolateAtSample:
   case GLSLstd450InterpolateAtOffset:
      intrin->src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
      break;
   default:
      vtn_fail("Invalid opcode");
   }

   intrin->num_components = glsl_get_vector_elements(deref->type);
   nir_ssa_dest_init(&intrin->instr, &intrin->dest,
                     glsl_get_vector_elements(deref->type),
                     glsl_get_bit_size(deref->type), NULL);

   nir_builder_instr_insert(&b->nb, &intrin->instr);

   if (vec_array_deref) {
      assert(vec_deref);
      val->ssa->def = nir_vector_extract(&b->nb, &intrin->dest.ssa,
                                         vec_deref->arr.index.ssa);
   } else {
      val->ssa->def = &intrin->dest.ssa;
   }
}

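/* Entry point for the GLSL.std.450 extended instruction set: dispatch each
 * opcode to the matrix, interpolation, or ALU handler above.
 */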
bool
vtn_handle_glsl450_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                               const uint32_t *w, unsigned count)
{
   switch ((enum GLSLstd450)ext_opcode) {
   case GLSLstd450Determinant: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = rzalloc(b, struct vtn_ssa_value);
      val->ssa->type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
      val->ssa->def = build_mat_det(b, vtn_ssa_value(b, w[5]));
      break;
   }

   case GLSLstd450MatrixInverse: {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
      val->ssa = matrix_inverse(b, vtn_ssa_value(b, w[5]));
      break;
   }

   case GLSLstd450InterpolateAtCentroid:
   case GLSLstd450InterpolateAtSample:
   case GLSLstd450InterpolateAtOffset:
      handle_glsl450_interpolation(b, (enum GLSLstd450)ext_opcode, w, count);
      break;

   default:
      handle_glsl450_alu(b, (enum GLSLstd450)ext_opcode, w, count);
   }

   return true;
}