2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
24 * Jason Ekstrand (jason@jlekstrand.net)
30 #include "nir/nir_builtin_builder.h"
32 #include "vtn_private.h"
33 #include "GLSL.std.450.h"
35 #define M_PIf ((float) M_PI)
36 #define M_PI_2f ((float) M_PI_2)
37 #define M_PI_4f ((float) M_PI_4)
40 build_mat2_det(nir_builder
*b
, nir_ssa_def
*col
[2])
42 unsigned swiz
[2] = {1, 0 };
43 nir_ssa_def
*p
= nir_fmul(b
, col
[0], nir_swizzle(b
, col
[1], swiz
, 2, true));
44 return nir_fsub(b
, nir_channel(b
, p
, 0), nir_channel(b
, p
, 1));
48 build_mat3_det(nir_builder
*b
, nir_ssa_def
*col
[3])
50 unsigned yzx
[3] = {1, 2, 0 };
51 unsigned zxy
[3] = {2, 0, 1 };
55 nir_fmul(b
, nir_swizzle(b
, col
[1], yzx
, 3, true),
56 nir_swizzle(b
, col
[2], zxy
, 3, true)));
59 nir_fmul(b
, nir_swizzle(b
, col
[1], zxy
, 3, true),
60 nir_swizzle(b
, col
[2], yzx
, 3, true)));
62 nir_ssa_def
*diff
= nir_fsub(b
, prod0
, prod1
);
64 return nir_fadd(b
, nir_channel(b
, diff
, 0),
65 nir_fadd(b
, nir_channel(b
, diff
, 1),
66 nir_channel(b
, diff
, 2)));
70 build_mat4_det(nir_builder
*b
, nir_ssa_def
**col
)
72 nir_ssa_def
*subdet
[4];
73 for (unsigned i
= 0; i
< 4; i
++) {
75 for (unsigned j
= 0; j
< 3; j
++)
76 swiz
[j
] = j
+ (j
>= i
);
78 nir_ssa_def
*subcol
[3];
79 subcol
[0] = nir_swizzle(b
, col
[1], swiz
, 3, true);
80 subcol
[1] = nir_swizzle(b
, col
[2], swiz
, 3, true);
81 subcol
[2] = nir_swizzle(b
, col
[3], swiz
, 3, true);
83 subdet
[i
] = build_mat3_det(b
, subcol
);
86 nir_ssa_def
*prod
= nir_fmul(b
, col
[0], nir_vec(b
, subdet
, 4));
88 return nir_fadd(b
, nir_fsub(b
, nir_channel(b
, prod
, 0),
89 nir_channel(b
, prod
, 1)),
90 nir_fsub(b
, nir_channel(b
, prod
, 2),
91 nir_channel(b
, prod
, 3)));
95 build_mat_det(struct vtn_builder
*b
, struct vtn_ssa_value
*src
)
97 unsigned size
= glsl_get_vector_elements(src
->type
);
100 for (unsigned i
= 0; i
< size
; i
++)
101 cols
[i
] = src
->elems
[i
]->def
;
104 case 2: return build_mat2_det(&b
->nb
, cols
);
105 case 3: return build_mat3_det(&b
->nb
, cols
);
106 case 4: return build_mat4_det(&b
->nb
, cols
);
108 vtn_fail("Invalid matrix size");
112 /* Computes the determinate of the submatrix given by taking src and
113 * removing the specified row and column.
116 build_mat_subdet(struct nir_builder
*b
, struct vtn_ssa_value
*src
,
117 unsigned size
, unsigned row
, unsigned col
)
119 assert(row
< size
&& col
< size
);
121 return nir_channel(b
, src
->elems
[1 - col
]->def
, 1 - row
);
123 /* Swizzle to get all but the specified row */
125 for (unsigned j
= 0; j
< 3; j
++)
126 swiz
[j
] = j
+ (j
>= row
);
128 /* Grab all but the specified column */
129 nir_ssa_def
*subcol
[3];
130 for (unsigned j
= 0; j
< size
; j
++) {
132 subcol
[j
- (j
> col
)] = nir_swizzle(b
, src
->elems
[j
]->def
,
133 swiz
, size
- 1, true);
138 return build_mat2_det(b
, subcol
);
141 return build_mat3_det(b
, subcol
);
146 static struct vtn_ssa_value
*
147 matrix_inverse(struct vtn_builder
*b
, struct vtn_ssa_value
*src
)
149 nir_ssa_def
*adj_col
[4];
150 unsigned size
= glsl_get_vector_elements(src
->type
);
152 /* Build up an adjugate matrix */
153 for (unsigned c
= 0; c
< size
; c
++) {
154 nir_ssa_def
*elem
[4];
155 for (unsigned r
= 0; r
< size
; r
++) {
156 elem
[r
] = build_mat_subdet(&b
->nb
, src
, size
, c
, r
);
159 elem
[r
] = nir_fneg(&b
->nb
, elem
[r
]);
162 adj_col
[c
] = nir_vec(&b
->nb
, elem
, size
);
165 nir_ssa_def
*det_inv
= nir_frcp(&b
->nb
, build_mat_det(b
, src
));
167 struct vtn_ssa_value
*val
= vtn_create_ssa_value(b
, src
->type
);
168 for (unsigned i
= 0; i
< size
; i
++)
169 val
->elems
[i
]->def
= nir_fmul(&b
->nb
, adj_col
[i
], det_inv
);
178 build_exp(nir_builder
*b
, nir_ssa_def
*x
)
180 return nir_fexp2(b
, nir_fmul_imm(b
, x
, M_LOG2E
));
184 * Return ln(x) - the natural logarithm of x.
187 build_log(nir_builder
*b
, nir_ssa_def
*x
)
189 return nir_fmul_imm(b
, nir_flog2(b
, x
), 1.0 / M_LOG2E
);
193 * Approximate asin(x) by the formula:
194 * asin~(x) = sign(x) * (pi/2 - sqrt(1 - |x|) * (pi/2 + |x|(pi/4 - 1 + |x|(p0 + |x|p1))))
196 * which is correct to first order at x=0 and x=±1 regardless of the p
197 * coefficients but can be made second-order correct at both ends by selecting
198 * the fit coefficients appropriately. Different p coefficients can be used
199 * in the asin and acos implementation to minimize some relative error metric
203 build_asin(nir_builder
*b
, nir_ssa_def
*x
, float p0
, float p1
)
205 if (x
->bit_size
== 16) {
206 /* The polynomial approximation isn't precise enough to meet half-float
207 * precision requirements. Alternatively, we could implement this using
210 * asin(x) = atan2(x, sqrt(1 - x*x))
212 * But that is very expensive, so instead we just do the polynomial
213 * approximation in 32-bit math and then we convert the result back to
216 return nir_f2f16(b
, build_asin(b
, nir_f2f32(b
, x
), p0
, p1
));
219 nir_ssa_def
*one
= nir_imm_floatN_t(b
, 1.0f
, x
->bit_size
);
220 nir_ssa_def
*abs_x
= nir_fabs(b
, x
);
222 nir_ssa_def
*p0_plus_xp1
= nir_fadd_imm(b
, nir_fmul_imm(b
, abs_x
, p1
), p0
);
224 nir_ssa_def
*expr_tail
=
225 nir_fadd_imm(b
, nir_fmul(b
, abs_x
,
226 nir_fadd_imm(b
, nir_fmul(b
, abs_x
,
231 return nir_fmul(b
, nir_fsign(b
, x
),
232 nir_fsub(b
, nir_imm_floatN_t(b
, M_PI_2f
, x
->bit_size
),
233 nir_fmul(b
, nir_fsqrt(b
, nir_fsub(b
, one
, abs_x
)),
238 * Compute xs[0] + xs[1] + xs[2] + ... using fadd.
241 build_fsum(nir_builder
*b
, nir_ssa_def
**xs
, int terms
)
243 nir_ssa_def
*accum
= xs
[0];
245 for (int i
= 1; i
< terms
; i
++)
246 accum
= nir_fadd(b
, accum
, xs
[i
]);
252 build_atan(nir_builder
*b
, nir_ssa_def
*y_over_x
)
254 const uint32_t bit_size
= y_over_x
->bit_size
;
256 nir_ssa_def
*abs_y_over_x
= nir_fabs(b
, y_over_x
);
257 nir_ssa_def
*one
= nir_imm_floatN_t(b
, 1.0f
, bit_size
);
260 * range-reduction, first step:
262 * / y_over_x if |y_over_x| <= 1.0;
264 * \ 1.0 / y_over_x otherwise
266 nir_ssa_def
*x
= nir_fdiv(b
, nir_fmin(b
, abs_y_over_x
, one
),
267 nir_fmax(b
, abs_y_over_x
, one
));
270 * approximate atan by evaluating polynomial:
272 * x * 0.9999793128310355 - x^3 * 0.3326756418091246 +
273 * x^5 * 0.1938924977115610 - x^7 * 0.1173503194786851 +
274 * x^9 * 0.0536813784310406 - x^11 * 0.0121323213173444
276 nir_ssa_def
*x_2
= nir_fmul(b
, x
, x
);
277 nir_ssa_def
*x_3
= nir_fmul(b
, x_2
, x
);
278 nir_ssa_def
*x_5
= nir_fmul(b
, x_3
, x_2
);
279 nir_ssa_def
*x_7
= nir_fmul(b
, x_5
, x_2
);
280 nir_ssa_def
*x_9
= nir_fmul(b
, x_7
, x_2
);
281 nir_ssa_def
*x_11
= nir_fmul(b
, x_9
, x_2
);
283 nir_ssa_def
*polynomial_terms
[] = {
284 nir_fmul_imm(b
, x
, 0.9999793128310355f
),
285 nir_fmul_imm(b
, x_3
, -0.3326756418091246f
),
286 nir_fmul_imm(b
, x_5
, 0.1938924977115610f
),
287 nir_fmul_imm(b
, x_7
, -0.1173503194786851f
),
288 nir_fmul_imm(b
, x_9
, 0.0536813784310406f
),
289 nir_fmul_imm(b
, x_11
, -0.0121323213173444f
),
293 build_fsum(b
, polynomial_terms
, ARRAY_SIZE(polynomial_terms
));
295 /* range-reduction fixup */
296 tmp
= nir_fadd(b
, tmp
,
297 nir_fmul(b
, nir_b2f(b
, nir_flt(b
, one
, abs_y_over_x
), bit_size
),
298 nir_fadd_imm(b
, nir_fmul_imm(b
, tmp
, -2.0f
), M_PI_2f
)));
301 return nir_fmul(b
, tmp
, nir_fsign(b
, y_over_x
));
305 build_atan2(nir_builder
*b
, nir_ssa_def
*y
, nir_ssa_def
*x
)
307 assert(y
->bit_size
== x
->bit_size
);
308 const uint32_t bit_size
= x
->bit_size
;
310 nir_ssa_def
*zero
= nir_imm_floatN_t(b
, 0, bit_size
);
311 nir_ssa_def
*one
= nir_imm_floatN_t(b
, 1, bit_size
);
313 /* If we're on the left half-plane rotate the coordinates π/2 clock-wise
314 * for the y=0 discontinuity to end up aligned with the vertical
315 * discontinuity of atan(s/t) along t=0. This also makes sure that we
316 * don't attempt to divide by zero along the vertical line, which may give
317 * unspecified results on non-GLSL 4.1-capable hardware.
319 nir_ssa_def
*flip
= nir_fge(b
, zero
, x
);
320 nir_ssa_def
*s
= nir_bcsel(b
, flip
, nir_fabs(b
, x
), y
);
321 nir_ssa_def
*t
= nir_bcsel(b
, flip
, y
, nir_fabs(b
, x
));
323 /* If the magnitude of the denominator exceeds some huge value, scale down
324 * the arguments in order to prevent the reciprocal operation from flushing
325 * its result to zero, which would cause precision problems, and for s
326 * infinite would cause us to return a NaN instead of the correct finite
329 * If fmin and fmax are respectively the smallest and largest positive
330 * normalized floating point values representable by the implementation,
331 * the constants below should be in agreement with:
334 * scale <= 1 / fmin / fmax (for |t| >= huge)
336 * In addition scale should be a negative power of two in order to avoid
337 * loss of precision. The values chosen below should work for most usual
338 * floating point representations with at least the dynamic range of ATI's
339 * 24-bit representation.
341 const double huge_val
= bit_size
>= 32 ? 1e18
: 16384;
342 nir_ssa_def
*huge
= nir_imm_floatN_t(b
, huge_val
, bit_size
);
343 nir_ssa_def
*scale
= nir_bcsel(b
, nir_fge(b
, nir_fabs(b
, t
), huge
),
344 nir_imm_floatN_t(b
, 0.25, bit_size
), one
);
345 nir_ssa_def
*rcp_scaled_t
= nir_frcp(b
, nir_fmul(b
, t
, scale
));
346 nir_ssa_def
*s_over_t
= nir_fmul(b
, nir_fmul(b
, s
, scale
), rcp_scaled_t
);
348 /* For |x| = |y| assume tan = 1 even if infinite (i.e. pretend momentarily
349 * that ∞/∞ = 1) in order to comply with the rather artificial rules
350 * inherited from IEEE 754-2008, namely:
352 * "atan2(±∞, −∞) is ±3π/4
353 * atan2(±∞, +∞) is ±π/4"
355 * Note that this is inconsistent with the rules for the neighborhood of
356 * zero that are based on iterated limits:
358 * "atan2(±0, −0) is ±π
359 * atan2(±0, +0) is ±0"
361 * but GLSL specifically allows implementations to deviate from IEEE rules
362 * at (0,0), so we take that license (i.e. pretend that 0/0 = 1 here as
365 nir_ssa_def
*tan
= nir_bcsel(b
, nir_feq(b
, nir_fabs(b
, x
), nir_fabs(b
, y
)),
366 one
, nir_fabs(b
, s_over_t
));
368 /* Calculate the arctangent and fix up the result if we had flipped the
372 nir_fadd(b
, nir_fmul_imm(b
, nir_b2f(b
, flip
, bit_size
), M_PI_2f
),
375 /* Rather convoluted calculation of the sign of the result. When x < 0 we
376 * cannot use fsign because we need to be able to distinguish between
377 * negative and positive zero. We don't use bitwise arithmetic tricks for
378 * consistency with the GLSL front-end. When x >= 0 rcp_scaled_t will
379 * always be non-negative so this won't be able to distinguish between
380 * negative and positive zero, but we don't care because atan2 is
381 * continuous along the whole positive y = 0 half-line, so it won't affect
382 * the result significantly.
384 return nir_bcsel(b
, nir_flt(b
, nir_fmin(b
, y
, rcp_scaled_t
), zero
),
385 nir_fneg(b
, arc
), arc
);
389 build_frexp16(nir_builder
*b
, nir_ssa_def
*x
, nir_ssa_def
**exponent
)
391 assert(x
->bit_size
== 16);
393 nir_ssa_def
*abs_x
= nir_fabs(b
, x
);
394 nir_ssa_def
*zero
= nir_imm_floatN_t(b
, 0, 16);
396 /* Half-precision floating-point values are stored as
401 * An exponent shift of 10 will shift the mantissa out, leaving only the
402 * exponent and sign bit (which itself may be zero, if the absolute value
403 * was taken before the bitcast and shift).
405 nir_ssa_def
*exponent_shift
= nir_imm_int(b
, 10);
406 nir_ssa_def
*exponent_bias
= nir_imm_intN_t(b
, -14, 16);
408 nir_ssa_def
*sign_mantissa_mask
= nir_imm_intN_t(b
, 0x83ffu
, 16);
410 /* Exponent of floating-point values in the range [0.5, 1.0). */
411 nir_ssa_def
*exponent_value
= nir_imm_intN_t(b
, 0x3800u
, 16);
413 nir_ssa_def
*is_not_zero
= nir_fne(b
, abs_x
, zero
);
415 /* Significand return must be of the same type as the input, but the
416 * exponent must be a 32-bit integer.
420 nir_iadd(b
, nir_ushr(b
, abs_x
, exponent_shift
),
421 nir_bcsel(b
, is_not_zero
, exponent_bias
, zero
)));
423 return nir_ior(b
, nir_iand(b
, x
, sign_mantissa_mask
),
424 nir_bcsel(b
, is_not_zero
, exponent_value
, zero
));
428 build_frexp32(nir_builder
*b
, nir_ssa_def
*x
, nir_ssa_def
**exponent
)
430 nir_ssa_def
*abs_x
= nir_fabs(b
, x
);
431 nir_ssa_def
*zero
= nir_imm_float(b
, 0.0f
);
433 /* Single-precision floating-point values are stored as
438 * An exponent shift of 23 will shift the mantissa out, leaving only the
439 * exponent and sign bit (which itself may be zero, if the absolute value
440 * was taken before the bitcast and shift.
442 nir_ssa_def
*exponent_shift
= nir_imm_int(b
, 23);
443 nir_ssa_def
*exponent_bias
= nir_imm_int(b
, -126);
445 nir_ssa_def
*sign_mantissa_mask
= nir_imm_int(b
, 0x807fffffu
);
447 /* Exponent of floating-point values in the range [0.5, 1.0). */
448 nir_ssa_def
*exponent_value
= nir_imm_int(b
, 0x3f000000u
);
450 nir_ssa_def
*is_not_zero
= nir_fne(b
, abs_x
, zero
);
453 nir_iadd(b
, nir_ushr(b
, abs_x
, exponent_shift
),
454 nir_bcsel(b
, is_not_zero
, exponent_bias
, zero
));
456 return nir_ior(b
, nir_iand(b
, x
, sign_mantissa_mask
),
457 nir_bcsel(b
, is_not_zero
, exponent_value
, zero
));
461 build_frexp64(nir_builder
*b
, nir_ssa_def
*x
, nir_ssa_def
**exponent
)
463 nir_ssa_def
*abs_x
= nir_fabs(b
, x
);
464 nir_ssa_def
*zero
= nir_imm_double(b
, 0.0);
465 nir_ssa_def
*zero32
= nir_imm_float(b
, 0.0f
);
467 /* Double-precision floating-point values are stored as
472 * We only need to deal with the exponent so first we extract the upper 32
473 * bits using nir_unpack_64_2x32_split_y.
475 nir_ssa_def
*upper_x
= nir_unpack_64_2x32_split_y(b
, x
);
476 nir_ssa_def
*abs_upper_x
= nir_unpack_64_2x32_split_y(b
, abs_x
);
478 /* An exponent shift of 20 will shift the remaining mantissa bits out,
479 * leaving only the exponent and sign bit (which itself may be zero, if the
480 * absolute value was taken before the bitcast and shift.
482 nir_ssa_def
*exponent_shift
= nir_imm_int(b
, 20);
483 nir_ssa_def
*exponent_bias
= nir_imm_int(b
, -1022);
485 nir_ssa_def
*sign_mantissa_mask
= nir_imm_int(b
, 0x800fffffu
);
487 /* Exponent of floating-point values in the range [0.5, 1.0). */
488 nir_ssa_def
*exponent_value
= nir_imm_int(b
, 0x3fe00000u
);
490 nir_ssa_def
*is_not_zero
= nir_fne(b
, abs_x
, zero
);
493 nir_iadd(b
, nir_ushr(b
, abs_upper_x
, exponent_shift
),
494 nir_bcsel(b
, is_not_zero
, exponent_bias
, zero32
));
496 nir_ssa_def
*new_upper
=
497 nir_ior(b
, nir_iand(b
, upper_x
, sign_mantissa_mask
),
498 nir_bcsel(b
, is_not_zero
, exponent_value
, zero32
));
500 nir_ssa_def
*lower_x
= nir_unpack_64_2x32_split_x(b
, x
);
502 return nir_pack_64_2x32_split(b
, lower_x
, new_upper
);
506 vtn_nir_alu_op_for_spirv_glsl_opcode(struct vtn_builder
*b
,
507 enum GLSLstd450 opcode
)
510 case GLSLstd450Round
: return nir_op_fround_even
;
511 case GLSLstd450RoundEven
: return nir_op_fround_even
;
512 case GLSLstd450Trunc
: return nir_op_ftrunc
;
513 case GLSLstd450FAbs
: return nir_op_fabs
;
514 case GLSLstd450SAbs
: return nir_op_iabs
;
515 case GLSLstd450FSign
: return nir_op_fsign
;
516 case GLSLstd450SSign
: return nir_op_isign
;
517 case GLSLstd450Floor
: return nir_op_ffloor
;
518 case GLSLstd450Ceil
: return nir_op_fceil
;
519 case GLSLstd450Fract
: return nir_op_ffract
;
520 case GLSLstd450Sin
: return nir_op_fsin
;
521 case GLSLstd450Cos
: return nir_op_fcos
;
522 case GLSLstd450Pow
: return nir_op_fpow
;
523 case GLSLstd450Exp2
: return nir_op_fexp2
;
524 case GLSLstd450Log2
: return nir_op_flog2
;
525 case GLSLstd450Sqrt
: return nir_op_fsqrt
;
526 case GLSLstd450InverseSqrt
: return nir_op_frsq
;
527 case GLSLstd450NMin
: return nir_op_fmin
;
528 case GLSLstd450FMin
: return nir_op_fmin
;
529 case GLSLstd450UMin
: return nir_op_umin
;
530 case GLSLstd450SMin
: return nir_op_imin
;
531 case GLSLstd450NMax
: return nir_op_fmax
;
532 case GLSLstd450FMax
: return nir_op_fmax
;
533 case GLSLstd450UMax
: return nir_op_umax
;
534 case GLSLstd450SMax
: return nir_op_imax
;
535 case GLSLstd450FMix
: return nir_op_flrp
;
536 case GLSLstd450Fma
: return nir_op_ffma
;
537 case GLSLstd450Ldexp
: return nir_op_ldexp
;
538 case GLSLstd450FindILsb
: return nir_op_find_lsb
;
539 case GLSLstd450FindSMsb
: return nir_op_ifind_msb
;
540 case GLSLstd450FindUMsb
: return nir_op_ufind_msb
;
542 /* Packing/Unpacking functions */
543 case GLSLstd450PackSnorm4x8
: return nir_op_pack_snorm_4x8
;
544 case GLSLstd450PackUnorm4x8
: return nir_op_pack_unorm_4x8
;
545 case GLSLstd450PackSnorm2x16
: return nir_op_pack_snorm_2x16
;
546 case GLSLstd450PackUnorm2x16
: return nir_op_pack_unorm_2x16
;
547 case GLSLstd450PackHalf2x16
: return nir_op_pack_half_2x16
;
548 case GLSLstd450PackDouble2x32
: return nir_op_pack_64_2x32
;
549 case GLSLstd450UnpackSnorm4x8
: return nir_op_unpack_snorm_4x8
;
550 case GLSLstd450UnpackUnorm4x8
: return nir_op_unpack_unorm_4x8
;
551 case GLSLstd450UnpackSnorm2x16
: return nir_op_unpack_snorm_2x16
;
552 case GLSLstd450UnpackUnorm2x16
: return nir_op_unpack_unorm_2x16
;
553 case GLSLstd450UnpackHalf2x16
: return nir_op_unpack_half_2x16
;
554 case GLSLstd450UnpackDouble2x32
: return nir_op_unpack_64_2x32
;
557 vtn_fail("No NIR equivalent");
561 #define NIR_IMM_FP(n, v) (nir_imm_floatN_t(n, v, src[0]->bit_size))
564 handle_glsl450_alu(struct vtn_builder
*b
, enum GLSLstd450 entrypoint
,
565 const uint32_t *w
, unsigned count
)
567 struct nir_builder
*nb
= &b
->nb
;
568 const struct glsl_type
*dest_type
=
569 vtn_value(b
, w
[1], vtn_value_type_type
)->type
->type
;
571 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_ssa
);
572 val
->ssa
= vtn_create_ssa_value(b
, dest_type
);
574 /* Collect the various SSA sources */
575 unsigned num_inputs
= count
- 5;
576 nir_ssa_def
*src
[3] = { NULL
, };
577 for (unsigned i
= 0; i
< num_inputs
; i
++) {
578 /* These are handled specially below */
579 if (vtn_untyped_value(b
, w
[i
+ 5])->value_type
== vtn_value_type_pointer
)
582 src
[i
] = vtn_ssa_value(b
, w
[i
+ 5])->def
;
585 switch (entrypoint
) {
586 case GLSLstd450Radians
:
587 val
->ssa
->def
= nir_radians(nb
, src
[0]);
589 case GLSLstd450Degrees
:
590 val
->ssa
->def
= nir_degrees(nb
, src
[0]);
593 val
->ssa
->def
= nir_fdiv(nb
, nir_fsin(nb
, src
[0]),
594 nir_fcos(nb
, src
[0]));
597 case GLSLstd450Modf
: {
598 nir_ssa_def
*sign
= nir_fsign(nb
, src
[0]);
599 nir_ssa_def
*abs
= nir_fabs(nb
, src
[0]);
600 val
->ssa
->def
= nir_fmul(nb
, sign
, nir_ffract(nb
, abs
));
601 nir_store_deref(nb
, vtn_nir_deref(b
, w
[6]),
602 nir_fmul(nb
, sign
, nir_ffloor(nb
, abs
)), 0xf);
606 case GLSLstd450ModfStruct
: {
607 nir_ssa_def
*sign
= nir_fsign(nb
, src
[0]);
608 nir_ssa_def
*abs
= nir_fabs(nb
, src
[0]);
609 vtn_assert(glsl_type_is_struct(val
->ssa
->type
));
610 val
->ssa
->elems
[0]->def
= nir_fmul(nb
, sign
, nir_ffract(nb
, abs
));
611 val
->ssa
->elems
[1]->def
= nir_fmul(nb
, sign
, nir_ffloor(nb
, abs
));
616 val
->ssa
->def
= nir_sge(nb
, src
[1], src
[0]);
619 case GLSLstd450Length
:
620 val
->ssa
->def
= nir_fast_length(nb
, src
[0]);
622 case GLSLstd450Distance
:
623 val
->ssa
->def
= nir_fast_distance(nb
, src
[0], src
[1]);
625 case GLSLstd450Normalize
:
626 val
->ssa
->def
= nir_fast_normalize(nb
, src
[0]);
630 val
->ssa
->def
= build_exp(nb
, src
[0]);
634 val
->ssa
->def
= build_log(nb
, src
[0]);
637 case GLSLstd450FClamp
:
638 case GLSLstd450NClamp
:
639 val
->ssa
->def
= nir_fclamp(nb
, src
[0], src
[1], src
[2]);
641 case GLSLstd450UClamp
:
642 val
->ssa
->def
= nir_uclamp(nb
, src
[0], src
[1], src
[2]);
644 case GLSLstd450SClamp
:
645 val
->ssa
->def
= nir_iclamp(nb
, src
[0], src
[1], src
[2]);
648 case GLSLstd450Cross
: {
649 val
->ssa
->def
= nir_cross3(nb
, src
[0], src
[1]);
653 case GLSLstd450SmoothStep
: {
654 val
->ssa
->def
= nir_smoothstep(nb
, src
[0], src
[1], src
[2]);
658 case GLSLstd450FaceForward
:
660 nir_bcsel(nb
, nir_flt(nb
, nir_fdot(nb
, src
[2], src
[1]),
661 NIR_IMM_FP(nb
, 0.0)),
662 src
[0], nir_fneg(nb
, src
[0]));
665 case GLSLstd450Reflect
:
666 /* I - 2 * dot(N, I) * N */
668 nir_fsub(nb
, src
[0], nir_fmul(nb
, NIR_IMM_FP(nb
, 2.0),
669 nir_fmul(nb
, nir_fdot(nb
, src
[0], src
[1]),
673 case GLSLstd450Refract
: {
674 nir_ssa_def
*I
= src
[0];
675 nir_ssa_def
*N
= src
[1];
676 nir_ssa_def
*eta
= src
[2];
677 nir_ssa_def
*n_dot_i
= nir_fdot(nb
, N
, I
);
678 nir_ssa_def
*one
= NIR_IMM_FP(nb
, 1.0);
679 nir_ssa_def
*zero
= NIR_IMM_FP(nb
, 0.0);
680 /* According to the SPIR-V and GLSL specs, eta is always a float
681 * regardless of the type of the other operands. However in practice it
682 * seems that if you try to pass it a float then glslang will just
683 * promote it to a double and generate invalid SPIR-V. In order to
684 * support a hypothetical fixed version of glslang we’ll promote eta to
685 * double if the other operands are double also.
687 if (I
->bit_size
!= eta
->bit_size
) {
688 nir_op conversion_op
=
689 nir_type_conversion_op(nir_type_float
| eta
->bit_size
,
690 nir_type_float
| I
->bit_size
,
691 nir_rounding_mode_undef
);
692 eta
= nir_build_alu(nb
, conversion_op
, eta
, NULL
, NULL
, NULL
);
694 /* k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I)) */
696 nir_fsub(nb
, one
, nir_fmul(nb
, eta
, nir_fmul(nb
, eta
,
697 nir_fsub(nb
, one
, nir_fmul(nb
, n_dot_i
, n_dot_i
)))));
698 nir_ssa_def
*result
=
699 nir_fsub(nb
, nir_fmul(nb
, eta
, I
),
700 nir_fmul(nb
, nir_fadd(nb
, nir_fmul(nb
, eta
, n_dot_i
),
701 nir_fsqrt(nb
, k
)), N
));
702 /* XXX: bcsel, or if statement? */
703 val
->ssa
->def
= nir_bcsel(nb
, nir_flt(nb
, k
, zero
), zero
, result
);
708 /* 0.5 * (e^x - e^(-x)) */
710 nir_fmul_imm(nb
, nir_fsub(nb
, build_exp(nb
, src
[0]),
711 build_exp(nb
, nir_fneg(nb
, src
[0]))),
716 /* 0.5 * (e^x + e^(-x)) */
718 nir_fmul_imm(nb
, nir_fadd(nb
, build_exp(nb
, src
[0]),
719 build_exp(nb
, nir_fneg(nb
, src
[0]))),
723 case GLSLstd450Tanh
: {
724 /* tanh(x) := (0.5 * (e^x - e^(-x))) / (0.5 * (e^x + e^(-x)))
726 * With a little algebra this reduces to (e^2x - 1) / (e^2x + 1)
728 * We clamp x to (-inf, +10] to avoid precision problems. When x > 10,
729 * e^2x is so much larger than 1.0 that 1.0 gets flushed to zero in the
730 * computation e^2x +/- 1 so it can be ignored.
732 * For 16-bit precision we clamp x to (-inf, +4.2] since the maximum
733 * representable number is only 65,504 and e^(2*6) exceeds that. Also,
734 * if x > 4.2, tanh(x) will return 1.0 in fp16.
736 const uint32_t bit_size
= src
[0]->bit_size
;
737 const double clamped_x
= bit_size
> 16 ? 10.0 : 4.2;
738 nir_ssa_def
*x
= nir_fmin(nb
, src
[0],
739 nir_imm_floatN_t(nb
, clamped_x
, bit_size
));
740 nir_ssa_def
*exp2x
= build_exp(nb
, nir_fmul_imm(nb
, x
, 2.0));
741 val
->ssa
->def
= nir_fdiv(nb
, nir_fadd_imm(nb
, exp2x
, -1.0),
742 nir_fadd_imm(nb
, exp2x
, 1.0));
746 case GLSLstd450Asinh
:
747 val
->ssa
->def
= nir_fmul(nb
, nir_fsign(nb
, src
[0]),
748 build_log(nb
, nir_fadd(nb
, nir_fabs(nb
, src
[0]),
749 nir_fsqrt(nb
, nir_fadd_imm(nb
, nir_fmul(nb
, src
[0], src
[0]),
752 case GLSLstd450Acosh
:
753 val
->ssa
->def
= build_log(nb
, nir_fadd(nb
, src
[0],
754 nir_fsqrt(nb
, nir_fadd_imm(nb
, nir_fmul(nb
, src
[0], src
[0]),
757 case GLSLstd450Atanh
: {
758 nir_ssa_def
*one
= nir_imm_floatN_t(nb
, 1.0, src
[0]->bit_size
);
760 nir_fmul_imm(nb
, build_log(nb
, nir_fdiv(nb
, nir_fadd(nb
, src
[0], one
),
761 nir_fsub(nb
, one
, src
[0]))),
767 val
->ssa
->def
= build_asin(nb
, src
[0], 0.086566724, -0.03102955);
772 nir_fsub(nb
, nir_imm_floatN_t(nb
, M_PI_2f
, src
[0]->bit_size
),
773 build_asin(nb
, src
[0], 0.08132463, -0.02363318));
777 val
->ssa
->def
= build_atan(nb
, src
[0]);
780 case GLSLstd450Atan2
:
781 val
->ssa
->def
= build_atan2(nb
, src
[0], src
[1]);
784 case GLSLstd450Frexp
: {
785 nir_ssa_def
*exponent
;
786 if (src
[0]->bit_size
== 64)
787 val
->ssa
->def
= build_frexp64(nb
, src
[0], &exponent
);
788 else if (src
[0]->bit_size
== 32)
789 val
->ssa
->def
= build_frexp32(nb
, src
[0], &exponent
);
791 val
->ssa
->def
= build_frexp16(nb
, src
[0], &exponent
);
792 nir_store_deref(nb
, vtn_nir_deref(b
, w
[6]), exponent
, 0xf);
796 case GLSLstd450FrexpStruct
: {
797 vtn_assert(glsl_type_is_struct(val
->ssa
->type
));
798 if (src
[0]->bit_size
== 64)
799 val
->ssa
->elems
[0]->def
= build_frexp64(nb
, src
[0],
800 &val
->ssa
->elems
[1]->def
);
801 else if (src
[0]->bit_size
== 32)
802 val
->ssa
->elems
[0]->def
= build_frexp32(nb
, src
[0],
803 &val
->ssa
->elems
[1]->def
);
805 val
->ssa
->elems
[0]->def
= build_frexp16(nb
, src
[0],
806 &val
->ssa
->elems
[1]->def
);
812 nir_build_alu(&b
->nb
,
813 vtn_nir_alu_op_for_spirv_glsl_opcode(b
, entrypoint
),
814 src
[0], src
[1], src
[2], NULL
);
820 handle_glsl450_interpolation(struct vtn_builder
*b
, enum GLSLstd450 opcode
,
821 const uint32_t *w
, unsigned count
)
823 const struct glsl_type
*dest_type
=
824 vtn_value(b
, w
[1], vtn_value_type_type
)->type
->type
;
826 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_ssa
);
827 val
->ssa
= vtn_create_ssa_value(b
, dest_type
);
831 case GLSLstd450InterpolateAtCentroid
:
832 op
= nir_intrinsic_interp_deref_at_centroid
;
834 case GLSLstd450InterpolateAtSample
:
835 op
= nir_intrinsic_interp_deref_at_sample
;
837 case GLSLstd450InterpolateAtOffset
:
838 op
= nir_intrinsic_interp_deref_at_offset
;
841 vtn_fail("Invalid opcode");
844 nir_intrinsic_instr
*intrin
= nir_intrinsic_instr_create(b
->nb
.shader
, op
);
846 struct vtn_pointer
*ptr
=
847 vtn_value(b
, w
[5], vtn_value_type_pointer
)->pointer
;
848 nir_deref_instr
*deref
= vtn_pointer_to_deref(b
, ptr
);
850 /* If the value we are interpolating has an index into a vector then
851 * interpolate the vector and index the result of that instead. This is
852 * necessary because the index will get generated as a series of nir_bcsel
853 * instructions so it would no longer be an input variable.
855 const bool vec_array_deref
= deref
->deref_type
== nir_deref_type_array
&&
856 glsl_type_is_vector(nir_deref_instr_parent(deref
)->type
);
858 nir_deref_instr
*vec_deref
= NULL
;
859 if (vec_array_deref
) {
861 deref
= nir_deref_instr_parent(deref
);
863 intrin
->src
[0] = nir_src_for_ssa(&deref
->dest
.ssa
);
866 case GLSLstd450InterpolateAtCentroid
:
868 case GLSLstd450InterpolateAtSample
:
869 case GLSLstd450InterpolateAtOffset
:
870 intrin
->src
[1] = nir_src_for_ssa(vtn_ssa_value(b
, w
[6])->def
);
873 vtn_fail("Invalid opcode");
876 intrin
->num_components
= glsl_get_vector_elements(deref
->type
);
877 nir_ssa_dest_init(&intrin
->instr
, &intrin
->dest
,
878 glsl_get_vector_elements(deref
->type
),
879 glsl_get_bit_size(deref
->type
), NULL
);
881 nir_builder_instr_insert(&b
->nb
, &intrin
->instr
);
883 if (vec_array_deref
) {
885 if (nir_src_is_const(vec_deref
->arr
.index
)) {
886 val
->ssa
->def
= vtn_vector_extract(b
, &intrin
->dest
.ssa
,
887 nir_src_as_uint(vec_deref
->arr
.index
));
889 val
->ssa
->def
= vtn_vector_extract_dynamic(b
, &intrin
->dest
.ssa
,
890 vec_deref
->arr
.index
.ssa
);
893 val
->ssa
->def
= &intrin
->dest
.ssa
;
898 vtn_handle_glsl450_instruction(struct vtn_builder
*b
, SpvOp ext_opcode
,
899 const uint32_t *w
, unsigned count
)
901 switch ((enum GLSLstd450
)ext_opcode
) {
902 case GLSLstd450Determinant
: {
903 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_ssa
);
904 val
->ssa
= rzalloc(b
, struct vtn_ssa_value
);
905 val
->ssa
->type
= vtn_value(b
, w
[1], vtn_value_type_type
)->type
->type
;
906 val
->ssa
->def
= build_mat_det(b
, vtn_ssa_value(b
, w
[5]));
910 case GLSLstd450MatrixInverse
: {
911 struct vtn_value
*val
= vtn_push_value(b
, w
[2], vtn_value_type_ssa
);
912 val
->ssa
= matrix_inverse(b
, vtn_ssa_value(b
, w
[5]));
916 case GLSLstd450InterpolateAtCentroid
:
917 case GLSLstd450InterpolateAtSample
:
918 case GLSLstd450InterpolateAtOffset
:
919 handle_glsl450_interpolation(b
, ext_opcode
, w
, count
);
923 handle_glsl450_alu(b
, (enum GLSLstd450
)ext_opcode
, w
, count
);