spirv: update arguments for vtn_nir_alu_op_for_spirv_opcode()
[mesa.git] / src / compiler / spirv / vtn_alu.c
1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "vtn_private.h"
25
26 /*
27 * Normally, column vectors in SPIR-V correspond to a single NIR SSA
28 * definition. But for matrix multiplies, we want to do one routine for
29 * multiplying a matrix by a matrix and then pretend that vectors are matrices
30 * with one column. So we "wrap" these things, and unwrap the result before we
31 * send it off.
32 */
33
34 static struct vtn_ssa_value *
35 wrap_matrix(struct vtn_builder *b, struct vtn_ssa_value *val)
36 {
37 if (val == NULL)
38 return NULL;
39
40 if (glsl_type_is_matrix(val->type))
41 return val;
42
43 struct vtn_ssa_value *dest = rzalloc(b, struct vtn_ssa_value);
44 dest->type = val->type;
45 dest->elems = ralloc_array(b, struct vtn_ssa_value *, 1);
46 dest->elems[0] = val;
47
48 return dest;
49 }
50
51 static struct vtn_ssa_value *
52 unwrap_matrix(struct vtn_ssa_value *val)
53 {
54 if (glsl_type_is_matrix(val->type))
55 return val;
56
57 return val->elems[0];
58 }
59
/* General matrix-times-matrix multiply.  Vector operands are wrapped as
 * one-column matrices first (see wrap_matrix), so this single routine also
 * covers matrix*vector and vector*matrix; the result is unwrapped before it
 * is returned.  Uses previously-recorded transposes of the operands, when
 * available, to pick a cheaper formulation.
 */
static struct vtn_ssa_value *
matrix_multiply(struct vtn_builder *b,
                struct vtn_ssa_value *_src0, struct vtn_ssa_value *_src1)
{

   struct vtn_ssa_value *src0 = wrap_matrix(b, _src0);
   struct vtn_ssa_value *src1 = wrap_matrix(b, _src1);
   /* Lazily-computed transposes recorded on the original values, if any. */
   struct vtn_ssa_value *src0_transpose = wrap_matrix(b, _src0->transposed);
   struct vtn_ssa_value *src1_transpose = wrap_matrix(b, _src1->transposed);

   unsigned src0_rows = glsl_get_vector_elements(src0->type);
   unsigned src0_columns = glsl_get_matrix_columns(src0->type);
   unsigned src1_columns = glsl_get_matrix_columns(src1->type);

   /* Result has src0's rows and src1's columns; a single result column is a
    * plain vector type rather than a 1-column matrix.
    */
   const struct glsl_type *dest_type;
   if (src1_columns > 1) {
      dest_type = glsl_matrix_type(glsl_get_base_type(src0->type),
                                   src0_rows, src1_columns);
   } else {
      dest_type = glsl_vector_type(glsl_get_base_type(src0->type), src0_rows);
   }
   struct vtn_ssa_value *dest = vtn_create_ssa_value(b, dest_type);

   dest = wrap_matrix(b, dest);

   bool transpose_result = false;
   if (src0_transpose && src1_transpose) {
      /* transpose(A) * transpose(B) = transpose(B * A) */
      src1 = src0_transpose;
      src0 = src1_transpose;
      src0_transpose = NULL;
      src1_transpose = NULL;
      transpose_result = true;
   }

   if (src0_transpose && !src1_transpose &&
       glsl_get_base_type(src0->type) == GLSL_TYPE_FLOAT) {
      /* We already have the rows of src0 and the columns of src1 available,
       * so we can just take the dot product of each row with each column to
       * get the result.
       */

      for (unsigned i = 0; i < src1_columns; i++) {
         nir_ssa_def *vec_src[4];
         for (unsigned j = 0; j < src0_rows; j++) {
            vec_src[j] = nir_fdot(&b->nb, src0_transpose->elems[j]->def,
                                  src1->elems[i]->def);
         }
         dest->elems[i]->def = nir_vec(&b->nb, vec_src, src0_rows);
      }
   } else {
      /* We don't handle the case where src1 is transposed but not src0, since
       * the general case only uses individual components of src1 so the
       * optimizer should chew through the transpose we emitted for src1.
       */

      for (unsigned i = 0; i < src1_columns; i++) {
         /* dest[i] = sum(src0[j] * src1[i][j] for all j) */
         dest->elems[i]->def =
            nir_fmul(&b->nb, src0->elems[0]->def,
                     nir_channel(&b->nb, src1->elems[i]->def, 0));
         for (unsigned j = 1; j < src0_columns; j++) {
            dest->elems[i]->def =
               nir_fadd(&b->nb, dest->elems[i]->def,
                        nir_fmul(&b->nb, src0->elems[j]->def,
                                 nir_channel(&b->nb, src1->elems[i]->def, j)));
         }
      }
   }

   dest = unwrap_matrix(dest);

   /* If we computed B * A above, transpose to get the requested A * B. */
   if (transpose_result)
      dest = vtn_ssa_transpose(b, dest);

   return dest;
}
137
138 static struct vtn_ssa_value *
139 mat_times_scalar(struct vtn_builder *b,
140 struct vtn_ssa_value *mat,
141 nir_ssa_def *scalar)
142 {
143 struct vtn_ssa_value *dest = vtn_create_ssa_value(b, mat->type);
144 for (unsigned i = 0; i < glsl_get_matrix_columns(mat->type); i++) {
145 if (glsl_base_type_is_integer(glsl_get_base_type(mat->type)))
146 dest->elems[i]->def = nir_imul(&b->nb, mat->elems[i]->def, scalar);
147 else
148 dest->elems[i]->def = nir_fmul(&b->nb, mat->elems[i]->def, scalar);
149 }
150
151 return dest;
152 }
153
/* Handle the ALU opcodes that can take matrix operands.  Per-column NIR ops
 * are emitted for the element-wise cases; the multiply cases dispatch to
 * matrix_multiply()/mat_times_scalar().  The result is stored in dest->ssa.
 */
static void
vtn_handle_matrix_alu(struct vtn_builder *b, SpvOp opcode,
                      struct vtn_value *dest,
                      struct vtn_ssa_value *src0, struct vtn_ssa_value *src1)
{
   switch (opcode) {
   case SpvOpFNegate: {
      /* Element-wise: negate each column vector. */
      dest->ssa = vtn_create_ssa_value(b, src0->type);
      unsigned cols = glsl_get_matrix_columns(src0->type);
      for (unsigned i = 0; i < cols; i++)
         dest->ssa->elems[i]->def = nir_fneg(&b->nb, src0->elems[i]->def);
      break;
   }

   case SpvOpFAdd: {
      /* Element-wise: add corresponding columns. */
      dest->ssa = vtn_create_ssa_value(b, src0->type);
      unsigned cols = glsl_get_matrix_columns(src0->type);
      for (unsigned i = 0; i < cols; i++)
         dest->ssa->elems[i]->def =
            nir_fadd(&b->nb, src0->elems[i]->def, src1->elems[i]->def);
      break;
   }

   case SpvOpFSub: {
      /* Element-wise: subtract corresponding columns. */
      dest->ssa = vtn_create_ssa_value(b, src0->type);
      unsigned cols = glsl_get_matrix_columns(src0->type);
      for (unsigned i = 0; i < cols; i++)
         dest->ssa->elems[i]->def =
            nir_fsub(&b->nb, src0->elems[i]->def, src1->elems[i]->def);
      break;
   }

   case SpvOpTranspose:
      dest->ssa = vtn_ssa_transpose(b, src0);
      break;

   case SpvOpMatrixTimesScalar:
      /* If src0 is itself a pending transpose, scale the untransposed matrix
       * and transpose the result instead — scalar multiply commutes with
       * transposition.
       */
      if (src0->transposed) {
         dest->ssa = vtn_ssa_transpose(b, mat_times_scalar(b, src0->transposed,
                                                           src1->def));
      } else {
         dest->ssa = mat_times_scalar(b, src0, src1->def);
      }
      break;

   case SpvOpVectorTimesMatrix:
   case SpvOpMatrixTimesVector:
   case SpvOpMatrixTimesMatrix:
      /* vector * matrix == transpose(matrix) * vector, which lets all three
       * forms share matrix_multiply().
       */
      if (opcode == SpvOpVectorTimesMatrix) {
         dest->ssa = matrix_multiply(b, vtn_ssa_transpose(b, src1), src0);
      } else {
         dest->ssa = matrix_multiply(b, src0, src1);
      }
      break;

   default: vtn_fail("unknown matrix opcode");
   }
}
212
/* Implement SpvOpBitcast from "src" into "dest".  Same component count is a
 * plain move; otherwise the total bit count must match and components are
 * split (64 -> 2x32) or packed (2x32 -> 64).  Only the 32<->64 bit cases are
 * supported here, as the asserts below enforce.
 */
static void
vtn_handle_bitcast(struct vtn_builder *b, struct vtn_ssa_value *dest,
                   struct nir_ssa_def *src)
{
   if (glsl_get_vector_elements(dest->type) == src->num_components) {
      /* From the definition of OpBitcast in the SPIR-V 1.2 spec:
       *
       * "If Result Type has the same number of components as Operand, they
       * must also have the same component width, and results are computed per
       * component."
       */
      dest->def = nir_imov(&b->nb, src);
      return;
   }

   /* From the definition of OpBitcast in the SPIR-V 1.2 spec:
    *
    * "If Result Type has a different number of components than Operand, the
    * total number of bits in Result Type must equal the total number of bits
    * in Operand. Let L be the type, either Result Type or Operand’s type, that
    * has the larger number of components. Let S be the other type, with the
    * smaller number of components. The number of components in L must be an
    * integer multiple of the number of components in S. The first component
    * (that is, the only or lowest-numbered component) of S maps to the first
    * components of L, and so on, up to the last component of S mapping to the
    * last components of L. Within this mapping, any single component of S
    * (mapping to multiple components of L) maps its lower-ordered bits to the
    * lower-numbered components of L."
    */
   unsigned src_bit_size = src->bit_size;
   unsigned dest_bit_size = glsl_get_bit_size(dest->type);
   unsigned src_components = src->num_components;
   unsigned dest_components = glsl_get_vector_elements(dest->type);
   vtn_assert(src_bit_size * src_components == dest_bit_size * dest_components);

   /* dest is a GLSL vector, so dest_components <= 4 and this array is big
    * enough for every channel we produce below.
    */
   nir_ssa_def *dest_chan[4];
   if (src_bit_size > dest_bit_size) {
      /* Splitting: each wide source channel yields "divisor" narrow ones,
       * low-order bits first.
       */
      vtn_assert(src_bit_size % dest_bit_size == 0);
      unsigned divisor = src_bit_size / dest_bit_size;
      for (unsigned comp = 0; comp < src_components; comp++) {
         /* Only the 64 -> 2x32 split is implemented. */
         vtn_assert(src_bit_size == 64);
         vtn_assert(dest_bit_size == 32);
         nir_ssa_def *split =
            nir_unpack_64_2x32(&b->nb, nir_channel(&b->nb, src, comp));
         for (unsigned i = 0; i < divisor; i++)
            dest_chan[divisor * comp + i] = nir_channel(&b->nb, split, i);
      }
   } else {
      /* Packing: "divisor" consecutive narrow source channels form one wide
       * destination channel; the channel mask selects that group.
       */
      vtn_assert(dest_bit_size % src_bit_size == 0);
      unsigned divisor = dest_bit_size / src_bit_size;
      for (unsigned comp = 0; comp < dest_components; comp++) {
         unsigned channels = ((1 << divisor) - 1) << (comp * divisor);
         nir_ssa_def *src_chan =
            nir_channels(&b->nb, src, channels);
         /* Only the 2x32 -> 64 pack is implemented. */
         vtn_assert(dest_bit_size == 64);
         vtn_assert(src_bit_size == 32);
         dest_chan[comp] = nir_pack_64_2x32(&b->nb, src_chan);
      }
   }
   dest->def = nir_vec(&b->nb, dest_chan, dest_components);
}
274
/* Map a SPIR-V ALU opcode to the corresponding NIR opcode.
 *
 * *swap is set to true when the chosen NIR opcode expects its first two
 * operands in the opposite order from the SPIR-V instruction (greater-than
 * and less-than-or-equal are implemented via the flipped comparison).
 *
 * src_bit_size / dst_bit_size are only consulted for the conversion opcodes,
 * where the NIR opcode depends on both operand widths.  Opcodes with no NIR
 * equivalent report failure via vtn_fail().
 */
nir_op
vtn_nir_alu_op_for_spirv_opcode(struct vtn_builder *b,
                                SpvOp opcode, bool *swap,
                                unsigned src_bit_size, unsigned dst_bit_size)
{
   /* Indicates that the first two arguments should be swapped. This is
    * used for implementing greater-than and less-than-or-equal.
    */
   *swap = false;

   switch (opcode) {
   case SpvOpSNegate: return nir_op_ineg;
   case SpvOpFNegate: return nir_op_fneg;
   case SpvOpNot: return nir_op_inot;
   case SpvOpIAdd: return nir_op_iadd;
   case SpvOpFAdd: return nir_op_fadd;
   case SpvOpISub: return nir_op_isub;
   case SpvOpFSub: return nir_op_fsub;
   case SpvOpIMul: return nir_op_imul;
   case SpvOpFMul: return nir_op_fmul;
   case SpvOpUDiv: return nir_op_udiv;
   case SpvOpSDiv: return nir_op_idiv;
   case SpvOpFDiv: return nir_op_fdiv;
   case SpvOpUMod: return nir_op_umod;
   case SpvOpSMod: return nir_op_imod;
   case SpvOpFMod: return nir_op_fmod;
   case SpvOpSRem: return nir_op_irem;
   case SpvOpFRem: return nir_op_frem;

   case SpvOpShiftRightLogical: return nir_op_ushr;
   case SpvOpShiftRightArithmetic: return nir_op_ishr;
   case SpvOpShiftLeftLogical: return nir_op_ishl;
   case SpvOpLogicalOr: return nir_op_ior;
   case SpvOpLogicalEqual: return nir_op_ieq;
   case SpvOpLogicalNotEqual: return nir_op_ine;
   case SpvOpLogicalAnd: return nir_op_iand;
   case SpvOpLogicalNot: return nir_op_inot;
   case SpvOpBitwiseOr: return nir_op_ior;
   case SpvOpBitwiseXor: return nir_op_ixor;
   case SpvOpBitwiseAnd: return nir_op_iand;
   case SpvOpSelect: return nir_op_bcsel;
   case SpvOpIEqual: return nir_op_ieq;

   case SpvOpBitFieldInsert: return nir_op_bitfield_insert;
   case SpvOpBitFieldSExtract: return nir_op_ibitfield_extract;
   case SpvOpBitFieldUExtract: return nir_op_ubitfield_extract;
   case SpvOpBitReverse: return nir_op_bitfield_reverse;
   case SpvOpBitCount: return nir_op_bit_count;

   /* The ordered / unordered operators need special implementation besides
    * the logical operator to use since they also need to check if operands are
    * ordered.
    */
   case SpvOpFOrdEqual: return nir_op_feq;
   case SpvOpFUnordEqual: return nir_op_feq;
   case SpvOpINotEqual: return nir_op_ine;
   case SpvOpFOrdNotEqual: return nir_op_fne;
   case SpvOpFUnordNotEqual: return nir_op_fne;
   case SpvOpULessThan: return nir_op_ult;
   case SpvOpSLessThan: return nir_op_ilt;
   case SpvOpFOrdLessThan: return nir_op_flt;
   case SpvOpFUnordLessThan: return nir_op_flt;
   case SpvOpUGreaterThan: *swap = true; return nir_op_ult;
   case SpvOpSGreaterThan: *swap = true; return nir_op_ilt;
   case SpvOpFOrdGreaterThan: *swap = true; return nir_op_flt;
   case SpvOpFUnordGreaterThan: *swap = true; return nir_op_flt;
   case SpvOpULessThanEqual: *swap = true; return nir_op_uge;
   case SpvOpSLessThanEqual: *swap = true; return nir_op_ige;
   case SpvOpFOrdLessThanEqual: *swap = true; return nir_op_fge;
   case SpvOpFUnordLessThanEqual: *swap = true; return nir_op_fge;
   case SpvOpUGreaterThanEqual: return nir_op_uge;
   case SpvOpSGreaterThanEqual: return nir_op_ige;
   case SpvOpFOrdGreaterThanEqual: return nir_op_fge;
   case SpvOpFUnordGreaterThanEqual: return nir_op_fge;

   /* Conversions: */
   case SpvOpQuantizeToF16: return nir_op_fquantize2f16;
   case SpvOpUConvert:
   case SpvOpConvertFToU:
   case SpvOpConvertFToS:
   case SpvOpConvertSToF:
   case SpvOpConvertUToF:
   case SpvOpSConvert:
   case SpvOpFConvert: {
      /* Conversions pick the NIR op from the (type, bit-size) pair of both
       * source and destination via nir_type_conversion_op().
       */
      nir_alu_type src_type;
      nir_alu_type dst_type;

      switch (opcode) {
      case SpvOpConvertFToS:
         src_type = nir_type_float;
         dst_type = nir_type_int;
         break;
      case SpvOpConvertFToU:
         src_type = nir_type_float;
         dst_type = nir_type_uint;
         break;
      case SpvOpFConvert:
         src_type = dst_type = nir_type_float;
         break;
      case SpvOpConvertSToF:
         src_type = nir_type_int;
         dst_type = nir_type_float;
         break;
      case SpvOpSConvert:
         src_type = dst_type = nir_type_int;
         break;
      case SpvOpConvertUToF:
         src_type = nir_type_uint;
         dst_type = nir_type_float;
         break;
      case SpvOpUConvert:
         src_type = dst_type = nir_type_uint;
         break;
      default:
         unreachable("Invalid opcode");
      }
      /* nir_alu_type encodes the bit size in its low bits, so OR it in. */
      src_type |= src_bit_size;
      dst_type |= dst_bit_size;
      return nir_type_conversion_op(src_type, dst_type, nir_rounding_mode_undef);
   }
   /* Derivatives: */
   case SpvOpDPdx: return nir_op_fddx;
   case SpvOpDPdy: return nir_op_fddy;
   case SpvOpDPdxFine: return nir_op_fddx_fine;
   case SpvOpDPdyFine: return nir_op_fddy_fine;
   case SpvOpDPdxCoarse: return nir_op_fddx_coarse;
   case SpvOpDPdyCoarse: return nir_op_fddy_coarse;

   default:
      vtn_fail("No NIR equivalent");
   }
}
407
408 static void
409 handle_no_contraction(struct vtn_builder *b, struct vtn_value *val, int member,
410 const struct vtn_decoration *dec, void *_void)
411 {
412 vtn_assert(dec->scope == VTN_DEC_DECORATION);
413 if (dec->decoration != SpvDecorationNoContraction)
414 return;
415
416 b->nb.exact = true;
417 }
418
419 static void
420 handle_rounding_mode(struct vtn_builder *b, struct vtn_value *val, int member,
421 const struct vtn_decoration *dec, void *_out_rounding_mode)
422 {
423 nir_rounding_mode *out_rounding_mode = _out_rounding_mode;
424 assert(dec->scope == VTN_DEC_DECORATION);
425 if (dec->decoration != SpvDecorationFPRoundingMode)
426 return;
427 switch (dec->literals[0]) {
428 case SpvFPRoundingModeRTE:
429 *out_rounding_mode = nir_rounding_mode_rtne;
430 break;
431 case SpvFPRoundingModeRTZ:
432 *out_rounding_mode = nir_rounding_mode_rtz;
433 break;
434 default:
435 unreachable("Not supported rounding mode");
436 break;
437 }
438 }
439
/* Top-level handler for SPIR-V ALU instructions.  "w" points at the
 * instruction words: w[1] is the result type id, w[2] the result id, and
 * w[3..] the operand ids.  Matrix operands are routed to
 * vtn_handle_matrix_alu(); everything else is lowered to NIR here, with a
 * generic opcode-table fallback in the default case.
 */
void
vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
               const uint32_t *w, unsigned count)
{
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
   const struct glsl_type *type =
      vtn_value(b, w[1], vtn_value_type_type)->type->type;

   /* May set b->nb.exact; it is cleared again before every return below. */
   vtn_foreach_decoration(b, val, handle_no_contraction, NULL);

   /* Collect the various SSA sources */
   const unsigned num_inputs = count - 3;
   struct vtn_ssa_value *vtn_src[4] = { NULL, };
   for (unsigned i = 0; i < num_inputs; i++)
      vtn_src[i] = vtn_ssa_value(b, w[i + 3]);

   if (glsl_type_is_matrix(vtn_src[0]->type) ||
       (num_inputs >= 2 && glsl_type_is_matrix(vtn_src[1]->type))) {
      vtn_handle_matrix_alu(b, opcode, val, vtn_src[0], vtn_src[1]);
      b->nb.exact = false;
      return;
   }

   val->ssa = vtn_create_ssa_value(b, type);
   nir_ssa_def *src[4] = { NULL, };
   for (unsigned i = 0; i < num_inputs; i++) {
      vtn_assert(glsl_type_is_vector_or_scalar(vtn_src[i]->type));
      src[i] = vtn_src[i]->def;
   }

   switch (opcode) {
   case SpvOpAny:
      /* OpAny on a scalar is the identity; on a vector it is "any component
       * != false", lowered with the bany_inequal* opcodes.
       */
      if (src[0]->num_components == 1) {
         val->ssa->def = nir_imov(&b->nb, src[0]);
      } else {
         nir_op op;
         switch (src[0]->num_components) {
         case 2:  op = nir_op_bany_inequal2; break;
         case 3:  op = nir_op_bany_inequal3; break;
         case 4:  op = nir_op_bany_inequal4; break;
         default: vtn_fail("invalid number of components");
         }
         val->ssa->def = nir_build_alu(&b->nb, op, src[0],
                                       nir_imm_int(&b->nb, NIR_FALSE),
                                       NULL, NULL);
      }
      break;

   case SpvOpAll:
      /* OpAll on a scalar is the identity; on a vector it is "every
       * component == true", lowered with the ball_iequal* opcodes.
       */
      if (src[0]->num_components == 1) {
         val->ssa->def = nir_imov(&b->nb, src[0]);
      } else {
         nir_op op;
         switch (src[0]->num_components) {
         case 2:  op = nir_op_ball_iequal2;  break;
         case 3:  op = nir_op_ball_iequal3;  break;
         case 4:  op = nir_op_ball_iequal4;  break;
         default: vtn_fail("invalid number of components");
         }
         val->ssa->def = nir_build_alu(&b->nb, op, src[0],
                                       nir_imm_int(&b->nb, NIR_TRUE),
                                       NULL, NULL);
      }
      break;

   case SpvOpOuterProduct: {
      /* Column i of the result is src0 scaled by component i of src1. */
      for (unsigned i = 0; i < src[1]->num_components; i++) {
         val->ssa->elems[i]->def =
            nir_fmul(&b->nb, src[0], nir_channel(&b->nb, src[1], i));
      }
      break;
   }

   case SpvOpDot:
      val->ssa->def = nir_fdot(&b->nb, src[0], src[1]);
      break;

   /* The extended-arithmetic opcodes return a two-member struct:
    * elems[0] is the low result, elems[1] the carry/borrow/high bits.
    */
   case SpvOpIAddCarry:
      vtn_assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = nir_iadd(&b->nb, src[0], src[1]);
      val->ssa->elems[1]->def = nir_uadd_carry(&b->nb, src[0], src[1]);
      break;

   case SpvOpISubBorrow:
      vtn_assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = nir_isub(&b->nb, src[0], src[1]);
      val->ssa->elems[1]->def = nir_usub_borrow(&b->nb, src[0], src[1]);
      break;

   case SpvOpUMulExtended:
      vtn_assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = nir_imul(&b->nb, src[0], src[1]);
      val->ssa->elems[1]->def = nir_umul_high(&b->nb, src[0], src[1]);
      break;

   case SpvOpSMulExtended:
      vtn_assert(glsl_type_is_struct(val->ssa->type));
      val->ssa->elems[0]->def = nir_imul(&b->nb, src[0], src[1]);
      val->ssa->elems[1]->def = nir_imul_high(&b->nb, src[0], src[1]);
      break;

   /* Fwidth is |d/dx| + |d/dy| at the requested derivative precision. */
   case SpvOpFwidth:
      val->ssa->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy(&b->nb, src[0])));
      break;
   case SpvOpFwidthFine:
      val->ssa->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx_fine(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy_fine(&b->nb, src[0])));
      break;
   case SpvOpFwidthCoarse:
      val->ssa->def = nir_fadd(&b->nb,
                               nir_fabs(&b->nb, nir_fddx_coarse(&b->nb, src[0])),
                               nir_fabs(&b->nb, nir_fddy_coarse(&b->nb, src[0])));
      break;

   case SpvOpVectorTimesScalar:
      /* The builder will take care of splatting for us. */
      val->ssa->def = nir_fmul(&b->nb, src[0], src[1]);
      break;

   case SpvOpIsNan:
      /* NaN is the only value not equal to itself. */
      val->ssa->def = nir_fne(&b->nb, src[0], src[0]);
      break;

   case SpvOpIsInf:
      val->ssa->def = nir_ieq(&b->nb, nir_fabs(&b->nb, src[0]),
                              nir_imm_float(&b->nb, INFINITY));
      break;

   case SpvOpFUnordEqual:
   case SpvOpFUnordNotEqual:
   case SpvOpFUnordLessThan:
   case SpvOpFUnordGreaterThan:
   case SpvOpFUnordLessThanEqual:
   case SpvOpFUnordGreaterThanEqual: {
      bool swap;
      unsigned src_bit_size = glsl_get_bit_size(vtn_src[0]->type);
      unsigned dst_bit_size = glsl_get_bit_size(type);
      nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                  src_bit_size, dst_bit_size);

      if (swap) {
         nir_ssa_def *tmp = src[0];
         src[0] = src[1];
         src[1] = tmp;
      }

      /* Unordered comparisons are true when either operand is NaN, so OR
       * the base comparison with per-operand NaN checks.
       */
      val->ssa->def =
         nir_ior(&b->nb,
                 nir_build_alu(&b->nb, op, src[0], src[1], NULL, NULL),
                 nir_ior(&b->nb,
                         nir_fne(&b->nb, src[0], src[0]),
                         nir_fne(&b->nb, src[1], src[1])));
      break;
   }

   case SpvOpFOrdEqual:
   case SpvOpFOrdNotEqual:
   case SpvOpFOrdLessThan:
   case SpvOpFOrdGreaterThan:
   case SpvOpFOrdLessThanEqual:
   case SpvOpFOrdGreaterThanEqual: {
      bool swap;
      unsigned src_bit_size = glsl_get_bit_size(vtn_src[0]->type);
      unsigned dst_bit_size = glsl_get_bit_size(type);
      nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                  src_bit_size, dst_bit_size);

      if (swap) {
         nir_ssa_def *tmp = src[0];
         src[0] = src[1];
         src[1] = tmp;
      }

      /* Ordered comparisons require both operands to be non-NaN, so AND
       * the base comparison with per-operand self-equality checks.
       */
      val->ssa->def =
         nir_iand(&b->nb,
                  nir_build_alu(&b->nb, op, src[0], src[1], NULL, NULL),
                  nir_iand(&b->nb,
                           nir_feq(&b->nb, src[0], src[0]),
                           nir_feq(&b->nb, src[1], src[1])));
      break;
   }

   case SpvOpBitcast:
      vtn_handle_bitcast(b, val->ssa, src[0]);
      break;

   case SpvOpFConvert: {
      /* FConvert is handled separately from the generic conversion path so
       * an FPRoundingMode decoration on the result can be honored.
       */
      nir_alu_type src_alu_type = nir_get_nir_type_for_glsl_type(vtn_src[0]->type);
      nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(type);
      nir_rounding_mode rounding_mode = nir_rounding_mode_undef;

      vtn_foreach_decoration(b, val, handle_rounding_mode, &rounding_mode);
      nir_op op = nir_type_conversion_op(src_alu_type, dst_alu_type, rounding_mode);

      /* src[1] is NULL here (FConvert has one operand); the extra NULL
       * sources are ignored for unary opcodes.
       */
      val->ssa->def = nir_build_alu(&b->nb, op, src[0], src[1], NULL, NULL);
      break;
   }

   default: {
      /* Generic path: translate the opcode via the lookup helper, swap the
       * operands if the NIR opcode is the reversed comparison, and emit.
       */
      bool swap;
      unsigned src_bit_size = glsl_get_bit_size(vtn_src[0]->type);
      unsigned dst_bit_size = glsl_get_bit_size(type);
      nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                  src_bit_size, dst_bit_size);

      if (swap) {
         nir_ssa_def *tmp = src[0];
         src[0] = src[1];
         src[1] = tmp;
      }

      val->ssa->def = nir_build_alu(&b->nb, op, src[0], src[1], src[2], src[3]);
      break;
   } /* default */
   }

   /* Reset the NoContraction state for the next instruction. */
   b->nb.exact = false;
}