1 /**************************************************************************
3 * Copyright 2009 VMware, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
30 * Helper functions for logical operations.
32 * @author Jose Fonseca <jfonseca@vmware.com>
36 #include "util/u_cpu_detect.h"
37 #include "util/u_memory.h"
38 #include "util/u_debug.h"
40 #include "lp_bld_type.h"
41 #include "lp_bld_const.h"
42 #include "lp_bld_swizzle.h"
43 #include "lp_bld_init.h"
44 #include "lp_bld_intr.h"
45 #include "lp_bld_debug.h"
46 #include "lp_bld_logic.h"
52 * Selection with vector conditional like
54 * select <4 x i1> %C, %A, %B
56 * is valid IR (e.g. llvm/test/Assembler/vector-select.ll), but it is only
57 * supported on some backends (x86) starting with llvm 3.1.
59 * Expanding the boolean vector to full SIMD register width, as in
61 * sext <4 x i1> %C to <4 x i32>
63 * is valid and supported (e.g., llvm/test/CodeGen/X86/vec_compare.ll), but
64 * it causes assertion failures in LLVM 2.6. It appears to work correctly on
70 * Build code to compare two values 'a' and 'b' of 'type' using the given func.
71 * \param func one of PIPE_FUNC_x
72 * If the ordered argument is true the function will use LLVM's ordered
73 * comparisons, otherwise unordered comparisons will be used.
74 * The result values will be 0 for false or ~0 for true.
77 lp_build_compare_ext(struct gallivm_state
*gallivm
,
78 const struct lp_type type
,
84 LLVMBuilderRef builder
= gallivm
->builder
;
85 LLVMTypeRef int_vec_type
= lp_build_int_vec_type(gallivm
, type
);
86 LLVMValueRef zeros
= LLVMConstNull(int_vec_type
);
87 LLVMValueRef ones
= LLVMConstAllOnes(int_vec_type
);
91 assert(lp_check_value(type
, a
));
92 assert(lp_check_value(type
, b
));
94 if(func
== PIPE_FUNC_NEVER
)
96 if(func
== PIPE_FUNC_ALWAYS
)
99 assert(func
> PIPE_FUNC_NEVER
);
100 assert(func
< PIPE_FUNC_ALWAYS
);
103 LLVMRealPredicate op
;
105 case PIPE_FUNC_EQUAL
:
106 op
= ordered
? LLVMRealOEQ
: LLVMRealUEQ
;
108 case PIPE_FUNC_NOTEQUAL
:
109 op
= ordered
? LLVMRealONE
: LLVMRealUNE
;
112 op
= ordered
? LLVMRealOLT
: LLVMRealULT
;
114 case PIPE_FUNC_LEQUAL
:
115 op
= ordered
? LLVMRealOLE
: LLVMRealULE
;
117 case PIPE_FUNC_GREATER
:
118 op
= ordered
? LLVMRealOGT
: LLVMRealUGT
;
120 case PIPE_FUNC_GEQUAL
:
121 op
= ordered
? LLVMRealOGE
: LLVMRealUGE
;
125 return lp_build_undef(gallivm
, type
);
128 cond
= LLVMBuildFCmp(builder
, op
, a
, b
, "");
129 res
= LLVMBuildSExt(builder
, cond
, int_vec_type
, "");
134 case PIPE_FUNC_EQUAL
:
137 case PIPE_FUNC_NOTEQUAL
:
141 op
= type
.sign
? LLVMIntSLT
: LLVMIntULT
;
143 case PIPE_FUNC_LEQUAL
:
144 op
= type
.sign
? LLVMIntSLE
: LLVMIntULE
;
146 case PIPE_FUNC_GREATER
:
147 op
= type
.sign
? LLVMIntSGT
: LLVMIntUGT
;
149 case PIPE_FUNC_GEQUAL
:
150 op
= type
.sign
? LLVMIntSGE
: LLVMIntUGE
;
154 return lp_build_undef(gallivm
, type
);
157 cond
= LLVMBuildICmp(builder
, op
, a
, b
, "");
158 res
= LLVMBuildSExt(builder
, cond
, int_vec_type
, "");
165 * Build code to compare two values 'a' and 'b' of 'type' using the given func.
166 * \param func one of PIPE_FUNC_x
167 * The result values will be 0 for false or ~0 for true.
170 lp_build_compare(struct gallivm_state
*gallivm
,
171 const struct lp_type type
,
176 LLVMTypeRef int_vec_type
= lp_build_int_vec_type(gallivm
, type
);
177 LLVMValueRef zeros
= LLVMConstNull(int_vec_type
);
178 LLVMValueRef ones
= LLVMConstAllOnes(int_vec_type
);
180 assert(lp_check_value(type
, a
));
181 assert(lp_check_value(type
, b
));
183 if(func
== PIPE_FUNC_NEVER
)
185 if(func
== PIPE_FUNC_ALWAYS
)
188 assert(func
> PIPE_FUNC_NEVER
);
189 assert(func
< PIPE_FUNC_ALWAYS
);
191 #if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
193 * There are no unsigned integer comparison instructions in SSE.
196 if (!type
.floating
&& !type
.sign
&&
197 type
.width
* type
.length
== 128 &&
198 util_cpu_caps
.has_sse2
&&
199 (func
== PIPE_FUNC_LESS
||
200 func
== PIPE_FUNC_LEQUAL
||
201 func
== PIPE_FUNC_GREATER
||
202 func
== PIPE_FUNC_GEQUAL
) &&
203 (gallivm_debug
& GALLIVM_DEBUG_PERF
)) {
204 debug_printf("%s: inefficient <%u x i%u> unsigned comparison\n",
205 __FUNCTION__
, type
.length
, type
.width
);
209 return lp_build_compare_ext(gallivm
, type
, func
, a
, b
, FALSE
);
213 * Build code to compare two values 'a' and 'b' using the given func.
214 * \param func one of PIPE_FUNC_x
215 * If the operands are floating point numbers, the function will use
216 * ordered comparison which means that it will return true if both
217 * operands are not a NaN and the specified condition evaluates to true.
218 * The result values will be 0 for false or ~0 for true.
221 lp_build_cmp_ordered(struct lp_build_context
*bld
,
226 return lp_build_compare_ext(bld
->gallivm
, bld
->type
, func
, a
, b
, TRUE
);
230 * Build code to compare two values 'a' and 'b' using the given func.
231 * \param func one of PIPE_FUNC_x
232 * If the operands are floating point numbers, the function will use
233 * unordered comparison which means that it will return true if either
234 * operand is a NaN or the specified condition evaluates to true.
235 * The result values will be 0 for false or ~0 for true.
238 lp_build_cmp(struct lp_build_context
*bld
,
243 return lp_build_compare(bld
->gallivm
, bld
->type
, func
, a
, b
);
248 * Return (mask & a) | (~mask & b);
251 lp_build_select_bitwise(struct lp_build_context
*bld
,
256 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
257 struct lp_type type
= bld
->type
;
260 assert(lp_check_value(type
, a
));
261 assert(lp_check_value(type
, b
));
268 LLVMTypeRef int_vec_type
= lp_build_int_vec_type(bld
->gallivm
, type
);
269 a
= LLVMBuildBitCast(builder
, a
, int_vec_type
, "");
270 b
= LLVMBuildBitCast(builder
, b
, int_vec_type
, "");
273 a
= LLVMBuildAnd(builder
, a
, mask
, "");
275 /* This often gets translated to PANDN, but sometimes the NOT is
276 * pre-computed and stored in another constant. The best strategy depends
277 * on available registers, so it is not a big deal -- hopefully LLVM does
278 * the right decision attending the rest of the program.
280 b
= LLVMBuildAnd(builder
, b
, LLVMBuildNot(builder
, mask
, ""), "");
282 res
= LLVMBuildOr(builder
, a
, b
, "");
285 LLVMTypeRef vec_type
= lp_build_vec_type(bld
->gallivm
, type
);
286 res
= LLVMBuildBitCast(builder
, res
, vec_type
, "");
294 * Return mask ? a : b;
296 * mask is a bitwise mask, composed of 0 or ~0 for each element. Any other value
297 * will yield unpredictable results.
300 lp_build_select(struct lp_build_context
*bld
,
305 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
306 LLVMContextRef lc
= bld
->gallivm
->context
;
307 struct lp_type type
= bld
->type
;
310 assert(lp_check_value(type
, a
));
311 assert(lp_check_value(type
, b
));
316 if (type
.length
== 1) {
317 mask
= LLVMBuildTrunc(builder
, mask
, LLVMInt1TypeInContext(lc
), "");
318 res
= LLVMBuildSelect(builder
, mask
, a
, b
, "");
320 else if (!(HAVE_LLVM
== 0x0307) &&
321 (LLVMIsConstant(mask
) ||
322 LLVMGetInstructionOpcode(mask
) == LLVMSExt
)) {
323 /* Generate a vector select.
325 * Using vector selects should avoid emitting intrinsics hence avoid
326 * hindering optimization passes, but vector selects weren't properly
327 * supported yet for a long time, and LLVM will generate poor code when
328 * the mask is not the result of a comparison.
329 * Also, llvm 3.7 may miscompile them (bug 94972).
332 /* Convert the mask to a vector of booleans.
334 * XXX: In x86 the mask is controlled by the MSB, so if we shifted the
335 * mask by `type.width - 1`, LLVM should realize the mask is ready. Alas
336 * what really happens is that LLVM will emit two shifts back to back.
339 LLVMValueRef shift
= LLVMConstInt(bld
->int_elem_type
, bld
->type
.width
- 1, 0);
340 shift
= lp_build_broadcast(bld
->gallivm
, bld
->int_vec_type
, shift
);
341 mask
= LLVMBuildLShr(builder
, mask
, shift
, "");
343 LLVMTypeRef bool_vec_type
= LLVMVectorType(LLVMInt1TypeInContext(lc
), type
.length
);
344 mask
= LLVMBuildTrunc(builder
, mask
, bool_vec_type
, "");
346 res
= LLVMBuildSelect(builder
, mask
, a
, b
, "");
348 else if (((util_cpu_caps
.has_sse4_1
&&
349 type
.width
* type
.length
== 128) ||
350 (util_cpu_caps
.has_avx
&&
351 type
.width
* type
.length
== 256 && type
.width
>= 32) ||
352 (util_cpu_caps
.has_avx2
&&
353 type
.width
* type
.length
== 256)) &&
354 !LLVMIsConstant(a
) &&
355 !LLVMIsConstant(b
) &&
356 !LLVMIsConstant(mask
)) {
357 const char *intrinsic
;
358 LLVMTypeRef arg_type
;
359 LLVMValueRef args
[3];
362 * There's only float blend in AVX but can just cast i32/i64
365 if (type
.width
* type
.length
== 256) {
366 if (type
.width
== 64) {
367 intrinsic
= "llvm.x86.avx.blendv.pd.256";
368 arg_type
= LLVMVectorType(LLVMDoubleTypeInContext(lc
), 4);
370 else if (type
.width
== 32) {
371 intrinsic
= "llvm.x86.avx.blendv.ps.256";
372 arg_type
= LLVMVectorType(LLVMFloatTypeInContext(lc
), 8);
374 assert(util_cpu_caps
.has_avx2
);
375 intrinsic
= "llvm.x86.avx2.pblendvb";
376 arg_type
= LLVMVectorType(LLVMInt8TypeInContext(lc
), 32);
379 else if (type
.floating
&&
381 intrinsic
= "llvm.x86.sse41.blendvpd";
382 arg_type
= LLVMVectorType(LLVMDoubleTypeInContext(lc
), 2);
383 } else if (type
.floating
&&
385 intrinsic
= "llvm.x86.sse41.blendvps";
386 arg_type
= LLVMVectorType(LLVMFloatTypeInContext(lc
), 4);
388 intrinsic
= "llvm.x86.sse41.pblendvb";
389 arg_type
= LLVMVectorType(LLVMInt8TypeInContext(lc
), 16);
392 if (arg_type
!= bld
->int_vec_type
) {
393 mask
= LLVMBuildBitCast(builder
, mask
, arg_type
, "");
396 if (arg_type
!= bld
->vec_type
) {
397 a
= LLVMBuildBitCast(builder
, a
, arg_type
, "");
398 b
= LLVMBuildBitCast(builder
, b
, arg_type
, "");
405 res
= lp_build_intrinsic(builder
, intrinsic
,
406 arg_type
, args
, ARRAY_SIZE(args
), 0);
408 if (arg_type
!= bld
->vec_type
) {
409 res
= LLVMBuildBitCast(builder
, res
, bld
->vec_type
, "");
413 res
= lp_build_select_bitwise(bld
, mask
, a
, b
);
421 * Return mask ? a : b;
423 * mask is a TGSI_WRITEMASK_xxx.
426 lp_build_select_aos(struct lp_build_context
*bld
,
430 unsigned num_channels
)
432 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
433 const struct lp_type type
= bld
->type
;
434 const unsigned n
= type
.length
;
437 assert((mask
& ~0xf) == 0);
438 assert(lp_check_value(type
, a
));
439 assert(lp_check_value(type
, b
));
443 if((mask
& 0xf) == 0xf)
445 if((mask
& 0xf) == 0x0)
447 if(a
== bld
->undef
|| b
== bld
->undef
)
451 * There are two major ways of accomplishing this:
455 * The flip between these is empirical and might need to be adjusted.
461 LLVMTypeRef elem_type
= LLVMInt32TypeInContext(bld
->gallivm
->context
);
462 LLVMValueRef shuffles
[LP_MAX_VECTOR_LENGTH
];
464 for(j
= 0; j
< n
; j
+= num_channels
)
465 for(i
= 0; i
< num_channels
; ++i
)
466 shuffles
[j
+ i
] = LLVMConstInt(elem_type
,
467 (mask
& (1 << i
) ? 0 : n
) + j
+ i
,
470 return LLVMBuildShuffleVector(builder
, a
, b
, LLVMConstVector(shuffles
, n
), "");
473 LLVMValueRef mask_vec
= lp_build_const_mask_aos(bld
->gallivm
, type
, mask
, num_channels
);
474 return lp_build_select(bld
, mask_vec
, a
, b
);
480 * Return (scalar-cast)val ? true : false;
483 lp_build_any_true_range(struct lp_build_context
*bld
,
484 unsigned real_length
,
487 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
488 LLVMTypeRef scalar_type
;
489 LLVMTypeRef true_type
;
491 assert(real_length
<= bld
->type
.length
);
493 true_type
= LLVMIntTypeInContext(bld
->gallivm
->context
,
494 bld
->type
.width
* real_length
);
495 scalar_type
= LLVMIntTypeInContext(bld
->gallivm
->context
,
496 bld
->type
.width
* bld
->type
.length
);
497 val
= LLVMBuildBitCast(builder
, val
, scalar_type
, "");
499 * We're using always native types so we can use intrinsics.
500 * However, if we don't do per-element calculations, we must ensure
501 * the excess elements aren't used since they may contain garbage.
503 if (real_length
< bld
->type
.length
) {
504 val
= LLVMBuildTrunc(builder
, val
, true_type
, "");
506 return LLVMBuildICmp(builder
, LLVMIntNE
,
507 val
, LLVMConstNull(true_type
), "");