1 /**************************************************************************
3 * Copyright 2009 VMware, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
30 * Helper functions for logical operations.
32 * @author Jose Fonseca <jfonseca@vmware.com>
36 #include "util/u_cpu_detect.h"
37 #include "util/u_memory.h"
38 #include "util/u_debug.h"
40 #include "lp_bld_type.h"
41 #include "lp_bld_const.h"
42 #include "lp_bld_init.h"
43 #include "lp_bld_intr.h"
44 #include "lp_bld_debug.h"
45 #include "lp_bld_logic.h"
51 * Selection with vector conditional like
53 * select <4 x i1> %C, %A, %B
55 * is valid IR (e.g. llvm/test/Assembler/vector-select.ll), but it is only
56 * supported on some backends (x86) starting with llvm 3.1.
58 * Expanding the boolean vector to full SIMD register width, as in
60 * sext <4 x i1> %C to <4 x i32>
62 * is valid and supported (e.g., llvm/test/CodeGen/X86/vec_compare.ll), but
63 * it causes assertion failures in LLVM 2.6. It appears to work correctly on
69 * Build code to compare two values 'a' and 'b' of 'type' using the given func.
70 * \param func one of PIPE_FUNC_x
71 * If the ordered argument is true the function will use LLVM's ordered
72 * comparisons, otherwise unordered comparisons will be used.
73 * The result values will be 0 for false or ~0 for true.
76 lp_build_compare_ext(struct gallivm_state
*gallivm
,
77 const struct lp_type type
,
83 LLVMBuilderRef builder
= gallivm
->builder
;
84 LLVMTypeRef int_vec_type
= lp_build_int_vec_type(gallivm
, type
);
85 LLVMValueRef zeros
= LLVMConstNull(int_vec_type
);
86 LLVMValueRef ones
= LLVMConstAllOnes(int_vec_type
);
90 assert(func
>= PIPE_FUNC_NEVER
);
91 assert(func
<= PIPE_FUNC_ALWAYS
);
92 assert(lp_check_value(type
, a
));
93 assert(lp_check_value(type
, b
));
95 if(func
== PIPE_FUNC_NEVER
)
97 if(func
== PIPE_FUNC_ALWAYS
)
101 LLVMRealPredicate op
;
103 case PIPE_FUNC_EQUAL
:
104 op
= ordered
? LLVMRealOEQ
: LLVMRealUEQ
;
106 case PIPE_FUNC_NOTEQUAL
:
107 op
= ordered
? LLVMRealONE
: LLVMRealUNE
;
110 op
= ordered
? LLVMRealOLT
: LLVMRealULT
;
112 case PIPE_FUNC_LEQUAL
:
113 op
= ordered
? LLVMRealOLE
: LLVMRealULE
;
115 case PIPE_FUNC_GREATER
:
116 op
= ordered
? LLVMRealOGT
: LLVMRealUGT
;
118 case PIPE_FUNC_GEQUAL
:
119 op
= ordered
? LLVMRealOGE
: LLVMRealUGE
;
123 return lp_build_undef(gallivm
, type
);
126 cond
= LLVMBuildFCmp(builder
, op
, a
, b
, "");
127 res
= LLVMBuildSExt(builder
, cond
, int_vec_type
, "");
132 case PIPE_FUNC_EQUAL
:
135 case PIPE_FUNC_NOTEQUAL
:
139 op
= type
.sign
? LLVMIntSLT
: LLVMIntULT
;
141 case PIPE_FUNC_LEQUAL
:
142 op
= type
.sign
? LLVMIntSLE
: LLVMIntULE
;
144 case PIPE_FUNC_GREATER
:
145 op
= type
.sign
? LLVMIntSGT
: LLVMIntUGT
;
147 case PIPE_FUNC_GEQUAL
:
148 op
= type
.sign
? LLVMIntSGE
: LLVMIntUGE
;
152 return lp_build_undef(gallivm
, type
);
155 cond
= LLVMBuildICmp(builder
, op
, a
, b
, "");
156 res
= LLVMBuildSExt(builder
, cond
, int_vec_type
, "");
163 * Build code to compare two values 'a' and 'b' of 'type' using the given func.
164 * \param func one of PIPE_FUNC_x
165 * The result values will be 0 for false or ~0 for true.
168 lp_build_compare(struct gallivm_state
*gallivm
,
169 const struct lp_type type
,
174 LLVMTypeRef int_vec_type
= lp_build_int_vec_type(gallivm
, type
);
175 LLVMValueRef zeros
= LLVMConstNull(int_vec_type
);
176 LLVMValueRef ones
= LLVMConstAllOnes(int_vec_type
);
178 assert(func
>= PIPE_FUNC_NEVER
);
179 assert(func
<= PIPE_FUNC_ALWAYS
);
180 assert(lp_check_value(type
, a
));
181 assert(lp_check_value(type
, b
));
183 if(func
== PIPE_FUNC_NEVER
)
185 if(func
== PIPE_FUNC_ALWAYS
)
188 #if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
190 * There are no unsigned integer comparison instructions in SSE.
193 if (!type
.floating
&& !type
.sign
&&
194 type
.width
* type
.length
== 128 &&
195 util_cpu_caps
.has_sse2
&&
196 (func
== PIPE_FUNC_LESS
||
197 func
== PIPE_FUNC_LEQUAL
||
198 func
== PIPE_FUNC_GREATER
||
199 func
== PIPE_FUNC_GEQUAL
) &&
200 (gallivm_debug
& GALLIVM_DEBUG_PERF
)) {
201 debug_printf("%s: inefficient <%u x i%u> unsigned comparison\n",
202 __FUNCTION__
, type
.length
, type
.width
);
206 return lp_build_compare_ext(gallivm
, type
, func
, a
, b
, FALSE
);
210 * Build code to compare two values 'a' and 'b' using the given func.
211 * \param func one of PIPE_FUNC_x
212 * If the operands are floating point numbers, the function will use
213 * ordered comparison which means that it will return true if both
214 * operands are not a NaN and the specified condition evaluates to true.
215 * The result values will be 0 for false or ~0 for true.
218 lp_build_cmp_ordered(struct lp_build_context
*bld
,
223 return lp_build_compare_ext(bld
->gallivm
, bld
->type
, func
, a
, b
, TRUE
);
227 * Build code to compare two values 'a' and 'b' using the given func.
228 * \param func one of PIPE_FUNC_x
229 * If the operands are floating point numbers, the function will use
230 * unordered comparison which means that it will return true if either
231 * operand is a NaN or the specified condition evaluates to true.
232 * The result values will be 0 for false or ~0 for true.
235 lp_build_cmp(struct lp_build_context
*bld
,
240 return lp_build_compare(bld
->gallivm
, bld
->type
, func
, a
, b
);
245 * Return (mask & a) | (~mask & b);
248 lp_build_select_bitwise(struct lp_build_context
*bld
,
253 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
254 struct lp_type type
= bld
->type
;
257 assert(lp_check_value(type
, a
));
258 assert(lp_check_value(type
, b
));
265 LLVMTypeRef int_vec_type
= lp_build_int_vec_type(bld
->gallivm
, type
);
266 a
= LLVMBuildBitCast(builder
, a
, int_vec_type
, "");
267 b
= LLVMBuildBitCast(builder
, b
, int_vec_type
, "");
270 a
= LLVMBuildAnd(builder
, a
, mask
, "");
272 /* This often gets translated to PANDN, but sometimes the NOT is
273 * pre-computed and stored in another constant. The best strategy depends
274 * on available registers, so it is not a big deal -- hopefully LLVM does
275 * the right decision attending the rest of the program.
277 b
= LLVMBuildAnd(builder
, b
, LLVMBuildNot(builder
, mask
, ""), "");
279 res
= LLVMBuildOr(builder
, a
, b
, "");
282 LLVMTypeRef vec_type
= lp_build_vec_type(bld
->gallivm
, type
);
283 res
= LLVMBuildBitCast(builder
, res
, vec_type
, "");
291 * Return mask ? a : b;
293 * mask is a bitwise mask, composed of 0 or ~0 for each element. Any other value
294 * will yield unpredictable results.
297 lp_build_select(struct lp_build_context
*bld
,
302 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
303 LLVMContextRef lc
= bld
->gallivm
->context
;
304 struct lp_type type
= bld
->type
;
307 assert(lp_check_value(type
, a
));
308 assert(lp_check_value(type
, b
));
313 if (type
.length
== 1) {
314 mask
= LLVMBuildTrunc(builder
, mask
, LLVMInt1TypeInContext(lc
), "");
315 res
= LLVMBuildSelect(builder
, mask
, a
, b
, "");
318 /* Generate a vector select.
320 * XXX: Using vector selects would avoid emitting intrinsics, but they aren't
321 * properly supported yet.
323 * LLVM 3.1 supports it, but it yields buggy code (e.g. lp_blend_test).
325 * LLVM 3.0 includes experimental support provided the -promote-elements
326 * options is passed to LLVM's command line (e.g., via
327 * llvm::cl::ParseCommandLineOptions), but resulting code quality is much
328 * worse, probably because some optimization passes don't know how to
329 * handle vector selects.
332 * - http://lists.cs.uiuc.edu/pipermail/llvmdev/2011-October/043659.html
335 /* Convert the mask to a vector of booleans.
336 * XXX: There are two ways to do this. Decide what's best.
339 LLVMTypeRef bool_vec_type
= LLVMVectorType(LLVMInt1TypeInContext(lc
), type
.length
);
340 mask
= LLVMBuildTrunc(builder
, mask
, bool_vec_type
, "");
342 mask
= LLVMBuildICmp(builder
, LLVMIntNE
, mask
, LLVMConstNull(bld
->int_vec_type
), "");
344 res
= LLVMBuildSelect(builder
, mask
, a
, b
, "");
346 else if (((util_cpu_caps
.has_sse4_1
&&
347 type
.width
* type
.length
== 128) ||
348 (util_cpu_caps
.has_avx
&&
349 type
.width
* type
.length
== 256 && type
.width
>= 32)) &&
350 !LLVMIsConstant(a
) &&
351 !LLVMIsConstant(b
) &&
352 !LLVMIsConstant(mask
)) {
353 const char *intrinsic
;
354 LLVMTypeRef arg_type
;
355 LLVMValueRef args
[3];
358 * There's only float blend in AVX but can just cast i32/i64
361 if (type
.width
* type
.length
== 256) {
362 if (type
.width
== 64) {
363 intrinsic
= "llvm.x86.avx.blendv.pd.256";
364 arg_type
= LLVMVectorType(LLVMDoubleTypeInContext(lc
), 4);
367 intrinsic
= "llvm.x86.avx.blendv.ps.256";
368 arg_type
= LLVMVectorType(LLVMFloatTypeInContext(lc
), 8);
371 else if (type
.floating
&&
373 intrinsic
= "llvm.x86.sse41.blendvpd";
374 arg_type
= LLVMVectorType(LLVMDoubleTypeInContext(lc
), 2);
375 } else if (type
.floating
&&
377 intrinsic
= "llvm.x86.sse41.blendvps";
378 arg_type
= LLVMVectorType(LLVMFloatTypeInContext(lc
), 4);
380 intrinsic
= "llvm.x86.sse41.pblendvb";
381 arg_type
= LLVMVectorType(LLVMInt8TypeInContext(lc
), 16);
384 if (arg_type
!= bld
->int_vec_type
) {
385 mask
= LLVMBuildBitCast(builder
, mask
, arg_type
, "");
388 if (arg_type
!= bld
->vec_type
) {
389 a
= LLVMBuildBitCast(builder
, a
, arg_type
, "");
390 b
= LLVMBuildBitCast(builder
, b
, arg_type
, "");
397 res
= lp_build_intrinsic(builder
, intrinsic
,
398 arg_type
, args
, Elements(args
));
400 if (arg_type
!= bld
->vec_type
) {
401 res
= LLVMBuildBitCast(builder
, res
, bld
->vec_type
, "");
405 res
= lp_build_select_bitwise(bld
, mask
, a
, b
);
413 * Return mask ? a : b;
415 * mask is a TGSI_WRITEMASK_xxx.
418 lp_build_select_aos(struct lp_build_context
*bld
,
422 unsigned num_channels
)
424 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
425 const struct lp_type type
= bld
->type
;
426 const unsigned n
= type
.length
;
429 assert((mask
& ~0xf) == 0);
430 assert(lp_check_value(type
, a
));
431 assert(lp_check_value(type
, b
));
435 if((mask
& 0xf) == 0xf)
437 if((mask
& 0xf) == 0x0)
439 if(a
== bld
->undef
|| b
== bld
->undef
)
443 * There are two major ways of accomplishing this:
447 * The flip between these is empirical and might need to be adjusted.
453 LLVMTypeRef elem_type
= LLVMInt32TypeInContext(bld
->gallivm
->context
);
454 LLVMValueRef shuffles
[LP_MAX_VECTOR_LENGTH
];
456 for(j
= 0; j
< n
; j
+= num_channels
)
457 for(i
= 0; i
< num_channels
; ++i
)
458 shuffles
[j
+ i
] = LLVMConstInt(elem_type
,
459 (mask
& (1 << i
) ? 0 : n
) + j
+ i
,
462 return LLVMBuildShuffleVector(builder
, a
, b
, LLVMConstVector(shuffles
, n
), "");
465 LLVMValueRef mask_vec
= lp_build_const_mask_aos(bld
->gallivm
, type
, mask
, num_channels
);
466 return lp_build_select(bld
, mask_vec
, a
, b
);
472 * Return (scalar-cast)val ? true : false;
475 lp_build_any_true_range(struct lp_build_context
*bld
,
476 unsigned real_length
,
479 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
480 LLVMTypeRef scalar_type
;
481 LLVMTypeRef true_type
;
483 assert(real_length
<= bld
->type
.length
);
485 true_type
= LLVMIntTypeInContext(bld
->gallivm
->context
,
486 bld
->type
.width
* real_length
);
487 scalar_type
= LLVMIntTypeInContext(bld
->gallivm
->context
,
488 bld
->type
.width
* bld
->type
.length
);
489 val
= LLVMBuildBitCast(builder
, val
, scalar_type
, "");
491 * We're using always native types so we can use intrinsics.
492 * However, if we don't do per-element calculations, we must ensure
493 * the excess elements aren't used since they may contain garbage.
495 if (real_length
< bld
->type
.length
) {
496 val
= LLVMBuildTrunc(builder
, val
, true_type
, "");
498 return LLVMBuildICmp(builder
, LLVMIntNE
,
499 val
, LLVMConstNull(true_type
), "");