1 /**************************************************************************
3 * Copyright 2009-2010 VMware, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
30 * Depth/stencil testing to LLVM IR translation.
32 * To be done accurately/efficiently the depth/stencil test must be done with
33 * the same type/format of the depth/stencil buffer, which implies massaging
34 * the incoming depths to fit into place. Using a more straightforward
35 * type/format for depth/stencil values internally and only convert when
36 * flushing would avoid this, but it would most likely result in depth fighting issues.
39 * Since we're using linear layout for everything, but we need to deal with
40 * 2x2 quads, we need to load/store multiple values and swizzle them into
41 * place (we could avoid this by doing depth/stencil testing in linear format,
42 * which would be easy for late depth/stencil test as we could do that after
43 * the fragment shader loop just as we do for color buffers, but more tricky
44 * for early depth test as we'd need both masks and interpolated depth in
48 * @author Jose Fonseca <jfonseca@vmware.com>
49 * @author Brian Paul <brianp@vmware.com>
52 #include "pipe/p_state.h"
53 #include "util/format/u_format.h"
54 #include "util/u_cpu_detect.h"
56 #include "gallivm/lp_bld_type.h"
57 #include "gallivm/lp_bld_arit.h"
58 #include "gallivm/lp_bld_bitarit.h"
59 #include "gallivm/lp_bld_const.h"
60 #include "gallivm/lp_bld_conv.h"
61 #include "gallivm/lp_bld_logic.h"
62 #include "gallivm/lp_bld_flow.h"
63 #include "gallivm/lp_bld_intr.h"
64 #include "gallivm/lp_bld_debug.h"
65 #include "gallivm/lp_bld_swizzle.h"
66 #include "gallivm/lp_bld_pack.h"
68 #include "lp_bld_depth.h"
71 /** Used to select fields from pipe_stencil_state */
81 * Do the stencil test comparison (compare FB stencil values against ref value).
82 * This will be used twice when generating two-sided stencil code.
83 * \param stencil the front/back stencil state
84 * \param stencilRef the stencil reference value, replicated as a vector
85 * \param stencilVals vector of stencil values from framebuffer
86 * \return vector mask of pass/fail values (~0 or 0)
89 lp_build_stencil_test_single(struct lp_build_context
*bld
,
90 const struct pipe_stencil_state
*stencil
,
91 LLVMValueRef stencilRef
,
92 LLVMValueRef stencilVals
)
94 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
95 const unsigned stencilMax
= 255; /* XXX fix */
96 struct lp_type type
= bld
->type
;
100 * SSE2 has intrinsics for signed comparisons, but not unsigned ones. Values
101 * are between 0..255 so ensure we generate the fastest comparisons for
104 if (type
.width
<= 8) {
110 assert(stencil
->enabled
);
112 if (stencil
->valuemask
!= stencilMax
) {
113 /* compute stencilRef = stencilRef & valuemask */
114 LLVMValueRef valuemask
= lp_build_const_int_vec(bld
->gallivm
, type
, stencil
->valuemask
);
115 stencilRef
= LLVMBuildAnd(builder
, stencilRef
, valuemask
, "");
116 /* compute stencilVals = stencilVals & valuemask */
117 stencilVals
= LLVMBuildAnd(builder
, stencilVals
, valuemask
, "");
120 res
= lp_build_cmp(bld
, stencil
->func
, stencilRef
, stencilVals
);
127 * Do the one or two-sided stencil test comparison.
128 * \sa lp_build_stencil_test_single
129 * \param front_facing an integer vector mask, indicating front (~0) or back
130 * (0) facing polygon. If NULL, assume front-facing.
133 lp_build_stencil_test(struct lp_build_context
*bld
,
134 const struct pipe_stencil_state stencil
[2],
135 LLVMValueRef stencilRefs
[2],
136 LLVMValueRef stencilVals
,
137 LLVMValueRef front_facing
)
141 assert(stencil
[0].enabled
);
143 /* do front face test */
144 res
= lp_build_stencil_test_single(bld
, &stencil
[0],
145 stencilRefs
[0], stencilVals
);
147 if (stencil
[1].enabled
&& front_facing
!= NULL
) {
148 /* do back face test */
149 LLVMValueRef back_res
;
151 back_res
= lp_build_stencil_test_single(bld
, &stencil
[1],
152 stencilRefs
[1], stencilVals
);
154 res
= lp_build_select(bld
, front_facing
, res
, back_res
);
162 * Apply the stencil operator (add/sub/keep/etc) to the given vector
164 * \return new stencil values vector
167 lp_build_stencil_op_single(struct lp_build_context
*bld
,
168 const struct pipe_stencil_state
*stencil
,
170 LLVMValueRef stencilRef
,
171 LLVMValueRef stencilVals
)
174 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
175 struct lp_type type
= bld
->type
;
177 LLVMValueRef max
= lp_build_const_int_vec(bld
->gallivm
, type
, 0xff);
184 stencil_op
= stencil
->fail_op
;
187 stencil_op
= stencil
->zfail_op
;
190 stencil_op
= stencil
->zpass_op
;
193 assert(0 && "Invalid stencil_op mode");
194 stencil_op
= PIPE_STENCIL_OP_KEEP
;
197 switch (stencil_op
) {
198 case PIPE_STENCIL_OP_KEEP
:
200 /* we can return early for this case */
202 case PIPE_STENCIL_OP_ZERO
:
205 case PIPE_STENCIL_OP_REPLACE
:
208 case PIPE_STENCIL_OP_INCR
:
209 res
= lp_build_add(bld
, stencilVals
, bld
->one
);
210 res
= lp_build_min(bld
, res
, max
);
212 case PIPE_STENCIL_OP_DECR
:
213 res
= lp_build_sub(bld
, stencilVals
, bld
->one
);
214 res
= lp_build_max(bld
, res
, bld
->zero
);
216 case PIPE_STENCIL_OP_INCR_WRAP
:
217 res
= lp_build_add(bld
, stencilVals
, bld
->one
);
218 res
= LLVMBuildAnd(builder
, res
, max
, "");
220 case PIPE_STENCIL_OP_DECR_WRAP
:
221 res
= lp_build_sub(bld
, stencilVals
, bld
->one
);
222 res
= LLVMBuildAnd(builder
, res
, max
, "");
224 case PIPE_STENCIL_OP_INVERT
:
225 res
= LLVMBuildNot(builder
, stencilVals
, "");
226 res
= LLVMBuildAnd(builder
, res
, max
, "");
229 assert(0 && "bad stencil op mode");
238 * Do the one or two-sided stencil test op/update.
241 lp_build_stencil_op(struct lp_build_context
*bld
,
242 const struct pipe_stencil_state stencil
[2],
244 LLVMValueRef stencilRefs
[2],
245 LLVMValueRef stencilVals
,
247 LLVMValueRef front_facing
)
250 LLVMBuilderRef builder
= bld
->gallivm
->builder
;
253 assert(stencil
[0].enabled
);
255 /* do front face op */
256 res
= lp_build_stencil_op_single(bld
, &stencil
[0], op
,
257 stencilRefs
[0], stencilVals
);
259 if (stencil
[1].enabled
&& front_facing
!= NULL
) {
260 /* do back face op */
261 LLVMValueRef back_res
;
263 back_res
= lp_build_stencil_op_single(bld
, &stencil
[1], op
,
264 stencilRefs
[1], stencilVals
);
266 res
= lp_build_select(bld
, front_facing
, res
, back_res
);
269 if (stencil
[0].writemask
!= 0xff ||
270 (stencil
[1].enabled
&& front_facing
!= NULL
&& stencil
[1].writemask
!= 0xff)) {
271 /* mask &= stencil[0].writemask */
272 LLVMValueRef writemask
= lp_build_const_int_vec(bld
->gallivm
, bld
->type
,
273 stencil
[0].writemask
);
274 if (stencil
[1].enabled
&& stencil
[1].writemask
!= stencil
[0].writemask
&& front_facing
!= NULL
) {
275 LLVMValueRef back_writemask
= lp_build_const_int_vec(bld
->gallivm
, bld
->type
,
276 stencil
[1].writemask
);
277 writemask
= lp_build_select(bld
, front_facing
, writemask
, back_writemask
);
280 mask
= LLVMBuildAnd(builder
, mask
, writemask
, "");
281 /* res = (res & mask) | (stencilVals & ~mask) */
282 res
= lp_build_select_bitwise(bld
, mask
, res
, stencilVals
);
285 /* res = mask ? res : stencilVals */
286 res
= lp_build_select(bld
, mask
, res
, stencilVals
);
295 * Return a type that matches the depth/stencil format.
298 lp_depth_type(const struct util_format_description
*format_desc
,
304 assert(format_desc
->colorspace
== UTIL_FORMAT_COLORSPACE_ZS
);
305 assert(format_desc
->block
.width
== 1);
306 assert(format_desc
->block
.height
== 1);
308 memset(&type
, 0, sizeof type
);
309 type
.width
= format_desc
->block
.bits
;
311 z_swizzle
= format_desc
->swizzle
[0];
313 if (format_desc
->channel
[z_swizzle
].type
== UTIL_FORMAT_TYPE_FLOAT
) {
314 type
.floating
= TRUE
;
315 assert(z_swizzle
== 0);
316 assert(format_desc
->channel
[z_swizzle
].size
== 32);
318 else if(format_desc
->channel
[z_swizzle
].type
== UTIL_FORMAT_TYPE_UNSIGNED
) {
319 assert(format_desc
->block
.bits
<= 32);
320 assert(format_desc
->channel
[z_swizzle
].normalized
);
321 if (format_desc
->channel
[z_swizzle
].size
< format_desc
->block
.bits
) {
322 /* Prefer signed integers when possible, as SSE has less support
323 * for unsigned comparison;
332 type
.length
= length
;
339 * Compute bitmask and bit shift to apply to the incoming fragment Z values
340 * and the Z buffer values needed before doing the Z comparison.
342 * Note that we leave the Z bits in the position that we find them
343 * in the Z buffer (typically 0xffffff00 or 0x00ffffff). That lets us
344 * get by with fewer bit twiddling steps.
347 get_z_shift_and_mask(const struct util_format_description
*format_desc
,
348 unsigned *shift
, unsigned *width
, unsigned *mask
)
353 assert(format_desc
->colorspace
== UTIL_FORMAT_COLORSPACE_ZS
);
354 assert(format_desc
->block
.width
== 1);
355 assert(format_desc
->block
.height
== 1);
357 /* The 64-bit d/s format is special: its 32 z bits are already extracted */
358 total_bits
= format_desc
->block
.bits
> 32 ? 32 : format_desc
->block
.bits
;
360 z_swizzle
= format_desc
->swizzle
[0];
362 if (z_swizzle
== PIPE_SWIZZLE_NONE
)
365 *width
= format_desc
->channel
[z_swizzle
].size
;
366 /* & 31 is for the same reason as the 32-bit limit above */
367 *shift
= format_desc
->channel
[z_swizzle
].shift
& 31;
369 if (*width
== total_bits
) {
372 *mask
= ((1 << *width
) - 1) << *shift
;
380 * Compute bitmask and bit shift to apply to the framebuffer pixel values
381 * to put the stencil bits in the least significant position.
385 get_s_shift_and_mask(const struct util_format_description
*format_desc
,
386 unsigned *shift
, unsigned *mask
)
391 s_swizzle
= format_desc
->swizzle
[1];
393 if (s_swizzle
== PIPE_SWIZZLE_NONE
)
396 /* just special case 64bit d/s format */
397 if (format_desc
->block
.bits
> 32) {
398 /* XXX big-endian? */
399 assert(format_desc
->format
== PIPE_FORMAT_Z32_FLOAT_S8X24_UINT
);
405 *shift
= format_desc
->channel
[s_swizzle
].shift
;
406 sz
= format_desc
->channel
[s_swizzle
].size
;
407 *mask
= (1U << sz
) - 1U;
414 * Perform the occlusion test and increase the counter.
415 * Test the depth mask. Add the number of channels which have a non-zero mask
416 * into the occlusion counter; e.g. if maskvalue is {-1, -1, -1, -1},
417 * the counter is incremented by 4.
418 * TODO: could get that out of the fs loop.
420 * \param type holds element type of the mask vector.
421 * \param maskvalue is the depth test mask.
422 * \param counter is a pointer of the uint32 counter.
425 lp_build_occlusion_count(struct gallivm_state
*gallivm
,
427 LLVMValueRef maskvalue
,
428 LLVMValueRef counter
)
430 LLVMBuilderRef builder
= gallivm
->builder
;
431 LLVMContextRef context
= gallivm
->context
;
432 LLVMValueRef countmask
= lp_build_const_int_vec(gallivm
, type
, 1);
433 LLVMValueRef count
, newcount
;
435 assert(type
.length
<= 16);
436 assert(type
.floating
);
438 if(util_cpu_caps
.has_sse
&& type
.length
== 4) {
439 const char *movmskintr
= "llvm.x86.sse.movmsk.ps";
440 const char *popcntintr
= "llvm.ctpop.i32";
441 LLVMValueRef bits
= LLVMBuildBitCast(builder
, maskvalue
,
442 lp_build_vec_type(gallivm
, type
), "");
443 bits
= lp_build_intrinsic_unary(builder
, movmskintr
,
444 LLVMInt32TypeInContext(context
), bits
);
445 count
= lp_build_intrinsic_unary(builder
, popcntintr
,
446 LLVMInt32TypeInContext(context
), bits
);
447 count
= LLVMBuildZExt(builder
, count
, LLVMIntTypeInContext(context
, 64), "");
449 else if(util_cpu_caps
.has_avx
&& type
.length
== 8) {
450 const char *movmskintr
= "llvm.x86.avx.movmsk.ps.256";
451 const char *popcntintr
= "llvm.ctpop.i32";
452 LLVMValueRef bits
= LLVMBuildBitCast(builder
, maskvalue
,
453 lp_build_vec_type(gallivm
, type
), "");
454 bits
= lp_build_intrinsic_unary(builder
, movmskintr
,
455 LLVMInt32TypeInContext(context
), bits
);
456 count
= lp_build_intrinsic_unary(builder
, popcntintr
,
457 LLVMInt32TypeInContext(context
), bits
);
458 count
= LLVMBuildZExt(builder
, count
, LLVMIntTypeInContext(context
, 64), "");
462 LLVMValueRef countv
= LLVMBuildAnd(builder
, maskvalue
, countmask
, "countv");
463 LLVMTypeRef counttype
= LLVMIntTypeInContext(context
, type
.length
* 8);
464 LLVMTypeRef i8vntype
= LLVMVectorType(LLVMInt8TypeInContext(context
), type
.length
* 4);
465 LLVMValueRef shufflev
, countd
;
466 LLVMValueRef shuffles
[16];
467 const char *popcntintr
= NULL
;
469 countv
= LLVMBuildBitCast(builder
, countv
, i8vntype
, "");
471 for (i
= 0; i
< type
.length
; i
++) {
472 #if UTIL_ARCH_LITTLE_ENDIAN
473 shuffles
[i
] = lp_build_const_int32(gallivm
, 4*i
);
475 shuffles
[i
] = lp_build_const_int32(gallivm
, (4*i
) + 3);
479 shufflev
= LLVMConstVector(shuffles
, type
.length
);
480 countd
= LLVMBuildShuffleVector(builder
, countv
, LLVMGetUndef(i8vntype
), shufflev
, "");
481 countd
= LLVMBuildBitCast(builder
, countd
, counttype
, "countd");
485 * this is bad on cpus without popcount (on x86 supported by intel
486 * nehalem, amd barcelona, and up - not tied to sse42).
487 * Would be much faster to just sum the 4 elements of the vector with
488 * some horizontal add (shuffle/add/shuffle/add after the initial and).
490 switch (type
.length
) {
492 popcntintr
= "llvm.ctpop.i32";
495 popcntintr
= "llvm.ctpop.i64";
498 popcntintr
= "llvm.ctpop.i128";
503 count
= lp_build_intrinsic_unary(builder
, popcntintr
, counttype
, countd
);
505 if (type
.length
> 8) {
506 count
= LLVMBuildTrunc(builder
, count
, LLVMIntTypeInContext(context
, 64), "");
508 else if (type
.length
< 8) {
509 count
= LLVMBuildZExt(builder
, count
, LLVMIntTypeInContext(context
, 64), "");
512 newcount
= LLVMBuildLoad(builder
, counter
, "origcount");
513 newcount
= LLVMBuildAdd(builder
, newcount
, count
, "newcount");
514 LLVMBuildStore(builder
, newcount
, counter
);
519 * Load depth/stencil values.
520 * The stored values are linear; swizzle them.
522 * \param type the data type of the fragment depth/stencil values
523 * \param format_desc description of the depth/stencil surface
524 * \param is_1d whether this resource has only one dimension
525 * \param loop_counter the current loop iteration
526 * \param depth_ptr pointer to the depth/stencil values of this 4x4 block
527 * \param depth_stride stride of the depth/stencil buffer
528 * \param z_fb contains z values loaded from fb (may include padding)
529 * \param s_fb contains s values loaded from fb (may include padding)
532 lp_build_depth_stencil_load_swizzled(struct gallivm_state
*gallivm
,
533 struct lp_type z_src_type
,
534 const struct util_format_description
*format_desc
,
536 LLVMValueRef depth_ptr
,
537 LLVMValueRef depth_stride
,
540 LLVMValueRef loop_counter
)
542 LLVMBuilderRef builder
= gallivm
->builder
;
543 LLVMValueRef shuffles
[LP_MAX_VECTOR_LENGTH
/ 4];
544 LLVMValueRef zs_dst1
, zs_dst2
;
545 LLVMValueRef zs_dst_ptr
;
546 LLVMValueRef depth_offset1
, depth_offset2
;
547 LLVMTypeRef load_ptr_type
;
548 unsigned depth_bytes
= format_desc
->block
.bits
/ 8;
549 struct lp_type zs_type
= lp_depth_type(format_desc
, z_src_type
.length
);
550 struct lp_type zs_load_type
= zs_type
;
552 zs_load_type
.length
= zs_load_type
.length
/ 2;
553 load_ptr_type
= LLVMPointerType(lp_build_vec_type(gallivm
, zs_load_type
), 0);
555 if (z_src_type
.length
== 4) {
557 LLVMValueRef looplsb
= LLVMBuildAnd(builder
, loop_counter
,
558 lp_build_const_int32(gallivm
, 1), "");
559 LLVMValueRef loopmsb
= LLVMBuildAnd(builder
, loop_counter
,
560 lp_build_const_int32(gallivm
, 2), "");
561 LLVMValueRef offset2
= LLVMBuildMul(builder
, loopmsb
,
563 depth_offset1
= LLVMBuildMul(builder
, looplsb
,
564 lp_build_const_int32(gallivm
, depth_bytes
* 2), "");
565 depth_offset1
= LLVMBuildAdd(builder
, depth_offset1
, offset2
, "");
567 /* just concatenate the loaded 2x2 values into 4-wide vector */
568 for (i
= 0; i
< 4; i
++) {
569 shuffles
[i
] = lp_build_const_int32(gallivm
, i
);
574 LLVMValueRef loopx2
= LLVMBuildShl(builder
, loop_counter
,
575 lp_build_const_int32(gallivm
, 1), "");
576 assert(z_src_type
.length
== 8);
577 depth_offset1
= LLVMBuildMul(builder
, loopx2
, depth_stride
, "");
579 * We load 2x4 values, and need to swizzle them (order
580 * 0,1,4,5,2,3,6,7) - not so hot with avx unfortunately.
582 for (i
= 0; i
< 8; i
++) {
583 shuffles
[i
] = lp_build_const_int32(gallivm
, (i
&1) + (i
&2) * 2 + (i
&4) / 2);
587 depth_offset2
= LLVMBuildAdd(builder
, depth_offset1
, depth_stride
, "");
589 /* Load current z/stencil values from z/stencil buffer */
590 zs_dst_ptr
= LLVMBuildGEP(builder
, depth_ptr
, &depth_offset1
, 1, "");
591 zs_dst_ptr
= LLVMBuildBitCast(builder
, zs_dst_ptr
, load_ptr_type
, "");
592 zs_dst1
= LLVMBuildLoad(builder
, zs_dst_ptr
, "");
594 zs_dst2
= lp_build_undef(gallivm
, zs_load_type
);
597 zs_dst_ptr
= LLVMBuildGEP(builder
, depth_ptr
, &depth_offset2
, 1, "");
598 zs_dst_ptr
= LLVMBuildBitCast(builder
, zs_dst_ptr
, load_ptr_type
, "");
599 zs_dst2
= LLVMBuildLoad(builder
, zs_dst_ptr
, "");
602 *z_fb
= LLVMBuildShuffleVector(builder
, zs_dst1
, zs_dst2
,
603 LLVMConstVector(shuffles
, zs_type
.length
), "");
606 if (format_desc
->block
.bits
== 8) {
607 /* Extend stencil-only 8 bit values (S8_UINT) */
608 *s_fb
= LLVMBuildZExt(builder
, *s_fb
,
609 lp_build_int_vec_type(gallivm
, z_src_type
), "");
612 if (format_desc
->block
.bits
< z_src_type
.width
) {
613 /* Extend destination ZS values (e.g., when reading from Z16_UNORM) */
614 *z_fb
= LLVMBuildZExt(builder
, *z_fb
,
615 lp_build_int_vec_type(gallivm
, z_src_type
), "");
618 else if (format_desc
->block
.bits
> 32) {
619 /* rely on llvm to handle too wide vector we have here nicely */
621 struct lp_type typex2
= zs_type
;
622 struct lp_type s_type
= zs_type
;
623 LLVMValueRef shuffles1
[LP_MAX_VECTOR_LENGTH
/ 4];
624 LLVMValueRef shuffles2
[LP_MAX_VECTOR_LENGTH
/ 4];
627 typex2
.width
= typex2
.width
/ 2;
628 typex2
.length
= typex2
.length
* 2;
629 s_type
.width
= s_type
.width
/ 2;
632 tmp
= LLVMBuildBitCast(builder
, *z_fb
,
633 lp_build_vec_type(gallivm
, typex2
), "");
635 for (i
= 0; i
< zs_type
.length
; i
++) {
636 shuffles1
[i
] = lp_build_const_int32(gallivm
, i
* 2);
637 shuffles2
[i
] = lp_build_const_int32(gallivm
, i
* 2 + 1);
639 *z_fb
= LLVMBuildShuffleVector(builder
, tmp
, tmp
,
640 LLVMConstVector(shuffles1
, zs_type
.length
), "");
641 *s_fb
= LLVMBuildShuffleVector(builder
, tmp
, tmp
,
642 LLVMConstVector(shuffles2
, zs_type
.length
), "");
643 *s_fb
= LLVMBuildBitCast(builder
, *s_fb
,
644 lp_build_vec_type(gallivm
, s_type
), "");
645 lp_build_name(*s_fb
, "s_dst");
648 lp_build_name(*z_fb
, "z_dst");
649 lp_build_name(*s_fb
, "s_dst");
650 lp_build_name(*z_fb
, "z_dst");
654 * Store depth/stencil values.
655 * Incoming values are swizzled (typically n 2x2 quads) and stored linearly.
656 * If there's a mask it will do select/store otherwise just store.
658 * \param type the data type of the fragment depth/stencil values
659 * \param format_desc description of the depth/stencil surface
660 * \param is_1d whether this resource has only one dimension
661 * \param mask_value the alive/dead pixel mask for the quad (vector)
662 * \param z_fb z values read from fb (with padding)
663 * \param s_fb s values read from fb (with padding)
664 * \param loop_counter the current loop iteration
665 * \param depth_ptr pointer to the depth/stencil values of this 4x4 block
666 * \param depth_stride stride of the depth/stencil buffer
667 * \param z_value the depth values to store (with padding)
668 * \param s_value the stencil values to store (with padding)
671 lp_build_depth_stencil_write_swizzled(struct gallivm_state
*gallivm
,
672 struct lp_type z_src_type
,
673 const struct util_format_description
*format_desc
,
675 LLVMValueRef mask_value
,
678 LLVMValueRef loop_counter
,
679 LLVMValueRef depth_ptr
,
680 LLVMValueRef depth_stride
,
681 LLVMValueRef z_value
,
682 LLVMValueRef s_value
)
684 struct lp_build_context z_bld
;
685 LLVMValueRef shuffles
[LP_MAX_VECTOR_LENGTH
/ 4];
686 LLVMBuilderRef builder
= gallivm
->builder
;
687 LLVMValueRef zs_dst1
, zs_dst2
;
688 LLVMValueRef zs_dst_ptr1
, zs_dst_ptr2
;
689 LLVMValueRef depth_offset1
, depth_offset2
;
690 LLVMTypeRef load_ptr_type
;
691 unsigned depth_bytes
= format_desc
->block
.bits
/ 8;
692 struct lp_type zs_type
= lp_depth_type(format_desc
, z_src_type
.length
);
693 struct lp_type z_type
= zs_type
;
694 struct lp_type zs_load_type
= zs_type
;
696 zs_load_type
.length
= zs_load_type
.length
/ 2;
697 load_ptr_type
= LLVMPointerType(lp_build_vec_type(gallivm
, zs_load_type
), 0);
699 z_type
.width
= z_src_type
.width
;
701 lp_build_context_init(&z_bld
, gallivm
, z_type
);
704 * This is far from ideal, at least for late depth write we should do this
705 * outside the fs loop to avoid all the swizzle stuff.
707 if (z_src_type
.length
== 4) {
708 LLVMValueRef looplsb
= LLVMBuildAnd(builder
, loop_counter
,
709 lp_build_const_int32(gallivm
, 1), "");
710 LLVMValueRef loopmsb
= LLVMBuildAnd(builder
, loop_counter
,
711 lp_build_const_int32(gallivm
, 2), "");
712 LLVMValueRef offset2
= LLVMBuildMul(builder
, loopmsb
,
714 depth_offset1
= LLVMBuildMul(builder
, looplsb
,
715 lp_build_const_int32(gallivm
, depth_bytes
* 2), "");
716 depth_offset1
= LLVMBuildAdd(builder
, depth_offset1
, offset2
, "");
720 LLVMValueRef loopx2
= LLVMBuildShl(builder
, loop_counter
,
721 lp_build_const_int32(gallivm
, 1), "");
722 assert(z_src_type
.length
== 8);
723 depth_offset1
= LLVMBuildMul(builder
, loopx2
, depth_stride
, "");
725 * We load 2x4 values, and need to swizzle them (order
726 * 0,1,4,5,2,3,6,7) - not so hot with avx unfortunately.
728 for (i
= 0; i
< 8; i
++) {
729 shuffles
[i
] = lp_build_const_int32(gallivm
, (i
&1) + (i
&2) * 2 + (i
&4) / 2);
733 depth_offset2
= LLVMBuildAdd(builder
, depth_offset1
, depth_stride
, "");
735 zs_dst_ptr1
= LLVMBuildGEP(builder
, depth_ptr
, &depth_offset1
, 1, "");
736 zs_dst_ptr1
= LLVMBuildBitCast(builder
, zs_dst_ptr1
, load_ptr_type
, "");
737 zs_dst_ptr2
= LLVMBuildGEP(builder
, depth_ptr
, &depth_offset2
, 1, "");
738 zs_dst_ptr2
= LLVMBuildBitCast(builder
, zs_dst_ptr2
, load_ptr_type
, "");
740 if (format_desc
->block
.bits
> 32) {
741 s_value
= LLVMBuildBitCast(builder
, s_value
, z_bld
.vec_type
, "");
745 z_value
= lp_build_select(&z_bld
, mask_value
, z_value
, z_fb
);
746 if (format_desc
->block
.bits
> 32) {
747 s_fb
= LLVMBuildBitCast(builder
, s_fb
, z_bld
.vec_type
, "");
748 s_value
= lp_build_select(&z_bld
, mask_value
, s_value
, s_fb
);
752 if (zs_type
.width
< z_src_type
.width
) {
753 /* Truncate ZS values (e.g., when writing to Z16_UNORM) */
754 z_value
= LLVMBuildTrunc(builder
, z_value
,
755 lp_build_int_vec_type(gallivm
, zs_type
), "");
758 if (format_desc
->block
.bits
<= 32) {
759 if (z_src_type
.length
== 4) {
760 zs_dst1
= lp_build_extract_range(gallivm
, z_value
, 0, 2);
761 zs_dst2
= lp_build_extract_range(gallivm
, z_value
, 2, 2);
764 assert(z_src_type
.length
== 8);
765 zs_dst1
= LLVMBuildShuffleVector(builder
, z_value
, z_value
,
766 LLVMConstVector(&shuffles
[0],
767 zs_load_type
.length
), "");
768 zs_dst2
= LLVMBuildShuffleVector(builder
, z_value
, z_value
,
769 LLVMConstVector(&shuffles
[4],
770 zs_load_type
.length
), "");
774 if (z_src_type
.length
== 4) {
775 zs_dst1
= lp_build_interleave2(gallivm
, z_type
,
776 z_value
, s_value
, 0);
777 zs_dst2
= lp_build_interleave2(gallivm
, z_type
,
778 z_value
, s_value
, 1);
782 LLVMValueRef shuffles
[LP_MAX_VECTOR_LENGTH
/ 2];
783 assert(z_src_type
.length
== 8);
784 for (i
= 0; i
< 8; i
++) {
785 shuffles
[i
*2] = lp_build_const_int32(gallivm
, (i
&1) + (i
&2) * 2 + (i
&4) / 2);
786 shuffles
[i
*2+1] = lp_build_const_int32(gallivm
, (i
&1) + (i
&2) * 2 + (i
&4) / 2 +
789 zs_dst1
= LLVMBuildShuffleVector(builder
, z_value
, s_value
,
790 LLVMConstVector(&shuffles
[0],
791 z_src_type
.length
), "");
792 zs_dst2
= LLVMBuildShuffleVector(builder
, z_value
, s_value
,
793 LLVMConstVector(&shuffles
[8],
794 z_src_type
.length
), "");
796 zs_dst1
= LLVMBuildBitCast(builder
, zs_dst1
,
797 lp_build_vec_type(gallivm
, zs_load_type
), "");
798 zs_dst2
= LLVMBuildBitCast(builder
, zs_dst2
,
799 lp_build_vec_type(gallivm
, zs_load_type
), "");
802 LLVMBuildStore(builder
, zs_dst1
, zs_dst_ptr1
);
804 LLVMBuildStore(builder
, zs_dst2
, zs_dst_ptr2
);
809 * Generate code for performing depth and/or stencil tests.
810 * We operate on a vector of values (typically n 2x2 quads).
812 * \param depth the depth test state
813 * \param stencil the front/back stencil state
814 * \param type the data type of the fragment depth/stencil values
815 * \param format_desc description of the depth/stencil surface
816 * \param mask the alive/dead pixel mask for the quad (vector)
817 * \param cov_mask coverage mask
818 * \param stencil_refs the front/back stencil ref values (scalar)
819 * \param z_src the incoming depth/stencil values (n 2x2 quad values, float32)
820 * \param zs_dst the depth/stencil values in framebuffer
821 * \param face contains boolean value indicating front/back facing polygon
824 lp_build_depth_stencil_test(struct gallivm_state
*gallivm
,
825 const struct pipe_depth_state
*depth
,
826 const struct pipe_stencil_state stencil
[2],
827 struct lp_type z_src_type
,
828 const struct util_format_description
*format_desc
,
829 struct lp_build_mask_context
*mask
,
830 LLVMValueRef
*cov_mask
,
831 LLVMValueRef stencil_refs
[2],
836 LLVMValueRef
*z_value
,
837 LLVMValueRef
*s_value
,
840 LLVMBuilderRef builder
= gallivm
->builder
;
841 struct lp_type z_type
;
842 struct lp_build_context z_bld
;
843 struct lp_build_context s_bld
;
844 struct lp_type s_type
;
845 unsigned z_shift
= 0, z_width
= 0, z_mask
= 0;
846 LLVMValueRef z_dst
= NULL
;
847 LLVMValueRef stencil_vals
= NULL
;
848 LLVMValueRef z_bitmask
= NULL
, stencil_shift
= NULL
;
849 LLVMValueRef z_pass
= NULL
, s_pass_mask
= NULL
;
850 LLVMValueRef current_mask
= mask
? lp_build_mask_value(mask
) : *cov_mask
;
851 LLVMValueRef front_facing
= NULL
;
852 boolean have_z
, have_s
;
855 * Depths are expected to be between 0 and 1, even if they are stored in
856 * floats. Setting these bits here will ensure that the lp_build_conv() call
857 * below won't try to unnecessarily clamp the incoming values.
859 if(z_src_type
.floating
) {
860 z_src_type
.sign
= FALSE
;
861 z_src_type
.norm
= TRUE
;
864 assert(!z_src_type
.sign
);
865 assert(z_src_type
.norm
);
868 /* Pick the type matching the depth-stencil format. */
869 z_type
= lp_depth_type(format_desc
, z_src_type
.length
);
871 /* Pick the intermediate type for depth operations. */
872 z_type
.width
= z_src_type
.width
;
873 assert(z_type
.length
== z_src_type
.length
);
875 /* FIXME: for non-float depth/stencil might generate better code
876 * if we'd always split it up to use 128bit operations.
877 * For stencil we'd almost certainly want to pack to 8xi16 values,
878 * for z just run twice.
881 /* Sanity checking */
883 const unsigned z_swizzle
= format_desc
->swizzle
[0];
884 const unsigned s_swizzle
= format_desc
->swizzle
[1];
886 assert(z_swizzle
!= PIPE_SWIZZLE_NONE
||
887 s_swizzle
!= PIPE_SWIZZLE_NONE
);
889 assert(depth
->enabled
|| stencil
[0].enabled
);
891 assert(format_desc
->colorspace
== UTIL_FORMAT_COLORSPACE_ZS
);
892 assert(format_desc
->block
.width
== 1);
893 assert(format_desc
->block
.height
== 1);
895 if (stencil
[0].enabled
) {
896 assert(s_swizzle
< 4);
897 assert(format_desc
->channel
[s_swizzle
].type
== UTIL_FORMAT_TYPE_UNSIGNED
);
898 assert(format_desc
->channel
[s_swizzle
].pure_integer
);
899 assert(!format_desc
->channel
[s_swizzle
].normalized
);
900 assert(format_desc
->channel
[s_swizzle
].size
== 8);
903 if (depth
->enabled
) {
904 assert(z_swizzle
< 4);
905 if (z_type
.floating
) {
906 assert(z_swizzle
== 0);
907 assert(format_desc
->channel
[z_swizzle
].type
==
908 UTIL_FORMAT_TYPE_FLOAT
);
909 assert(format_desc
->channel
[z_swizzle
].size
== 32);
912 assert(format_desc
->channel
[z_swizzle
].type
==
913 UTIL_FORMAT_TYPE_UNSIGNED
);
914 assert(format_desc
->channel
[z_swizzle
].normalized
);
915 assert(!z_type
.fixed
);
921 /* Setup build context for Z vals */
922 lp_build_context_init(&z_bld
, gallivm
, z_type
);
924 /* Setup build context for stencil vals */
925 s_type
= lp_int_type(z_type
);
926 lp_build_context_init(&s_bld
, gallivm
, s_type
);
928 /* Compute and apply the Z/stencil bitmasks and shifts.
931 unsigned s_shift
, s_mask
;
936 have_z
= get_z_shift_and_mask(format_desc
, &z_shift
, &z_width
, &z_mask
);
937 have_s
= get_s_shift_and_mask(format_desc
, &s_shift
, &s_mask
);
940 if (z_mask
!= 0xffffffff) {
941 z_bitmask
= lp_build_const_int_vec(gallivm
, z_type
, z_mask
);
945 * Align the framebuffer Z 's LSB to the right.
948 LLVMValueRef shift
= lp_build_const_int_vec(gallivm
, z_type
, z_shift
);
949 z_dst
= LLVMBuildLShr(builder
, z_dst
, shift
, "z_dst");
950 } else if (z_bitmask
) {
951 z_dst
= LLVMBuildAnd(builder
, z_dst
, z_bitmask
, "z_dst");
953 lp_build_name(z_dst
, "z_dst");
959 LLVMValueRef shift
= lp_build_const_int_vec(gallivm
, s_type
, s_shift
);
960 stencil_vals
= LLVMBuildLShr(builder
, stencil_vals
, shift
, "");
961 stencil_shift
= shift
; /* used below */
964 if (s_mask
!= 0xffffffff) {
965 LLVMValueRef mask
= lp_build_const_int_vec(gallivm
, s_type
, s_mask
);
966 stencil_vals
= LLVMBuildAnd(builder
, stencil_vals
, mask
, "");
969 lp_build_name(stencil_vals
, "s_dst");
973 if (stencil
[0].enabled
) {
978 * XXX: the scalar expansion below produces atrocious code
979 * (basically producing a 64bit scalar value, then moving the 2
980 * 32bit pieces separately to simd, plus 4 shuffles, which is
981 * seriously lame). But the scalar-simd transitions are always
982 * tricky, so no big surprise there.
983 * This here would be way better, however llvm has some serious
984 * trouble later using it in the select, probably because it will
985 * recognize the expression as constant and move the simd value
986 * away (out of the loop) - and then it will suddenly try
987 * constructing i1 high-bit masks out of it later...
988 * (Try piglit stencil-twoside.)
989 * Note this is NOT due to using SExt/Trunc, it fails exactly the
990 * same even when using native compare/select.
991 * I cannot reproduce this problem when using stand-alone compiler
992 * though, suggesting some problem with optimization passes...
993 * (With stand-alone compilation, the construction of this mask
994 * value, no matter if the easy 3 instruction here or the complex
995 * 16+ one below, never gets separated from where it's used.)
996 * The scalar code still has the same problem, but the generated
997 * code looks a bit better at least for some reason, even if
998 * mostly by luck (the fundamental issue clearly is the same).
1000 front_facing
= lp_build_broadcast(gallivm
, s_bld
.vec_type
, face
);
1001 /* front_facing = face != 0 ? ~0 : 0 */
1002 front_facing
= lp_build_compare(gallivm
, s_bld
.type
,
1004 front_facing
, s_bld
.zero
);
1006 LLVMValueRef zero
= lp_build_const_int32(gallivm
, 0);
1008 /* front_facing = face != 0 ? ~0 : 0 */
1009 front_facing
= LLVMBuildICmp(builder
, LLVMIntNE
, face
, zero
, "");
1010 front_facing
= LLVMBuildSExt(builder
, front_facing
,
1011 LLVMIntTypeInContext(gallivm
->context
,
1012 s_bld
.type
.length
*s_bld
.type
.width
),
1014 front_facing
= LLVMBuildBitCast(builder
, front_facing
,
1015 s_bld
.int_vec_type
, "");
1020 s_pass_mask
= lp_build_stencil_test(&s_bld
, stencil
,
1021 stencil_refs
, stencil_vals
,
1024 /* apply stencil-fail operator */
1026 LLVMValueRef s_fail_mask
= lp_build_andnot(&s_bld
, current_mask
, s_pass_mask
);
1027 stencil_vals
= lp_build_stencil_op(&s_bld
, stencil
, S_FAIL_OP
,
1028 stencil_refs
, stencil_vals
,
1029 s_fail_mask
, front_facing
);
1033 if (depth
->enabled
) {
1035 * Convert fragment Z to the desired type, aligning the LSB to the right.
1038 assert(z_type
.width
== z_src_type
.width
);
1039 assert(z_type
.length
== z_src_type
.length
);
1040 assert(lp_check_value(z_src_type
, z_src
));
1041 if (z_src_type
.floating
) {
1043 * Convert from floating point values
1046 if (!z_type
.floating
) {
1047 z_src
= lp_build_clamped_float_to_unsigned_norm(gallivm
,
1054 * Convert from unsigned normalized values.
1057 assert(!z_src_type
.sign
);
1058 assert(!z_src_type
.fixed
);
1059 assert(z_src_type
.norm
);
1060 assert(!z_type
.floating
);
1061 if (z_src_type
.width
> z_width
) {
1062 LLVMValueRef shift
= lp_build_const_int_vec(gallivm
, z_src_type
,
1063 z_src_type
.width
- z_width
);
1064 z_src
= LLVMBuildLShr(builder
, z_src
, shift
, "");
1067 assert(lp_check_value(z_type
, z_src
));
1069 lp_build_name(z_src
, "z_src");
1071 /* compare src Z to dst Z, returning 'pass' mask */
1072 z_pass
= lp_build_cmp(&z_bld
, depth
->func
, z_src
, z_dst
);
1074 /* mask off bits that failed stencil test */
1076 current_mask
= LLVMBuildAnd(builder
, current_mask
, s_pass_mask
, "");
1079 if (!stencil
[0].enabled
&& mask
) {
1080 /* We can potentially skip all remaining operations here, but only
1081 * if stencil is disabled because we still need to update the stencil
1082 * buffer values. Don't need to update Z buffer values.
1084 lp_build_mask_update(mask
, z_pass
);
1087 lp_build_mask_check(mask
);
1091 if (depth
->writemask
) {
1092 LLVMValueRef z_pass_mask
;
1094 /* mask off bits that failed Z test */
1095 z_pass_mask
= LLVMBuildAnd(builder
, current_mask
, z_pass
, "");
1097 /* Mix the old and new Z buffer values.
1098 * z_dst[i] = zselectmask[i] ? z_src[i] : z_dst[i]
1100 z_dst
= lp_build_select(&z_bld
, z_pass_mask
, z_src
, z_dst
);
1103 if (stencil
[0].enabled
) {
1104 /* update stencil buffer values according to z pass/fail result */
1105 LLVMValueRef z_fail_mask
, z_pass_mask
;
1107 /* apply Z-fail operator */
1108 z_fail_mask
= lp_build_andnot(&s_bld
, current_mask
, z_pass
);
1109 stencil_vals
= lp_build_stencil_op(&s_bld
, stencil
, Z_FAIL_OP
,
1110 stencil_refs
, stencil_vals
,
1111 z_fail_mask
, front_facing
);
1113 /* apply Z-pass operator */
1114 z_pass_mask
= LLVMBuildAnd(builder
, current_mask
, z_pass
, "");
1115 stencil_vals
= lp_build_stencil_op(&s_bld
, stencil
, Z_PASS_OP
,
1116 stencil_refs
, stencil_vals
,
1117 z_pass_mask
, front_facing
);
1121 /* No depth test: apply Z-pass operator to stencil buffer values which
1122 * passed the stencil test.
1124 s_pass_mask
= LLVMBuildAnd(builder
, current_mask
, s_pass_mask
, "");
1125 stencil_vals
= lp_build_stencil_op(&s_bld
, stencil
, Z_PASS_OP
,
1126 stencil_refs
, stencil_vals
,
1127 s_pass_mask
, front_facing
);
1130 /* Put Z and stencil bits in the right place */
1131 if (have_z
&& z_shift
) {
1132 LLVMValueRef shift
= lp_build_const_int_vec(gallivm
, z_type
, z_shift
);
1133 z_dst
= LLVMBuildShl(builder
, z_dst
, shift
, "");
1135 if (stencil_vals
&& stencil_shift
)
1136 stencil_vals
= LLVMBuildShl(builder
, stencil_vals
,
1139 /* Finally, merge the z/stencil values */
1140 if (format_desc
->block
.bits
<= 32) {
1141 if (have_z
&& have_s
)
1142 *z_value
= LLVMBuildOr(builder
, z_dst
, stencil_vals
, "");
1146 *z_value
= stencil_vals
;
1147 *s_value
= *z_value
;
1151 *s_value
= stencil_vals
;
1156 lp_build_mask_update(mask
, s_pass_mask
);
1158 if (depth
->enabled
&& stencil
[0].enabled
)
1159 lp_build_mask_update(mask
, z_pass
);
1161 LLVMValueRef tmp_mask
= *cov_mask
;
1163 tmp_mask
= LLVMBuildAnd(builder
, tmp_mask
, s_pass_mask
, "");
1165 /* for multisample we don't do the stencil optimisation so update always */
1167 tmp_mask
= LLVMBuildAnd(builder
, tmp_mask
, z_pass
, "");
1168 *cov_mask
= tmp_mask
;