From f2a87a1f5bcd78f381409345740ed37273453c0d Mon Sep 17 00:00:00 2001
From: Roland Scheidegger
Date: Sat, 12 Jan 2013 17:20:13 -0800
Subject: [PATCH] llvmpipe: more fixes for integer color buffers
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

Cast back the fake floats to ints, and make sure we don't try to do
scaling in format conversion (which only makes sense with normalized
values).
Also need to disable blending and alpha test (as per spec) for such
buffers.
This makes fbo-blending from the piglit ext_texture_integer tests work
for most formats (some crash, and the luminance and intensity variants
have the GB or GBA channels respectively wrong).

Reviewed-by: Brian Paul
Reviewed-by: José Fonseca
---
 src/gallium/drivers/llvmpipe/lp_state_fs.c | 42 ++++++++++++++++++++--
 1 file changed, 39 insertions(+), 3 deletions(-)

diff --git a/src/gallium/drivers/llvmpipe/lp_state_fs.c b/src/gallium/drivers/llvmpipe/lp_state_fs.c
index 83b902de959..cf936d029b5 100644
--- a/src/gallium/drivers/llvmpipe/lp_state_fs.c
+++ b/src/gallium/drivers/llvmpipe/lp_state_fs.c
@@ -1143,7 +1143,10 @@ convert_to_blend_type(struct gallivm_state *gallivm,
                                   "");
 
          /* Scale bits */
-         chans[j] = scale_bits(gallivm, src_fmt->channel[j].size, blend_type.width, chans[j], src_type);
+         if (src_type.norm) {
+            chans[j] = scale_bits(gallivm, src_fmt->channel[j].size,
+                                  blend_type.width, chans[j], src_type);
+         }
 
          /* Insert bits into correct position */
          chans[j] = LLVMBuildShl(builder,
@@ -1250,7 +1253,10 @@ convert_from_blend_type(struct gallivm_state *gallivm,
                                   "");
 
          /* Scale down bits */
-         chans[j] = scale_bits(gallivm, blend_type.width, src_fmt->channel[j].size, chans[j], src_type);
+         if (src_type.norm) {
+            chans[j] = scale_bits(gallivm, blend_type.width,
+                                  src_fmt->channel[j].size, chans[j], src_type);
+         }
 
          /* Insert bits */
          chans[j] = LLVMBuildShl(builder,
@@ -1438,6 +1444,25 @@ generate_unswizzled_blend(struct gallivm_state *gallivm,
       }
    }
 
+   if (util_format_is_pure_integer(out_format)) {
+      /*
+       * In this case fs_type was really ints or uints disguised as floats,
+       * fix that up now.
+       */
+      fs_type.floating = 0;
+      fs_type.sign = dst_type.sign;
+      for (i = 0; i < num_fs; ++i) {
+         for (j = 0; j < dst_channels; ++j) {
+            fs_src[i][j] = LLVMBuildBitCast(builder, fs_src[i][j],
+                                            lp_build_vec_type(gallivm, fs_type), "");
+         }
+         if (dst_channels == 3 && !has_alpha) {
+            fs_src[i][3] = LLVMBuildBitCast(builder, fs_src[i][3],
+                                            lp_build_vec_type(gallivm, fs_type), "");
+         }
+      }
+   }
+
    /*
     * Pixel twiddle from fragment shader order to memory order
@@ -2498,7 +2523,11 @@ make_variant_key(struct llvmpipe_context *lp,
       }
    }
 
-   key->alpha.enabled = lp->depth_stencil->alpha.enabled;
+   /* alpha test only applies if render buffer 0 is non-integer (or does not exist) */
+   if (!lp->framebuffer.nr_cbufs ||
+       !util_format_is_pure_integer(lp->framebuffer.cbufs[0]->format)) {
+      key->alpha.enabled = lp->depth_stencil->alpha.enabled;
+   }
    if(key->alpha.enabled)
       key->alpha.func = lp->depth_stencil->alpha.func;
    /* alpha.ref_value is passed in jit_context */
@@ -2538,6 +2567,13 @@ make_variant_key(struct llvmpipe_context *lp,
        */
       blend_rt->colormask &= util_format_colormask(format_desc);
 
+      /*
+       * Disable blend for integer formats.
+       */
+      if (util_format_is_pure_integer(format)) {
+         blend_rt->blend_enable = 0;
+      }
+
       /*
        * Our swizzled render tiles always have an alpha channel, but the linear
        * render target format often does not, so force here the dst alpha to be
-- 
2.30.2
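
Note (not part of the patch): the reason scale_bits() may only run for normalized
channels is that it rescales the value range when a channel is widened, which is
meaningful for UNORM/SNORM data but would corrupt pure integer values, which must
be carried over verbatim. The following standalone C sketch illustrates that
distinction; scale_unorm_bits() is a hypothetical scalar stand-in, not the actual
llvmpipe helper.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical scalar analogue of normalized-channel scaling: widen an
 * n-bit UNORM value to m bits by bit replication, so the maximum input
 * still maps to the maximum output (0xF in 4 bits becomes 0xFF in 8 bits,
 * not 0x0F).  Assumes value fits in src_bits and dst_bits <= 32. */
static uint32_t
scale_unorm_bits(uint32_t value, unsigned src_bits, unsigned dst_bits)
{
   uint32_t result = value << (dst_bits - src_bits);
   unsigned filled = src_bits;

   while (filled < dst_bits) {
      result |= result >> filled;   /* replicate the high bits downwards */
      filled *= 2;
   }
   return result;
}

int
main(void)
{
   /* UNORM: widening must stretch the value range (prints 0xFF). */
   printf("unorm 0xF (4 bit) -> 0x%02X (8 bit)\n", scale_unorm_bits(0xF, 4, 8));

   /* Pure integer: the raw value 15 must stay 15.  Pushing it through the
    * normalized scaling above would yield 255, which is why the patch only
    * calls scale_bits() when src_type.norm is set. */
   printf("pure int 15 (4 bit) -> %u (8 bit, no scaling)\n", 15u);
   return 0;
}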