/**************************************************************************
*
* Copyright 2009 VMware, Inc.
- * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright 2007 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
for (i = 0; i < fs_type.length / 4; i++) {
unsigned j = 2 * (i % 2) + (i / 2) * 8;
- bits[4*i + 0] = LLVMConstInt(i32t, 1 << (j + 0), 0);
- bits[4*i + 1] = LLVMConstInt(i32t, 1 << (j + 1), 0);
- bits[4*i + 2] = LLVMConstInt(i32t, 1 << (j + 4), 0);
- bits[4*i + 3] = LLVMConstInt(i32t, 1 << (j + 5), 0);
+ bits[4*i + 0] = LLVMConstInt(i32t, 1ULL << (j + 0), 0);
+ bits[4*i + 1] = LLVMConstInt(i32t, 1ULL << (j + 1), 0);
+ bits[4*i + 2] = LLVMConstInt(i32t, 1ULL << (j + 4), 0);
+ bits[4*i + 3] = LLVMConstInt(i32t, 1ULL << (j + 5), 0);
}
mask = LLVMBuildAnd(builder, mask, LLVMConstVector(bits, fs_type.length), "");
const struct tgsi_token *tokens = shader->base.tokens;
LLVMTypeRef vec_type;
LLVMValueRef mask_ptr, mask_val;
- LLVMValueRef consts_ptr;
+ LLVMValueRef consts_ptr, num_consts_ptr;
LLVMValueRef z;
LLVMValueRef z_value, s_value;
LLVMValueRef z_fb, s_fb;
assert(zs_format_desc);
if (!shader->info.base.writes_z) {
- if (key->alpha.enabled || shader->info.base.uses_kill) {
+ if (key->alpha.enabled ||
+ key->blend.alpha_to_coverage ||
+ shader->info.base.uses_kill) {
/* With alpha test and kill, can do the depth test early
* and hopefully eliminate some quads. But need to do a
* special deferred depth write once the final mask value
vec_type = lp_build_vec_type(gallivm, type);
consts_ptr = lp_jit_context_constants(gallivm, context_ptr);
+ num_consts_ptr = lp_jit_context_num_constants(gallivm, context_ptr);
lp_build_for_loop_begin(&loop_state, gallivm,
lp_build_const_int32(gallivm, 0),
/* Build the actual shader */
lp_build_tgsi_soa(gallivm, tokens, type, &mask,
- consts_ptr, &system_values,
+ consts_ptr, num_consts_ptr, &system_values,
interp->inputs,
outputs, sampler, &shader->info.base, NULL);
}
}
+ /* Emulate Alpha to Coverage with Alpha test */
+ if (key->blend.alpha_to_coverage) {
+ int color0 = find_output_by_semantic(&shader->info.base,
+ TGSI_SEMANTIC_COLOR,
+ 0);
+
+ if (color0 != -1 && outputs[color0][3]) {
+ LLVMValueRef alpha = LLVMBuildLoad(builder, outputs[color0][3], "alpha");
+
+ lp_build_alpha_to_coverage(gallivm, type,
+ &mask, alpha,
+ (depth_mode & LATE_DEPTH_TEST) != 0);
+ }
+ }
+
/* Late Z test */
if (depth_mode & LATE_DEPTH_TEST) {
int pos0 = find_output_by_semantic(&shader->info.base,
unsigned chan;
if (format_expands_to_float_soa(format_desc)) {
- /* just make this a 32bit uint */
+ /* just make this a uint with width of block */
type->floating = false;
type->fixed = false;
type->sign = false;
type->norm = false;
- type->width = 32;
+ type->width = format_desc->block.bits;
type->length = 1;
return;
}
return result;
}
+/**
+ * Determine whether the render target format is a small (non-32-bit) float
+ * format, i.e. one that needs denormals enabled in the FP state for the
+ * half-float (and similar) conversions to produce correct results.
+ * Returns non-zero if denorm handling is required for this dst_type/format.
+ */
+static INLINE int
+have_smallfloat_format(struct lp_type dst_type,
+                       enum pipe_format format)
+{
+   return ((dst_type.floating && dst_type.width != 32) ||
+           /* R11G11B10 is packed small floats, but due to format handling
+            * hacks its lp_type has floating unset and width 32 here, so it
+            * must be special-cased explicitly. */
+           (format == PIPE_FORMAT_R11G11B10_FLOAT));
+}
+
/**
* Convert from memory format to blending format
* This is pretty suboptimal for this case blending in SoA would be much
* better, since conversion gets us SoA values so need to convert back.
*/
- assert(src_type.width == 32);
+ assert(src_type.width == 32 || src_type.width == 16);
assert(dst_type.floating);
assert(dst_type.width == 32);
assert(dst_type.length % 4 == 0);
assert(num_srcs % 4 == 0);
+ if (src_type.width == 16) {
+ /* expand 4x16bit values to 4x32bit */
+ struct lp_type type32x4 = src_type;
+ LLVMTypeRef ltype32x4;
+ unsigned num_fetch = dst_type.length == 8 ? num_srcs / 2 : num_srcs / 4;
+ type32x4.width = 32;
+ ltype32x4 = lp_build_vec_type(gallivm, type32x4);
+ for (i = 0; i < num_fetch; i++) {
+ src[i] = LLVMBuildZExt(builder, src[i], ltype32x4, "");
+ }
+ src_type.width = 32;
+ }
for (i = 0; i < 4; i++) {
tmpsrc[i] = src[i];
}
assert(src_type.floating);
assert(src_type.width == 32);
assert(src_type.length % 4 == 0);
- assert(dst_type.width == 32);
+ assert(dst_type.width == 32 || dst_type.width == 16);
for (i = 0; i < num_srcs / 4; i++) {
LLVMValueRef tmpsoa[4], tmpdst;
src[i] = tmpdst;
}
}
+ if (dst_type.width == 16) {
+ struct lp_type type16x8 = dst_type;
+ struct lp_type type32x4 = dst_type;
+ LLVMTypeRef ltype16x4, ltypei64, ltypei128;
+ unsigned num_fetch = src_type.length == 8 ? num_srcs / 2 : num_srcs / 4;
+ type16x8.length = 8;
+ type32x4.width = 32;
+ ltypei128 = LLVMIntTypeInContext(gallivm->context, 128);
+ ltypei64 = LLVMIntTypeInContext(gallivm->context, 64);
+ ltype16x4 = lp_build_vec_type(gallivm, dst_type);
+ /* We could do vector truncation but it doesn't generate very good code */
+ for (i = 0; i < num_fetch; i++) {
+ src[i] = lp_build_pack2(gallivm, type32x4, type16x8,
+ src[i], lp_build_zero(gallivm, type32x4));
+ src[i] = LLVMBuildBitCast(builder, src[i], ltypei128, "");
+ src[i] = LLVMBuildTrunc(builder, src[i], ltypei64, "");
+ src[i] = LLVMBuildBitCast(builder, src[i], ltype16x4, "");
+ }
+ }
return;
}
* this, otherwise half-float format conversions won't work
* (again due to llvm bug #6393).
*/
- if (dst_type.floating && dst_type.width != 32) {
+ if (have_smallfloat_format(dst_type, out_format)) {
/* We need to make sure that denorms are ok for half float
conversions */
fpstate = lp_build_fpstate_get(gallivm);
partial_mask |= !variant->opaque;
i32_zero = lp_build_const_int32(gallivm, 0);
-#if HAVE_LLVM < 0x0302
- /*
- * undef triggers a crash in LLVMBuildTrunc in convert_from_blend_type in some
- * cases (seen with r10g10b10a2, 128bit wide vectors) (only used for 1d case).
- */
- undef_src_val = lp_build_zero(gallivm, fs_type);
-#else
undef_src_val = lp_build_undef(gallivm, fs_type);
-#endif
row_type.length = fs_type.length;
vector_width = dst_type.floating ? lp_native_vector_width : lp_integer_vector_width;
dst, dst_type, dst_count, dst_alignment);
}
- if (dst_type.floating && dst_type.width != 32) {
+ if (have_smallfloat_format(dst_type, out_format)) {
lp_build_fpstate_set(gallivm, fpstate);
}
struct gallivm_state *gallivm = variant->gallivm;
const struct lp_fragment_shader_variant_key *key = &variant->key;
struct lp_shader_input inputs[PIPE_MAX_SHADER_INPUTS];
- char func_name[256];
+ char func_name[64];
struct lp_type fs_type;
struct lp_type blend_type;
LLVMTypeRef fs_elem_type;
}
/* check if writes to cbuf[0] are to be copied to all cbufs */
- cbuf0_write_all = FALSE;
- for (i = 0;i < shader->info.base.num_properties; i++) {
- if (shader->info.base.properties[i].name ==
- TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS) {
- cbuf0_write_all = TRUE;
- break;
- }
- }
+ cbuf0_write_all =
+ shader->info.base.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS];
/* TODO: actually pick these based on the fs and color buffer
* characteristics. */
blend_vec_type = lp_build_vec_type(gallivm, blend_type);
- util_snprintf(func_name, sizeof(func_name), "fs%u_variant%u_%s",
- shader->no, variant->no, partial_mask ? "partial" : "whole");
+ util_snprintf(func_name, sizeof(func_name), "fs%u_variant%u_%s",
+ shader->no, variant->no, partial_mask ? "partial" : "whole");
arg_types[0] = variant->jit_context_ptr_type; /* context */
arg_types[1] = int32_type; /* x */
LLVMValueRef mask_store = lp_build_array_alloca(gallivm, mask_type,
num_loop, "mask_store");
LLVMValueRef color_store[PIPE_MAX_COLOR_BUFS][TGSI_NUM_CHANNELS];
+ boolean pixel_center_integer =
+ shader->info.base.properties[TGSI_PROPERTY_FS_COORD_PIXEL_CENTER];
/*
* The shader input interpolation info is not explicitely baked in the
gallivm,
shader->info.base.num_inputs,
inputs,
- shader->info.base.pixel_center_integer,
+ pixel_center_integer,
builder, fs_type,
a0_ptr, dadx_ptr, dady_ptr,
x, y);
/* Loop over color outputs / color buffers to do blending.
*/
for(cbuf = 0; cbuf < key->nr_cbufs; cbuf++) {
- LLVMValueRef color_ptr;
- LLVMValueRef stride;
- LLVMValueRef index = lp_build_const_int32(gallivm, cbuf);
+ if (key->cbuf_format[cbuf] != PIPE_FORMAT_NONE) {
+ LLVMValueRef color_ptr;
+ LLVMValueRef stride;
+ LLVMValueRef index = lp_build_const_int32(gallivm, cbuf);
- boolean do_branch = ((key->depth.enabled
- || key->stencil[0].enabled
- || key->alpha.enabled)
- && !shader->info.base.uses_kill);
+ boolean do_branch = ((key->depth.enabled
+ || key->stencil[0].enabled
+ || key->alpha.enabled)
+ && !shader->info.base.uses_kill);
- color_ptr = LLVMBuildLoad(builder,
- LLVMBuildGEP(builder, color_ptr_ptr, &index, 1, ""),
- "");
+ color_ptr = LLVMBuildLoad(builder,
+ LLVMBuildGEP(builder, color_ptr_ptr,
+ &index, 1, ""),
+ "");
- lp_build_name(color_ptr, "color_ptr%d", cbuf);
+ lp_build_name(color_ptr, "color_ptr%d", cbuf);
- stride = LLVMBuildLoad(builder,
- LLVMBuildGEP(builder, stride_ptr, &index, 1, ""),
- "");
+ stride = LLVMBuildLoad(builder,
+ LLVMBuildGEP(builder, stride_ptr, &index, 1, ""),
+ "");
- generate_unswizzled_blend(gallivm, cbuf, variant, key->cbuf_format[cbuf],
- num_fs, fs_type, fs_mask, fs_out_color,
- context_ptr, color_ptr, stride, partial_mask, do_branch);
+ generate_unswizzled_blend(gallivm, cbuf, variant,
+ key->cbuf_format[cbuf],
+ num_fs, fs_type, fs_mask, fs_out_color,
+ context_ptr, color_ptr, stride,
+ partial_mask, do_branch);
+ }
}
LLVMBuildRetVoid(builder);
gallivm_verify_function(gallivm, function);
-
- variant->nr_instrs += lp_build_count_instructions(function);
}
debug_printf("blend.alpha_dst_factor = %s\n", util_dump_blend_factor(key->blend.rt[0].alpha_dst_factor, TRUE));
}
debug_printf("blend.colormask = 0x%x\n", key->blend.rt[0].colormask);
+ if (key->blend.alpha_to_coverage) {
+ debug_printf("blend.alpha_to_coverage is enabled\n");
+ }
for (i = 0; i < key->nr_samplers; ++i) {
const struct lp_static_sampler_state *sampler = &key->state[i].sampler_state;
debug_printf("sampler[%u] = \n", i);
struct lp_fragment_shader_variant *variant;
const struct util_format_description *cbuf0_format_desc;
boolean fullcolormask;
+ char module_name[64];
variant = CALLOC_STRUCT(lp_fragment_shader_variant);
if(!variant)
return NULL;
- variant->gallivm = gallivm_create();
+ util_snprintf(module_name, sizeof(module_name), "fs%u_variant%u",
+ shader->no, shader->variants_created);
+
+ variant->gallivm = gallivm_create(module_name, lp->context);
if (!variant->gallivm) {
FREE(variant);
return NULL;
fullcolormask &&
!key->stencil[0].enabled &&
!key->alpha.enabled &&
+ !key->blend.alpha_to_coverage &&
!key->depth.enabled &&
!shader->info.base.uses_kill
? TRUE : FALSE;
gallivm_compile_module(variant->gallivm);
+ variant->nr_instrs += lp_build_count_ir_module(variant->gallivm->module);
+
if (variant->function[RAST_EDGE_TEST]) {
variant->jit_function[RAST_EDGE_TEST] = (lp_jit_frag_func)
gallivm_jit_function(variant->gallivm,
variant->jit_function[RAST_WHOLE] = variant->jit_function[RAST_EDGE_TEST];
}
+ gallivm_free_ir(variant->gallivm);
+
return variant;
}
llvmpipe_remove_shader_variant(struct llvmpipe_context *lp,
struct lp_fragment_shader_variant *variant)
{
- unsigned i;
-
if (gallivm_debug & GALLIVM_DEBUG_IR) {
debug_printf("llvmpipe: del fs #%u var #%u v created #%u v cached"
" #%u v total cached #%u\n",
lp->nr_fs_variants);
}
- /* free all the variant's JIT'd functions */
- for (i = 0; i < Elements(variant->function); i++) {
- if (variant->function[i]) {
- gallivm_free_function(variant->gallivm,
- variant->function[i],
- variant->jit_function[i]);
- }
- }
-
gallivm_destroy(variant->gallivm);
/* remove from shader's list */
/* alpha test only applies if render buffer 0 is non-integer (or does not exist) */
if (!lp->framebuffer.nr_cbufs ||
+ !lp->framebuffer.cbufs[0] ||
!util_format_is_pure_integer(lp->framebuffer.cbufs[0]->format)) {
key->alpha.enabled = lp->depth_stencil->alpha.enabled;
}
}
for (i = 0; i < lp->framebuffer.nr_cbufs; i++) {
- enum pipe_format format = lp->framebuffer.cbufs[i]->format;
struct pipe_rt_blend_state *blend_rt = &key->blend.rt[i];
- const struct util_format_description *format_desc;
- key->cbuf_format[i] = format;
+ if (lp->framebuffer.cbufs[i]) {
+ enum pipe_format format = lp->framebuffer.cbufs[i]->format;
+ const struct util_format_description *format_desc;
- /*
- * Figure out if this is a 1d resource. Note that OpenGL allows crazy
- * mixing of 2d textures with height 1 and 1d textures, so make sure
- * we pick 1d if any cbuf or zsbuf is 1d.
- */
- if (llvmpipe_resource_is_1d(lp->framebuffer.cbufs[0]->texture)) {
- key->resource_1d = TRUE;
- }
+ key->cbuf_format[i] = format;
- format_desc = util_format_description(format);
- assert(format_desc->colorspace == UTIL_FORMAT_COLORSPACE_RGB ||
- format_desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB);
+ /*
+ * Figure out if this is a 1d resource. Note that OpenGL allows crazy
+ * mixing of 2d textures with height 1 and 1d textures, so make sure
+ * we pick 1d if any cbuf or zsbuf is 1d.
+ */
+ if (llvmpipe_resource_is_1d(lp->framebuffer.cbufs[i]->texture)) {
+ key->resource_1d = TRUE;
+ }
- /*
- * Mask out color channels not present in the color buffer.
- */
- blend_rt->colormask &= util_format_colormask(format_desc);
+ format_desc = util_format_description(format);
+ assert(format_desc->colorspace == UTIL_FORMAT_COLORSPACE_RGB ||
+ format_desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB);
- /*
- * Disable blend for integer formats.
- */
- if (util_format_is_pure_integer(format)) {
- blend_rt->blend_enable = 0;
- }
+ /*
+ * Mask out color channels not present in the color buffer.
+ */
+ blend_rt->colormask &= util_format_colormask(format_desc);
- /*
- * Our swizzled render tiles always have an alpha channel, but the linear
- * render target format often does not, so force here the dst alpha to be
- * one.
- *
- * This is not a mere optimization. Wrong results will be produced if the
- * dst alpha is used, the dst format does not have alpha, and the previous
- * rendering was not flushed from the swizzled to linear buffer. For
- * example, NonPowTwo DCT.
- *
- * TODO: This should be generalized to all channels for better
- * performance, but only alpha causes correctness issues.
- *
- * Also, force rgb/alpha func/factors match, to make AoS blending easier.
- */
- if (format_desc->swizzle[3] > UTIL_FORMAT_SWIZZLE_W ||
- format_desc->swizzle[3] == format_desc->swizzle[0]) {
- /* Doesn't cover mixed snorm/unorm but can't render to them anyway */
- boolean clamped_zero = !util_format_is_float(format) &&
- !util_format_is_snorm(format);
- blend_rt->rgb_src_factor = force_dst_alpha_one(blend_rt->rgb_src_factor,
- clamped_zero);
- blend_rt->rgb_dst_factor = force_dst_alpha_one(blend_rt->rgb_dst_factor,
- clamped_zero);
- blend_rt->alpha_func = blend_rt->rgb_func;
- blend_rt->alpha_src_factor = blend_rt->rgb_src_factor;
- blend_rt->alpha_dst_factor = blend_rt->rgb_dst_factor;
+ /*
+ * Disable blend for integer formats.
+ */
+ if (util_format_is_pure_integer(format)) {
+ blend_rt->blend_enable = 0;
+ }
+
+ /*
+ * Our swizzled render tiles always have an alpha channel, but the
+ * linear render target format often does not, so force here the dst
+ * alpha to be one.
+ *
+ * This is not a mere optimization. Wrong results will be produced if
+ * the dst alpha is used, the dst format does not have alpha, and the
+ * previous rendering was not flushed from the swizzled to linear
+ * buffer. For example, NonPowTwo DCT.
+ *
+ * TODO: This should be generalized to all channels for better
+ * performance, but only alpha causes correctness issues.
+ *
+ * Also, force rgb/alpha func/factors match, to make AoS blending
+ * easier.
+ */
+ if (format_desc->swizzle[3] > UTIL_FORMAT_SWIZZLE_W ||
+ format_desc->swizzle[3] == format_desc->swizzle[0]) {
+ /* Doesn't cover mixed snorm/unorm but can't render to them anyway */
+ boolean clamped_zero = !util_format_is_float(format) &&
+ !util_format_is_snorm(format);
+ blend_rt->rgb_src_factor =
+ force_dst_alpha_one(blend_rt->rgb_src_factor, clamped_zero);
+ blend_rt->rgb_dst_factor =
+ force_dst_alpha_one(blend_rt->rgb_dst_factor, clamped_zero);
+ blend_rt->alpha_func = blend_rt->rgb_func;
+ blend_rt->alpha_src_factor = blend_rt->rgb_src_factor;
+ blend_rt->alpha_dst_factor = blend_rt->rgb_dst_factor;
+ }
+ }
+ else {
+ /* no color buffer for this fragment output */
+ key->cbuf_format[i] = PIPE_FORMAT_NONE;
+ blend_rt->colormask = 0x0;
+ blend_rt->blend_enable = 0;
}
}
LP_COUNT_ADD(llvm_compile_time, dt);
LP_COUNT_ADD(nr_llvm_compiles, 2); /* emit vs. omit in/out test */
- llvmpipe_variant_count++;
-
/* Put the new variant into the list */
if (variant) {
insert_at_head(&shader->variants, &variant->list_item_local);