#include "gallivm/lp_bld_arit.h"
#include "gallivm/lp_bld_arit_overflow.h"
+#include "gallivm/lp_bld_bitarit.h"
+#include "gallivm/lp_bld_gather.h"
#include "gallivm/lp_bld_logic.h"
#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_swizzle.h"
}
}
+
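+/*
+ * Fetch a single (scalar) instanced vertex element and broadcast its
+ * channels to all lanes of the SoA input vectors.
+ */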
static void
-generate_fetch(struct gallivm_state *gallivm,
- struct draw_context *draw,
- LLVMValueRef vbuffers_ptr,
- LLVMValueRef *res,
- struct pipe_vertex_element *velem,
- LLVMValueRef vbuf,
- LLVMValueRef index,
- LLVMValueRef instance_id,
- LLVMValueRef start_instance)
+fetch_instanced(struct gallivm_state *gallivm,
+ const struct util_format_description *format_desc,
+ struct lp_type vs_type,
+ LLVMValueRef vb_stride,
+ LLVMValueRef map_ptr,
+ LLVMValueRef buffer_size_adj,
+ LLVMValueRef ofbit,
+ LLVMValueRef *inputs,
+ LLVMValueRef index)
{
- const struct util_format_description *format_desc =
- util_format_description(velem->src_format);
LLVMValueRef zero = LLVMConstNull(LLVMInt32TypeInContext(gallivm->context));
LLVMBuilderRef builder = gallivm->builder;
- LLVMValueRef indices =
- LLVMConstInt(LLVMInt64TypeInContext(gallivm->context),
- velem->vertex_buffer_index, 0);
- LLVMValueRef vbuffer_ptr = LLVMBuildGEP(builder, vbuffers_ptr,
- &indices, 1, "");
- LLVMValueRef vb_stride = draw_jit_vbuffer_stride(gallivm, vbuf);
- LLVMValueRef vb_buffer_offset = draw_jit_vbuffer_offset(gallivm, vbuf);
- LLVMValueRef map_ptr = draw_jit_dvbuffer_map(gallivm, vbuffer_ptr);
- LLVMValueRef buffer_size = draw_jit_dvbuffer_size(gallivm, vbuffer_ptr);
- LLVMValueRef stride;
- LLVMValueRef buffer_overflowed;
- LLVMValueRef needed_buffer_size;
+ LLVMValueRef stride, buffer_overflowed, aos;
LLVMValueRef temp_ptr =
lp_build_alloca(gallivm,
lp_build_vec_type(gallivm, lp_float32_vec4_type()), "");
- LLVMValueRef ofbit = NULL;
struct lp_build_if_state if_ctx;
-
- if (velem->instance_divisor) {
- /* Index is equal to the start instance plus the number of current
- * instance divided by the divisor. In this case we compute it as:
- * index = start_instance + (instance_id / divisor)
- */
- LLVMValueRef current_instance;
- current_instance = LLVMBuildUDiv(builder, instance_id,
- lp_build_const_int32(gallivm, velem->instance_divisor),
- "instance_divisor");
- index = lp_build_uadd_overflow(gallivm, start_instance,
- current_instance, &ofbit);
- }
+ unsigned i;
stride = lp_build_umul_overflow(gallivm, vb_stride, index, &ofbit);
- stride = lp_build_uadd_overflow(gallivm, stride, vb_buffer_offset, &ofbit);
- stride = lp_build_uadd_overflow(
- gallivm, stride,
- lp_build_const_int32(gallivm, velem->src_offset), &ofbit);
- needed_buffer_size = lp_build_uadd_overflow(
- gallivm, stride,
- lp_build_const_int32(gallivm,
- util_format_get_blocksize(velem->src_format)),
- &ofbit);
buffer_overflowed = LLVMBuildICmp(builder, LLVMIntUGT,
- needed_buffer_size, buffer_size,
+ stride, buffer_size_adj,
"buffer_overflowed");
buffer_overflowed = LLVMBuildOr(builder, buffer_overflowed, ofbit, "");
-#if 0
- lp_build_printf(gallivm, "vbuf index = %u, vb_stride is %u\n",
- index, vb_stride);
- lp_build_printf(gallivm, " vb_buffer_offset = %u, src_offset is %u\n",
- vb_buffer_offset,
- lp_build_const_int32(gallivm, velem->src_offset));
- lp_build_print_value(gallivm, " blocksize = ",
- lp_build_const_int32(
- gallivm,
- util_format_get_blocksize(velem->src_format)));
- lp_build_printf(gallivm, " instance_id = %u\n", instance_id);
- lp_build_printf(gallivm, " stride = %u\n", stride);
- lp_build_printf(gallivm, " buffer size = %u\n", buffer_size);
- lp_build_printf(gallivm, " needed_buffer_size = %u\n", needed_buffer_size);
- lp_build_print_value(gallivm, " buffer overflowed = ", buffer_overflowed);
-#endif
+
+ if (0) {
+ lp_build_print_value(gallivm, " instance index = ", index);
+ lp_build_print_value(gallivm, " buffer overflowed = ", buffer_overflowed);
+ }
lp_build_if(&if_ctx, gallivm, buffer_overflowed);
{
lp_build_else(&if_ctx);
{
LLVMValueRef val;
- map_ptr = LLVMBuildGEP(builder, map_ptr, &stride, 1, "");
val = lp_build_fetch_rgba_aos(gallivm,
format_desc,
lp_float32_vec4_type(),
FALSE,
map_ptr,
- zero, zero, zero,
+ stride, zero, zero,
NULL);
LLVMBuildStore(builder, val, temp_ptr);
}
lp_build_endif(&if_ctx);
- *res = LLVMBuildLoad(builder, temp_ptr, "aos");
+ aos = LLVMBuildLoad(builder, temp_ptr, "aos");
+
+ for (i = 0; i < TGSI_NUM_CHANNELS; i++) {
+ LLVMValueRef index = lp_build_const_int32(gallivm, i);
+ inputs[i] = lp_build_extract_broadcast(gallivm,
+ lp_float32_vec4_type(),
+ vs_type, aos, index);
+ }
}
+
static void
convert_to_soa(struct gallivm_state *gallivm,
- LLVMValueRef (*src_aos)[LP_MAX_VECTOR_WIDTH / 32],
- LLVMValueRef (*dst_soa)[TGSI_NUM_CHANNELS],
- unsigned num_attribs, const struct lp_type soa_type)
+ LLVMValueRef src_aos[LP_MAX_VECTOR_WIDTH / 32],
+ LLVMValueRef dst_soa[TGSI_NUM_CHANNELS],
+ const struct lp_type soa_type)
{
- unsigned i, j, k;
+ unsigned j, k;
struct lp_type aos_channel_type = soa_type;
+ LLVMValueRef aos_channels[TGSI_NUM_CHANNELS];
+ unsigned pixels_per_channel = soa_type.length / TGSI_NUM_CHANNELS;
+
debug_assert(TGSI_NUM_CHANNELS == 4);
debug_assert((soa_type.length % TGSI_NUM_CHANNELS) == 0);
aos_channel_type.length >>= 1;
- for (i = 0; i < num_attribs; ++i) {
- LLVMValueRef aos_channels[TGSI_NUM_CHANNELS];
- unsigned pixels_per_channel = soa_type.length / TGSI_NUM_CHANNELS;
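+ /*
+ * Each src_aos[n] holds one fetched vertex in AoS form (x,y,z,w).
+ * Concatenate every TGSI_NUM_CHANNELS-th AoS vector into one of four
+ * wide vectors, then transpose those to get the per-channel SoA
+ * vectors (all x, all y, ...).
+ */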
+ for (j = 0; j < TGSI_NUM_CHANNELS; ++j) {
+ LLVMValueRef channel[LP_MAX_VECTOR_LENGTH] = { 0 };
+
+ assert(pixels_per_channel <= LP_MAX_VECTOR_LENGTH);
- for (j = 0; j < TGSI_NUM_CHANNELS; ++j) {
- LLVMValueRef channel[LP_MAX_VECTOR_LENGTH] = { 0 };
+ for (k = 0; k < pixels_per_channel; ++k) {
+ channel[k] = src_aos[j + TGSI_NUM_CHANNELS * k];
+ }
- assert(pixels_per_channel <= LP_MAX_VECTOR_LENGTH);
+ aos_channels[j] = lp_build_concat(gallivm, channel, aos_channel_type, pixels_per_channel);
+ }
- for (k = 0; k < pixels_per_channel; ++k) {
- channel[k] = src_aos[i][j + TGSI_NUM_CHANNELS * k];
- }
+ lp_build_transpose_aos(gallivm, soa_type, aos_channels, dst_soa);
+}
- aos_channels[j] = lp_build_concat(gallivm, channel, aos_channel_type, pixels_per_channel);
- }
- lp_build_transpose_aos(gallivm, soa_type, aos_channels, dst_soa[i]);
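+/*
+ * Fetch a vertex element for a whole vector of vertices at once:
+ * compute per-lane byte offsets, force out-of-bounds lanes to offset 0,
+ * do per-lane AoS fetches, convert to SoA and zero out invalid lanes.
+ */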
+static void
+fetch_vector(struct gallivm_state *gallivm,
+ const struct util_format_description *format_desc,
+ struct lp_type vs_type,
+ LLVMValueRef vb_stride,
+ LLVMValueRef map_ptr,
+ LLVMValueRef buffer_size_adj,
+ LLVMValueRef ofmask,
+ LLVMValueRef *inputs,
+ LLVMValueRef indices)
+{
+ LLVMValueRef zero = LLVMConstNull(LLVMInt32TypeInContext(gallivm->context));
+ LLVMBuilderRef builder = gallivm->builder;
+ struct lp_build_context blduivec;
+ LLVMValueRef offset, tmp, valid_mask;
+ LLVMValueRef aos_fetch[LP_MAX_VECTOR_WIDTH / 32];
+ unsigned i;
+
+ lp_build_context_init(&blduivec, gallivm, lp_uint_type(vs_type));
+
+ vb_stride = lp_build_broadcast_scalar(&blduivec, vb_stride);
+ buffer_size_adj = lp_build_broadcast_scalar(&blduivec, buffer_size_adj);
+
+ /*
+ * Sort of interestingly, with interleaved attribs, llvm 3.7+ will
+ * recognize these calculations to be constant with different attribs
+ * (the different offset has been added to map_ptr).
+ * llvm 3.3, however, will not (I can't get llvm 3.4-3.6 to link...)
+ *
+ * XXX: could actually avoid this altogether (replacing by simple
+ * non-widening mul) by precalculating the max index instead outside
+ * the loop (at the cost of one scalar udiv per vertex element).
+ */
+ offset = lp_build_mul_32_lohi_cpu(&blduivec, vb_stride, indices, &tmp);
+
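+ /*
+ * offset holds the low 32 bits of vb_stride * index, tmp the high 32
+ * bits. A non-zero high half means the byte offset overflowed 32 bits,
+ * so that lane is treated as out of bounds below.
+ */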
+ tmp = lp_build_compare(gallivm, blduivec.type,
+ PIPE_FUNC_EQUAL, tmp, blduivec.zero);
+ valid_mask = lp_build_andnot(&blduivec, tmp, ofmask);
+
+ tmp = lp_build_compare(gallivm, blduivec.type,
+ PIPE_FUNC_LEQUAL, offset, buffer_size_adj);
+ valid_mask = LLVMBuildAnd(builder, tmp, valid_mask, "");
+
+ /* invalid elements use offset 0 */
+ offset = LLVMBuildAnd(builder, offset, valid_mask, "");
+
+ if (0) {
+ lp_build_print_value(gallivm, " indices = ", indices);
+ lp_build_print_value(gallivm, " offsets = ", offset);
+ lp_build_print_value(gallivm, " valid_mask = ", valid_mask);
+ }
+
+ /*
+ * Note: we'd probably really want to use a SoA fetch here, not an AoS
+ * one (albeit for most formats it amounts to the same since this isn't
+ * very optimized). But that looks dangerous since it assumes alignment.
+ */
+ for (i = 0; i < vs_type.length; i++) {
+ LLVMValueRef offset1, elem;
+ elem = lp_build_const_int32(gallivm, i);
+ offset1 = LLVMBuildExtractElement(builder, offset, elem, "");
+
+ aos_fetch[i] = lp_build_fetch_rgba_aos(gallivm, format_desc,
+ lp_float32_vec4_type(),
+ FALSE, map_ptr, offset1,
+ zero, zero, NULL);
+ }
+ convert_to_soa(gallivm, aos_fetch, inputs, vs_type);
+
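+ /* Zero out the fetched channels of invalid lanes (AND with valid_mask in the integer domain). */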
+ for (i = 0; i < TGSI_NUM_CHANNELS; i++) {
+ inputs[i] = LLVMBuildBitCast(builder, inputs[i], blduivec.vec_type, "");
+ inputs[i] = LLVMBuildAnd(builder, inputs[i], valid_mask, "");
+ inputs[i] = LLVMBuildBitCast(builder, inputs[i],
+ lp_build_vec_type(gallivm, vs_type), "");
+
}
}
/* divide by w */
out = LLVMBuildFMul(builder, out, out3, "");
- /* mult by scale */
- out = LLVMBuildFMul(builder, out, scale, "");
- /* add translation */
- out = LLVMBuildFAdd(builder, out, trans, "");
+ /* mult by scale, add translation */
+ out = lp_build_fmuladd(builder, out, scale, trans);
/* store transformed outputs */
LLVMBuildStore(builder, out, outputs[pos][i]);
unsigned ucp_enable = key->ucp_enable;
unsigned cd[2];
- cd[0] = llvm->draw->vs.clipdistance_output[0];
- cd[1] = llvm->draw->vs.clipdistance_output[1];
+ cd[0] = llvm->draw->vs.ccdistance_output[0];
+ cd[1] = llvm->draw->vs.ccdistance_output[1];
if (cd[0] != pos || cd[1] != pos)
have_cd = true;
plane_ptr = LLVMBuildGEP(builder, planes_ptr, indices, 3, "");
plane1 = LLVMBuildLoad(builder, plane_ptr, "plane_y");
planes = lp_build_broadcast(gallivm, vs_type_llvm, plane1);
- test = LLVMBuildFMul(builder, planes, cv_y, "");
- sum = LLVMBuildFAdd(builder, sum, test, "");
+ sum = lp_build_fmuladd(builder, planes, cv_y, sum);
indices[2] = lp_build_const_int32(gallivm, 2);
plane_ptr = LLVMBuildGEP(builder, planes_ptr, indices, 3, "");
plane1 = LLVMBuildLoad(builder, plane_ptr, "plane_z");
planes = lp_build_broadcast(gallivm, vs_type_llvm, plane1);
- test = LLVMBuildFMul(builder, planes, cv_z, "");
- sum = LLVMBuildFAdd(builder, sum, test, "");
+ sum = lp_build_fmuladd(builder, planes, cv_z, sum);
indices[2] = lp_build_const_int32(gallivm, 3);
plane_ptr = LLVMBuildGEP(builder, planes_ptr, indices, 3, "");
plane1 = LLVMBuildLoad(builder, plane_ptr, "plane_w");
planes = lp_build_broadcast(gallivm, vs_type_llvm, plane1);
- test = LLVMBuildFMul(builder, planes, cv_w, "");
- sum = LLVMBuildFAdd(builder, sum, test, "");
+ sum = lp_build_fmuladd(builder, planes, cv_w, sum);
test = lp_build_compare(gallivm, f32_type, PIPE_FUNC_GREATER, zero, sum);
temp = lp_build_const_int_vec(gallivm, i32_type, 1LL << plane_idx);
/**
* Returns boolean if any clipping has occurred
- * Used zero/non-zero i32 value to represent boolean
+ * Uses a zero/one i8 value to represent the boolean
*/
static LLVMValueRef
-clipmask_booli32(struct gallivm_state *gallivm,
- const struct lp_type vs_type,
- LLVMValueRef clipmask_bool_ptr,
- boolean edgeflag_in_clipmask)
+clipmask_booli8(struct gallivm_state *gallivm,
+ const struct lp_type vs_type,
+ LLVMValueRef clipmask_bool_ptr,
+ boolean edgeflag_in_clipmask)
{
LLVMBuilderRef builder = gallivm->builder;
- LLVMTypeRef int32_type = LLVMInt32TypeInContext(gallivm->context);
+ LLVMTypeRef int8_type = LLVMInt8TypeInContext(gallivm->context);
LLVMValueRef clipmask_bool = LLVMBuildLoad(builder, clipmask_bool_ptr, "");
- LLVMValueRef ret = LLVMConstNull(int32_type);
- LLVMValueRef temp;
- int i;
+ LLVMValueRef ret;
+ struct lp_build_context bldivec;
+
+ lp_build_context_init(&bldivec, gallivm, lp_int_type(vs_type));
/*
* We need to invert the edgeflag bit from the clipmask here
* and we (may) need it if edgeflag was 0).
*/
if (edgeflag_in_clipmask) {
- struct lp_type i32_type = lp_int_type(vs_type);
- LLVMValueRef edge = lp_build_const_int_vec(gallivm, i32_type,
+ LLVMValueRef edge = lp_build_const_int_vec(gallivm, bldivec.type,
1LL << DRAW_TOTAL_CLIP_PLANES);
clipmask_bool = LLVMBuildXor(builder, clipmask_bool, edge, "");
}
+
/*
- * Could do much better with just cmp/movmskps.
+ * XXX: we should probably mask off the bits in the mask that come from
+ * vertices beyond the count (i.e. indices_valid for linear fetches; for
+ * elts we don't have the correct mask right now). Otherwise we might run
+ * the pipeline for nothing, though everything should still work.
*/
- for (i=0; i < vs_type.length; i++) {
- temp = LLVMBuildExtractElement(builder, clipmask_bool,
- lp_build_const_int32(gallivm, i) , "");
- ret = LLVMBuildOr(builder, ret, temp, "");
- }
+ ret = lp_build_any_true_range(&bldivec, vs_type.length, clipmask_bool);
+ ret = LLVMBuildZExt(builder, ret, int8_type, "");
return ret;
}
LLVMBuilderRef builder;
char func_name[64];
struct lp_type vs_type;
- LLVMValueRef end, start;
LLVMValueRef count, fetch_elts, fetch_elt_max, fetch_count;
- LLVMValueRef vertex_id_offset, start_instance;
+ LLVMValueRef vertex_id_offset, start_instance, start;
LLVMValueRef stride, step, io_itr;
+ LLVMValueRef ind_vec;
LLVMValueRef io_ptr, vbuffers_ptr, vb_ptr;
- LLVMValueRef zero = lp_build_const_int32(gallivm, 0);
- LLVMValueRef one = lp_build_const_int32(gallivm, 1);
+ LLVMValueRef vb_stride[PIPE_MAX_ATTRIBS];
+ LLVMValueRef map_ptr[PIPE_MAX_ATTRIBS];
+ LLVMValueRef buffer_size_adj[PIPE_MAX_ATTRIBS];
+ LLVMValueRef ofmask[PIPE_MAX_ATTRIBS];
+ LLVMValueRef instance_index[PIPE_MAX_ATTRIBS];
+ LLVMValueRef fake_buf_ptr, fake_buf;
+
struct draw_context *draw = llvm->draw;
const struct tgsi_shader_info *vs_info = &draw->vs.vertex_shader->info;
unsigned i, j;
- struct lp_build_context bld;
+ struct lp_build_context bld, bldivec, blduivec;
struct lp_build_loop_state lp_loop;
const int vector_length = lp_native_vector_width / 32;
LLVMValueRef outputs[PIPE_MAX_SHADER_OUTPUTS][TGSI_NUM_CHANNELS];
* the values).
*/
const boolean bypass_viewport = key->has_gs || key->bypass_viewport ||
- llvm->draw->vs.vertex_shader->info.writes_viewport_index;
+ vs_info->writes_viewport_index;
const boolean enable_cliptest = !key->has_gs && (key->clip_xy ||
key->clip_z ||
key->clip_user ||
key->need_edgeflags);
LLVMValueRef variant_func;
- const unsigned pos = llvm->draw->vs.position_output;
- const unsigned cv = llvm->draw->vs.clipvertex_output;
+ const unsigned pos = draw->vs.position_output;
+ const unsigned cv = draw->vs.clipvertex_output;
boolean have_clipdist = FALSE;
struct lp_bld_tgsi_system_values system_values;
arg_types[i++] = int32_type; /* vertex_id_offset */
arg_types[i++] = int32_type; /* start_instance */
- func_type = LLVMFunctionType(int32_type, arg_types, num_arg_types, 0);
+ func_type = LLVMFunctionType(LLVMInt8TypeInContext(context),
+ arg_types, num_arg_types, 0);
variant_func = LLVMAddFunction(gallivm->module, func_name, func_type);
LLVMSetFunctionCallConv(variant_func, LLVMCCallConv);
for (i = 0; i < num_arg_types; ++i)
if (LLVMGetTypeKind(arg_types[i]) == LLVMPointerTypeKind)
- LLVMAddAttribute(LLVMGetParam(variant_func, i),
- LLVMNoAliasAttribute);
+ lp_add_function_attr(variant_func, i + 1, LP_FUNC_ATTR_NOALIAS);
context_ptr = LLVMGetParam(variant_func, 0);
io_ptr = LLVMGetParam(variant_func, 1);
builder = gallivm->builder;
LLVMPositionBuilderAtEnd(builder, block);
- lp_build_context_init(&bld, gallivm, lp_type_int(32));
-
memset(&vs_type, 0, sizeof vs_type);
vs_type.floating = TRUE; /* floating point values */
vs_type.sign = TRUE; /* values are signed */
vs_type.width = 32; /* 32-bit float */
vs_type.length = vector_length;
+ lp_build_context_init(&bld, gallivm, lp_type_uint(32));
+ lp_build_context_init(&bldivec, gallivm, lp_int_type(vs_type));
+ lp_build_context_init(&blduivec, gallivm, lp_uint_type(vs_type));
+
/* hold temporary "bool" clipmask */
- clipmask_bool_ptr = lp_build_alloca(gallivm, lp_build_int_vec_type(gallivm, vs_type), "");
- LLVMBuildStore(builder, lp_build_zero(gallivm, lp_int_type(vs_type)), clipmask_bool_ptr);
+ clipmask_bool_ptr = lp_build_alloca(gallivm, blduivec.vec_type, "");
+
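+ /*
+ * Scratch buffer (4 x i64 = 32 bytes, presumably enough for the largest
+ * vertex element blocksize) used as a fake vertex buffer: elements whose
+ * real buffer would overflow get their map_ptr redirected here, so the
+ * vector fetch path can always read from valid memory at offset 0.
+ */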
+ fake_buf = lp_build_alloca_undef(gallivm,
+ LLVMVectorType(LLVMInt64TypeInContext(context), 4), "");
+ fake_buf = LLVMBuildBitCast(builder, fake_buf,
+ LLVMPointerType(LLVMInt8TypeInContext(context), 0), "");
+ fake_buf_ptr = LLVMBuildGEP(builder, fake_buf, &bld.zero, 1, "");
/* code generated texture sampling */
sampler = draw_llvm_sampler_soa_create(draw_llvm_variant_key_samplers(key));
+ step = lp_build_const_int32(gallivm, vector_length);
+
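+ /* ind_vec = {0, 1, ..., vector_length - 1}: per-lane offsets added to the loop counter (and start) to form the vertex indices. */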
+ ind_vec = blduivec.undef;
+ for (i = 0; i < vs_type.length; i++) {
+ LLVMValueRef index = lp_build_const_int32(gallivm, i);
+ ind_vec = LLVMBuildInsertElement(builder, ind_vec, index, index, "");
+ }
+
+
if (elts) {
- start = zero;
- end = fetch_count;
+ fetch_max = fetch_count;
count = fetch_count;
+ start = blduivec.zero;
}
else {
- end = lp_build_add(&bld, start, count);
+ fetch_max = lp_build_add(&bld, start, count);
+ start = lp_build_broadcast_scalar(&blduivec, start);
+ ind_vec = lp_build_add(&blduivec, start, ind_vec);
}
- step = lp_build_const_int32(gallivm, vector_length);
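+ /* fetch_max is the last valid fetch index, broadcast for the per-lane clamp inside the loop. */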
+ fetch_max = LLVMBuildSub(builder, fetch_max, bld.one, "fetch_max");
+ fetch_max = lp_build_broadcast_scalar(&blduivec, fetch_max);
+
+ /*
+ * Pre-calculate everything which is constant per shader invocation.
+ */
+ for (j = 0; j < key->nr_vertex_elements; ++j) {
+ LLVMValueRef vb_buffer_offset, buffer_size, temp_ptr;
+ LLVMValueRef vb_info, vbuffer_ptr, buf_offset, ofbit;
+ struct pipe_vertex_element *velem = &key->vertex_element[j];
+ LLVMValueRef vb_index =
+ lp_build_const_int32(gallivm, velem->vertex_buffer_index);
+ LLVMValueRef bsize = lp_build_const_int32(gallivm,
+ util_format_get_blocksize(velem->src_format));
+ LLVMValueRef src_offset = lp_build_const_int32(gallivm,
+ velem->src_offset);
+ struct lp_build_if_state if_ctx;
+
+ if (velem->src_format != PIPE_FORMAT_NONE) {
+ vbuffer_ptr = LLVMBuildGEP(builder, vbuffers_ptr, &vb_index, 1, "");
+ vb_info = LLVMBuildGEP(builder, vb_ptr, &vb_index, 1, "");
+ vb_stride[j] = draw_jit_vbuffer_stride(gallivm, vb_info);
+ vb_buffer_offset = draw_jit_vbuffer_offset(gallivm, vb_info);
+ map_ptr[j] = draw_jit_dvbuffer_map(gallivm, vbuffer_ptr);
+ buffer_size = draw_jit_dvbuffer_size(gallivm, vbuffer_ptr);
+
+ ofbit = NULL;
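+ /*
+ * buffer_size_adj becomes the largest byte offset (relative to
+ * map_ptr + buf_offset) at which a whole element of blocksize bsize
+ * still fits; if these computations overflow/underflow, ofbit is set
+ * and the buffer can't hold even a single element.
+ */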
+ buf_offset = lp_build_uadd_overflow(gallivm, vb_buffer_offset,
+ src_offset, &ofbit);
+ buffer_size_adj[j] = lp_build_usub_overflow(gallivm, buffer_size, bsize,
+ &ofbit);
+ buffer_size_adj[j] = lp_build_usub_overflow(gallivm, buffer_size_adj[j],
+ buf_offset, &ofbit);
+
+ /*
+ * We can't easily set fake vertex buffers outside the generated code.
+ * Hence, set up fake vertex buffers here instead, so the fetch code can
+ * always fetch using offset 0, eliminating all control flow inside the
+ * main loop.
+ * (Alternatively, we could have per-vector control flow skipping the
+ * fetch if ofbit is true.)
+ * For instanced elements, we keep the control flow for now as it's a
+ * scalar fetch, making things easier.
+ */
+ if (velem->instance_divisor) {
+ /* The index is the start instance plus the current instance id
+ * divided by the divisor:
+ * index = start_instance + (instance_id / divisor)
+ */
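+ /* Illustrative example: with instance_divisor == 4 and start_instance
+ * == 2, instance ids 0..3 yield index 2, ids 4..7 yield index 3, etc.
+ */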
+ LLVMValueRef current_instance;
+ current_instance = LLVMBuildUDiv(builder, system_values.instance_id,
+ lp_build_const_int32(gallivm,
+ velem->instance_divisor),
+ "instance_divisor");
+ instance_index[j] = lp_build_uadd_overflow(gallivm, start_instance,
+ current_instance, &ofbit);
+ map_ptr[j] = LLVMBuildGEP(builder, map_ptr[j], &buf_offset, 1, "");
+
+ /* This is a scalar fetch, just keep the overflow bit */
+ ofmask[j] = ofbit;
+ }
+ else {
+ temp_ptr = lp_build_alloca_undef(gallivm,
+ LLVMPointerType(LLVMInt8TypeInContext(context), 0), "");
+
+ lp_build_if(&if_ctx, gallivm, ofbit);
+ {
+ LLVMBuildStore(builder, fake_buf_ptr, temp_ptr);
+ }
+ lp_build_else(&if_ctx);
+ {
+ map_ptr[j] = LLVMBuildGEP(builder, map_ptr[j], &buf_offset, 1, "");
+ LLVMBuildStore(builder, map_ptr[j], temp_ptr);
+ }
+ lp_build_endif(&if_ctx);
+ map_ptr[j] = LLVMBuildLoad(builder, temp_ptr, "map_ptr");
+
+ /* Expand to vector mask */
+ ofmask[j] = LLVMBuildSExt(builder, ofbit, int32_type, "");
+ ofmask[j] = lp_build_broadcast_scalar(&blduivec, ofmask[j]);
+ }
- fetch_max = LLVMBuildSub(builder, end, one, "fetch_max");
+ if (0) {
+ lp_build_printf(gallivm, "velem %d, vbuf index = %u, vb_stride = %u\n",
+ lp_build_const_int32(gallivm, j),
+ vb_index, vb_stride[j]);
+ lp_build_printf(gallivm,
+ " vb_buffer_offset = %u, src_offset = %u, buf_offset = %u\n",
+ vb_buffer_offset, src_offset, buf_offset);
+ lp_build_printf(gallivm, " buffer size = %u, blocksize = %u\n",
+ buffer_size, bsize);
+ lp_build_printf(gallivm, " instance_id = %u\n", system_values.instance_id);
+ }
+ }
+ }
- lp_build_loop_begin(&lp_loop, gallivm, zero);
+ lp_build_loop_begin(&lp_loop, gallivm, bld.zero);
{
LLVMValueRef inputs[PIPE_MAX_SHADER_INPUTS][TGSI_NUM_CHANNELS];
- LLVMValueRef aos_attribs[PIPE_MAX_SHADER_INPUTS][LP_MAX_VECTOR_WIDTH / 32] = { { 0 } };
LLVMValueRef io;
LLVMValueRef clipmask; /* holds the clipmask value */
- LLVMValueRef true_index_array = lp_build_zero(gallivm,
- lp_type_uint_vec(32, 32*vector_length));
+ LLVMValueRef true_index_array;
const LLVMValueRef (*ptr_aos)[TGSI_NUM_CHANNELS];
io_itr = lp_loop.counter;
lp_build_printf(gallivm, " --- io %d = %p, loop counter %d\n",
io_itr, io, lp_loop.counter);
#endif
- for (i = 0; i < vector_length; ++i) {
- LLVMValueRef vert_index =
- LLVMBuildAdd(builder,
- lp_loop.counter,
- lp_build_const_int32(gallivm, i), "");
- LLVMValueRef true_index =
- LLVMBuildAdd(builder, start, vert_index, "");
-
- /* make sure we're not out of bounds which can happen
- * if fetch_count % 4 != 0, because on the last iteration
- * a few of the 4 vertex fetches will be out of bounds */
- true_index = lp_build_min(&bld, true_index, fetch_max);
-
- if (elts) {
- LLVMValueRef fetch_ptr;
- LLVMValueRef index_overflowed;
- LLVMValueRef index_ptr =
- lp_build_alloca(
- gallivm,
- lp_build_vec_type(gallivm, lp_type_int(32)), "");
- struct lp_build_if_state if_ctx;
- index_overflowed = LLVMBuildICmp(builder, LLVMIntUGT,
- true_index, fetch_elt_max,
- "index_overflowed");
-
- lp_build_if(&if_ctx, gallivm, index_overflowed);
- {
- /* Generate maximum possible index so that
- * generate_fetch can treat it just like
- * any other overflow and return zeros.
- * We don't have to worry about the restart
- * primitive index because it has already been
- * handled
- */
- LLVMValueRef val =
- lp_build_const_int32(gallivm, 0xffffffff);
- LLVMBuildStore(builder, val, index_ptr);
- }
- lp_build_else(&if_ctx);
- {
- LLVMValueRef val;
- fetch_ptr = LLVMBuildGEP(builder, fetch_elts,
- &true_index, 1, "");
- val = LLVMBuildLoad(builder, fetch_ptr, "");
- LLVMBuildStore(builder, val, index_ptr);
+
+ true_index_array = lp_build_broadcast_scalar(&blduivec, lp_loop.counter);
+ true_index_array = LLVMBuildAdd(builder, true_index_array, ind_vec, "");
+
+ /*
+ * XXX: This code is really fishy. We are required to use an int min
+ * here, not uint. The reason is that for some non-indexed draws, we
+ * might get something like MAX_UINT - 3 as start value (due to start
+ * vertex). So, the first 3 elements in the vector are huge, and
+ * limiting them to fetch_max is incorrect. By using int min, we'll
+ * pick that huge value - we rely on this creating an overflow (which
+ * is guaranteed) in the stride mul later (using (signed) cmp and
+ * incorporating the result into ofmask would also work).
+ * For the later elements, this just wraps around the indices, which
+ * is apparently ok...
+ */
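+ /*
+ * Illustrative example: with start == 0xfffffffd, the first lanes hold
+ * indices of about 0xfffffffd; a uint min would clamp these to fetch_max,
+ * whereas the signed min keeps the huge value, the stride multiply in
+ * fetch_vector overflows and the lane is handled via valid_mask.
+ */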
+ true_index_array = lp_build_min(&bldivec, true_index_array, fetch_max);
+
+ if (elts) {
+
+ /*
+ * Note: you'd expect some comparison/clamp against fetch_elt_max
+ * here.
+ * There used to be one here but it was incorrect: overflow was
+ * detected if index > fetch_elt_max - but the correct condition
+ * would be index >= fetch_elt_max (since this is just size of elts
+ * buffer / element size).
+ * Using the correct condition however will cause failures - due to
+ * vsplit/vcache code which rebases indices. So, as an example, if
+ * fetch_elt_max is just 1 and fetch_count 2, vsplit cache will
+ * replace all invalid indices with 0 - which in case of elt_bias
+ * not being zero will get a different fetch index than the valid
+ * index 0. So, just rely on vsplit code preventing out-of-bounds
+ * fetches. This is also why it's safe to do elts fetch even if there
+ * was no index buffer bound - the real buffer is never seen here.
+ */
+
+ /*
+ * XXX should not have to do this, as scale can be handled
+ * natively by loads (hits asserts though).
+ */
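+ /* lp_build_gather takes byte offsets into an i8 pointer, hence scale the
+ * indices by 4 (sizeof(uint32)) and cast fetch_elts accordingly.
+ */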
+ true_index_array = lp_build_shl_imm(&blduivec, true_index_array, 2);
+ fetch_elts = LLVMBuildBitCast(builder, fetch_elts,
+ LLVMPointerType(LLVMInt8TypeInContext(context),
+ 0), "");
+ true_index_array = lp_build_gather(gallivm, vs_type.length,
+ 32, 32, TRUE,
+ fetch_elts, true_index_array,
+ FALSE);
+ }
+
+ for (j = 0; j < key->nr_vertex_elements; ++j) {
+ struct pipe_vertex_element *velem = &key->vertex_element[j];
+ const struct util_format_description *format_desc =
+ util_format_description(velem->src_format);
+
+ if (format_desc->format == PIPE_FORMAT_NONE) {
+ for (i = 0; i < TGSI_NUM_CHANNELS; i++) {
+ inputs[j][i] = lp_build_zero(gallivm, vs_type);
}
- lp_build_endif(&if_ctx);
- true_index = LLVMBuildLoad(builder, index_ptr, "true_index");
}
- true_index_array = LLVMBuildInsertElement(
- gallivm->builder, true_index_array, true_index,
- lp_build_const_int32(gallivm, i), "");
-
- for (j = 0; j < draw->pt.nr_vertex_elements; ++j) {
- struct pipe_vertex_element *velem = &draw->pt.vertex_element[j];
- LLVMValueRef vb_index =
- lp_build_const_int32(gallivm, velem->vertex_buffer_index);
- LLVMValueRef vb = LLVMBuildGEP(builder, vb_ptr, &vb_index, 1, "");
- generate_fetch(gallivm, draw, vbuffers_ptr,
- &aos_attribs[j][i], velem, vb, true_index,
- system_values.instance_id, start_instance);
+ else if (velem->instance_divisor) {
+ fetch_instanced(gallivm, format_desc, vs_type,
+ vb_stride[j], map_ptr[j],
+ buffer_size_adj[j], ofmask[j],
+ inputs[j], instance_index[j]);
+ }
+ else {
+ fetch_vector(gallivm, format_desc, vs_type,
+ vb_stride[j], map_ptr[j],
+ buffer_size_adj[j], ofmask[j],
+ inputs[j], true_index_array);
}
}
- convert_to_soa(gallivm, aos_attribs, inputs,
- draw->pt.nr_vertex_elements, vs_type);
/* In the paths with elts vertex id has to be unaffected by the
* index bias and because indices inside our elements array have
* most 4095-vertices) we need to back out the original start
* index out of our vertex id here.
*/
- system_values.basevertex = lp_build_broadcast(gallivm, lp_build_vec_type(gallivm,
- lp_type_uint_vec(32, 32*vector_length)),
- vertex_id_offset);
+ system_values.basevertex = lp_build_broadcast_scalar(&blduivec,
+ vertex_id_offset);
system_values.vertex_id = true_index_array;
system_values.vertex_id_nobase = LLVMBuildSub(builder, true_index_array,
system_values.basevertex, "");
LLVMBuildStore(builder, temp, clipmask_bool_ptr);
}
else {
- clipmask = lp_build_const_int_vec(gallivm, lp_int_type(vs_type), 0);
+ clipmask = blduivec.zero;
}
/* do viewport mapping */
}
}
else {
- clipmask = lp_build_const_int_vec(gallivm, lp_int_type(vs_type), 0);
+ clipmask = blduivec.zero;
}
/* store clipmask in vertex header,
sampler->destroy(sampler);
/* return clipping boolean value for function */
- ret = clipmask_booli32(gallivm, vs_type, clipmask_bool_ptr,
- enable_cliptest && key->need_edgeflags);
+ ret = clipmask_booli8(gallivm, vs_type, clipmask_bool_ptr,
+ enable_cliptest && key->need_edgeflags);
LLVMBuildRet(builder, ret);
key->clamp_vertex_color = llvm->draw->rasterizer->clamp_vertex_color; /**/
- /* Presumably all variants of the shader should have the same
- * number of vertex elements - ie the number of shader inputs.
- * NOTE: we NEED to store the needed number of needed inputs
- * here, not the number of provided elements to match keysize
- * (and the offset of sampler state in the key).
- */
- key->nr_vertex_elements = llvm->draw->vs.vertex_shader->info.file_max[TGSI_FILE_INPUT] + 1;
- assert(key->nr_vertex_elements <= llvm->draw->pt.nr_vertex_elements);
-
/* will have to rig this up properly later */
key->clip_xy = llvm->draw->clip_xy;
key->clip_z = llvm->draw->clip_z;
key->nr_sampler_views = key->nr_samplers;
}
- draw_sampler = draw_llvm_variant_key_samplers(key);
-
+ /* Presumably all variants of the shader should have the same
+ * number of vertex elements, i.e. the number of shader inputs.
+ * NOTE: we NEED to store the number of inputs the shader needs
+ * here, not the number of provided elements, to match keysize
+ * (and the offset of the sampler state in the key).
+ * Having excess vertex elements is valid; the excess ones just
+ * don't matter.
+ * If we don't have enough vertex elements (which doesn't look
+ * really valid, but we handle it gracefully), fill out the missing
+ * ones with zero (recognized later by PIPE_FORMAT_NONE).
+ */
+ key->nr_vertex_elements =
+ llvm->draw->vs.vertex_shader->info.file_max[TGSI_FILE_INPUT] + 1;
+
+ if (llvm->draw->pt.nr_vertex_elements < key->nr_vertex_elements) {
+ debug_printf("draw: vs with %d inputs but only have %d vertex elements\n",
+ key->nr_vertex_elements, llvm->draw->pt.nr_vertex_elements);
+ memset(key->vertex_element, 0,
+ sizeof(struct pipe_vertex_element) * key->nr_vertex_elements);
+ }
memcpy(key->vertex_element,
llvm->draw->pt.vertex_element,
- sizeof(struct pipe_vertex_element) * key->nr_vertex_elements);
+ sizeof(struct pipe_vertex_element) *
+ MIN2(key->nr_vertex_elements, llvm->draw->pt.nr_vertex_elements));
- memset(draw_sampler, 0, MAX2(key->nr_samplers, key->nr_sampler_views) * sizeof *draw_sampler);
+ draw_sampler = draw_llvm_variant_key_samplers(key);
+ memset(draw_sampler, 0,
+ MAX2(key->nr_samplers, key->nr_sampler_views) * sizeof *draw_sampler);
for (i = 0 ; i < key->nr_samplers; i++) {
lp_sampler_static_sampler_state(&draw_sampler[i].sampler_state,
for (i = 0; i < ARRAY_SIZE(arg_types); ++i)
if (LLVMGetTypeKind(arg_types[i]) == LLVMPointerTypeKind)
- LLVMAddAttribute(LLVMGetParam(variant_func, i),
- LLVMNoAliasAttribute);
+ lp_add_function_attr(variant_func, i + 1, LP_FUNC_ATTR_NOALIAS);
context_ptr = LLVMGetParam(variant_func, 0);
input_array = LLVMGetParam(variant_func, 1);