#include "draw_gs.h"
#include "gallivm/lp_bld_arit.h"
+#include "gallivm/lp_bld_arit_overflow.h"
#include "gallivm/lp_bld_logic.h"
#include "gallivm/lp_bld_const.h"
#include "gallivm/lp_bld_swizzle.h"
return (const struct draw_gs_llvm_iface *)iface;
}
+/**
+ * Create the LLVM struct type mirroring struct draw_vertex_buffer:
+ * a (map pointer, size) pair describing a mapped vertex buffer.
+ * Carrying the size alongside the map pointer lets the generated
+ * fetch code bounds-check vertex reads against the buffer.
+ */
+static LLVMTypeRef
+create_jit_dvbuffer_type(struct gallivm_state *gallivm,
+ const char *struct_name)
+{
+ LLVMTargetDataRef target = gallivm->target;
+ LLVMTypeRef dvbuffer_type;
+ LLVMTypeRef elem_types[DRAW_JIT_DVBUFFER_NUM_FIELDS];
+ LLVMTypeRef int32_type = LLVMInt32TypeInContext(gallivm->context);
+
+ /* map: pointer to the mapped buffer contents (i8 *) */
+ elem_types[DRAW_JIT_DVBUFFER_MAP] =
+ LLVMPointerType(LLVMIntTypeInContext(gallivm->context, 8), 0);
+ /* size: buffer size in bytes (compared against computed fetch offsets) */
+ elem_types[DRAW_JIT_DVBUFFER_SIZE] = int32_type;
+
+ dvbuffer_type = LLVMStructTypeInContext(gallivm->context, elem_types,
+ Elements(elem_types), 0);
+
+#if HAVE_LLVM < 0x0300
+ /* Pre-3.0 LLVM requires named struct types to be registered on the
+ * module explicitly.
+ */
+ LLVMAddTypeName(gallivm->module, struct_name, dvbuffer_type);
+
+ /* Make sure the target's struct layout cache doesn't return
+ * stale/invalid data.
+ */
+ LLVMInvalidateStructLayout(gallivm->target, dvbuffer_type);
+#endif
+
+ /* Verify the LLVM struct layout matches the C struct's member offsets. */
+ LP_CHECK_MEMBER_OFFSET(struct draw_vertex_buffer, map,
+ target, dvbuffer_type,
+ DRAW_JIT_DVBUFFER_MAP);
+ LP_CHECK_MEMBER_OFFSET(struct draw_vertex_buffer, size,
+ target, dvbuffer_type,
+ DRAW_JIT_DVBUFFER_SIZE);
+
+ return dvbuffer_type;
+}
+
/**
* Create LLVM type for struct draw_jit_texture
*/
* Create LLVM type for struct pipe_vertex_buffer
*/
static LLVMTypeRef
-create_jit_vertex_buffer_type(struct gallivm_state *gallivm, const char *struct_name)
+create_jit_vertex_buffer_type(struct gallivm_state *gallivm,
+ const char *struct_name)
{
LLVMTargetDataRef target = gallivm->target;
LLVMTypeRef elem_types[4];
elem_types[0] =
elem_types[1] = LLVMInt32TypeInContext(gallivm->context);
elem_types[2] =
- elem_types[3] = LLVMPointerType(LLVMInt8TypeInContext(gallivm->context), 0); /* vs_constants */
+ elem_types[3] = LLVMPointerType(LLVMInt8TypeInContext(gallivm->context), 0);
vb_type = LLVMStructTypeInContext(gallivm->context, elem_types,
Elements(elem_types), 0);
create_jit_types(struct draw_llvm_variant *variant)
{
struct gallivm_state *gallivm = variant->gallivm;
- LLVMTypeRef texture_type, sampler_type, context_type, buffer_type, vb_type;
+ LLVMTypeRef texture_type, sampler_type, context_type, buffer_type,
+ vb_type;
texture_type = create_jit_texture_type(gallivm, "texture");
sampler_type = create_jit_sampler_type(gallivm, "sampler");
"draw_jit_context");
variant->context_ptr_type = LLVMPointerType(context_type, 0);
- buffer_type = LLVMPointerType(LLVMIntTypeInContext(gallivm->context, 8), 0);
+ buffer_type = create_jit_dvbuffer_type(gallivm, "draw_vertex_buffer");
variant->buffer_ptr_type = LLVMPointerType(buffer_type, 0);
-
+
vb_type = create_jit_vertex_buffer_type(gallivm, "pipe_vertex_buffer");
variant->vb_ptr_type = LLVMPointerType(vb_type, 0);
}
NULL /*struct lp_build_mask_context *mask*/,
consts_ptr,
system_values,
- NULL /*pos*/,
inputs,
outputs,
sampler,
}
}
-
+/**
+ * Emit code fetching one vertex attribute from a vertex buffer.
+ * All offset arithmetic is overflow-checked and the final offset is
+ * compared against the buffer size; an out-of-bounds fetch yields a
+ * zero vector instead of reading past the mapped buffer.
+ */
static void
generate_fetch(struct gallivm_state *gallivm,
+ struct draw_context *draw,
LLVMValueRef vbuffers_ptr,
LLVMValueRef *res,
struct pipe_vertex_element *velem,
LLVMValueRef index,
LLVMValueRef instance_id)
{
- const struct util_format_description *format_desc = util_format_description(velem->src_format);
+ const struct util_format_description *format_desc =
+ util_format_description(velem->src_format);
LLVMValueRef zero = LLVMConstNull(LLVMInt32TypeInContext(gallivm->context));
LLVMBuilderRef builder = gallivm->builder;
LLVMValueRef indices =
&indices, 1, "");
LLVMValueRef vb_stride = draw_jit_vbuffer_stride(gallivm, vbuf);
LLVMValueRef vb_buffer_offset = draw_jit_vbuffer_offset(gallivm, vbuf);
+ LLVMValueRef map_ptr = draw_jit_dvbuffer_map(gallivm, vbuffer_ptr);
+ LLVMValueRef buffer_size = draw_jit_dvbuffer_size(gallivm, vbuffer_ptr);
LLVMValueRef stride;
+ LLVMValueRef buffer_overflowed;
+ LLVMValueRef needed_buffer_size;
+ /* Result is written through this alloca from both branches of the
+ * overflow check below, then loaded once at the end.
+ */
+ LLVMValueRef temp_ptr =
+ lp_build_alloca(gallivm,
+ lp_build_vec_type(gallivm, lp_float32_vec4_type()), "");
+ LLVMValueRef ofbit = NULL;
+ struct lp_build_if_state if_ctx;
if (velem->instance_divisor) {
- /* array index = instance_id / instance_divisor */
- index = LLVMBuildUDiv(builder, instance_id,
- lp_build_const_int32(gallivm, velem->instance_divisor),
- "instance_divisor");
+ /* Index is equal to the start instance plus the current instance
+ * number divided by the divisor. In this case we compute it as:
+ * index = start_instance + (instance_id / divisor)
+ */
+ LLVMValueRef current_instance;
+ index = lp_build_const_int32(gallivm, draw->start_instance);
+ current_instance = LLVMBuildUDiv(builder, instance_id,
+ lp_build_const_int32(gallivm, velem->instance_divisor),
+ "instance_divisor");
+ index = lp_build_uadd_overflow(gallivm, index, current_instance, &ofbit);
}
- stride = LLVMBuildMul(builder, vb_stride, index, "");
-
- vbuffer_ptr = LLVMBuildLoad(builder, vbuffer_ptr, "vbuffer");
-
- stride = LLVMBuildAdd(builder, stride,
- vb_buffer_offset,
- "");
- stride = LLVMBuildAdd(builder, stride,
- lp_build_const_int32(gallivm, velem->src_offset),
- "");
+ /* Overflow-checked byte offset: stride * index + buffer_offset +
+ * src_offset. Any unsigned overflow along the way sets ofbit.
+ */
+ stride = lp_build_umul_overflow(gallivm, vb_stride, index, &ofbit);
+ stride = lp_build_uadd_overflow(gallivm, stride, vb_buffer_offset, &ofbit);
+ stride = lp_build_uadd_overflow(
+ gallivm, stride,
+ lp_build_const_int32(gallivm, velem->src_offset), &ofbit);
+ /* The fetch reads a whole format block, so the buffer must hold
+ * offset + blocksize bytes.
+ */
+ needed_buffer_size = lp_build_uadd_overflow(
+ gallivm, stride,
+ lp_build_const_int32(gallivm,
+ util_format_get_blocksize(velem->src_format)),
+ &ofbit);
+
+ buffer_overflowed = LLVMBuildICmp(builder, LLVMIntUGT,
+ needed_buffer_size, buffer_size,
+ "buffer_overflowed");
+ buffer_overflowed = LLVMBuildOr(builder, buffer_overflowed, ofbit, "");
+#if 0
+ lp_build_printf(gallivm, "vbuf index = %u, vb_stride is %u\n",
+ index, vb_stride);
+ lp_build_printf(gallivm, " vb_buffer_offset = %u, src_offset is %u\n",
+ vb_buffer_offset,
+ lp_build_const_int32(gallivm, velem->src_offset));
+ lp_build_print_value(gallivm, " blocksize = ",
+ lp_build_const_int32(
+ gallivm,
+ util_format_get_blocksize(velem->src_format)));
+ lp_build_printf(gallivm, " instance_id = %u\n", instance_id);
+ lp_build_printf(gallivm, " stride = %u\n", stride);
+ lp_build_printf(gallivm, " buffer size = %u\n", buffer_size);
+ lp_build_printf(gallivm, " needed_buffer_size = %u\n", needed_buffer_size);
+ lp_build_print_value(gallivm, " buffer overflowed = ", buffer_overflowed);
+#endif
-/* lp_build_printf(gallivm, "vbuf index = %d, stride is %d\n", indices, stride);*/
- vbuffer_ptr = LLVMBuildGEP(builder, vbuffer_ptr, &stride, 1, "");
+ /* Store zeros on overflow, otherwise fetch and convert the attribute. */
+ lp_build_if(&if_ctx, gallivm, buffer_overflowed);
+ {
+ LLVMValueRef val =
+ lp_build_const_vec(gallivm, lp_float32_vec4_type(), 0);
+ LLVMBuildStore(builder, val, temp_ptr);
+ }
+ lp_build_else(&if_ctx);
+ {
+ LLVMValueRef val;
+ map_ptr = LLVMBuildGEP(builder, map_ptr, &stride, 1, "");
+
+ val = lp_build_fetch_rgba_aos(gallivm,
+ format_desc,
+ lp_float32_vec4_type(),
+ map_ptr,
+ zero, zero, zero);
+ LLVMBuildStore(builder, val, temp_ptr);
+ }
+ lp_build_endif(&if_ctx);
- *res = lp_build_fetch_rgba_aos(gallivm,
- format_desc,
- lp_float32_vec4_type(),
- vbuffer_ptr,
- zero, zero, zero);
+ *res = LLVMBuildLoad(builder, temp_ptr, "aos");
}
static void
LLVMBuilderRef builder = gallivm->builder;
LLVMValueRef attr_index = lp_build_const_int32(gallivm, attrib);
LLVMValueRef inds[LP_MAX_VECTOR_WIDTH / 32];
+ LLVMValueRef linear_inds[LP_MAX_VECTOR_WIDTH / 32];
LLVMValueRef io_ptrs[LP_MAX_VECTOR_WIDTH / 32];
int vector_length = soa_type.length;
int i;
debug_assert(TGSI_NUM_CHANNELS == 4);
for (i = 0; i < vector_length; i++) {
+ linear_inds[i] = lp_build_const_int32(gallivm, i);
if (indices) {
inds[i] = indices[i];
} else {
- inds[i] = lp_build_const_int32(gallivm, i);
+ inds[i] = linear_inds[i];
}
io_ptrs[i] = LLVMBuildGEP(builder, io_ptr, &inds[i], 1, "");
}
cliptmp = LLVMBuildOr(builder, val, clipmask, "");
for (i = 0; i < vector_length; i++) {
LLVMValueRef id_ptr = draw_jit_header_id(gallivm, io_ptrs[i]);
- val = LLVMBuildExtractElement(builder, cliptmp, inds[i], "");
+ val = LLVMBuildExtractElement(builder, cliptmp, linear_inds[i], "");
val = adjust_mask(gallivm, val);
LLVMBuildStore(builder, val, id_ptr);
#if DEBUG_STORE
LLVMConstInt(LLVMInt32TypeInContext(gallivm->context),
chan, 0));
lp_build_print_value(gallivm, "val = ", out);
+ {
+ LLVMValueRef iv =
+ LLVMBuildBitCast(builder, out, lp_build_int_vec_type(gallivm, soa_type), "");
+
+ lp_build_print_value(gallivm, " ival = ", iv);
+ }
#endif
soa[chan] = out;
}
int i;
struct gallivm_state *gallivm = variant->gallivm;
struct lp_type f32_type = vs_type;
+ const unsigned pos = draw_current_shader_position_output(variant->llvm->draw);
LLVMTypeRef vs_type_llvm = lp_build_vec_type(gallivm, vs_type);
- LLVMValueRef out3 = LLVMBuildLoad(builder, outputs[0][3], ""); /*w0 w1 .. wn*/
+ LLVMValueRef out3 = LLVMBuildLoad(builder, outputs[pos][3], ""); /*w0 w1 .. wn*/
LLVMValueRef const1 = lp_build_const_vec(gallivm, f32_type, 1.0); /*1.0 1.0 1.0 1.0*/
LLVMValueRef vp_ptr = draw_jit_context_viewport(gallivm, context_ptr);
/* for 1/w convention*/
out3 = LLVMBuildFDiv(builder, const1, out3, "");
- LLVMBuildStore(builder, out3, outputs[0][3]);
+ LLVMBuildStore(builder, out3, outputs[pos][3]);
/* Viewport Mapping */
for (i=0; i<3; i++) {
- LLVMValueRef out = LLVMBuildLoad(builder, outputs[0][i], ""); /*x0 x1 .. xn*/
+ LLVMValueRef out = LLVMBuildLoad(builder, outputs[pos][i], ""); /*x0 x1 .. xn*/
LLVMValueRef scale;
LLVMValueRef trans;
LLVMValueRef scale_i;
out = LLVMBuildFAdd(builder, out, trans, "");
/* store transformed outputs */
- LLVMBuildStore(builder, out, outputs[0][i]);
+ LLVMBuildStore(builder, out, outputs[pos][i]);
}
}
if (cd[0] != pos || cd[1] != pos)
have_cd = true;
+ if (num_written_clipdistance && !clip_user) {
+ clip_user = true;
+ ucp_enable = (1 << num_written_clipdistance) - 1;
+ }
+
mask = lp_build_const_int_vec(gallivm, i32_type, 0);
temp = lp_build_const_int_vec(gallivm, i32_type, 0);
zero = lp_build_const_vec(gallivm, f32_type, 0); /* 0.0f 0.0f 0.0f 0.0f */
struct gallivm_state *gallivm = variant->gallivm;
LLVMContextRef context = gallivm->context;
LLVMTypeRef int32_type = LLVMInt32TypeInContext(context);
- LLVMTypeRef arg_types[8];
+ LLVMTypeRef arg_types[10];
+ unsigned num_arg_types =
+ elts ? Elements(arg_types) : Elements(arg_types) - 1;
LLVMTypeRef func_type;
LLVMValueRef context_ptr;
LLVMBasicBlockRef block;
LLVMBuilderRef builder;
struct lp_type vs_type;
LLVMValueRef end, start;
- LLVMValueRef count, fetch_elts, fetch_count;
+ LLVMValueRef count, fetch_elts, fetch_elt_max, fetch_count;
+ LLVMValueRef vertex_id_offset;
LLVMValueRef stride, step, io_itr;
LLVMValueRef io_ptr, vbuffers_ptr, vb_ptr;
LLVMValueRef zero = lp_build_const_int32(gallivm, 0);
struct lp_build_sampler_soa *sampler = 0;
LLVMValueRef ret, clipmask_bool_ptr;
const struct draw_geometry_shader *gs = draw->gs.geometry_shader;
+ struct draw_llvm_variant_key *key = &variant->key;
/* If geometry shader is present we need to skip both the viewport
* transformation and clipping otherwise the inputs to the geometry
* shader will be incorrect.
*/
- const boolean bypass_viewport = gs || variant->key.bypass_viewport;
- const boolean enable_cliptest = !gs && (variant->key.clip_xy ||
- variant->key.clip_z ||
- variant->key.clip_user);
+ const boolean bypass_viewport = gs || key->bypass_viewport;
+ const boolean enable_cliptest = !gs && (key->clip_xy ||
+ key->clip_z ||
+ key->clip_user);
LLVMValueRef variant_func;
const unsigned pos = draw_current_shader_position_output(llvm->draw);
const unsigned cv = draw_current_shader_clipvertex_output(llvm->draw);
memset(&system_values, 0, sizeof(system_values));
- arg_types[0] = get_context_ptr_type(variant); /* context */
- arg_types[1] = get_vertex_header_ptr_type(variant); /* vertex_header */
- arg_types[2] = get_buffer_ptr_type(variant); /* vbuffers */
- if (elts)
- arg_types[3] = LLVMPointerType(int32_type, 0);/* fetch_elts * */
- else
- arg_types[3] = int32_type; /* start */
- arg_types[4] = int32_type; /* fetch_count / count */
- arg_types[5] = int32_type; /* stride */
- arg_types[6] = get_vb_ptr_type(variant); /* pipe_vertex_buffer's */
- arg_types[7] = int32_type; /* instance_id */
-
- func_type = LLVMFunctionType(int32_type, arg_types, Elements(arg_types), 0);
+ i = 0;
+ arg_types[i++] = get_context_ptr_type(variant); /* context */
+ arg_types[i++] = get_vertex_header_ptr_type(variant); /* vertex_header */
+ arg_types[i++] = get_buffer_ptr_type(variant); /* vbuffers */
+ if (elts) {
+ arg_types[i++] = LLVMPointerType(int32_type, 0);/* fetch_elts */
+ arg_types[i++] = int32_type; /* fetch_elt_max */
+ } else
+ arg_types[i++] = int32_type; /* start */
+ arg_types[i++] = int32_type; /* fetch_count / count */
+ arg_types[i++] = int32_type; /* stride */
+ arg_types[i++] = get_vb_ptr_type(variant); /* pipe_vertex_buffer's */
+ arg_types[i++] = int32_type; /* instance_id */
+ arg_types[i++] = int32_type; /* vertex_id_offset */
+
+ func_type = LLVMFunctionType(int32_type, arg_types, num_arg_types, 0);
variant_func = LLVMAddFunction(gallivm->module,
elts ? "draw_llvm_shader_elts" : "draw_llvm_shader",
variant->function = variant_func;
LLVMSetFunctionCallConv(variant_func, LLVMCCallConv);
- for (i = 0; i < Elements(arg_types); ++i)
+ for (i = 0; i < num_arg_types; ++i)
if (LLVMGetTypeKind(arg_types[i]) == LLVMPointerTypeKind)
LLVMAddAttribute(LLVMGetParam(variant_func, i),
LLVMNoAliasAttribute);
context_ptr = LLVMGetParam(variant_func, 0);
io_ptr = LLVMGetParam(variant_func, 1);
vbuffers_ptr = LLVMGetParam(variant_func, 2);
- stride = LLVMGetParam(variant_func, 5);
- vb_ptr = LLVMGetParam(variant_func, 6);
- system_values.instance_id = LLVMGetParam(variant_func, 7);
+ stride = LLVMGetParam(variant_func, 5 + (elts ? 1 : 0));
+ vb_ptr = LLVMGetParam(variant_func, 6 + (elts ? 1 : 0));
+ system_values.instance_id = LLVMGetParam(variant_func, 7 + (elts ? 1 : 0));
+ vertex_id_offset = LLVMGetParam(variant_func, 8 + (elts ? 1 : 0));
lp_build_name(context_ptr, "context");
lp_build_name(io_ptr, "io");
lp_build_name(stride, "stride");
lp_build_name(vb_ptr, "vb");
lp_build_name(system_values.instance_id, "instance_id");
+ lp_build_name(vertex_id_offset, "vertex_id_offset");
if (elts) {
- fetch_elts = LLVMGetParam(variant_func, 3);
- fetch_count = LLVMGetParam(variant_func, 4);
+ fetch_elts = LLVMGetParam(variant_func, 3);
+ fetch_elt_max = LLVMGetParam(variant_func, 4);
+ fetch_count = LLVMGetParam(variant_func, 5);
lp_build_name(fetch_elts, "fetch_elts");
+ lp_build_name(fetch_elt_max, "fetch_elt_max");
lp_build_name(fetch_count, "fetch_count");
start = count = NULL;
}
/* code generated texture sampling */
sampler = draw_llvm_sampler_soa_create(
- draw_llvm_variant_key_samplers(&variant->key),
+ draw_llvm_variant_key_samplers(key),
context_ptr);
if (elts) {
start = zero;
end = fetch_count;
+ count = fetch_count;
}
else {
end = lp_build_add(&bld, start, count);
fetch_max = LLVMBuildSub(builder, end, one, "fetch_max");
- lp_build_loop_begin(&lp_loop, gallivm, start);
+ lp_build_loop_begin(&lp_loop, gallivm, zero);
{
LLVMValueRef inputs[PIPE_MAX_SHADER_INPUTS][TGSI_NUM_CHANNELS];
LLVMValueRef aos_attribs[PIPE_MAX_SHADER_INPUTS][LP_MAX_VECTOR_WIDTH / 32] = { { 0 } };
LLVMValueRef clipmask; /* holds the clipmask value */
const LLVMValueRef (*ptr_aos)[TGSI_NUM_CHANNELS];
- if (elts)
- io_itr = lp_loop.counter;
- else
- io_itr = LLVMBuildSub(builder, lp_loop.counter, start, "");
+ io_itr = lp_loop.counter;
io = LLVMBuildGEP(builder, io_ptr, &io_itr, 1, "");
#if DEBUG_STORE
#endif
system_values.vertex_id = lp_build_zero(gallivm, lp_type_uint_vec(32, 32*vector_length));
for (i = 0; i < vector_length; ++i) {
- LLVMValueRef true_index =
+ LLVMValueRef vert_index =
LLVMBuildAdd(builder,
lp_loop.counter,
lp_build_const_int32(gallivm, i), "");
+ LLVMValueRef true_index =
+ LLVMBuildAdd(builder, start, vert_index, "");
+ LLVMValueRef vertex_id;
/* make sure we're not out of bounds which can happen
* if fetch_count % 4 != 0, because on the last iteration
if (elts) {
LLVMValueRef fetch_ptr;
- fetch_ptr = LLVMBuildGEP(builder, fetch_elts,
- &true_index, 1, "");
- true_index = LLVMBuildLoad(builder, fetch_ptr, "fetch_elt");
+ LLVMValueRef index_overflowed;
+ LLVMValueRef index_ptr =
+ lp_build_alloca(
+ gallivm,
+ lp_build_vec_type(gallivm, lp_type_int(32)), "");
+ struct lp_build_if_state if_ctx;
+ index_overflowed = LLVMBuildICmp(builder, LLVMIntUGT,
+ true_index, fetch_elt_max,
+ "index_overflowed");
+
+ lp_build_if(&if_ctx, gallivm, index_overflowed);
+ {
+ /* Generate maximum possible index so that
+ * generate_fetch can treat it just like
+ * any other overflow and return zeros.
+ * We don't have to worry about the restart
+ * primitive index because it has already been
+ * handled
+ */
+ LLVMValueRef val =
+ lp_build_const_int32(gallivm, 0xffffffff);
+ LLVMBuildStore(builder, val, index_ptr);
+ }
+ lp_build_else(&if_ctx);
+ {
+ LLVMValueRef val;
+ fetch_ptr = LLVMBuildGEP(builder, fetch_elts,
+ &true_index, 1, "");
+ val = LLVMBuildLoad(builder, fetch_ptr, "");
+ LLVMBuildStore(builder, val, index_ptr);
+ }
+ lp_build_endif(&if_ctx);
+ true_index = LLVMBuildLoad(builder, index_ptr, "true_index");
}
+ /* in the paths with elts vertex id has to be unaffected by the
+ * index bias and because indices inside our elements array have
+ * already had index bias applied we need to subtract it here to
+ * get back to the original index.
+ * in the linear paths vertex id has to be unaffected by the
+ * original start index and because we abuse the 'start' variable
+ * to either represent the actual start index or the index at which
+ * the primitive was split (we split rendering into chunks of at
+ * most 4095-vertices) we need to back out the original start
+ * index out of our vertex id here.
+ */
+ vertex_id = LLVMBuildSub(builder, true_index, vertex_id_offset, "");
+
+ system_values.vertex_id = LLVMBuildInsertElement(
+ gallivm->builder,
+ system_values.vertex_id, vertex_id,
+ lp_build_const_int32(gallivm, i), "");
- system_values.vertex_id = LLVMBuildInsertElement(gallivm->builder,
- system_values.vertex_id, true_index,
- lp_build_const_int32(gallivm, i), "");
for (j = 0; j < draw->pt.nr_vertex_elements; ++j) {
struct pipe_vertex_element *velem = &draw->pt.vertex_element[j];
LLVMValueRef vb_index =
lp_build_const_int32(gallivm, velem->vertex_buffer_index);
LLVMValueRef vb = LLVMBuildGEP(builder, vb_ptr, &vb_index, 1, "");
- generate_fetch(gallivm, vbuffers_ptr,
+ generate_fetch(gallivm, draw, vbuffers_ptr,
&aos_attribs[j][i], velem, vb, true_index,
system_values.instance_id);
}
&system_values,
context_ptr,
sampler,
- variant->key.clamp_vertex_color);
+ key->clamp_vertex_color);
if (pos != -1 && cv != -1) {
/* store original positions in clip before further manipulation */
gallivm,
vs_type,
outputs,
- variant->key.clip_xy,
- variant->key.clip_z,
- variant->key.clip_user,
- variant->key.clip_halfz,
- variant->key.ucp_enable,
+ key->clip_xy,
+ key->clip_z,
+ key->clip_user,
+ key->clip_halfz,
+ key->ucp_enable,
context_ptr, &have_clipdist);
temp = LLVMBuildOr(builder, clipmask, temp, "");
/* store temporary clipping boolean value */
vs_info->num_outputs, vs_type,
have_clipdist);
}
-
- lp_build_loop_end_cond(&lp_loop, end, step, LLVMIntUGE);
+ lp_build_loop_end_cond(&lp_loop, count, step, LLVMIntUGE);
sampler->destroy(sampler);
key->clip_z = llvm->draw->clip_z;
key->clip_user = llvm->draw->clip_user;
key->bypass_viewport = llvm->draw->identity_viewport;
- key->clip_halfz = !llvm->draw->rasterizer->gl_rasterization_rules;
+ key->clip_halfz = llvm->draw->rasterizer->clip_halfz;
key->need_edgeflags = (llvm->draw->vs.edgeflag_output ? TRUE : FALSE);
key->ucp_enable = llvm->draw->rasterizer->clip_plane_enable;
key->has_gs = llvm->draw->gs.geometry_shader != NULL;
lp_build_name(context_ptr, "context");
lp_build_name(input_array, "input");
lp_build_name(io_ptr, "io");
- lp_build_name(io_ptr, "num_prims");
+ lp_build_name(num_prims, "num_prims");
lp_build_name(system_values.instance_id, "instance_id");
lp_build_name(prim_id_ptr, "prim_id_ptr");
&mask,
consts_ptr,
&system_values,
- NULL /*pos*/,
NULL,
outputs,
sampler,