#include "util/u_math.h"
#include "util/u_cpu_detect.h"
#include "util/u_inlines.h"
+#include "util/u_helpers.h"
+#include "util/u_prim.h"
#include "draw_context.h"
#include "draw_vs.h"
#include "draw_gs.h"
#if HAVE_LLVM
#include "gallivm/lp_bld_init.h"
+#include "gallivm/lp_bld_limits.h"
#include "draw_llvm.h"
-static boolean
+boolean
draw_get_option_use_llvm(void)
{
static boolean first = TRUE;
#endif
-
-/**
- * Create new draw module context.
- */
-struct draw_context *
-draw_create(struct pipe_context *pipe)
-{
- return draw_create_gallivm(pipe, NULL);
-}
-
-
-
/**
* Create new draw module context with gallivm state for LLVM JIT.
*/
-struct draw_context *
-draw_create_gallivm(struct pipe_context *pipe, struct gallivm_state *gallivm)
+static struct draw_context *
+draw_create_context(struct pipe_context *pipe, boolean try_llvm)
{
struct draw_context *draw = CALLOC_STRUCT( draw_context );
if (draw == NULL)
- goto fail;
+ goto err_out;
#if HAVE_LLVM
- if (draw_get_option_use_llvm()) {
- if (!gallivm) {
- gallivm = gallivm_create();
- draw->own_gallivm = gallivm;
- }
-
- if (gallivm)
- draw->llvm = draw_llvm_create(draw, gallivm);
+ if (try_llvm && draw_get_option_use_llvm()) {
+ draw->llvm = draw_llvm_create(draw);
+ if (!draw->llvm)
+ goto err_destroy;
}
#endif
- if (!draw_init(draw))
- goto fail;
-
draw->pipe = pipe;
+ if (!draw_init(draw))
+ goto err_destroy;
+
return draw;
-fail:
+err_destroy:
draw_destroy( draw );
+err_out:
return NULL;
}
+/**
+ * Create new draw module context, with LLVM JIT.
+ */
+struct draw_context *
+draw_create(struct pipe_context *pipe)
+{
+ return draw_create_context(pipe, TRUE);
+}
+
+
+/**
+ * Create a new draw context, without LLVM JIT.
+ */
+struct draw_context *
+draw_create_no_llvm(struct pipe_context *pipe)
+{
+ return draw_create_context(pipe, FALSE);
+}
+
boolean draw_init(struct draw_context *draw)
{
ASSIGN_4V( draw->plane[3], 0, 1, 0, 1 );
ASSIGN_4V( draw->plane[4], 0, 0, 1, 1 ); /* yes these are correct */
ASSIGN_4V( draw->plane[5], 0, 0, -1, 1 ); /* mesa's a bit wonky */
- draw->nr_planes = 6;
draw->clip_xy = TRUE;
draw->clip_z = TRUE;
-
- draw->reduced_prim = ~0; /* != any of PIPE_PRIM_x */
-
+ draw->pt.user.planes = (float (*) [DRAW_TOTAL_CLIP_PLANES][4]) &(draw->plane[0]);
if (!draw_pipeline_init( draw ))
return FALSE;
if (!draw_gs_init( draw ))
return FALSE;
+ draw->quads_always_flatshade_last = !draw->pipe->screen->get_param(
+ draw->pipe->screen, PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION);
+
return TRUE;
}
+/*
+ * Called whenever we're starting to draw a new instance.
+ * Some internal structures don't want to have to reset internal
+ * members on each invocation (because their state might have to persist
+ * between multiple primitive restart rendering call) but might have to
+ * for each new instance.
+ * This is particularly the case for primitive id's in geometry shader.
+ */
+void draw_new_instance(struct draw_context *draw)
+{
+ draw_geometry_shader_new_instance(draw->gs.geometry_shader);
+}
+
void draw_destroy( struct draw_context *draw )
{
struct pipe_context *pipe;
- int i, j;
+ unsigned i, j;
if (!draw)
return;
#ifdef HAVE_LLVM
if (draw->llvm)
draw_llvm_destroy( draw->llvm );
-
- if (draw->own_gallivm)
- gallivm_destroy(draw->own_gallivm);
#endif
FREE( draw );
/**
 * Recompute the derived clipping flags from the driver overrides and
 * the currently bound rasterizer state.  Called whenever either changes.
 */
static void update_clip_flags( struct draw_context *draw )
{
   draw->clip_xy = !draw->driver.bypass_clip_xy;
   /* guard band only makes sense when xy clipping is not bypassed */
   draw->guard_band_xy = (!draw->driver.bypass_clip_xy &&
                          draw->driver.guard_band_xy);
   /* draw->rasterizer may be NULL before any state is bound */
   draw->clip_z = (!draw->driver.bypass_clip_z &&
                   draw->rasterizer && draw->rasterizer->depth_clip);
   draw->clip_user = draw->rasterizer &&
                     draw->rasterizer->clip_plane_enable != 0;
}
/**
draw->rasterizer = raster;
draw->rast_handle = rast_handle;
-
- }
+ update_clip_flags(draw);
+ }
}
/* With a little more work, llvmpipe will be able to turn this off and
*/
/**
 * Tell the draw module which clipping stages the driver performs itself,
 * so the software pipeline can skip them.
 *
 * \param bypass_clip_xy  driver handles x/y clipping
 * \param bypass_clip_z   driver handles z clipping
 * \param guard_band_xy   driver has a guard band for x/y
 */
void draw_set_driver_clipping( struct draw_context *draw,
                               boolean bypass_clip_xy,
                               boolean bypass_clip_z,
                               boolean guard_band_xy)
{
   draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );

   draw->driver.bypass_clip_xy = bypass_clip_xy;
   draw->driver.bypass_clip_z = bypass_clip_z;
   draw->driver.guard_band_xy = guard_band_xy;
   update_clip_flags(draw);
}
/**
 * Set the draw module's user clip plane state.
 *
 * Copies all user clip planes into draw->plane[6..]; which planes are
 * actually applied is controlled by the rasterizer's clip_plane_enable
 * mask (see update_clip_flags()).
 */
void draw_set_clip_state( struct draw_context *draw,
                          const struct pipe_clip_state *clip )
{
   draw_do_flush(draw, DRAW_FLUSH_PARAMETER_CHANGE);

   memcpy(&draw->plane[6], clip->ucp, sizeof(clip->ucp));
}
/**
* Set the draw module's viewport state.
*/
-void draw_set_viewport_state( struct draw_context *draw,
- const struct pipe_viewport_state *viewport )
+void draw_set_viewport_states( struct draw_context *draw,
+ unsigned start_slot,
+ unsigned num_viewports,
+ const struct pipe_viewport_state *vps )
{
- draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
- draw->viewport = *viewport; /* struct copy */
- draw->identity_viewport = (viewport->scale[0] == 1.0f &&
- viewport->scale[1] == 1.0f &&
- viewport->scale[2] == 1.0f &&
- viewport->scale[3] == 1.0f &&
- viewport->translate[0] == 0.0f &&
- viewport->translate[1] == 0.0f &&
- viewport->translate[2] == 0.0f &&
- viewport->translate[3] == 0.0f);
+ const struct pipe_viewport_state *viewport = vps;
+ draw_do_flush(draw, DRAW_FLUSH_PARAMETER_CHANGE);
+
+ debug_assert(start_slot < PIPE_MAX_VIEWPORTS);
+ debug_assert((start_slot + num_viewports) <= PIPE_MAX_VIEWPORTS);
+
+ memcpy(draw->viewports + start_slot, vps,
+ sizeof(struct pipe_viewport_state) * num_viewports);
- draw_vs_set_viewport( draw, viewport );
+ draw->identity_viewport = (num_viewports == 1) &&
+ (viewport->scale[0] == 1.0f &&
+ viewport->scale[1] == 1.0f &&
+ viewport->scale[2] == 1.0f &&
+ viewport->scale[3] == 1.0f &&
+ viewport->translate[0] == 0.0f &&
+ viewport->translate[1] == 0.0f &&
+ viewport->translate[2] == 0.0f &&
+ viewport->translate[3] == 0.0f);
}
void
draw_set_vertex_buffers(struct draw_context *draw,
- unsigned count,
+ unsigned start_slot, unsigned count,
const struct pipe_vertex_buffer *buffers)
{
- assert(count <= PIPE_MAX_ATTRIBS);
+ assert(start_slot + count <= PIPE_MAX_ATTRIBS);
- util_copy_vertex_buffers(draw->pt.vertex_buffer,
- &draw->pt.nr_vertex_buffers,
- buffers, count);
+ util_set_vertex_buffers_count(draw->pt.vertex_buffer,
+ &draw->pt.nr_vertex_buffers,
+ buffers, start_slot, count);
}
{
assert(count <= PIPE_MAX_ATTRIBS);
+ /* We could improve this by only flushing the frontend and the fetch part
+ * of the middle. This would avoid recalculating the emit keys.*/
+ draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
+
memcpy(draw->pt.vertex_element, elements, count * sizeof(elements[0]));
draw->pt.nr_vertex_elements = count;
}
*/
void
draw_set_mapped_vertex_buffer(struct draw_context *draw,
- unsigned attr, const void *buffer)
+ unsigned attr, const void *buffer,
+ size_t size)
{
- draw->pt.user.vbuffer[attr] = buffer;
+ draw->pt.user.vbuffer[attr].map = buffer;
+ draw->pt.user.vbuffer[attr].size = size;
}
shader_type == PIPE_SHADER_GEOMETRY);
debug_assert(slot < PIPE_MAX_CONSTANT_BUFFERS);
+ draw_do_flush(draw, DRAW_FLUSH_PARAMETER_CHANGE);
+
switch (shader_type) {
case PIPE_SHADER_VERTEX:
draw->pt.user.vs_constants[slot] = buffer;
draw->pt.user.vs_constants_size[slot] = size;
- draw->pt.user.planes = (float (*) [12][4]) &(draw->plane[0]);
- draw_vs_set_constants(draw, slot, buffer, size);
break;
case PIPE_SHADER_GEOMETRY:
draw->pt.user.gs_constants[slot] = buffer;
draw->pt.user.gs_constants_size[slot] = size;
- draw_gs_set_constants(draw, slot, buffer, size);
break;
default:
assert(0 && "invalid shader type in draw_set_mapped_constant_buffer");
/**
- * Allocate an extra vertex/geometry shader vertex attribute.
+ * Allocate an extra vertex/geometry shader vertex attribute, if it doesn't
+ * exist already.
+ *
* This is used by some of the optional draw module stages such
* as wide_point which may need to allocate additional generic/texcoord
* attributes.
draw_alloc_extra_vertex_attrib(struct draw_context *draw,
uint semantic_name, uint semantic_index)
{
- const int num_outputs = draw_current_shader_outputs(draw);
- const int n = draw->extra_shader_outputs.num;
+ int slot;
+ uint num_outputs;
+ uint n;
+
+ slot = draw_find_shader_output(draw, semantic_name, semantic_index);
+ if (slot >= 0) {
+ return slot;
+ }
+
+ num_outputs = draw_current_shader_outputs(draw);
+ n = draw->extra_shader_outputs.num;
assert(n < Elements(draw->extra_shader_outputs.semantic_name));
}
+/**
+ * If a geometry shader is present, return its info, else the vertex shader's
+ * info.
+ */
+struct tgsi_shader_info *
+draw_get_shader_info(const struct draw_context *draw)
+{
+
+ if (draw->gs.geometry_shader) {
+ return &draw->gs.geometry_shader->info;
+ } else {
+ return &draw->vs.vertex_shader->info;
+ }
+}
+
+
/**
* Ask the draw module for the location/slot of the given vertex attribute in
* a post-transformed vertex.
* attributes (such as texcoords for AA lines). The driver can call this
* function to find those attributes.
*
- * Zero is returned if the attribute is not found since this is
- * a don't care / undefined situtation. Returning -1 would be a bit more
- * work for the drivers.
+ * -1 is returned if the attribute is not found since this is
+ * an undefined situation. Note, that zero is valid and can
+ * be used by any of the attributes, because position is not
+ * required to be attribute 0 or even at all present.
*/
int
draw_find_shader_output(const struct draw_context *draw,
uint semantic_name, uint semantic_index)
{
- const struct draw_vertex_shader *vs = draw->vs.vertex_shader;
- const struct draw_geometry_shader *gs = draw->gs.geometry_shader;
+ const struct tgsi_shader_info *info = draw_get_shader_info(draw);
uint i;
- const struct tgsi_shader_info *info = &vs->info;
-
- if (gs)
- info = &gs->info;
for (i = 0; i < info->num_outputs; i++) {
if (info->output_semantic_name[i] == semantic_name &&
}
}
- return 0;
+ return -1;
}
uint
draw_num_shader_outputs(const struct draw_context *draw)
{
+ const struct tgsi_shader_info *info = draw_get_shader_info(draw);
uint count;
- /* If a geometry shader is present, its outputs go to the
- * driver, else the vertex shader's outputs.
- */
- if (draw->gs.geometry_shader)
- count = draw->gs.geometry_shader->info.num_outputs;
- else
- count = draw->vs.vertex_shader->info.num_outputs;
-
+ count = info->num_outputs;
count += draw->extra_shader_outputs.num;
return count;
/**
* Provide TGSI sampler objects for vertex/geometry shaders that use
- * texture fetches.
+ * texture fetches. This state only needs to be set once per context.
* This might only be used by software drivers for the time being.
*/
void
-draw_texture_samplers(struct draw_context *draw,
- uint shader,
- uint num_samplers,
- struct tgsi_sampler **samplers)
+draw_texture_sampler(struct draw_context *draw,
+ uint shader,
+ struct tgsi_sampler *sampler)
{
if (shader == PIPE_SHADER_VERTEX) {
- draw->vs.num_samplers = num_samplers;
- draw->vs.samplers = samplers;
+ draw->vs.tgsi.sampler = sampler;
} else {
debug_assert(shader == PIPE_SHADER_GEOMETRY);
- draw->gs.num_samplers = num_samplers;
- draw->gs.samplers = samplers;
+ draw->gs.tgsi.sampler = sampler;
}
}
}
-void
-draw_set_index_buffer(struct draw_context *draw,
- const struct pipe_index_buffer *ib)
-{
- if (ib)
- memcpy(&draw->pt.index_buffer, ib, sizeof(draw->pt.index_buffer));
- else
- memset(&draw->pt.index_buffer, 0, sizeof(draw->pt.index_buffer));
-}
-
-
/**
- * Tell drawing context where to find mapped index/element buffer.
+ * Tell the draw module where vertex indexes/elements are located, and
+ * their size (in bytes).
+ *
+ * Note: the caller must apply the pipe_index_buffer::offset value to
+ * the address. The draw module doesn't do that.
*/
void
-draw_set_mapped_index_buffer(struct draw_context *draw,
- const void *elements)
-{
- draw->pt.user.elts = elements;
+draw_set_indexes(struct draw_context *draw,
+ const void *elements, unsigned elem_size,
+ unsigned elem_buffer_space)
+{
+ assert(elem_size == 0 ||
+ elem_size == 1 ||
+ elem_size == 2 ||
+ elem_size == 4);
+ draw->pt.user.elts = elements;
+ draw->pt.user.eltSizeIB = elem_size;
+ if (elem_size)
+ draw->pt.user.eltMax = elem_buffer_space / elem_size;
+ else
+ draw->pt.user.eltMax = 0;
}
draw_pipeline_flush( draw, flags );
- draw->reduced_prim = ~0; /* is reduced_prim needed any more? */
-
+ draw_pt_flush( draw, flags );
+
draw->flushing = FALSE;
}
}
}
+/**
+ * Return the index of the shader output which will contain the
+ * viewport index.
+ */
+uint
+draw_current_shader_viewport_index_output(const struct draw_context *draw)
+{
+ if (draw->gs.geometry_shader)
+ return draw->gs.geometry_shader->viewport_index_output;
+ return 0;
+}
+
+/**
+ * Returns true if there's a geometry shader bound and the geometry
+ * shader writes out a viewport index.
+ */
+boolean
+draw_current_shader_uses_viewport_index(const struct draw_context *draw)
+{
+ if (draw->gs.geometry_shader)
+ return draw->gs.geometry_shader->info.writes_viewport_index;
+ return FALSE;
+}
+
+
+/**
+ * Return the index of the shader output which will contain the
+ * vertex position.
+ */
+uint
+draw_current_shader_clipvertex_output(const struct draw_context *draw)
+{
+ return draw->vs.clipvertex_output;
+}
+
+uint
+draw_current_shader_clipdistance_output(const struct draw_context *draw, int index)
+{
+ debug_assert(index < PIPE_MAX_CLIP_OR_CULL_DISTANCE_ELEMENT_COUNT);
+ if (draw->gs.geometry_shader)
+ return draw->gs.geometry_shader->clipdistance_output[index];
+ return draw->vs.clipdistance_output[index];
+}
+
+
+uint
+draw_current_shader_num_written_clipdistances(const struct draw_context *draw)
+{
+ if (draw->gs.geometry_shader)
+ return draw->gs.geometry_shader->info.num_written_clipdistance;
+ return draw->vs.vertex_shader->info.num_written_clipdistance;
+}
+
+
+uint
+draw_current_shader_culldistance_output(const struct draw_context *draw, int index)
+{
+ debug_assert(index < PIPE_MAX_CLIP_OR_CULL_DISTANCE_ELEMENT_COUNT);
+ if (draw->gs.geometry_shader)
+ return draw->gs.geometry_shader->culldistance_output[index];
+ return draw->vs.vertex_shader->culldistance_output[index];
+}
+
+uint
+draw_current_shader_num_written_culldistances(const struct draw_context *draw)
+{
+ if (draw->gs.geometry_shader)
+ return draw->gs.geometry_shader->info.num_written_culldistance;
+ return draw->vs.vertex_shader->info.num_written_culldistance;
+}
+
/**
* Return a pointer/handle for a driver/CSO rasterizer object which
* disabled culling, stippling, unfilled tris, etc.
rast.scissor = scissor;
rast.flatshade = flatshade;
rast.front_ccw = 1;
- rast.gl_rasterization_rules = draw->rasterizer->gl_rasterization_rules;
+ rast.half_pixel_center = draw->rasterizer->half_pixel_center;
+ rast.bottom_edge_rule = draw->rasterizer->bottom_edge_rule;
+ rast.clip_halfz = draw->rasterizer->clip_halfz;
draw->rasterizer_no_cull[scissor][flatshade] =
pipe->create_rasterizer_state(pipe, &rast);
return draw->rasterizer_no_cull[scissor][flatshade];
}
+/**
+ * Sets the mapped so targets.
+ *
+ * The append bitmask specifies which of the buffers are in
+ * the append mode. The append mode means that the buffer
+ * should be appended to, rather than written to from the start.
+ * i.e. the outputs should be written starting from the last
+ * location to which the previous
+ * pass of stream output wrote to in this buffer.
+ * If the buffer is not in an append mode (which is more common)
+ * the writing begins from the start of the buffer.
+ *
+ */
void
-draw_set_mapped_so_buffers(struct draw_context *draw,
- void *buffers[PIPE_MAX_SO_BUFFERS],
- unsigned num_buffers)
+draw_set_mapped_so_targets(struct draw_context *draw,
+ int num_targets,
+ struct draw_so_target *targets[PIPE_MAX_SO_BUFFERS],
+ unsigned append_bitmask)
{
int i;
- for (i = 0; i < num_buffers; ++i) {
- draw->so.buffers[i] = buffers[i];
+ for (i = 0; i < num_targets; i++) {
+ draw->so.targets[i] = targets[i];
+ /* if we're not appending then lets reset the internal
+ data of our so target */
+ if (!(append_bitmask & (1 << i)) && draw->so.targets[i]) {
+ draw->so.targets[i]->internal_offset = 0;
+ draw->so.targets[i]->emitted_vertices = 0;
+ }
}
- draw->so.num_buffers = num_buffers;
-}
+ for (i = num_targets; i < PIPE_MAX_SO_BUFFERS; i++)
+ draw->so.targets[i] = NULL;
-void
-draw_set_so_state(struct draw_context *draw,
- struct pipe_stream_output_state *state)
-{
- memcpy(&draw->so.state,
- state,
- sizeof(struct pipe_stream_output_state));
+ draw->so.num_targets = num_targets;
}
void
draw_set_sampler_views(struct draw_context *draw,
+ unsigned shader_stage,
struct pipe_sampler_view **views,
unsigned num)
{
unsigned i;
- debug_assert(num <= PIPE_MAX_VERTEX_SAMPLERS);
+ debug_assert(shader_stage < PIPE_SHADER_TYPES);
+ debug_assert(num <= PIPE_MAX_SHADER_SAMPLER_VIEWS);
+
+ draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
for (i = 0; i < num; ++i)
- draw->sampler_views[i] = views[i];
- for (i = num; i < PIPE_MAX_VERTEX_SAMPLERS; ++i)
- draw->sampler_views[i] = NULL;
+ draw->sampler_views[shader_stage][i] = views[i];
+ for (i = num; i < PIPE_MAX_SHADER_SAMPLER_VIEWS; ++i)
+ draw->sampler_views[shader_stage][i] = NULL;
- draw->num_sampler_views = num;
+ draw->num_sampler_views[shader_stage] = num;
}
void
draw_set_samplers(struct draw_context *draw,
+ unsigned shader_stage,
struct pipe_sampler_state **samplers,
unsigned num)
{
unsigned i;
- debug_assert(num <= PIPE_MAX_VERTEX_SAMPLERS);
+ debug_assert(shader_stage < PIPE_SHADER_TYPES);
+ debug_assert(num <= PIPE_MAX_SAMPLERS);
+
+ draw_do_flush( draw, DRAW_FLUSH_STATE_CHANGE );
for (i = 0; i < num; ++i)
- draw->samplers[i] = samplers[i];
- for (i = num; i < PIPE_MAX_VERTEX_SAMPLERS; ++i)
- draw->samplers[i] = NULL;
+ draw->samplers[shader_stage][i] = samplers[i];
+ for (i = num; i < PIPE_MAX_SAMPLERS; ++i)
+ draw->samplers[shader_stage][i] = NULL;
- draw->num_samplers = num;
+ draw->num_samplers[shader_stage] = num;
#ifdef HAVE_LLVM
if (draw->llvm)
- draw_llvm_set_sampler_state(draw);
+ draw_llvm_set_sampler_state(draw, shader_stage);
#endif
}
/**
 * Tell the draw module about a mapped texture so shaders can sample it.
 * Forwards the mapping to the LLVM backend; a no-op for the TGSI
 * interpreter path (which uses the tgsi_sampler interface instead).
 *
 * \param base_ptr     base address of the mapped texture
 * \param mip_offsets  per-mip-level byte offsets from base_ptr
 */
void
draw_set_mapped_texture(struct draw_context *draw,
                        unsigned shader_stage,
                        unsigned sview_idx,
                        uint32_t width, uint32_t height, uint32_t depth,
                        uint32_t first_level, uint32_t last_level,
                        const void *base_ptr,
                        uint32_t row_stride[PIPE_MAX_TEXTURE_LEVELS],
                        uint32_t img_stride[PIPE_MAX_TEXTURE_LEVELS],
                        uint32_t mip_offsets[PIPE_MAX_TEXTURE_LEVELS])
{
#ifdef HAVE_LLVM
   if (draw->llvm)
      draw_llvm_set_mapped_texture(draw,
                                   shader_stage,
                                   sview_idx,
                                   width, height, depth, first_level,
                                   last_level, base_ptr,
                                   row_stride, img_stride, mip_offsets);
#endif
}
+
+/**
+ * XXX: Results for PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS because there are two
+ * different ways of setting textures, and drivers typically only support one.
+ */
+int
+draw_get_shader_param_no_llvm(unsigned shader, enum pipe_shader_cap param)
+{
+ switch(shader) {
+ case PIPE_SHADER_VERTEX:
+ case PIPE_SHADER_GEOMETRY:
+ return tgsi_exec_get_shader_param(param);
+ default:
+ return 0;
+ }
+}
+
+/**
+ * XXX: Results for PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS because there are two
+ * different ways of setting textures, and drivers typically only support one.
+ */
+int
+draw_get_shader_param(unsigned shader, enum pipe_shader_cap param)
+{
+
+#ifdef HAVE_LLVM
+ if (draw_get_option_use_llvm()) {
+ switch(shader) {
+ case PIPE_SHADER_VERTEX:
+ case PIPE_SHADER_GEOMETRY:
+ return gallivm_get_shader_param(param);
+ default:
+ return 0;
+ }
+ }
#endif
+
+ return draw_get_shader_param_no_llvm(shader, param);
+}
+
+/**
+ * Enables or disables collection of statistics.
+ *
+ * Draw module is capable of generating statistics for the vertex
+ * processing pipeline. Collection of that data isn't free and so
+ * it's disabled by default. The users of the module can enable
+ * (or disable) this functionality through this function.
+ * The actual data will be emitted through the VBUF interface,
+ * the 'pipeline_statistics' callback to be exact.
+ */
+void
+draw_collect_pipeline_statistics(struct draw_context *draw,
+ boolean enable)
+{
+ draw->collect_statistics = enable;
+}
+
+/**
+ * Computes clipper invocation statistics.
+ *
+ * Figures out how many primitives would have been
+ * sent to the clipper given the specified
+ * prim info data.
+ */
+void
+draw_stats_clipper_primitives(struct draw_context *draw,
+ const struct draw_prim_info *prim_info)
+{
+ if (draw->collect_statistics) {
+ unsigned start, i;
+ for (start = i = 0;
+ i < prim_info->primitive_count;
+ start += prim_info->primitive_lengths[i], i++)
+ {
+ draw->statistics.c_invocations +=
+ u_decomposed_prims_for_vertices(prim_info->prim,
+ prim_info->primitive_lengths[i]);
+ }
+ }
}