#include "main/state.h"
#include "main/enums.h"
#include "main/macros.h"
+#include "main/transformfeedback.h"
#include "tnl/tnl.h"
#include "vbo/vbo_context.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "drivers/common/meta.h"
+#include "brw_blorp.h"
#include "brw_draw.h"
#include "brw_defines.h"
#include "brw_context.h"
#define FILE_DEBUG_FLAG DEBUG_PRIMS
-static GLuint prim_to_hw_prim[GL_POLYGON+1] = {
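+/* Map GL primitive enums to hardware 3DPRIM topology types. The table is
+ * indexed directly by the GL enum (GL_POINTS == 0 ... GL_POLYGON == 9),
+ * hence the GL_POLYGON+1 size; dropping `static` gives it external linkage.
+ */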
+const GLuint prim_to_hw_prim[GL_POLYGON+1] = {
_3DPRIM_POINTLIST,
_3DPRIM_LINELIST,
_3DPRIM_LINELOOP,
static void brw_set_prim(struct brw_context *brw,
const struct _mesa_prim *prim)
{
- struct gl_context *ctx = &brw->intel.ctx;
+ struct gl_context *ctx = &brw->ctx;
uint32_t hw_prim = prim_to_hw_prim[prim->mode];
DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));
brw->primitive = hw_prim;
brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
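+   /* Pre-Gen6 fixed-function state only cares about the reduced primitive
+    * class (point, line, or triangle), so changes to it get their own
+    * dirty flag.
+    */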
- if (reduced_prim[prim->mode] != brw->intel.reduced_primitive) {
- brw->intel.reduced_primitive = reduced_prim[prim->mode];
+ if (reduced_prim[prim->mode] != brw->reduced_primitive) {
+ brw->reduced_primitive = reduced_prim[prim->mode];
brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
}
}
}
+/**
+ * The hardware is capable of removing dangling vertices on its own; however,
+ * prior to Gen6, we sometimes convert quads into trifans (and quad strips
+ * into tristrips), since pre-Gen6 hardware requires a GS to render quads.
+ * This function manually trims dangling vertices from a draw call involving
+ * quads so that those dangling vertices won't get drawn when we convert to
+ * trifans/tristrips.
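+ * (For example, a GL_QUADS draw with 7 vertices trims to 4, discarding
+ * the 3 dangling vertices that can't form a complete quad.)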
+ */
static GLuint trim(GLenum prim, GLuint length)
{
if (prim == GL_QUAD_STRIP)
const struct _mesa_prim *prim,
uint32_t hw_prim)
{
- struct intel_context *intel = &brw->intel;
int verts_per_instance;
int vertex_access_type;
int start_vertex_location;
start_vertex_location = prim->start;
base_vertex_location = prim->basevertex;
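+   /* The vertex access type (sequential vs. random/indexed) is programmed
+    * into different dwords of 3DPRIMITIVE on Gen4-6 versus Gen7+, so pick
+    * the per-generation encoding here and emit it below.
+    */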
if (prim->indexed) {
- vertex_access_type = GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
+ vertex_access_type = brw->gen >= 7 ?
+ GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM :
+ GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
start_vertex_location += brw->ib.start_vertex_offset;
base_vertex_location += brw->vb.start_vertex_bias;
} else {
- vertex_access_type = GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
+ vertex_access_type = brw->gen >= 7 ?
+ GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL :
+ GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
start_vertex_location += brw->vb.start_vertex_bias;
}
- verts_per_instance = trim(prim->mode, prim->count);
+ /* We only need to trim the primitive count on pre-Gen6. */
+ if (brw->gen < 6)
+ verts_per_instance = trim(prim->mode, prim->count);
+ else
+ verts_per_instance = prim->count;
/* If nothing to emit, just return. */
if (verts_per_instance == 0)
* and missed flushes of the render cache as it heads to other parts of
* the GPU besides the draw code.
*/
- if (intel->always_flush_cache) {
- intel_batchbuffer_emit_mi_flush(intel);
- }
-
- BEGIN_BATCH(6);
- OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
- hw_prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
- vertex_access_type);
- OUT_BATCH(verts_per_instance);
- OUT_BATCH(start_vertex_location);
- OUT_BATCH(1); // instance count
- OUT_BATCH(0); // start instance location
- OUT_BATCH(base_vertex_location);
- ADVANCE_BATCH();
-
- intel->batch.need_workaround_flush = true;
-
- if (intel->always_flush_cache) {
- intel_batchbuffer_emit_mi_flush(intel);
+ if (brw->always_flush_cache) {
+ intel_batchbuffer_emit_mi_flush(brw);
}
-}
-
-static void gen7_emit_prim(struct brw_context *brw,
- const struct _mesa_prim *prim,
- uint32_t hw_prim)
-{
- struct intel_context *intel = &brw->intel;
- int verts_per_instance;
- int vertex_access_type;
- int start_vertex_location;
- int base_vertex_location;
-
- DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
- prim->start, prim->count);
- start_vertex_location = prim->start;
- base_vertex_location = prim->basevertex;
- if (prim->indexed) {
- vertex_access_type = GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
- start_vertex_location += brw->ib.start_vertex_offset;
- base_vertex_location += brw->vb.start_vertex_bias;
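+   /* On Gen7+, 3DPRIMITIVE is 7 dwords long and the topology and access
+    * type moved out of the command header into DW1; Gen4-6 packs both
+    * into DW0.
+    */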
+ if (brw->gen >= 7) {
+ BEGIN_BATCH(7);
+ OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
+ OUT_BATCH(hw_prim | vertex_access_type);
} else {
- vertex_access_type = GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
- start_vertex_location += brw->vb.start_vertex_bias;
+ BEGIN_BATCH(6);
+ OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
+ hw_prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
+ vertex_access_type);
}
-
- verts_per_instance = trim(prim->mode, prim->count);
-
- /* If nothing to emit, just return. */
- if (verts_per_instance == 0)
- return;
-
- /* If we're set to always flush, do it before and after the primitive emit.
- * We want to catch both missed flushes that hurt instruction/state cache
- * and missed flushes of the render cache as it heads to other parts of
- * the besides the draw code.
- */
- if (intel->always_flush_cache) {
- intel_batchbuffer_emit_mi_flush(intel);
- }
-
- BEGIN_BATCH(7);
- OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
- OUT_BATCH(hw_prim | vertex_access_type);
OUT_BATCH(verts_per_instance);
OUT_BATCH(start_vertex_location);
- OUT_BATCH(1); // instance count
- OUT_BATCH(0); // start instance location
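+   /* Use the draw call's real instance count and base instance rather
+    * than the previously hardcoded 1 and 0, so instanced draws work.
+    */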
+ OUT_BATCH(prim->num_instances);
+ OUT_BATCH(prim->base_instance);
OUT_BATCH(base_vertex_location);
ADVANCE_BATCH();
- if (intel->always_flush_cache) {
- intel_batchbuffer_emit_mi_flush(intel);
+ /* Only used on Sandybridge; harmless to set elsewhere. */
+ brw->batch.need_workaround_flush = true;
+
+ if (brw->always_flush_cache) {
+ intel_batchbuffer_emit_mi_flush(brw);
}
}
static void brw_merge_inputs( struct brw_context *brw,
const struct gl_client_array *arrays[])
{
- struct brw_vertex_info old = brw->vb.info;
GLuint i;
for (i = 0; i < brw->vb.nr_buffers; i++) {
}
brw->vb.nr_buffers = 0;
- memset(&brw->vb.info, 0, sizeof(brw->vb.info));
-
for (i = 0; i < VERT_ATTRIB_MAX; i++) {
brw->vb.inputs[i].buffer = -1;
brw->vb.inputs[i].glarray = arrays[i];
brw->vb.inputs[i].attrib = (gl_vert_attrib) i;
-
- if (arrays[i]->StrideB != 0)
- brw->vb.info.sizes[i/16] |= (brw->vb.inputs[i].glarray->Size - 1) <<
- ((i%16) * 2);
}
-
- /* Raise statechanges if input sizes have changed. */
- if (memcmp(brw->vb.info.sizes, old.sizes, sizeof(old.sizes)) != 0)
- brw->state.dirty.brw |= BRW_NEW_INPUT_DIMENSIONS;
}
/*
static void
brw_predraw_resolve_buffers(struct brw_context *brw)
{
- struct gl_context *ctx = &brw->intel.ctx;
- struct intel_context *intel = &brw->intel;
+ struct gl_context *ctx = &brw->ctx;
struct intel_renderbuffer *depth_irb;
struct intel_texture_object *tex_obj;
/* Resolve the depth buffer's HiZ buffer. */
depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
- if (depth_irb && depth_irb->mt) {
- intel_renderbuffer_resolve_hiz(intel, depth_irb);
- }
+ if (depth_irb)
+ intel_renderbuffer_resolve_hiz(brw, depth_irb);
- /* Resolve depth buffer of each enabled depth texture. */
+ /* Resolve depth buffer of each enabled depth texture, and color buffer of
+ * each fast-clear-enabled color texture.
+ */
for (int i = 0; i < BRW_MAX_TEX_UNIT; i++) {
if (!ctx->Texture.Unit[i]._ReallyEnabled)
continue;
tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
if (!tex_obj || !tex_obj->mt)
continue;
- intel_miptree_all_slices_resolve_depth(intel, tex_obj->mt);
+ intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
+ intel_miptree_resolve_color(brw, tex_obj->mt);
}
}
* If the depth buffer was written to and if it has an accompanying HiZ
* buffer, then mark that it needs a depth resolve.
*
- * (In the future, this will also mark needed MSAA resolves).
+ * If the color buffer is a multisample window system buffer, then
+ * mark that it needs a downsample.
*/
static void brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
{
- struct gl_context *ctx = &brw->intel.ctx;
+ struct gl_context *ctx = &brw->ctx;
struct gl_framebuffer *fb = ctx->DrawBuffer;
- struct intel_renderbuffer *depth_irb =
- intel_get_renderbuffer(fb, BUFFER_DEPTH);
- if (depth_irb && ctx->Depth.Mask) {
- intel_renderbuffer_set_needs_depth_resolve(depth_irb);
- }
-}
+ struct intel_renderbuffer *front_irb = NULL;
+ struct intel_renderbuffer *back_irb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
+ struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
+ struct gl_renderbuffer_attachment *depth_att = &fb->Attachment[BUFFER_DEPTH];
-static int
-verts_per_prim(GLenum mode)
-{
- switch (mode) {
- case GL_POINTS:
- return 1;
- case GL_LINE_STRIP:
- case GL_LINE_LOOP:
- case GL_LINES:
- return 2;
- case GL_TRIANGLE_STRIP:
- case GL_TRIANGLE_FAN:
- case GL_POLYGON:
- case GL_TRIANGLES:
- case GL_QUADS:
- case GL_QUAD_STRIP:
- return 3;
- default:
- _mesa_problem(NULL,
- "unknown prim type in transform feedback primitive count");
- return 0;
- }
-}
+ if (brw->is_front_buffer_rendering)
+ front_irb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
-/**
- * Update internal counters based on the the drawing operation described in
- * prim.
- */
-static void
-brw_update_primitive_count(struct brw_context *brw,
- const struct _mesa_prim *prim)
-{
- uint32_t count = count_tessellated_primitives(prim);
- brw->sol.primitives_generated += count;
- if (brw->intel.ctx.TransformFeedback.CurrentObject->Active &&
- !brw->intel.ctx.TransformFeedback.CurrentObject->Paused) {
- /* Update brw->sol.svbi_0_max_index to reflect the amount by which the
- * hardware is going to increment SVBI 0 when this drawing operation
- * occurs. This is necessary because the kernel does not (yet) save and
- * restore GPU registers when context switching, so we'll need to be
- * able to reload SVBI 0 with the correct value in case we have to start
- * a new batch buffer.
- */
- unsigned verts = verts_per_prim(prim->mode);
- uint32_t space_avail =
- (brw->sol.svbi_0_max_index - brw->sol.svbi_0_starting_index) / verts;
- uint32_t primitives_written = MIN2 (space_avail, count);
- brw->sol.svbi_0_starting_index += verts * primitives_written;
-
- /* And update the TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN query. */
- brw->sol.primitives_written += primitives_written;
- }
+ if (front_irb)
+ intel_renderbuffer_set_needs_downsample(front_irb);
+ if (back_irb)
+ intel_renderbuffer_set_needs_downsample(back_irb);
+ if (depth_irb && ctx->Depth.Mask)
+ intel_renderbuffer_att_set_needs_depth_resolve(depth_att);
}
/* May fail if out of video memory for texture or vbo upload, or on
*/
static bool brw_try_draw_prims( struct gl_context *ctx,
const struct gl_client_array *arrays[],
- const struct _mesa_prim *prim,
+ const struct _mesa_prim *prims,
GLuint nr_prims,
const struct _mesa_index_buffer *ib,
GLuint min_index,
GLuint max_index )
{
- struct intel_context *intel = intel_context(ctx);
struct brw_context *brw = brw_context(ctx);
bool retval = true;
GLuint i;
if (ctx->NewState)
_mesa_update_state( ctx );
+ /* Find the highest sampler unit used by each shader program. A bit-count
+ * won't work since ARB programs use the texture unit number as the sampler
+ * index.
+ */
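+   /* (_mesa_fls() returns one plus the index of the most significant set
+    * bit, or zero for an empty mask, so SamplersUsed == 0x5 yields a
+    * sampler count of 3, covering units 0 through 2.)
+    */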
+ brw->wm.sampler_count = _mesa_fls(ctx->FragmentProgram._Current->Base.SamplersUsed);
+ brw->gs.base.sampler_count = ctx->GeometryProgram._Current ?
+ _mesa_fls(ctx->GeometryProgram._Current->Base.SamplersUsed) : 0;
+ brw->vs.base.sampler_count =
+ _mesa_fls(ctx->VertexProgram._Current->Base.SamplersUsed);
+
/* We have to validate the textures *before* checking for fallbacks;
* otherwise, the software fallback won't be able to rely on the
* texture state, the firstLevel and lastLevel fields won't be
*/
brw_validate_textures( brw );
- /* Resolves must occur after updating state and finalizing textures but
- * before setting up any hardware state for this draw call.
+ intel_prepare_render(brw);
+
+ /* This workaround has to happen outside of brw_upload_state() because it
+ * may flush the batchbuffer for a blit, affecting the state flags.
+ */
+ brw_workaround_depthstencil_alignment(brw, 0);
+
+ /* Resolves must occur after updating renderbuffers, updating context state,
+ * and finalizing textures but before setting up any hardware state for
+ * this draw call.
*/
brw_predraw_resolve_buffers(brw);
brw->vb.max_index = max_index;
brw->state.dirty.brw |= BRW_NEW_VERTICES;
- /* Have to validate state quite late. Will rebuild tnl_program,
- * which depends on varying information.
- *
- * Note this is where brw->vs->prog_data.inputs_read is calculated,
- * so can't access it earlier.
- */
-
- intel_prepare_render(intel);
-
for (i = 0; i < nr_prims; i++) {
int estimated_max_prim_size;
* we've got validated state that needs to be in the same batch as the
* primitives.
*/
- intel_batchbuffer_require_space(intel, estimated_max_prim_size, false);
- intel_batchbuffer_save_state(intel);
+ intel_batchbuffer_require_space(brw, estimated_max_prim_size, false);
+ intel_batchbuffer_save_state(brw);
- if (intel->gen < 6)
- brw_set_prim(brw, &prim[i]);
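+      /* Re-emit vertex state whenever the instance count or base vertex
+       * changes, since the vertex buffer setup depends on both values.
+       */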
+ if (brw->num_instances != prims[i].num_instances) {
+ brw->num_instances = prims[i].num_instances;
+ brw->state.dirty.brw |= BRW_NEW_VERTICES;
+ }
+ if (brw->basevertex != prims[i].basevertex) {
+ brw->basevertex = prims[i].basevertex;
+ brw->state.dirty.brw |= BRW_NEW_VERTICES;
+ }
+ if (brw->gen < 6)
+ brw_set_prim(brw, &prims[i]);
else
- gen6_set_prim(brw, &prim[i]);
+ gen6_set_prim(brw, &prims[i]);
retry:
/* Note that before the loop, brw->state.dirty.brw was set to != 0, and
* brw->state.dirty.brw.
*/
if (brw->state.dirty.brw) {
- intel->no_batch_wrap = true;
+ brw->no_batch_wrap = true;
brw_upload_state(brw);
-
- if (unlikely(brw->intel.Fallback)) {
- intel->no_batch_wrap = false;
- retval = false;
- goto out;
- }
}
- if (intel->gen >= 7)
- gen7_emit_prim(brw, &prim[i], brw->primitive);
- else
- brw_emit_prim(brw, &prim[i], brw->primitive);
+ brw_emit_prim(brw, &prims[i], brw->primitive);
- intel->no_batch_wrap = false;
+ brw->no_batch_wrap = false;
- if (dri_bufmgr_check_aperture_space(&intel->batch.bo, 1)) {
+ if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
if (!fail_next) {
- intel_batchbuffer_reset_to_saved(intel);
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_reset_to_saved(brw);
+ intel_batchbuffer_flush(brw);
fail_next = true;
goto retry;
} else {
- if (intel_batchbuffer_flush(intel) == -ENOSPC) {
+ if (intel_batchbuffer_flush(brw) == -ENOSPC) {
static bool warned = false;
if (!warned) {
}
}
}
-
- if (!_mesa_meta_in_progress(ctx))
- brw_update_primitive_count(brw, &prim[i]);
}
- if (intel->always_flush_batch)
- intel_batchbuffer_flush(intel);
- out:
+ if (brw->always_flush_batch)
+ intel_batchbuffer_flush(brw);
brw_state_cache_check_size(brw);
brw_postdraw_set_buffers_need_resolve(brw);
}
void brw_draw_prims( struct gl_context *ctx,
- const struct gl_client_array *arrays[],
- const struct _mesa_prim *prim,
+ const struct _mesa_prim *prims,
GLuint nr_prims,
const struct _mesa_index_buffer *ib,
GLboolean index_bounds_valid,
GLuint min_index,
GLuint max_index,
struct gl_transform_feedback_object *tfb_vertcount )
{
- bool retval;
+ struct brw_context *brw = brw_context(ctx);
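+   /* The vbo module no longer passes the vertex arrays in; fetch the
+    * currently bound arrays from the context instead.
+    */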
+ const struct gl_client_array **arrays = ctx->Array._DrawArrays;
if (!_mesa_check_conditional_render(ctx))
return;
- if (!vbo_all_varyings_in_vbos(arrays)) {
- if (!index_bounds_valid)
- vbo_get_minmax_indices(ctx, prim, ib, &min_index, &max_index, nr_prims);
-
- /* Decide if we want to rebase. If so we end up recursing once
- * only into this function.
- */
- if (min_index != 0 && !vbo_any_varyings_in_vbos(arrays)) {
- vbo_rebase_prims(ctx, arrays,
- prim, nr_prims,
- ib, min_index, max_index,
- brw_draw_prims );
- return;
- }
+ /* Handle primitive restart if needed */
+ if (brw_handle_primitive_restart(ctx, prims, nr_prims, ib)) {
+ /* The draw was handled, so we can exit now */
+ return;
}
- /* Make a first attempt at drawing:
+ /* If we're going to have to upload any of the user's vertex arrays, then
+ * get the minimum and maximum of their index buffer so we know what range
+ * to upload.
*/
- retval = brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
+ if (!vbo_all_varyings_in_vbos(arrays) && !index_bounds_valid)
+ vbo_get_minmax_indices(ctx, prims, ib, &min_index, &max_index, nr_prims);
- /* Otherwise, we really are out of memory. Pass the drawing
- * command to the software tnl module and which will in turn call
- * swrast to do the drawing.
+ /* Do GL_SELECT and GL_FEEDBACK rendering using swrast, even though it
+ * won't support all the extensions we support.
*/
- if (!retval) {
- _swsetup_Wakeup(ctx);
- _tnl_wakeup(ctx);
- _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
+ if (ctx->RenderMode != GL_RENDER) {
+ perf_debug("%s render mode not supported in hardware\n",
+ _mesa_lookup_enum_by_nr(ctx->RenderMode));
+ _swsetup_Wakeup(ctx);
+ _tnl_wakeup(ctx);
+ _tnl_draw_prims(ctx, arrays, prims, nr_prims, ib, min_index, max_index);
+ return;
}
+ /* Try drawing with the hardware, but don't do anything else if we can't
+ * manage it. swrast doesn't support our featureset, so we can't fall back
+ * to it.
+ */
+ brw_try_draw_prims(ctx, arrays, prims, nr_prims, ib, min_index, max_index);
}
void brw_draw_init( struct brw_context *brw )
{
- struct gl_context *ctx = &brw->intel.ctx;
+ struct gl_context *ctx = &brw->ctx;
struct vbo_context *vbo = vbo_context(ctx);
int i;