#include "brw_defines.h"
#include "brw_context.h"
#include "brw_state.h"
+#include "brw_vs.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
-#include "intel_regions.h"
#include "intel_buffer_objects.h"
#define FILE_DEBUG_FLAG DEBUG_PRIMS
-const GLuint prim_to_hw_prim[GL_TRIANGLE_STRIP_ADJACENCY+1] = {
+static const GLuint prim_to_hw_prim[GL_TRIANGLE_STRIP_ADJACENCY+1] = {
_3DPRIM_POINTLIST,
_3DPRIM_LINELIST,
_3DPRIM_LINELOOP,
GL_TRIANGLES
};
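+/* Translate a GL primitive (or a hardware primitive encoded via
+ * BRW_PRIM_OFFSET) into a 3DPRIMITIVE topology type.  Modes at or above
+ * BRW_PRIM_OFFSET already carry the hardware value and are simply decoded;
+ * everything else goes through prim_to_hw_prim above.
+ */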
+uint32_t
+get_hw_prim_for_gl_prim(int mode)
+{
+ if (mode >= BRW_PRIM_OFFSET)
+ return mode - BRW_PRIM_OFFSET;
+ else {
+ assert(mode < ARRAY_SIZE(prim_to_hw_prim));
+ return prim_to_hw_prim[mode];
+ }
+}
+
/* When the primitive changes, set a state bit and re-validate.  This is not
 * the nicest approach; we would rather make all the programs immune to the
 * active primitive (i.e. cope with all possibilities).  That may not be
 * realistic, however.
*/
-static void brw_set_prim(struct brw_context *brw,
- const struct _mesa_prim *prim)
+static void
+brw_set_prim(struct brw_context *brw, const struct _mesa_prim *prim)
{
struct gl_context *ctx = &brw->ctx;
- uint32_t hw_prim = prim_to_hw_prim[prim->mode];
+ uint32_t hw_prim = get_hw_prim_for_gl_prim(prim->mode);
- DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));
+ DBG("PRIM: %s\n", _mesa_enum_to_string(prim->mode));
/* Slight optimization to avoid the GS program when not needed:
*/
if (hw_prim != brw->primitive) {
brw->primitive = hw_prim;
- brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
+ brw->ctx.NewDriverState |= BRW_NEW_PRIMITIVE;
if (reduced_prim[prim->mode] != brw->reduced_primitive) {
brw->reduced_primitive = reduced_prim[prim->mode];
- brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
+ brw->ctx.NewDriverState |= BRW_NEW_REDUCED_PRIMITIVE;
}
}
}
-static void gen6_set_prim(struct brw_context *brw,
- const struct _mesa_prim *prim)
+static void
+gen6_set_prim(struct brw_context *brw, const struct _mesa_prim *prim)
{
- uint32_t hw_prim;
-
- DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));
-
- hw_prim = prim_to_hw_prim[prim->mode];
+ DBG("PRIM: %s\n", _mesa_enum_to_string(prim->mode));
+ const uint32_t hw_prim = get_hw_prim_for_gl_prim(prim->mode);
if (hw_prim != brw->primitive) {
brw->primitive = hw_prim;
- brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
+ brw->ctx.NewDriverState |= BRW_NEW_PRIMITIVE;
}
}
* quads so that those dangling vertices won't get drawn when we convert to
* trifans/tristrips.
*/
-static GLuint trim(GLenum prim, GLuint length)
+static GLuint
+trim(GLenum prim, GLuint length)
{
if (prim == GL_QUAD_STRIP)
return length > 3 ? (length - length % 2) : 0;
}
-static void brw_emit_prim(struct brw_context *brw,
- const struct _mesa_prim *prim,
- uint32_t hw_prim)
+static void
+brw_emit_prim(struct brw_context *brw,
+ const struct _mesa_prim *prim,
+ uint32_t hw_prim)
{
int verts_per_instance;
int vertex_access_type;
- int start_vertex_location;
- int base_vertex_location;
int indirect_flag;
- DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
+ DBG("PRIM: %s %d %d\n", _mesa_enum_to_string(prim->mode),
prim->start, prim->count);
- start_vertex_location = prim->start;
- base_vertex_location = prim->basevertex;
+ int start_vertex_location = prim->start;
+ int base_vertex_location = prim->basevertex;
+
if (prim->indexed) {
vertex_access_type = brw->gen >= 7 ?
GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM :
* and missed flushes of the render cache as it heads to other parts of
 * the driver besides the draw code.
*/
- if (brw->always_flush_cache) {
- intel_batchbuffer_emit_mi_flush(brw);
- }
+ if (brw->always_flush_cache)
+ brw_emit_mi_flush(brw);
/* If indirect, emit a bunch of loads from the indirect BO. */
if (prim->is_indirect) {
OUT_BATCH(0);
ADVANCE_BATCH();
}
- }
- else {
+ } else {
indirect_flag = 0;
}
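+   /* 3DPRIMITIVE is 7 DWords on Gen7+ (the topology type moved into its own
+    * DWord) and 6 DWords on earlier generations; the length field in DW0 is
+    * biased by 2, hence the (7 - 2) and (6 - 2) values below.
+    */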
+ BEGIN_BATCH(brw->gen >= 7 ? 7 : 6);
if (brw->gen >= 7) {
- BEGIN_BATCH(7);
- OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2) | indirect_flag);
+ const int predicate_enable =
+ (brw->predicate.state == BRW_PREDICATE_STATE_USE_BIT)
+ ? GEN7_3DPRIM_PREDICATE_ENABLE : 0;
+
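+      /* Gen7+ can predicate the draw: when conditional rendering has loaded
+       * the predicate register, setting this bit lets the hardware skip the
+       * 3DPRIMITIVE on its own.
+       */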
+ OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2) | indirect_flag | predicate_enable);
OUT_BATCH(hw_prim | vertex_access_type);
} else {
- BEGIN_BATCH(6);
OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
hw_prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
vertex_access_type);
OUT_BATCH(base_vertex_location);
ADVANCE_BATCH();
- /* Only used on Sandybridge; harmless to set elsewhere. */
- brw->batch.need_workaround_flush = true;
-
- if (brw->always_flush_cache) {
- intel_batchbuffer_emit_mi_flush(brw);
- }
+ if (brw->always_flush_cache)
+ brw_emit_mi_flush(brw);
}
-static void brw_merge_inputs( struct brw_context *brw,
- const struct gl_client_array *arrays[])
+static void
+brw_merge_inputs(struct brw_context *brw,
+ const struct gl_client_array *arrays[])
{
+ const struct gl_context *ctx = &brw->ctx;
GLuint i;
for (i = 0; i < brw->vb.nr_buffers; i++) {
brw->vb.inputs[i].buffer = -1;
brw->vb.inputs[i].glarray = arrays[i];
}
-}
-/*
- * \brief Resolve buffers before drawing.
- *
- * Resolve the depth buffer's HiZ buffer, resolve the depth buffer of each
- * enabled depth texture, and flush the render cache for any dirty textures.
- *
- * (In the future, this will also perform MSAA resolves).
- */
-static void
-brw_predraw_resolve_buffers(struct brw_context *brw)
-{
- struct gl_context *ctx = &brw->ctx;
- struct intel_renderbuffer *depth_irb;
- struct intel_texture_object *tex_obj;
-
- /* Resolve the depth buffer's HiZ buffer. */
- depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
- if (depth_irb)
- intel_renderbuffer_resolve_hiz(brw, depth_irb);
-
- /* Resolve depth buffer and render cache of each enabled texture. */
- int maxEnabledUnit = ctx->Texture._MaxEnabledTexImageUnit;
- for (int i = 0; i <= maxEnabledUnit; i++) {
- if (!ctx->Texture.Unit[i]._ReallyEnabled)
- continue;
- tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
- if (!tex_obj || !tex_obj->mt)
- continue;
- intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
- intel_miptree_resolve_color(brw, tex_obj->mt);
- brw_render_cache_set_check_flush(brw, tex_obj->mt->region->bo);
+ if (brw->gen < 8 && !brw->is_haswell) {
+ struct gl_program *vp = &ctx->VertexProgram._Current->Base;
+ /* Prior to Haswell, the hardware can't natively support GL_FIXED or
+ * 2_10_10_10_REV vertex formats. Set appropriate workaround flags.
+ */
+ for (i = 0; i < VERT_ATTRIB_MAX; i++) {
+ if (!(vp->InputsRead & BITFIELD64_BIT(i)))
+ continue;
+
+ uint8_t wa_flags = 0;
+
+ switch (brw->vb.inputs[i].glarray->Type) {
+
+ case GL_FIXED:
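+         /* GL_FIXED needs to be rescaled in the shader; stash the component
+          * count in the low bits of the workaround flags so the VS knows how
+          * many channels to fix up.
+          */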
+ wa_flags = brw->vb.inputs[i].glarray->Size;
+ break;
+
+ case GL_INT_2_10_10_10_REV:
+ wa_flags |= BRW_ATTRIB_WA_SIGN;
+         /* fallthrough */
+
+ case GL_UNSIGNED_INT_2_10_10_10_REV:
+ if (brw->vb.inputs[i].glarray->Format == GL_BGRA)
+ wa_flags |= BRW_ATTRIB_WA_BGRA;
+
+ if (brw->vb.inputs[i].glarray->Normalized)
+ wa_flags |= BRW_ATTRIB_WA_NORMALIZE;
+ else if (!brw->vb.inputs[i].glarray->Integer)
+ wa_flags |= BRW_ATTRIB_WA_SCALE;
+
+ break;
+ }
+
+ if (brw->vb.attrib_wa_flags[i] != wa_flags) {
+ brw->vb.attrib_wa_flags[i] = wa_flags;
+ brw->ctx.NewDriverState |= BRW_NEW_VS_ATTRIB_WORKAROUNDS;
+ }
+ }
}
}
* Also mark any render targets which will be textured as needing a render
* cache flush.
*/
-static void brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
+static void
+brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
{
struct gl_context *ctx = &brw->ctx;
struct gl_framebuffer *fb = ctx->DrawBuffer;
back_irb->need_downsample = true;
if (depth_irb && ctx->Depth.Mask) {
intel_renderbuffer_att_set_needs_depth_resolve(depth_att);
- brw_render_cache_set_add_bo(brw, depth_irb->mt->region->bo);
+ brw_render_cache_set_add_bo(brw, depth_irb->mt->bo);
}
if (ctx->Extensions.ARB_stencil_texturing &&
stencil_irb && ctx->Stencil._WriteEnabled) {
- brw_render_cache_set_add_bo(brw, stencil_irb->mt->region->bo);
+ brw_render_cache_set_add_bo(brw, stencil_irb->mt->bo);
}
for (int i = 0; i < fb->_NumColorDrawBuffers; i++) {
struct intel_renderbuffer *irb =
   intel_renderbuffer(fb->_ColorDrawBuffers[i]);
if (irb)
- brw_render_cache_set_add_bo(brw, irb->mt->region->bo);
+ brw_render_cache_set_add_bo(brw, irb->mt->bo);
}
}
/* May fail if out of video memory for texture or vbo upload, or on
* fallback conditions.
*/
-static bool brw_try_draw_prims( struct gl_context *ctx,
- const struct gl_client_array *arrays[],
- const struct _mesa_prim *prims,
- GLuint nr_prims,
- const struct _mesa_index_buffer *ib,
- GLuint min_index,
- GLuint max_index,
- struct gl_buffer_object *indirect)
+static void
+brw_try_draw_prims(struct gl_context *ctx,
+ const struct gl_client_array *arrays[],
+ const struct _mesa_prim *prims,
+ GLuint nr_prims,
+ const struct _mesa_index_buffer *ib,
+ GLuint min_index,
+ GLuint max_index,
+ struct gl_buffer_object *indirect)
{
struct brw_context *brw = brw_context(ctx);
- bool retval = true;
GLuint i;
bool fail_next = false;
if (ctx->NewState)
- _mesa_update_state( ctx );
+ _mesa_update_state(ctx);
/* Find the highest sampler unit used by each shader program. A bit-count
* won't work since ARB programs use the texture unit number as the sampler
* software fallback will segfault if it attempts to access any
* texture level other than level 0.
*/
- brw_validate_textures( brw );
+ brw_validate_textures(brw);
intel_prepare_render(brw);
- /* This workaround has to happen outside of brw_upload_state() because it
- * may flush the batchbuffer for a blit, affecting the state flags.
+ /* This workaround has to happen outside of brw_upload_render_state()
+ * because it may flush the batchbuffer for a blit, affecting the state
+ * flags.
*/
brw_workaround_depthstencil_alignment(brw, 0);
- /* Resolves must occur after updating renderbuffers, updating context state,
- * and finalizing textures but before setting up any hardware state for
- * this draw call.
- */
- brw_predraw_resolve_buffers(brw);
-
/* Bind all inputs, derive varying and size information:
*/
- brw_merge_inputs( brw, arrays );
+ brw_merge_inputs(brw, arrays);
brw->ib.ib = ib;
- brw->state.dirty.brw |= BRW_NEW_INDICES;
+ brw->ctx.NewDriverState |= BRW_NEW_INDICES;
brw->vb.min_index = min_index;
brw->vb.max_index = max_index;
- brw->state.dirty.brw |= BRW_NEW_VERTICES;
+ brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
for (i = 0; i < nr_prims; i++) {
int estimated_max_prim_size;
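+      /* SAMPLER_STATE is 4 DWords (16 bytes) per sampler on every
+       * generation this driver supports.
+       */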
+ const int sampler_state_size = 16;
estimated_max_prim_size = 512; /* batchbuffer commands */
- estimated_max_prim_size += (BRW_MAX_TEX_UNIT *
- (sizeof(struct brw_sampler_state) +
- sizeof(struct gen5_sampler_default_color)));
+ estimated_max_prim_size += BRW_MAX_TEX_UNIT *
+ (sampler_state_size + sizeof(struct gen5_sampler_default_color));
estimated_max_prim_size += 1024; /* gen6 VS push constants */
estimated_max_prim_size += 1024; /* gen6 WM push constants */
estimated_max_prim_size += 512; /* misc. pad */
intel_batchbuffer_require_space(brw, estimated_max_prim_size, RENDER_RING);
intel_batchbuffer_save_state(brw);
- if (brw->num_instances != prims[i].num_instances) {
+ if (brw->num_instances != prims[i].num_instances ||
+ brw->basevertex != prims[i].basevertex) {
brw->num_instances = prims[i].num_instances;
- brw->state.dirty.brw |= BRW_NEW_VERTICES;
- brw_merge_inputs(brw, arrays);
- }
- if (brw->basevertex != prims[i].basevertex) {
brw->basevertex = prims[i].basevertex;
- brw->state.dirty.brw |= BRW_NEW_VERTICES;
- brw_merge_inputs(brw, arrays);
+ if (i > 0) { /* For i == 0 we just did this before the loop */
+ brw->ctx.NewDriverState |= BRW_NEW_VERTICES;
+ brw_merge_inputs(brw, arrays);
+ }
+ }
+
+ brw->draw.gl_basevertex =
+ prims[i].indexed ? prims[i].basevertex : prims[i].start;
+
+ drm_intel_bo_unreference(brw->draw.draw_params_bo);
+
+ if (prims[i].is_indirect) {
+ /* Point draw_params_bo at the indirect buffer. */
+ brw->draw.draw_params_bo =
+ intel_buffer_object(ctx->DrawIndirectBuffer)->buffer;
+ drm_intel_bo_reference(brw->draw.draw_params_bo);
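+      /* The baseVertex field sits at byte 12 of DrawElementsIndirectCommand
+       * and the first-vertex field at byte 8 of DrawArraysIndirectCommand,
+       * hence the 12/8 offsets below.
+       */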
+ brw->draw.draw_params_offset =
+ prims[i].indirect_offset + (prims[i].indexed ? 12 : 8);
+ } else {
+ /* Set draw_params_bo to NULL so brw_prepare_vertices knows it
+ * has to upload gl_BaseVertex and such if they're needed.
+ */
+ brw->draw.draw_params_bo = NULL;
+ brw->draw.draw_params_offset = 0;
}
+
if (brw->gen < 6)
brw_set_prim(brw, &prims[i]);
else
gen6_set_prim(brw, &prims[i]);
retry:
- /* Note that before the loop, brw->state.dirty.brw was set to != 0, and
+
+ /* Note that before the loop, brw->ctx.NewDriverState was set to != 0, and
* that the state updated in the loop outside of this block is that in
* *_set_prim or intel_batchbuffer_flush(), which only impacts
- * brw->state.dirty.brw.
+ * brw->ctx.NewDriverState.
*/
- if (brw->state.dirty.brw) {
+ if (brw->ctx.NewDriverState) {
brw->no_batch_wrap = true;
- brw_upload_state(brw);
+ brw_upload_render_state(brw);
}
brw_emit_prim(brw, &prims[i], brw->primitive);
fail_next = true;
goto retry;
} else {
- if (intel_batchbuffer_flush(brw) == -ENOSPC) {
- static bool warned = false;
-
- if (!warned) {
- fprintf(stderr, "i965: Single primitive emit exceeded"
- "available aperture space\n");
- warned = true;
- }
-
- retval = false;
- }
+ int ret = intel_batchbuffer_flush(brw);
+ WARN_ONCE(ret == -ENOSPC,
+ "i965: Single primitive emit exceeded "
+ "available aperture space\n");
}
}
/* Now that we know we haven't run out of aperture space, we can safely
* reset the dirty bits.
*/
- if (brw->state.dirty.brw)
- brw_clear_dirty_bits(brw);
+ if (brw->ctx.NewDriverState)
+ brw_render_state_finished(brw);
}
if (brw->always_flush_batch)
brw_state_cache_check_size(brw);
brw_postdraw_set_buffers_need_resolve(brw);
- return retval;
+ return;
}
-void brw_draw_prims( struct gl_context *ctx,
- const struct _mesa_prim *prims,
- GLuint nr_prims,
- const struct _mesa_index_buffer *ib,
- GLboolean index_bounds_valid,
- GLuint min_index,
- GLuint max_index,
- struct gl_transform_feedback_object *unused_tfb_object,
- struct gl_buffer_object *indirect )
+void
+brw_draw_prims(struct gl_context *ctx,
+ const struct _mesa_prim *prims,
+ GLuint nr_prims,
+ const struct _mesa_index_buffer *ib,
+ GLboolean index_bounds_valid,
+ GLuint min_index,
+ GLuint max_index,
+ struct gl_transform_feedback_object *unused_tfb_object,
+ struct gl_buffer_object *indirect)
{
struct brw_context *brw = brw_context(ctx);
const struct gl_client_array **arrays = ctx->Array._DrawArrays;
assert(unused_tfb_object == NULL);
- if (!_mesa_check_conditional_render(ctx))
+ if (!brw_check_conditional_render(brw))
return;
/* Handle primitive restart if needed */
*/
if (ctx->RenderMode != GL_RENDER) {
perf_debug("%s render mode not supported in hardware\n",
- _mesa_lookup_enum_by_nr(ctx->RenderMode));
+ _mesa_enum_to_string(ctx->RenderMode));
_swsetup_Wakeup(ctx);
_tnl_wakeup(ctx);
_tnl_draw_prims(ctx, prims, nr_prims, ib,
* manage it. swrast doesn't support our featureset, so we can't fall back
* to it.
*/
- brw_try_draw_prims(ctx, arrays, prims, nr_prims, ib, min_index, max_index, indirect);
+ brw_try_draw_prims(ctx, arrays, prims, nr_prims, ib, min_index, max_index,
+ indirect);
}
-void brw_draw_init( struct brw_context *brw )
+void
+brw_draw_init(struct brw_context *brw)
{
struct gl_context *ctx = &brw->ctx;
struct vbo_context *vbo = vbo_context(ctx);
- int i;
/* Register our drawing function:
*/
vbo->draw_prims = brw_draw_prims;
- for (i = 0; i < VERT_ATTRIB_MAX; i++)
+ for (int i = 0; i < VERT_ATTRIB_MAX; i++)
brw->vb.inputs[i].buffer = -1;
brw->vb.nr_buffers = 0;
brw->vb.nr_enabled = 0;
}
-void brw_draw_destroy( struct brw_context *brw )
+void
+brw_draw_destroy(struct brw_context *brw)
{
int i;