#define FILE_DEBUG_FLAG DEBUG_PRIMS
-static GLuint prim_to_hw_prim[GL_POLYGON+1] = {
+const GLuint prim_to_hw_prim[GL_POLYGON+1] = {
_3DPRIM_POINTLIST,
_3DPRIM_LINELIST,
_3DPRIM_LINELOOP,
static void brw_set_prim(struct brw_context *brw,
const struct _mesa_prim *prim)
{
- struct gl_context *ctx = &brw->intel.ctx;
+ struct gl_context *ctx = &brw->ctx;
uint32_t hw_prim = prim_to_hw_prim[prim->mode];
DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));
brw->primitive = hw_prim;
brw->state.dirty.brw |= BRW_NEW_PRIMITIVE;
- if (reduced_prim[prim->mode] != brw->intel.reduced_primitive) {
- brw->intel.reduced_primitive = reduced_prim[prim->mode];
+ if (reduced_prim[prim->mode] != brw->reduced_primitive) {
+ brw->reduced_primitive = reduced_prim[prim->mode];
brw->state.dirty.brw |= BRW_NEW_REDUCED_PRIMITIVE;
}
}
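For reference, the reduced_prim[] table consulted above is defined near the top of brw_draw.c, outside this hunk. It collapses every GL primitive mode into points, lines, or triangles, which is what the pre-Gen6 state atoms (e.g. polygon stipple) actually key off. A sketch, assuming the standard GL mode ordering from GL_POINTS (0) through GL_POLYGON (9):

static const GLenum reduced_prim[GL_POLYGON+1] = {
   GL_POINTS,      /* GL_POINTS */
   GL_LINES,       /* GL_LINES */
   GL_LINES,       /* GL_LINE_LOOP */
   GL_LINES,       /* GL_LINE_STRIP */
   GL_TRIANGLES,   /* GL_TRIANGLES */
   GL_TRIANGLES,   /* GL_TRIANGLE_STRIP */
   GL_TRIANGLES,   /* GL_TRIANGLE_FAN */
   GL_TRIANGLES,   /* GL_QUADS */
   GL_TRIANGLES,   /* GL_QUAD_STRIP */
   GL_TRIANGLES    /* GL_POLYGON */
};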
const struct _mesa_prim *prim,
uint32_t hw_prim)
{
- struct intel_context *intel = &brw->intel;
int verts_per_instance;
int vertex_access_type;
int start_vertex_location;
start_vertex_location = prim->start;
base_vertex_location = prim->basevertex;
if (prim->indexed) {
- vertex_access_type = GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
+ vertex_access_type = brw->gen >= 7 ?
+ GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM :
+ GEN4_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
start_vertex_location += brw->ib.start_vertex_offset;
base_vertex_location += brw->vb.start_vertex_bias;
} else {
- vertex_access_type = GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
+ vertex_access_type = brw->gen >= 7 ?
+ GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL :
+ GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
start_vertex_location += brw->vb.start_vertex_bias;
}
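To make the two offsets concrete, here is a hypothetical indexed draw; all numbers are invented for illustration:

/* glDrawElementsBaseVertex(..., basevertex = 10), with the index data
 * uploaded 8 indices into its buffer (brw->ib.start_vertex_offset == 8)
 * and the vertex data uploaded 4 vertices into its buffer
 * (brw->vb.start_vertex_bias == 4):
 */
start_vertex_location = prim->start + 8;  /* where the HW starts reading indices */
base_vertex_location  = 10 + 4;           /* added to every index the HW fetches */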
/* We only need to trim the primitive count on pre-Gen6. */
- if (intel->gen < 6)
+ if (brw->gen < 6)
verts_per_instance = trim(prim->mode, prim->count);
else
verts_per_instance = prim->count;
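trim() is defined earlier in brw_draw.c and isn't part of this hunk. Pre-Gen6 hardware draws quads by converting them to trifans/tristrips, so the dangling vertices of an incomplete GL_QUADS/GL_QUAD_STRIP draw must be dropped in software. A sketch of the helper:

static GLuint trim(GLenum prim, GLuint length)
{
   if (prim == GL_QUAD_STRIP)
      return length > 3 ? (length - length % 2) : 0;
   else if (prim == GL_QUADS)
      return length - length % 4;
   else
      return length;
}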
* and missed flushes of the render cache as it heads to other parts of
* the gpu besides the draw code.
*/
- if (intel->always_flush_cache) {
- intel_batchbuffer_emit_mi_flush(intel);
+ if (brw->always_flush_cache) {
+ intel_batchbuffer_emit_mi_flush(brw);
}
- BEGIN_BATCH(6);
- OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
- hw_prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
- vertex_access_type);
- OUT_BATCH(verts_per_instance);
- OUT_BATCH(start_vertex_location);
- OUT_BATCH(prim->num_instances);
- OUT_BATCH(prim->base_instance);
- OUT_BATCH(base_vertex_location);
- ADVANCE_BATCH();
-
- intel->batch.need_workaround_flush = true;
-
- if (intel->always_flush_cache) {
- intel_batchbuffer_emit_mi_flush(intel);
- }
-}
-
-static void gen7_emit_prim(struct brw_context *brw,
- const struct _mesa_prim *prim,
- uint32_t hw_prim)
-{
- struct intel_context *intel = &brw->intel;
- int verts_per_instance;
- int vertex_access_type;
- int start_vertex_location;
- int base_vertex_location;
-
- DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
- prim->start, prim->count);
-
- start_vertex_location = prim->start;
- base_vertex_location = prim->basevertex;
- if (prim->indexed) {
- vertex_access_type = GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM;
- start_vertex_location += brw->ib.start_vertex_offset;
- base_vertex_location += brw->vb.start_vertex_bias;
+ if (brw->gen >= 7) {
+ BEGIN_BATCH(7);
+ OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
+ OUT_BATCH(hw_prim | vertex_access_type);
} else {
- vertex_access_type = GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL;
- start_vertex_location += brw->vb.start_vertex_bias;
- }
-
- verts_per_instance = prim->count;
-
- /* If nothing to emit, just return. */
- if (verts_per_instance == 0)
- return;
-
- /* If we're set to always flush, do it before and after the primitive emit.
- * We want to catch both missed flushes that hurt instruction/state cache
- * and missed flushes of the render cache as it heads to other parts of
- * the gpu besides the draw code.
- */
- if (intel->always_flush_cache) {
- intel_batchbuffer_emit_mi_flush(intel);
+ BEGIN_BATCH(6);
+ OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
+ hw_prim << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
+ vertex_access_type);
}
-
- BEGIN_BATCH(7);
- OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
- OUT_BATCH(hw_prim | vertex_access_type);
OUT_BATCH(verts_per_instance);
OUT_BATCH(start_vertex_location);
OUT_BATCH(prim->num_instances);
OUT_BATCH(prim->base_instance);
OUT_BATCH(base_vertex_location);
ADVANCE_BATCH();
- if (intel->always_flush_cache) {
- intel_batchbuffer_emit_mi_flush(intel);
+ /* Only used on Sandybridge; harmless to set elsewhere. */
+ brw->batch.need_workaround_flush = true;
+
+ if (brw->always_flush_cache) {
+ intel_batchbuffer_emit_mi_flush(brw);
}
}
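Both branches emit the same 3DPRIMITIVE fields; Gen7 moved the topology type and vertex-access bit out of the header DWord into DWord 1, growing the packet by one DWord. Schematically (field names abbreviated):

/*        Gen4-6 (6 DWords)                 Gen7+ (7 DWords)
 * DW0    header | topology | access        header
 * DW1    vertex count per instance         topology | access
 * DW2    start vertex location             vertex count per instance
 * DW3    instance count                    start vertex location
 * DW4    start instance (base_instance)    instance count
 * DW5    base vertex location              start instance (base_instance)
 * DW6    --                                base vertex location
 */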
static void
brw_predraw_resolve_buffers(struct brw_context *brw)
{
- struct gl_context *ctx = &brw->intel.ctx;
- struct intel_context *intel = &brw->intel;
+ struct gl_context *ctx = &brw->ctx;
struct intel_renderbuffer *depth_irb;
struct intel_texture_object *tex_obj;
/* Resolve the depth buffer's HiZ buffer. */
depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
if (depth_irb)
- intel_renderbuffer_resolve_hiz(intel, depth_irb);
+ intel_renderbuffer_resolve_hiz(brw, depth_irb);
/* Resolve depth buffer of each enabled depth texture, and color buffer of
* each fast-clear-enabled color texture.
tex_obj = intel_texture_object(ctx->Texture.Unit[i]._Current);
if (!tex_obj || !tex_obj->mt)
continue;
- intel_miptree_all_slices_resolve_depth(intel, tex_obj->mt);
- intel_miptree_resolve_color(intel, tex_obj->mt);
+ intel_miptree_all_slices_resolve_depth(brw, tex_obj->mt);
+ intel_miptree_resolve_color(brw, tex_obj->mt);
}
}
*/
static void brw_postdraw_set_buffers_need_resolve(struct brw_context *brw)
{
- struct intel_context *intel = &brw->intel;
- struct gl_context *ctx = &brw->intel.ctx;
+ struct gl_context *ctx = &brw->ctx;
struct gl_framebuffer *fb = ctx->DrawBuffer;
struct intel_renderbuffer *front_irb = NULL;
struct intel_renderbuffer *back_irb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
+ struct gl_renderbuffer_attachment *depth_att = &fb->Attachment[BUFFER_DEPTH];
- if (intel->is_front_buffer_rendering)
+ if (brw->is_front_buffer_rendering)
front_irb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
if (front_irb)
if (back_irb)
intel_renderbuffer_set_needs_downsample(back_irb);
if (depth_irb && ctx->Depth.Mask)
- intel_renderbuffer_set_needs_depth_resolve(depth_irb);
+ intel_renderbuffer_att_set_needs_depth_resolve(depth_att);
}
/* May fail if out of video memory for texture or vbo upload, or on
 * fallback conditions.
*/
static bool brw_try_draw_prims( struct gl_context *ctx,
const struct gl_client_array *arrays[],
- const struct _mesa_prim *prim,
+ const struct _mesa_prim *prims,
GLuint nr_prims,
const struct _mesa_index_buffer *ib,
GLuint min_index,
GLuint max_index )
{
- struct intel_context *intel = intel_context(ctx);
struct brw_context *brw = brw_context(ctx);
bool retval = true;
GLuint i;
if (ctx->NewState)
_mesa_update_state( ctx );
+ /* Find the highest sampler unit used by each shader program. A bit-count
+ * won't work since ARB programs use the texture unit number as the sampler
+ * index.
+ */
+ brw->wm.sampler_count = _mesa_fls(ctx->FragmentProgram._Current->Base.SamplersUsed);
+ brw->gs.base.sampler_count = ctx->GeometryProgram._Current ?
+ _mesa_fls(ctx->GeometryProgram._Current->Base.SamplersUsed) : 0;
+ brw->vs.base.sampler_count =
+ _mesa_fls(ctx->VertexProgram._Current->Base.SamplersUsed);
+
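+ _mesa_fls() is "find last set": one plus the index of the highest set bit, or zero for an empty mask. A stand-in sketch (fls_sketch is a hypothetical name, not the Mesa helper) shows why a popcount would under-count here:

#include <stdint.h>

/* Illustration only; behaves like find-last-set for 32-bit masks. */
static unsigned fls_sketch(uint32_t mask)
{
   return mask ? 32 - __builtin_clz(mask) : 0;
}

/* An ARB program sampling only texture unit 3 sets just bit 3:
 * SamplersUsed == 0x8.  popcount(0x8) == 1, but the sampler table must
 * cover indices 0..3, so the driver needs fls_sketch(0x8) == 4 entries.
 */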
/* We have to validate the textures *before* checking for fallbacks;
* otherwise, the software fallback won't be able to rely on the
* texture state, the firstLevel and lastLevel fields won't be
*/
brw_validate_textures( brw );
- intel_prepare_render(intel);
+ intel_prepare_render(brw);
/* This workaround has to happen outside of brw_upload_state() because it
* may flush the batchbuffer for a blit, affecting the state flags.
* we've got validated state that needs to be in the same batch as the
* primitives.
*/
- intel_batchbuffer_require_space(intel, estimated_max_prim_size, false);
- intel_batchbuffer_save_state(intel);
+ intel_batchbuffer_require_space(brw, estimated_max_prim_size, false);
+ intel_batchbuffer_save_state(brw);
- if (brw->num_instances != prim->num_instances) {
- brw->num_instances = prim->num_instances;
+ if (brw->num_instances != prims[i].num_instances) {
+ brw->num_instances = prims[i].num_instances;
brw->state.dirty.brw |= BRW_NEW_VERTICES;
}
- if (brw->basevertex != prim->basevertex) {
- brw->basevertex = prim->basevertex;
+ if (brw->basevertex != prims[i].basevertex) {
+ brw->basevertex = prims[i].basevertex;
brw->state.dirty.brw |= BRW_NEW_VERTICES;
}
- if (intel->gen < 6)
- brw_set_prim(brw, &prim[i]);
+ if (brw->gen < 6)
+ brw_set_prim(brw, &prims[i]);
else
- gen6_set_prim(brw, &prim[i]);
+ gen6_set_prim(brw, &prims[i]);
retry:
/* Note that before the loop, brw->state.dirty.brw was set to != 0, and
* brw->state.dirty.brw.
*/
if (brw->state.dirty.brw) {
- intel->no_batch_wrap = true;
+ brw->no_batch_wrap = true;
brw_upload_state(brw);
}
- if (intel->gen >= 7)
- gen7_emit_prim(brw, &prim[i], brw->primitive);
- else
- brw_emit_prim(brw, &prim[i], brw->primitive);
+ brw_emit_prim(brw, &prims[i], brw->primitive);
- intel->no_batch_wrap = false;
+ brw->no_batch_wrap = false;
- if (dri_bufmgr_check_aperture_space(&intel->batch.bo, 1)) {
+ if (dri_bufmgr_check_aperture_space(&brw->batch.bo, 1)) {
if (!fail_next) {
- intel_batchbuffer_reset_to_saved(intel);
- intel_batchbuffer_flush(intel);
+ intel_batchbuffer_reset_to_saved(brw);
+ intel_batchbuffer_flush(brw);
fail_next = true;
goto retry;
} else {
- if (intel_batchbuffer_flush(intel) == -ENOSPC) {
+ if (intel_batchbuffer_flush(brw) == -ENOSPC) {
static bool warned = false;
if (!warned) {
}
}
- if (intel->always_flush_batch)
- intel_batchbuffer_flush(intel);
+ if (brw->always_flush_batch)
+ intel_batchbuffer_flush(brw);
brw_state_cache_check_size(brw);
brw_postdraw_set_buffers_need_resolve(brw);
}
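The retry path above amounts to a small transaction on the batchbuffer, using the checkpoint taken before the loop:

/* Schematically:
 *
 *   intel_batchbuffer_save_state(brw);         // checkpoint before any prim
 *   emit state + 3DPRIMITIVE
 *   if (aperture space would be exceeded) {
 *      intel_batchbuffer_reset_to_saved(brw);  // drop this prim's commands
 *      intel_batchbuffer_flush(brw);           // submit the earlier work
 *      goto retry;                             // re-upload state, re-emit
 *   }
 *
 * A second failure flushes anyway and warns once if even a lone
 * primitive cannot fit (-ENOSPC).
 */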
void brw_draw_prims( struct gl_context *ctx,
- const struct _mesa_prim *prim,
+ const struct _mesa_prim *prims,
GLuint nr_prims,
const struct _mesa_index_buffer *ib,
GLboolean index_bounds_valid,
GLuint min_index,
GLuint max_index,
struct gl_transform_feedback_object *tfb_vertcount )
{
- struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
const struct gl_client_array **arrays = ctx->Array._DrawArrays;
if (!_mesa_check_conditional_render(ctx))
return;
/* Handle primitive restart if needed */
- if (brw_handle_primitive_restart(ctx, prim, nr_prims, ib)) {
+ if (brw_handle_primitive_restart(ctx, prims, nr_prims, ib)) {
/* The draw was handled, so we can exit now */
return;
}
* to upload.
*/
if (!vbo_all_varyings_in_vbos(arrays) && !index_bounds_valid)
- vbo_get_minmax_indices(ctx, prim, ib, &min_index, &max_index, nr_prims);
+ vbo_get_minmax_indices(ctx, prims, ib, &min_index, &max_index, nr_prims);
/* Do GL_SELECT and GL_FEEDBACK rendering using swrast, even though it
* won't support all the extensions we support.
_mesa_lookup_enum_by_nr(ctx->RenderMode));
_swsetup_Wakeup(ctx);
_tnl_wakeup(ctx);
- _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
+ _tnl_draw_prims(ctx, arrays, prims, nr_prims, ib, min_index, max_index);
return;
}
* manage it. swrast doesn't support our featureset, so we can't fall back
* to it.
*/
- brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
+ brw_try_draw_prims(ctx, arrays, prims, nr_prims, ib, min_index, max_index);
}
void brw_draw_init( struct brw_context *brw )
{
- struct gl_context *ctx = &brw->intel.ctx;
+ struct gl_context *ctx = &brw->ctx;
struct vbo_context *vbo = vbo_context(ctx);
int i;