struct {
struct brw_state_flags dirty;
- /**
- * List of buffers accumulated in brw_validate_state to receive
- * drm_intel_bo_check_aperture treatment before exec, so we can
- * know if we should flush the batch and try again before
- * emitting primitives.
- *
- * This can be a fixed number as we only have a limited number of
- * objects referenced from the batchbuffer in a primitive emit,
- * consisting of the vertex buffers, pipelined state pointers,
- * the CURBE, the depth buffer, and a query BO.
- */
- drm_intel_bo *validated_bos[VERT_ATTRIB_MAX + BRW_WM_MAX_SURF + 16];
- unsigned int validated_bo_count;
} state;
struct brw_cache cache;
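For context, the comment deleted above describes the pre-exec aperture check this list fed. A minimal sketch of that pattern follows, assuming the real libdrm call drm_intel_bufmgr_check_aperture_space() plus a hypothetical wrapper; this is illustrative, not code from this patch:

/* Sketch of the aperture-check pattern the removed list supported.
 * drm_intel_bufmgr_check_aperture_space() is the real libdrm entry
 * point; this wrapper and its policy are illustrative only. */
static void
sketch_check_aperture(struct brw_context *brw)
{
   /* Ask libdrm whether every BO the next primitive references can
    * be mapped into the GTT aperture simultaneously... */
   if (drm_intel_bufmgr_check_aperture_space(brw->state.validated_bos,
                                             brw->state.validated_bo_count) != 0) {
      /* ...and if not, flush the batch so the primitive can be
       * retried against an empty batchbuffer. */
      intel_batchbuffer_flush(&brw->intel);
   }
}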
bufsz);
}
- brw_add_validated_bo(brw, brw->curbe.curbe_bo);
-
/* Because this provokes an action (i.e. copying the constants into the
* URB), it shouldn't be short-circuited if identical to the
* previous time - because e.g. the URB destination may have
return;
if (brw->vb.nr_buffers)
- goto validate;
+ goto prepare;
/* XXX: In the rare cases where this happens we fall back all
* the way to software rasterization, although a tnl fallback
brw->vb.nr_buffers = j;
-validate:
+prepare:
brw_prepare_query_begin(brw);
- for (i = 0; i < brw->vb.nr_buffers; i++) {
- brw_add_validated_bo(brw, brw->vb.buffers[i].bo);
- }
}
static void brw_emit_vertices(struct brw_context *brw)
drm_intel_bo_unreference(brw->ib.bo);
brw->ib.bo = bo;
- brw_add_validated_bo(brw, brw->ib.bo);
brw->state.dirty.brw |= BRW_NEW_INDEX_BUFFER;
} else {
drm_intel_bo_unreference(bo);
.emit = upload_psp_urb_cbs,
};
-static void prepare_depthbuffer(struct brw_context *brw)
-{
- struct intel_context *intel = &brw->intel;
- struct gl_context *ctx = &intel->ctx;
- struct gl_framebuffer *fb = ctx->DrawBuffer;
- struct intel_renderbuffer *drb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
- struct intel_renderbuffer *srb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
-
- if (drb)
- brw_add_validated_bo(brw, drb->region->bo);
- if (drb && drb->hiz_region)
- brw_add_validated_bo(brw, drb->hiz_region->bo);
- if (srb)
- brw_add_validated_bo(brw, srb->region->bo);
-}
-
static void emit_depthbuffer(struct brw_context *brw)
{
struct intel_context *intel = &brw->intel;
.brw = BRW_NEW_BATCH,
.cache = 0,
},
- .prepare = prepare_depthbuffer,
.emit = emit_depthbuffer,
};
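For reference, the .prepare hook deleted from this atom was one of two callbacks in a tracked-state descriptor; its rough shape, inferred from the initializers above (the authoritative definition lives in brw_context.h of this Mesa era), was:

/* Approximate shape of a state atom, inferred from the initializers
 * in this patch; consult brw_context.h for the exact definition. */
struct brw_tracked_state {
   struct brw_state_flags dirty;              /* mesa/brw/cache trigger bits */
   void (*prepare)(struct brw_context *brw);  /* collected BOs for validation (removed) */
   void (*emit)(struct brw_context *brw);     /* writes commands to the batch */
};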
brw->query.index = 0;
}
-
- brw_add_validated_bo(brw, brw->query.bo);
}
/** Called just before primitive drawing to get a beginning PS_DEPTH_COUNT. */
#include "brw_context.h"
-static INLINE void
-brw_add_validated_bo(struct brw_context *brw, drm_intel_bo *bo)
-{
- assert(brw->state.validated_bo_count < ARRAY_SIZE(brw->state.validated_bos));
-
- if (bo != NULL) {
- drm_intel_bo_reference(bo);
- brw->state.validated_bos[brw->state.validated_bo_count++] = bo;
- }
-};
-
extern const struct brw_tracked_state brw_blend_constant_color;
extern const struct brw_tracked_state brw_cc_vp;
extern const struct brw_tracked_state brw_cc_unit;
void brw_upload_state(struct brw_context *brw);
void brw_init_state(struct brw_context *brw);
void brw_destroy_state(struct brw_context *brw);
-void brw_clear_validated_bos(struct brw_context *brw);
/***********************************************************************
* brw_state_cache.c
result->cache = a->cache ^ b->cache;
}
-void
-brw_clear_validated_bos(struct brw_context *brw)
-{
- int i;
-
- /* Clear the last round of validated bos */
- for (i = 0; i < brw->state.validated_bo_count; i++) {
- drm_intel_bo_unreference(brw->state.validated_bos[i]);
- brw->state.validated_bos[i] = NULL;
- }
- brw->state.validated_bo_count = 0;
-}
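This clear function was the release half of the reference pairing with brw_add_validated_bo() removed above; condensed, the lifetime was (all names come from the removed code, nothing new):

/* Condensed from the two removed functions: each add took a
 * reference so the BO stayed alive across validation and exec,
 * and the per-draw clear dropped it again. */
drm_intel_bo_reference(bo);                                /* brw_add_validated_bo() */
brw->state.validated_bos[brw->state.validated_bo_count++] = bo;
/* ... aperture check + batch exec run while references are held ... */
drm_intel_bo_unreference(brw->state.validated_bos[i]);     /* brw_clear_validated_bos() */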
-
struct dirty_bit_map {
uint32_t bit;
char *name;
int num_atoms = brw->num_prepare_atoms;
GLuint i;
- brw_clear_validated_bos(brw);
-
state->mesa |= brw->intel.NewGLState;
brw->intel.NewGLState = 0;
- brw_add_validated_bo(brw, intel->batch.bo);
-
if (brw->emit_state_always) {
state->mesa |= ~0;
state->brw |= ~0;
int i;
static int dirty_count = 0;
- brw_clear_validated_bos(brw);
-
if (unlikely(INTEL_DEBUG)) {
/* Debug version which enforces various sanity checks on the
* state flags which are generated and checked to help ensure
int nr_surfaces = 0;
if (brw->vs.const_bo) {
- brw_add_validated_bo(brw, brw->vs.const_bo);
nr_surfaces = 1;
}
brw_destroy_state(brw);
brw_draw_destroy( brw );
- brw_clear_validated_bos(brw);
ralloc_free(brw->wm.compile_data);
dri_bo_release(&brw->curbe.curbe_bo);
int nr_surfaces = 0;
for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
- struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i];
- struct intel_renderbuffer *irb = intel_renderbuffer(rb);
- struct intel_region *region = irb ? irb->region : NULL;
-
- if (region)
- brw_add_validated_bo(brw, region->bo);
nr_surfaces = SURF_INDEX_DRAW(i) + 1;
}
if (brw->wm.const_bo) {
- brw_add_validated_bo(brw, brw->wm.const_bo);
nr_surfaces = SURF_INDEX_FRAG_CONST_BUFFER + 1;
}
const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[i];
if (texUnit->_ReallyEnabled) {
- struct gl_texture_object *tObj = texUnit->_Current;
- struct intel_texture_object *intelObj = intel_texture_object(tObj);
-
- brw_add_validated_bo(brw, intelObj->mt->region->bo);
nr_surfaces = SURF_INDEX_TEXTURE(i) + 1;
}
}
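The nr_surfaces arithmetic in this hunk and in the gen6 hunks below leans on the SURF_INDEX_* binding-table layout; illustratively, the table is partitioned as sketched here (values are representative of this era's brw_context.h, not verified against this exact revision):

/* Illustrative binding-table partition behind the SURF_INDEX_*
 * macros; the concrete values are defined in brw_context.h. */
#define SURF_INDEX_DRAW(d)           (d)                        /* color buffers first */
#define SURF_INDEX_FRAG_CONST_BUFFER (MAX_DRAW_BUFFERS)         /* then the WM constants */
#define SURF_INDEX_TEXTURE(t)        (MAX_DRAW_BUFFERS + 1 + (t)) /* then sampled textures */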
return 0;
}
-static void prepare_depthbuffer(struct brw_context *brw)
-{
- struct intel_context *intel = &brw->intel;
- struct gl_context *ctx = &intel->ctx;
- struct gl_framebuffer *fb = ctx->DrawBuffer;
- struct intel_renderbuffer *drb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
- struct intel_renderbuffer *srb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
-
- if (drb)
- brw_add_validated_bo(brw, drb->region->bo);
- if (srb)
- brw_add_validated_bo(brw, srb->region->bo);
-}
-
static void emit_depthbuffer(struct brw_context *brw)
{
struct intel_context *intel = &brw->intel;
.brw = BRW_NEW_BATCH,
.cache = 0,
},
- .prepare = prepare_depthbuffer,
.emit = emit_depthbuffer,
};
if (ctx->DrawBuffer->_NumColorDrawBuffers >= 1) {
for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
- struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i];
- struct intel_renderbuffer *irb = intel_renderbuffer(rb);
- struct intel_region *region = irb ? irb->region : NULL;
-
- if (region)
- brw_add_validated_bo(brw, region->bo);
nr_surfaces = SURF_INDEX_DRAW(i) + 1;
}
}
if (brw->wm.const_bo) {
- brw_add_validated_bo(brw, brw->wm.const_bo);
nr_surfaces = SURF_INDEX_FRAG_CONST_BUFFER + 1;
}
for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
const struct gl_texture_unit *texUnit = &ctx->Texture.Unit[i];
- struct gl_texture_object *tObj = texUnit->_Current;
- struct intel_texture_object *intelObj = intel_texture_object(tObj);
if (texUnit->_ReallyEnabled) {
- brw_add_validated_bo(brw, intelObj->mt->region->bo);
nr_surfaces = SURF_INDEX_TEXTURE(i) + 1;
}
}