{
struct i830_context *i830 = i830_context(&intel->ctx);
i830->state.emitted = 0;
-
- /* Check that we didn't just wrap our batchbuffer at a bad time. */
- assert(!intel->no_batch_wrap);
}
static void
* difficulties associated with them (physical address requirements).
*/
i915->state.emitted = 0;
-
- /* Check that we didn't just wrap our batchbuffer at a bad time. */
- assert(!intel->no_batch_wrap);
}
static void
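/* [Editor's sketch, not part of the patch] With their assertions deleted,
 * the i830/i915 new-batch hooks reduce to resetting the software state
 * cache so the next batch re-emits everything.  Assuming nothing else in
 * the function changes, i915_new_batch() ends up roughly as:
 *
 *    static void
 *    i915_new_batch(struct intel_context *intel)
 *    {
 *       struct i915_context *i915 = i915_context(&intel->ctx);
 *       i915->state.emitted = 0;
 *    }
 *
 * The wrap check is not lost: it reappears exactly once, in the shared
 * flush path in the final hunk of this diff.
 */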
GLuint primitive;
GLboolean emit_state_always;
- GLboolean no_batch_wrap;
struct {
struct brw_state_flags dirty;
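/* [Editor's note, hedged] The GLboolean removed above is gen4's private
 * duplicate of a flag the shared struct intel_context evidently already
 * carries: the i830/i915 hunks assert on intel->no_batch_wrap, and the
 * draw-path hunk below redirects the gen4 writers to it.  The shared
 * declaration is not shown in this diff; it is assumed to be simply:
 *
 *    GLboolean no_batch_wrap;
 */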
prim_packet.base_vert_location = prim->basevertex;
/* Can't wrap here, since we rely on the validated state. */
- brw->no_batch_wrap = GL_TRUE;
+ intel->no_batch_wrap = GL_TRUE;
/* If we're set to always flush, do it before and after the primitive emit.
* We want to catch both missed flushes that hurt instruction/state cache
intel_batchbuffer_emit_mi_flush(intel->batch);
}
- brw->no_batch_wrap = GL_FALSE;
+ intel->no_batch_wrap = GL_FALSE;
}
static void brw_merge_inputs( struct brw_context *brw,
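/* [Editor's sketch] A standalone illustration of the guard pattern this
 * patch consolidates: a "no wrap" flag is raised around emission that
 * relies on already-validated state, and the flush path asserts the flag
 * is clear.  All names below (fake_context, emit_primitive, flush_batch)
 * are illustrative, not Mesa's.  Compiles as-is with: cc guard.c
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_context {
   bool no_batch_wrap;          /* stands in for intel->no_batch_wrap */
};

static void flush_batch(struct fake_context *ctx)
{
   /* Mirrors the assert this patch adds at flush time: flushing while the
    * guard is raised means validated state was about to be split across
    * two batches.
    */
   assert(!ctx->no_batch_wrap);
   printf("batch flushed\n");
}

static void emit_primitive(struct fake_context *ctx)
{
   ctx->no_batch_wrap = true;   /* can't wrap: we rely on validated state */
   /* ... emit state and the primitive; no flush may happen in here ... */
   ctx->no_batch_wrap = false;  /* safe to wrap/flush again */
   flush_batch(ctx);
}

int main(void)
{
   struct fake_context ctx = { .no_batch_wrap = false };
   emit_primitive(&ctx);
   return 0;
}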
{
struct brw_context *brw = brw_context(&intel->ctx);
- /* Check that we didn't just wrap our batchbuffer at a bad time. */
- assert(!brw->no_batch_wrap);
-
brw->curbe.need_new_bo = GL_TRUE;
/* Mark all context state as needing to be re-emitted.
if (intel->vtbl.finish_batch)
intel->vtbl.finish_batch(intel);
+ /* Check that we didn't just wrap our batchbuffer at a bad time. */
+ assert(!intel->no_batch_wrap);
+
batch->reserved_space = BATCH_RESERVED;
/* TODO: Just pass the relocation list and dma buffer up to the
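/* [Editor's note, hedged] Placing the single assert here, after
 * vtbl.finish_batch() but before the batch is handed off, means a wrap
 * triggered by a driver's final state emission is still caught, and the
 * three per-driver copies removed above become redundant.  The resulting
 * flow, assuming nothing else in the function changes:
 *
 *    if (intel->vtbl.finish_batch)
 *       intel->vtbl.finish_batch(intel);      // last driver emissions
 *
 *    assert(!intel->no_batch_wrap);           // must not have wrapped
 *
 *    batch->reserved_space = BATCH_RESERVED;  // re-arm reserve for next batch
 */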