-/* This isn't really handled at the moment.
- */
+/* Mark the i830 state as needing to be re-emitted when a new
+ * batchbuffer is started.
+ */
static void
-i830_lost_hardware(struct intel_context *intel)
+i830_new_batch(struct intel_context *intel)
{
struct i830_context *i830 = i830_context(&intel->ctx);
i830->state.emitted = 0;
i830->intel.vtbl.check_vertex_size = i830_check_vertex_size;
i830->intel.vtbl.destroy = i830_destroy_context;
i830->intel.vtbl.emit_state = i830_emit_state;
- i830->intel.vtbl.lost_hardware = i830_lost_hardware;
+ i830->intel.vtbl.new_batch = i830_new_batch;
i830->intel.vtbl.reduced_primitive_state = i830_reduced_primitive_state;
i830->intel.vtbl.set_draw_region = i830_set_draw_region;
i830->intel.vtbl.update_texture_state = i830UpdateTextureState;
static void
-i915_lost_hardware(struct intel_context *intel)
+i915_new_batch(struct intel_context *intel)
{
struct i915_context *i915 = i915_context(&intel->ctx);
+
+ /* Mark all state as needing to be emitted when starting a new batchbuffer.
+ * Using hardware contexts would be an alternative, but they have some
+ * difficulties associated with them (physical address requirements).
+ */
i915->state.emitted = 0;
}
i915->intel.vtbl.check_vertex_size = i915_check_vertex_size;
i915->intel.vtbl.destroy = i915_destroy_context;
i915->intel.vtbl.emit_state = i915_emit_state;
- i915->intel.vtbl.lost_hardware = i915_lost_hardware;
+ i915->intel.vtbl.new_batch = i915_new_batch;
i915->intel.vtbl.reduced_primitive_state = i915_reduced_primitive_state;
i915->intel.vtbl.render_start = i915_render_start;
i915->intel.vtbl.render_prevalidate = i915_render_prevalidate;
{
void (*destroy) (struct intel_context * intel);
void (*emit_state) (struct intel_context * intel);
- void (*lost_hardware) (struct intel_context * intel);
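+ /* Called when a new batchbuffer is started, so the driver can mark
+  * its state as needing to be re-emitted.
+  */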
+ void (*new_batch) (struct intel_context * intel);
void (*update_texture_state) (struct intel_context * intel);
void (*render_start) (struct intel_context * intel);
UNLOCK_HARDWARE(intel);
exit(1);
}
-
- /* FIXME: use hardware contexts to avoid 'losing' hardware after
- * each buffer flush.
- */
- intel->vtbl.lost_hardware(intel);
}
void
exit(1);
}
*fence = fo;
-
- /* FIXME: use hardware contexts to avoid 'losing' hardware after
- * each buffer flush.
- */
- intel->vtbl.lost_hardware(intel);
-
}
/* called from intelFlushBatchLocked
*/
-static void brw_lost_hardware( struct intel_context *intel )
+static void brw_new_batch( struct intel_context *intel )
{
struct brw_context *brw = brw_context(&intel->ctx);
- /* Note that we effectively lose the context after this.
- *
- * Setting this flag provokes a state buffer wrap and also flushes
- * the hardware caches.
+ /* Mark all context state as needing to be re-emitted.
+ * This is probably not as severe as on 915, since almost all of our state
+ * is just in referenced buffers.
*/
brw->state.dirty.brw |= BRW_NEW_CONTEXT;
- /* Which means there shouldn't be any commands already queued:
- */
- assert(intel->batch->ptr == intel->batch->map);
-
brw->state.dirty.mesa |= ~0;
brw->state.dirty.brw |= ~0;
brw->state.dirty.cache |= ~0;
brw->intel.vtbl.invalidate_state = brw_invalidate_state;
brw->intel.vtbl.note_fence = brw_note_fence;
brw->intel.vtbl.note_unlock = brw_note_unlock;
- brw->intel.vtbl.lost_hardware = brw_lost_hardware;
+ brw->intel.vtbl.new_batch = brw_new_batch;
brw->intel.vtbl.destroy = brw_destroy_context;
brw->intel.vtbl.set_draw_region = brw_set_draw_region;
brw->intel.vtbl.flush_cmd = brw_flush_cmd;
sarea->ctxOwner, me);
}
sarea->ctxOwner = me;
- intel->vtbl.lost_hardware( intel );
}
/* If the last consumer of the texture memory wasn't us, notify the fake
void (*destroy)( struct intel_context *intel );
void (*emit_state)( struct intel_context *intel );
void (*emit_invarient_state)( struct intel_context *intel );
- void (*lost_hardware)( struct intel_context *intel );
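+ /* Called when a new batchbuffer is started, so the driver can flag
+  * all of its state for re-emission into the new buffer.
+  */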
+ void (*new_batch)( struct intel_context *intel );
void (*note_fence)( struct intel_context *intel, GLuint fence );
void (*note_unlock)( struct intel_context *intel );
void (*update_texture_state)( struct intel_context *intel );
exit(1);
}
*fence = fo;
-
- /* FIXME: use hardware contexts to avoid 'losing' hardware after
- * each buffer flush.
- */
- intel->vtbl.lost_hardware(intel);
-
}
sched_yield();
LOCK_HARDWARE(intel);
}
- intel->vtbl.lost_hardware(intel);
}
if (INTEL_DEBUG & DEBUG_BATCH) {
if (intel->vtbl.debug_batch != NULL)
intel->vtbl.debug_batch(intel);
}
+
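+ /* Notify the driver so it can mark any state it tracks as needing to
+  * be re-emitted into the next batchbuffer.
+  */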
+ intel->vtbl.new_batch(intel);
}
void