assert(!intel->no_batch_wrap);
}
-
-
-static GLuint
-i830_flush_cmd(void)
-{
- return MI_FLUSH | FLUSH_MAP_CACHE;
-}
-
-
static void
i830_assert_not_dirty( struct intel_context *intel )
{
i830->intel.vtbl.reduced_primitive_state = i830_reduced_primitive_state;
i830->intel.vtbl.set_draw_region = i830_set_draw_region;
i830->intel.vtbl.update_texture_state = i830UpdateTextureState;
- i830->intel.vtbl.flush_cmd = i830_flush_cmd;
i830->intel.vtbl.render_start = i830_render_start;
i830->intel.vtbl.render_prevalidate = i830_render_prevalidate;
i830->intel.vtbl.assert_not_dirty = i830_assert_not_dirty;
assert(!intel->no_batch_wrap);
}
-static GLuint
-i915_flush_cmd(void)
-{
- return MI_FLUSH | FLUSH_MAP_CACHE;
-}
-
static void
i915_assert_not_dirty( struct intel_context *intel )
{
i915->intel.vtbl.render_prevalidate = i915_render_prevalidate;
i915->intel.vtbl.set_draw_region = i915_set_draw_region;
i915->intel.vtbl.update_texture_state = i915UpdateTextureState;
- i915->intel.vtbl.flush_cmd = i915_flush_cmd;
i915->intel.vtbl.assert_not_dirty = i915_assert_not_dirty;
i915->intel.vtbl.finish_batch = intel_finish_vb;
}
 * the GPU besides the draw code.
*/
if (intel->always_flush_cache) {
- BEGIN_BATCH(1, IGNORE_CLIPRECTS);
- OUT_BATCH(intel->vtbl.flush_cmd());
- ADVANCE_BATCH();
+ intel_batchbuffer_emit_mi_flush(intel->batch);
}
if (prim_packet.verts_per_instance) {
intel_batchbuffer_data( brw->intel.batch, &prim_packet,
sizeof(prim_packet), LOOP_CLIPRECTS);
}
if (intel->always_flush_cache) {
- BEGIN_BATCH(1, IGNORE_CLIPRECTS);
- OUT_BATCH(intel->vtbl.flush_cmd());
- ADVANCE_BATCH();
+ intel_batchbuffer_emit_mi_flush(intel->batch);
}
brw->no_batch_wrap = GL_FALSE;
brw_context(&intel->ctx)->state.dirty.brw |= BRW_NEW_FENCE;
}
-/* called from intelWaitForIdle() and intelFlush()
- *
- * For now, just flush everything. Could be smarter later.
- */
-static GLuint brw_flush_cmd( void )
-{
- struct brw_mi_flush flush;
- flush.opcode = CMD_MI_FLUSH;
- flush.pad = 0;
- flush.flags = BRW_FLUSH_STATE_CACHE;
- return *(GLuint *)&flush;
-}
-
-
static void brw_invalidate_state( struct intel_context *intel, GLuint new_state )
{
/* nothing */
brw->intel.vtbl.finish_batch = brw_finish_batch;
brw->intel.vtbl.destroy = brw_destroy_context;
brw->intel.vtbl.set_draw_region = brw_set_draw_region;
- brw->intel.vtbl.flush_cmd = brw_flush_cmd;
brw->intel.vtbl.debug_batch = brw_debug_batch;
}
fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
used);
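+   /* Drop the reserved space so the flush and finish_batch emission below
+    * can use the tail of the buffer that intel_batchbuffer_space() normally
+    * holds back for them.
+    */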
+ batch->reserved_space = 0;
/* Emit a flush if the bufmgr doesn't do it for us. */
if (intel->always_flush_cache || !intel->ttm) {
- *(GLuint *) (batch->ptr) = intel->vtbl.flush_cmd();
- batch->ptr += 4;
+ intel_batchbuffer_emit_mi_flush(batch);
used = batch->ptr - batch->map;
}
if (intel->vtbl.finish_batch)
intel->vtbl.finish_batch(intel);
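+   /* Re-reserve the space for the next batch now that the final commands
+    * have been emitted.
+    */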
+ batch->reserved_space = BATCH_RESERVED;
+
/* TODO: Just pass the relocation list and dma buffer up to the
* kernel.
*/
__memcpy(batch->ptr, data, bytes);
batch->ptr += bytes;
}
+
+/* Emit a pipelined flush to either flush render and texture cache for
+ * reading from an FBO-drawn texture, or flush so that frontbuffer
+ * rendering appears on the screen in DRI1.
+ *
+ * This is also used for the always_flush_cache driconf debug option.
+ */
+void
+intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
+{
+ struct intel_context *intel = batch->intel;
+
+ if (intel->gen >= 4) {
+ BEGIN_BATCH(4, IGNORE_CLIPRECTS);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL |
+ PIPE_CONTROL_INSTRUCTION_FLUSH |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_NO_WRITE);
+ OUT_BATCH(0); /* write address */
+ OUT_BATCH(0); /* write data */
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+ } else {
+ BEGIN_BATCH(1, IGNORE_CLIPRECTS);
+ OUT_BATCH(MI_FLUSH);
+ ADVANCE_BATCH();
+ }
+}
} emit;
GLuint dirty_state;
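+   /* Bytes held back at the end of the buffer (normally BATCH_RESERVED) for
+    * the flush and finish_batch commands emitted at flush time; cleared to 0
+    * while those commands are being written.
+    */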
+ GLuint reserved_space;
};
struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context
uint32_t read_domains,
uint32_t write_domain,
uint32_t offset);
+void intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch);
/* Inline functions - might actually be better off with these
* non-inlined. Certainly better off switching all command packets to
static INLINE GLint
intel_batchbuffer_space(struct intel_batchbuffer *batch)
{
- return (batch->size - BATCH_RESERVED) - (batch->ptr - batch->map);
+ return (batch->size - batch->reserved_space) - (batch->ptr - batch->map);
}
intel->batch->emit.start_ptr = NULL; \
} while(0)
-
-static INLINE void
-intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
-{
- intel_batchbuffer_require_space(batch, 4, IGNORE_CLIPRECTS);
- intel_batchbuffer_emit_dword(batch, MI_FLUSH);
-}
-
#endif
struct intel_region * depth_region,
GLuint num_regions);
- GLuint (*flush_cmd) (void);
-
void (*reduced_primitive_state) (struct intel_context * intel,
GLenum rprim);