/* The vertex instructions in the compute shaders use the texture cache,
* so we need to invalidate it. */
- rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
+ rctx->flags |= R600_CONTEXT_INV_VERTEX_CACHE;
state->enabled_mask |= 1 << vb_index;
state->dirty_mask |= 1 << vb_index;
state->atom.dirty = true;
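/* Illustration, not part of this patch: R600_CONTEXT_INV_VERTEX_CACHE is
 * presumably the right flag here even though the comment above talks about
 * the texture cache, because r600_flush_emit() maps it to the texture cache
 * on chips without a vertex cache (see the hunk further down):
 *
 *     cp_coher_cntl |= rctx->has_vertex_cache ? S_0085F0_VC_ACTION_ENA(1)
 *                                             : S_0085F0_TC_ACTION_ENA(1);
 */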
/* XXX r600_flush_emit() hardcodes the CP_COHER_SIZE to 0xffffffff */
- ctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
+ ctx->flags |= R600_CONTEXT_INV_CONST_CACHE |
+ R600_CONTEXT_INV_VERTEX_CACHE |
+ R600_CONTEXT_INV_TEX_CACHE;
r600_flush_emit(ctx);
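/* Sketch, not part of this patch: the XXX above refers to the SURFACE_SYNC
 * packet that r600_flush_emit() builds from cp_coher_cntl; it synchronizes
 * the whole address space rather than just the buffers touched by the
 * dispatch, roughly along these lines:
 *
 *     cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_SYNC, 3, 0);
 *     cs->buf[cs->cdw++] = cp_coher_cntl;   // CP_COHER_CNTL
 *     cs->buf[cs->cdw++] = 0xffffffff;      // CP_COHER_SIZE (hardcoded)
 *     cs->buf[cs->cdw++] = 0;               // CP_COHER_BASE
 *     cs->buf[cs->cdw++] = 0x0000000A;      // poll interval
 */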
#if 0
r600_write_value(cs, PKT3C(PKT3_NOP, 0, 0));
r600_write_value(cs, r600_context_bo_reloc(rctx, &rctx->rings.gfx,
kernel->code_bo, RADEON_USAGE_READ));
-
- rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
}
static void evergreen_launch_grid(
/* We flush the caches, because we might read from or write
* to resources which are bound right now. */
- rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES |
+ rctx->flags |= R600_CONTEXT_INV_CONST_CACHE |
+ R600_CONTEXT_INV_VERTEX_CACHE |
+ R600_CONTEXT_INV_TEX_CACHE |
R600_CONTEXT_FLUSH_AND_INV |
R600_CONTEXT_FLUSH_AND_INV_CB |
R600_CONTEXT_FLUSH_AND_INV_DB |
}
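/* Illustration, not part of this patch: a concrete case the comment above
 * guards against: the compute shader may sample a texture the 3D engine has
 * just rendered to (its data may still sit in the CB/DB caches, hence the
 * FLUSH_AND_INV bits), and anything it fetches through the vertex, texture
 * or constant caches may be stale if those resources were written earlier,
 * hence the INV_*_CACHE bits. */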
/* Invalidate the read caches. */
- rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
+ rctx->flags |= R600_CONTEXT_INV_CONST_CACHE |
+ R600_CONTEXT_INV_VERTEX_CACHE |
+ R600_CONTEXT_INV_TEX_CACHE;
util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
offset + size);
(rctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB_META)) {
cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0);
+
+ /* Set FULL_CACHE_ENA for DB META flushes on r7xx and later.
+ *
+ * This hack predates use of FLUSH_AND_INV_DB_META, so it's
+ * unclear whether it's still needed or even whether it has
+ * any effect.
+ */
+ cp_coher_cntl |= S_0085F0_FULL_CACHE_ENA(1);
}
if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV) {
cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0);
}
- if (rctx->flags & R600_CONTEXT_INVAL_READ_CACHES) {
- cp_coher_cntl |= S_0085F0_VC_ACTION_ENA(1) |
- S_0085F0_TC_ACTION_ENA(1) |
- S_0085F0_SH_ACTION_ENA(1) |
- S_0085F0_FULL_CACHE_ENA(1);
+ if (rctx->flags & R600_CONTEXT_INV_CONST_CACHE) {
+ cp_coher_cntl |= S_0085F0_SH_ACTION_ENA(1);
+ }
+ if (rctx->flags & R600_CONTEXT_INV_VERTEX_CACHE) {
+ cp_coher_cntl |= rctx->has_vertex_cache ? S_0085F0_VC_ACTION_ENA(1)
+ : S_0085F0_TC_ACTION_ENA(1);
+ }
+ if (rctx->flags & R600_CONTEXT_INV_TEX_CACHE) {
+ cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);
}
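/* Illustration, not part of this patch: compared with the removed
 * R600_CONTEXT_INVAL_READ_CACHES path, a caller that sets all three
 * INV_*_CACHE bits on a chip with a vertex cache now gets
 *
 *     SH_ACTION_ENA | VC_ACTION_ENA | TC_ACTION_ENA
 *
 * and plain read-cache invalidation no longer sets FULL_CACHE_ENA, which is
 * now only added by the DB META hack above. */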
if (rctx->flags & R600_CONTEXT_FLUSH_AND_INV_DB) {
/* We flush the caches, because we might read from or write
* to resources which are bound right now. */
- rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES |
+ rctx->flags |= R600_CONTEXT_INV_CONST_CACHE |
+ R600_CONTEXT_INV_VERTEX_CACHE |
+ R600_CONTEXT_INV_TEX_CACHE |
R600_CONTEXT_FLUSH_AND_INV |
R600_CONTEXT_FLUSH_AND_INV_CB |
R600_CONTEXT_FLUSH_AND_INV_DB |
}
/* Invalidate the read caches. */
- rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
+ rctx->flags |= R600_CONTEXT_INV_CONST_CACHE |
+ R600_CONTEXT_INV_VERTEX_CACHE |
+ R600_CONTEXT_INV_TEX_CACHE;
util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
dst_offset + size);
#define R600_ERR(fmt, args...) \
fprintf(stderr, "EE %s:%d %s - "fmt, __FILE__, __LINE__, __func__, ##args)
-#define R600_CONTEXT_INVAL_READ_CACHES (1 << 0)
-#define R600_CONTEXT_STREAMOUT_FLUSH (1 << 1)
-#define R600_CONTEXT_WAIT_3D_IDLE (1 << 2)
-#define R600_CONTEXT_WAIT_CP_DMA_IDLE (1 << 3)
-#define R600_CONTEXT_FLUSH_AND_INV (1 << 4)
-#define R600_CONTEXT_FLUSH_AND_INV_CB_META (1 << 5)
-#define R600_CONTEXT_PS_PARTIAL_FLUSH (1 << 6)
-#define R600_CONTEXT_FLUSH_AND_INV_DB_META (1 << 7)
-#define R600_CONTEXT_FLUSH_AND_INV_DB (1 << 8)
-#define R600_CONTEXT_FLUSH_AND_INV_CB (1 << 9)
+/* read caches */
+#define R600_CONTEXT_INV_VERTEX_CACHE (1 << 0)
+#define R600_CONTEXT_INV_TEX_CACHE (1 << 1)
+#define R600_CONTEXT_INV_CONST_CACHE (1 << 2)
+/* read-write caches */
+#define R600_CONTEXT_STREAMOUT_FLUSH (1 << 8)
+#define R600_CONTEXT_FLUSH_AND_INV (1 << 9)
+#define R600_CONTEXT_FLUSH_AND_INV_CB_META (1 << 10)
+#define R600_CONTEXT_FLUSH_AND_INV_DB_META (1 << 11)
+#define R600_CONTEXT_FLUSH_AND_INV_DB (1 << 12)
+#define R600_CONTEXT_FLUSH_AND_INV_CB (1 << 13)
+/* engine synchronization */
+#define R600_CONTEXT_PS_PARTIAL_FLUSH (1 << 16)
+#define R600_CONTEXT_WAIT_3D_IDLE (1 << 17)
+#define R600_CONTEXT_WAIT_CP_DMA_IDLE (1 << 18)
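/* Hypothetical helper, not part of this patch: grouping the read-cache bits
 * in the low byte makes it easy to request all of them with one mask, e.g.:
 *
 *     #define R600_CONTEXT_INV_READ_CACHES (R600_CONTEXT_INV_VERTEX_CACHE | \
 *                                           R600_CONTEXT_INV_TEX_CACHE | \
 *                                           R600_CONTEXT_INV_CONST_CACHE)
 */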
#define R600_QUERY_DRAW_CALLS (PIPE_QUERY_DRIVER_SPECIFIC + 0)
#define R600_QUERY_REQUESTED_VRAM (PIPE_QUERY_DRIVER_SPECIFIC + 1)
{
struct r600_context *rctx = (struct r600_context *)ctx;
- rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES |
+ rctx->flags |= R600_CONTEXT_INV_TEX_CACHE |
R600_CONTEXT_FLUSH_AND_INV_CB |
R600_CONTEXT_FLUSH_AND_INV |
R600_CONTEXT_WAIT_3D_IDLE;
void r600_vertex_buffers_dirty(struct r600_context *rctx)
{
if (rctx->vertex_buffer_state.dirty_mask) {
- rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
+ rctx->flags |= R600_CONTEXT_INV_VERTEX_CACHE;
rctx->vertex_buffer_state.atom.num_dw = (rctx->chip_class >= EVERGREEN ? 12 : 11) *
util_bitcount(rctx->vertex_buffer_state.dirty_mask);
rctx->vertex_buffer_state.atom.dirty = true;
struct r600_samplerview_state *state)
{
if (state->dirty_mask) {
- rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
+ rctx->flags |= R600_CONTEXT_INV_TEX_CACHE;
state->atom.num_dw = (rctx->chip_class >= EVERGREEN ? 14 : 13) *
util_bitcount(state->dirty_mask);
state->atom.dirty = true;
void r600_constant_buffers_dirty(struct r600_context *rctx, struct r600_constbuf_state *state)
{
if (state->dirty_mask) {
- rctx->flags |= R600_CONTEXT_INVAL_READ_CACHES;
+ rctx->flags |= R600_CONTEXT_INV_CONST_CACHE;
state->atom.num_dw = rctx->chip_class >= EVERGREEN ? util_bitcount(state->dirty_mask)*20
: util_bitcount(state->dirty_mask)*19;
state->atom.dirty = true;
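/* Illustration, not part of this patch: this is where the split pays off: a
 * constant buffer update now only requests SH_ACTION_ENA via
 * R600_CONTEXT_INV_CONST_CACHE instead of invalidating the vertex and
 * texture caches as well, as the old R600_CONTEXT_INVAL_READ_CACHES flag
 * did. */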