struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
bool render_cond_bit = rctx->b.render_cond && !rctx->b.render_cond_force_off;
uint64_t mask;
- unsigned num_patches;
+ unsigned num_patches, dirty_fb_counter;
if (!info.indirect && !info.count && (info.indexed || !info.count_from_stream_output)) {
	return;
}

/* make sure that the gfx ring is the only one active */
if (rctx->b.dma.cs && rctx->b.dma.cs->cdw) {
	rctx->b.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
}
+ /* Re-emit the framebuffer state if needed. */
+ dirty_fb_counter = p_atomic_read(&rctx->b.screen->dirty_fb_counter);
+ if (dirty_fb_counter != rctx->b.last_dirty_fb_counter) {
+ rctx->b.last_dirty_fb_counter = dirty_fb_counter;
+ r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);
+ }
+
if (!r600_update_derived_state(rctx)) {
	/* useless to render because current rendering command
	 * can't be achieved
	 */
	return;
}
/* Performance counters. */
struct r600_perfcounters *perfcounters;
+
+ /* If pipe_screen wants to re-emit the framebuffer state of all
+ * contexts, it should atomically increment this. Each context will
+ * compare this with its own last known value of the counter before
+ * drawing and re-emit the framebuffer state accordingly.
+ */
+ unsigned dirty_fb_counter;
};
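The counter added above implements a lock-free broadcast: the screen atomically bumps a shared value, and each context notices the change lazily at its next draw and re-emits once. A self-contained sketch of the pattern, using toy structs and C11 atomics in place of the r600 types and gallium's p_atomic_* wrappers:

#include <stdatomic.h>
#include <stdbool.h>

struct toy_screen { atomic_uint dirty_fb_counter; };
struct toy_context {
	struct toy_screen *screen;
	unsigned last_dirty_fb_counter; /* per-context snapshot */
};

/* Producer side: any thread may request a global re-emit. */
static void toy_dirty_all_fb_states(struct toy_screen *s)
{
	atomic_fetch_add(&s->dirty_fb_counter, 1);
}

/* Consumer side: run at the top of every draw call; returns true
 * when this context must re-emit its framebuffer state. */
static bool toy_check_fb_dirty(struct toy_context *c)
{
	unsigned now = atomic_load(&c->screen->dirty_fb_counter);
	if (now == c->last_dirty_fb_counter)
		return false;
	c->last_dirty_fb_counter = now;
	return true;
}

int main(void)
{
	struct toy_screen scr = { 0 };
	struct toy_context ctx = { &scr, 0 };

	toy_dirty_all_fb_states(&scr);            /* e.g. CMASK just got disabled */
	return toy_check_fb_dirty(&ctx) ? 0 : 1;  /* context re-emits once */
}

Because each context compares against its own snapshot rather than counting events, a context that misses several increments still re-emits exactly once.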
/* This encapsulates a state or an operation which can be emitted into the GPU
 * command stream. */
struct pipe_fence_handle *last_sdma_fence;
unsigned initial_gfx_cs_size;
unsigned gpu_reset_counter;
+ unsigned last_dirty_fb_counter;
struct u_upload_mgr *uploader;
struct u_suballocator *allocator_so_filled_size;
metadata->scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0;
}

+static void r600_dirty_all_framebuffer_states(struct r600_common_screen *rscreen)
+{
+ p_atomic_inc(&rscreen->dirty_fb_counter);
+}
+
static void r600_eliminate_fast_color_clear(struct r600_common_screen *rscreen,
struct r600_texture *rtex)
{
pipe_mutex_unlock(rscreen->aux_context_lock);
}

+static void r600_texture_disable_cmask(struct r600_common_screen *rscreen,
+ struct r600_texture *rtex)
+{
+ if (!rtex->cmask.size)
+ return;
+
+ assert(rtex->resource.b.b.nr_samples <= 1);
+
+	/* Disable CMASK: clear the metadata and point the CMASK base
+	 * register at the texture itself; the register stores a
+	 * 256-byte-aligned address, hence the >> 8. */
+	memset(&rtex->cmask, 0, sizeof(rtex->cmask));
+	rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8;
+
+ if (rscreen->chip_class >= SI)
+ rtex->cb_color_info &= ~SI_S_028C70_FAST_CLEAR(1);
+ else
+ rtex->cb_color_info &= ~EG_S_028C70_FAST_CLEAR(1);
+
+	/* Drop the CMASK buffer if it was a separate allocation rather
+	 * than part of the texture itself. */
+	if (rtex->cmask_buffer != &rtex->resource)
+		pipe_resource_reference((struct pipe_resource**)&rtex->cmask_buffer, NULL);
+
+ /* Notify all contexts about the change. */
+ r600_dirty_all_framebuffer_states(rscreen);
+}
+
static boolean r600_texture_get_handle(struct pipe_screen* screen,
struct pipe_resource *resource,
struct winsys_handle *whandle,
if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH)) {
/* Eliminate fast clear (both CMASK and DCC) */
r600_eliminate_fast_color_clear(rscreen, rtex);
+
+ /* Disable CMASK if flush_resource isn't going
+ * to be called.
+ */
+ r600_texture_disable_cmask(rscreen, rtex);
}
r600_texture_init_metadata(rtex, &metadata);
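In short, the export path above always resolves fast clears (an external consumer cannot interpret clear-color metadata) and additionally drops CMASK unless the consumer has promised, via PIPE_HANDLE_USAGE_EXPLICIT_FLUSH, to call flush_resource() before reading. A toy model of that decision, with hypothetical names standing in for the gallium API:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for PIPE_HANDLE_USAGE_EXPLICIT_FLUSH; toy model only. */
#define USAGE_EXPLICIT_FLUSH (1u << 0)

/* Condensed export policy: returns whether CMASK may be kept. */
static bool prepare_for_export(unsigned usage)
{
	/* Always resolve fast clears: an external consumer cannot
	 * interpret the clear-color metadata. */
	printf("eliminate fast color clear\n");

	/* Without EXPLICIT_FLUSH the driver never gets a flush_resource()
	 * call before the consumer reads, so CMASK must go for good. */
	if (!(usage & USAGE_EXPLICIT_FLUSH)) {
		printf("disable CMASK\n");
		return false;
	}
	return true;
}

int main(void)
{
	prepare_for_export(0);                    /* plain cross-process share */
	prepare_for_export(USAGE_EXPLICIT_FLUSH); /* consumer promises to flush */
	return 0;
}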
struct si_context *sctx = (struct si_context *)ctx;
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
struct pipe_index_buffer ib = {};
- unsigned mask;
+ unsigned mask, dirty_fb_counter;
if (!info->count && !info->indirect &&
(info->indexed || !info->count_from_stream_output))
	return;

+ /* Re-emit the framebuffer state if needed. */
+ dirty_fb_counter = p_atomic_read(&sctx->b.screen->dirty_fb_counter);
+ if (dirty_fb_counter != sctx->b.last_dirty_fb_counter) {
+ sctx->b.last_dirty_fb_counter = dirty_fb_counter;
+ sctx->framebuffer.dirty_cbufs |=
+ ((1 << sctx->framebuffer.state.nr_cbufs) - 1);
+ sctx->framebuffer.dirty_zsbuf = true;
+ si_mark_atom_dirty(sctx, &sctx->framebuffer.atom);
+ }
+
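For reference, (1 << nr_cbufs) - 1 in the hunk above builds a mask with one set bit per bound color buffer, flagging all of them dirty at once; a minimal illustration:

#include <assert.h>

int main(void)
{
	unsigned nr_cbufs = 3;                  /* three bound color buffers */
	unsigned dirty = (1u << nr_cbufs) - 1;  /* 0b111: buffers 0, 1, 2 dirty */
	assert(dirty == 0x7);
	return 0;
}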
si_decompress_textures(sctx);
/* Set the rasterization primitive type.