In compute_emit_cs() (evergreen_compute.c), the patch flushes the command stream the first time a compute dispatch follows gfx work; on r600, compute and gfx share the same CS (rctx->b.gfx), so the point of the change is to keep the two kinds of state out of a single command buffer:

r600_update_compressed_resource_state(rctx, true);
+ if (!rctx->cmd_buf_is_compute) {
+ rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
+ rctx->cmd_buf_is_compute = true;
+ }
+
r600_need_cs_space(rctx, 0, true);
if (rctx->cs_shader_state.shader->ir_type == PIPE_SHADER_IR_TGSI) {
r600_shader_select(&rctx->b.b, rctx->cs_shader_state.shader->sel, &compute_dirty);
}

The mirror-image check appears in evergreen_dma_copy() (evergreen_state.c), after the existing fallback for a missing DMA ring, so a compute-flagged CS is flushed before any DMA transfer is recorded:

if (rctx->b.dma.cs == NULL) {
	goto fallback;
}
+ if (rctx->cmd_buf_is_compute) {
+ rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
+ rctx->cmd_buf_is_compute = false;
+ }
+
if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
evergreen_dma_copy_buffer(rctx, dst, src, dst_x, src_box->x, src_box->width);
return;
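This is the same flush-and-toggle pair as in compute_emit_cs() with the sense inverted, and the patch open-codes it at every gfx entry point. A hypothetical helper, not part of the patch, that would capture both directions of the transition:

	/* Hypothetical helper: flush the shared CS whenever the submission
	 * type flips, so gfx and compute state never mix in one buffer. */
	static inline void r600_cmd_buf_set_compute(struct r600_context *rctx,
						    bool compute)
	{
		if (rctx->cmd_buf_is_compute != compute) {
			rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
			rctx->cmd_buf_is_compute = compute;
		}
	}

With it, compute_emit_cs() would call r600_cmd_buf_set_compute(rctx, true) and the three gfx-side sites r600_cmd_buf_set_compute(rctx, false).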
r600_blitter_begin() (r600_blit.c) gets the same guard before the blitter snapshots gfx state:

static void r600_blitter_begin(struct pipe_context *ctx, enum r600_blitter_op op)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
+ if (rctx->cmd_buf_is_compute) {
+ rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
+ rctx->cmd_buf_is_compute = false;
+ }
+
util_blitter_save_vertex_buffer_slot(rctx->blitter, rctx->vertex_buffer_state.vb);
util_blitter_save_vertex_elements(rctx->blitter, rctx->vertex_fetch_shader.cso);
util_blitter_save_vertex_shader(rctx->blitter, rctx->vs_shader);
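Since r600_blitter_begin()/r600_blitter_end() bracket every util_blitter call in r600_blit.c, this one guard covers all blit-based paths (clears, copies, decompress blits) at once. The bracketing pattern, roughly as it appears throughout that file (arguments elided):

	r600_blitter_begin(ctx, R600_CLEAR);
	util_blitter_clear(rctx->blitter, fb->width, fb->height, ...);
	r600_blitter_end(ctx);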
The flag itself is a new field in struct r600_context (r600_pipe.h), alongside the existing trace and append-fence members:

struct r600_resource *trace_buf;
unsigned trace_id;
+ bool cmd_buf_is_compute;
struct pipe_resource *append_fence;
uint32_t append_fence_id;
};
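The context is zero-allocated at creation (CALLOC_STRUCT in r600_create_context), so the flag starts out false and the first command buffer is treated as gfx. A self-contained sketch with stubbed types, for illustration only, showing that a gfx -> compute -> gfx sequence costs exactly two extra flushes:

	#include <stdbool.h>
	#include <stdio.h>

	/* Stubbed stand-ins for the driver types; illustration only. */
	struct ctx { bool cmd_buf_is_compute; int flushes; };

	static void flush(struct ctx *c) { c->flushes++; }

	/* Mirrors the patch: flush the shared CS when the type flips. */
	static void begin_compute(struct ctx *c)
	{
		if (!c->cmd_buf_is_compute) {
			flush(c);
			c->cmd_buf_is_compute = true;
		}
	}

	static void begin_gfx(struct ctx *c)
	{
		if (c->cmd_buf_is_compute) {
			flush(c);
			c->cmd_buf_is_compute = false;
		}
	}

	int main(void)
	{
		struct ctx c = {0};   /* CALLOC-like zero init: gfx mode */
		begin_gfx(&c);        /* no transition, no flush */
		begin_compute(&c);    /* gfx -> compute: flush #1 */
		begin_compute(&c);    /* already compute, no flush */
		begin_gfx(&c);        /* compute -> gfx: flush #2 */
		printf("flushes: %d\n", c.flushes);  /* prints 2 */
		return 0;
	}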
Finally, r600_draw_vbo() (r600_state_common.c) clears the flag on the draw path, right after the existing DMA-ring flush:

	rctx->b.dma.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
}
+ if (rctx->cmd_buf_is_compute) {
+ rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
+ rctx->cmd_buf_is_compute = false;
+ }
+
/* Re-emit the framebuffer state if needed. */
dirty_tex_counter = p_atomic_read(&rctx->b.screen->dirty_tex_counter);
if (unlikely(dirty_tex_counter != rctx->b.last_dirty_tex_counter)) {
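With all four call sites in place, a state tracker that interleaves dispatches and draws gets one command buffer per submission type. A sketch of the interleaving this patch targets, using the gallium entry points these functions back (arguments elided):

	ctx->launch_grid(ctx, &grid);  /* compute_emit_cs: CS flagged compute */
	ctx->draw_vbo(ctx, &draw);     /* r600_draw_vbo: flush, back to gfx   */
	ctx->launch_grid(ctx, &grid);  /* transition again: another flush     */

Each transition costs a flush, which is why every site uses PIPE_FLUSH_ASYNC: the submission is queued without stalling the CPU, and the winsys still orders the resulting command buffers against each other.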