 	return PIPE_UNKNOWN_CONTEXT_RESET;
 }
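 
+/* Store the state tracker's debug-message callback, or clear it when
+ * cb is NULL, so the driver can report messages (e.g. for GL_KHR_debug). */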
+static void r600_set_debug_callback(struct pipe_context *ctx,
+				    const struct pipe_debug_callback *cb)
+{
+	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
+
+	if (cb)
+		rctx->debug = *cb;
+	else
+		memset(&rctx->debug, 0, sizeof(rctx->debug));
+}
+
 bool r600_common_context_init(struct r600_common_context *rctx,
 			      struct r600_common_screen *rscreen)
 {
 	rctx->b.transfer_inline_write = u_default_transfer_inline_write;
 	rctx->b.memory_barrier = r600_memory_barrier;
 	rctx->b.flush = r600_flush_from_st;
+	rctx->b.set_debug_callback = r600_set_debug_callback;
 	if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 43) {
 		rctx->b.get_device_reset_status = r600_get_reset_status;
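
For context: once stored, the callback is typically consumed through Mesa's
pipe_debug_message() helper from util/u_debug.h, which expands its type
argument to a PIPE_DEBUG_TYPE_* enum value and does nothing when no callback
is bound. A minimal sketch, not part of the patch; the reporting helper and
its message are illustrative:

#include "util/u_debug.h"	/* pipe_debug_message() */

/* Hypothetical helper: forwards a formatted message to whatever callback
 * the state tracker installed via set_debug_callback. No NULL guard is
 * needed here; pipe_debug_message() checks the stored callback internally. */
static void r600_report_shader_stats(struct r600_common_context *rctx,
				     unsigned num_instructions)
{
	pipe_debug_message(&rctx->debug, SHADER_INFO,
			   "Shader Stats: %u instructions", num_instructions);
}
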
 	 * the GPU addresses are updated. */
 	struct list_head texture_buffers;
+
+	/* Debug-message callback installed by the state tracker;
+	 * zeroed when unset. */
+	struct pipe_debug_callback debug;
+
 	/* Copy one resource to another using async DMA. */
 	void (*dma_copy)(struct pipe_context *ctx,
 			 struct pipe_resource *dst,