return true;
}
-static void r600_dma_clear_buffer_fallback(struct pipe_context *ctx,
- struct pipe_resource *dst,
- uint64_t offset, uint64_t size,
- unsigned value)
-{
- struct r600_common_context *rctx = (struct r600_common_context *)ctx;
-
- rctx->clear_buffer(ctx, dst, offset, size, value, R600_COHERENCY_NONE);
-}
-
static bool r600_resource_commit(struct pipe_context *pctx,
struct pipe_resource *resource,
unsigned level, struct pipe_box *box,
rctx->b.transfer_unmap = u_transfer_unmap_vtbl;
rctx->b.texture_subdata = u_default_texture_subdata;
rctx->b.memory_barrier = r600_memory_barrier;
- rctx->dma_clear_buffer = r600_dma_clear_buffer_fallback;
rctx->b.buffer_subdata = si_buffer_subdata;
if (rscreen->info.drm_major == 2 && rscreen->info.drm_minor >= 43) {
void (*dma_clear_buffer)(struct pipe_context *ctx, struct pipe_resource *dst,
uint64_t offset, uint64_t size, unsigned value);
- void (*clear_buffer)(struct pipe_context *ctx, struct pipe_resource *dst,
- uint64_t offset, uint64_t size, unsigned value,
- enum r600_coherency coher);
-
void (*blit_decompress_depth)(struct pipe_context *ctx,
struct r600_texture *texture,
struct r600_texture *staging,
void si_init_cp_dma_functions(struct si_context *sctx)
{
sctx->b.b.clear_buffer = si_pipe_clear_buffer;
- sctx->b.clear_buffer = si_clear_buffer;
}
&sctx->null_const_buf);
/* Clear the NULL constant buffer, because loads should return zeros. */
- sctx->b.clear_buffer(&sctx->b.b, sctx->null_const_buf.buffer, 0,
- sctx->null_const_buf.buffer->width0, 0,
- R600_COHERENCY_SHADER);
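+ /* R600_COHERENCY_SHADER makes si_clear_buffer flush/invalidate the shader
+ * caches after the clear, so subsequent shader loads observe the zeros. */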
+ si_clear_buffer(&sctx->b.b, sctx->null_const_buf.buffer, 0,
+ sctx->null_const_buf.buffer->width0, 0,
+ R600_COHERENCY_SHADER);
}
uint64_t max_threads_per_block;
set_random_pixels(ctx, src, &src_cpu);
/* clear dst pixels */
- sctx->b.clear_buffer(ctx, dst, 0, rdst->surface.surf_size, 0, true);
+ si_clear_buffer(ctx, dst, 0, rdst->surface.surf_size, 0,
+ R600_COHERENCY_SHADER);
memset(dst_cpu.ptr, 0, dst_cpu.layer_stride * tdst.array_size);
/* preparation */