r600_sb_context_destroy(rctx->sb_context);
- pipe_resource_reference((struct pipe_resource**)&rctx->dummy_cmask, NULL);
- pipe_resource_reference((struct pipe_resource**)&rctx->dummy_fmask, NULL);
+ r600_resource_reference(&rctx->dummy_cmask, NULL);
+ r600_resource_reference(&rctx->dummy_fmask, NULL);
for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
rctx->b.b.set_constant_buffer(&rctx->b.b, sh, R600_BUFFER_INFO_CONST_BUFFER, NULL);
void r600_pipe_shader_destroy(struct pipe_context *ctx, struct r600_pipe_shader *shader)
{
- pipe_resource_reference((struct pipe_resource**)&shader->bo, NULL);
+ r600_resource_reference(&shader->bo, NULL);
r600_bytecode_clear(&shader->shader.bc);
r600_release_command_buffer(&shader->command_buffer);
}
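
All of the hunks below rely on an r600_resource_reference() helper whose definition is not part of this excerpt. As a point of reference, a minimal sketch of such a helper (the actual definition lives in the driver's common headers and may differ in details) is simply a static inline, type-safe wrapper around pipe_resource_reference():

/* Sketch only: lets callers drop the (struct pipe_resource **) casts
 * seen in the removed lines by doing the cast in one place. */
static inline void r600_resource_reference(struct r600_resource **ptr,
					   struct r600_resource *res)
{
	pipe_resource_reference((struct pipe_resource **)ptr,
				(struct pipe_resource *)res);
}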
surf->cb_color_cmask = surf->cb_color_base;
surf->cb_color_mask = 0;
- pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_cmask,
- &rtex->resource.b.b);
- pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_fmask,
- &rtex->resource.b.b);
+ r600_resource_reference(&surf->cb_buffer_cmask, &rtex->resource);
+ r600_resource_reference(&surf->cb_buffer_fmask, &rtex->resource);
if (rtex->cmask.size) {
surf->cb_color_cmask = rtex->cmask.offset >> 8;
struct pipe_transfer *transfer;
void *ptr;
- pipe_resource_reference((struct pipe_resource**)&rctx->dummy_cmask, NULL);
+ r600_resource_reference(&rctx->dummy_cmask, NULL);
rctx->dummy_cmask = r600_buffer_create_helper(rscreen, cmask.size, cmask.alignment);
/* Set the contents to 0xCC. */
memset(ptr, 0xCC, cmask.size);
pipe_buffer_unmap(&rctx->b.b, transfer);
}
- pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_cmask,
- &rctx->dummy_cmask->b.b);
+ r600_resource_reference(&surf->cb_buffer_cmask, rctx->dummy_cmask);
/* FMASK. */
if (!rctx->dummy_fmask ||
rctx->dummy_fmask->b.b.width0 < fmask.size ||
rctx->dummy_fmask->buf->alignment % fmask.alignment != 0) {
- pipe_resource_reference((struct pipe_resource**)&rctx->dummy_fmask, NULL);
+ r600_resource_reference(&rctx->dummy_fmask, NULL);
rctx->dummy_fmask = r600_buffer_create_helper(rscreen, fmask.size, fmask.alignment);
}
- pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_fmask,
- &rctx->dummy_fmask->b.b);
+ r600_resource_reference(&surf->cb_buffer_fmask, rctx->dummy_fmask);
/* Init the registers. */
color_info |= S_0280A0_TILE_MODE(V_0280A0_FRAG_ENABLE);
static void r600_delete_vertex_elements(struct pipe_context *ctx, void *state)
{
struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state;
- pipe_resource_reference((struct pipe_resource**)&shader->buffer, NULL);
+ r600_resource_reference(&shader->buffer, NULL);
FREE(shader);
}
data = r600_buffer_map_sync_with_rings(rctx, staging, PIPE_TRANSFER_READ);
if (!data) {
- pipe_resource_reference((struct pipe_resource **)&staging, NULL);
+ r600_resource_reference(&staging, NULL);
return NULL;
}
data += box->x % R600_MAP_BUFFER_ALIGNMENT;
r600_buffer_do_flush_region(ctx, transfer, &transfer->box);
if (rtransfer->staging)
- pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
+ r600_resource_reference(&rtransfer->staging, NULL);
util_slab_free(&rctx->pool_transfers, transfer);
}
while (prev) {
struct r600_query_buffer *qbuf = prev;
prev = prev->previous;
- pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
+ r600_resource_reference(&qbuf->buf, NULL);
FREE(qbuf);
}
- pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
+ r600_resource_reference(&query->buffer.buf, NULL);
FREE(rquery);
}
if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
if (!query->ops->prepare_buffer(ctx, query, buf)) {
- pipe_resource_reference((struct pipe_resource **)&buf, NULL);
+ r600_resource_reference(&buf, NULL);
return NULL;
}
}
while (prev) {
struct r600_query_buffer *qbuf = prev;
prev = prev->previous;
- pipe_resource_reference((struct pipe_resource**)&qbuf->buf, NULL);
+ r600_resource_reference(&qbuf->buf, NULL);
FREE(qbuf);
}
/* Obtain a new buffer if the current one can't be mapped without a stall. */
if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
!rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
- pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
+ r600_resource_reference(&query->buffer.buf, NULL);
query->buffer.buf = r600_new_query_buffer(rctx, query);
} else {
if (!query->ops->prepare_buffer(rctx, query, query->buffer.buf))
- pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
+ r600_resource_reference(&query->buffer.buf, NULL);
}
}
}
}
}
- pipe_resource_reference((struct pipe_resource**)&buffer, NULL);
+ r600_resource_reference(&buffer, NULL);
if (mask != 0) {
ctx->backend_mask = mask;
{
struct r600_so_target *t = (struct r600_so_target*)target;
pipe_resource_reference(&t->b.buffer, NULL);
- pipe_resource_reference((struct pipe_resource**)&t->buf_filled_size, NULL);
+ r600_resource_reference(&t->buf_filled_size, NULL);
FREE(t);
}
rtex->cb_color_info &= ~EG_S_028C70_FAST_CLEAR(1);
if (rtex->cmask_buffer != &rtex->resource)
- pipe_resource_reference((struct pipe_resource**)&rtex->cmask_buffer, NULL);
+ r600_resource_reference(&rtex->cmask_buffer, NULL);
/* Notify all contexts about the change. */
r600_dirty_all_framebuffer_states(rscreen);
if (rtex->flushed_depth_texture)
pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);
- pipe_resource_reference((struct pipe_resource**)&rtex->htile_buffer, NULL);
+ r600_resource_reference(&rtex->htile_buffer, NULL);
if (rtex->cmask_buffer != &rtex->resource) {
- pipe_resource_reference((struct pipe_resource**)&rtex->cmask_buffer, NULL);
+ r600_resource_reference(&rtex->cmask_buffer, NULL);
}
pb_reference(&resource->buf, NULL);
FREE(rtex);
}
if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) {
- pipe_resource_reference((struct pipe_resource**)&trans->staging, NULL);
+ r600_resource_reference(&trans->staging, NULL);
FREE(trans);
return NULL;
}
if (rtransfer->staging) {
rctx->num_alloc_tex_transfer_bytes += rtransfer->staging->buf->size;
- pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
+ r600_resource_reference(&rtransfer->staging, NULL);
}
/* Heuristic for {upload, draw, upload, draw, ..}:
struct pipe_surface *surface)
{
struct r600_surface *surf = (struct r600_surface*)surface;
- pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_fmask, NULL);
- pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_cmask, NULL);
+ r600_resource_reference(&surf->cb_buffer_fmask, NULL);
+ r600_resource_reference(&surf->cb_buffer_cmask, NULL);
pipe_resource_reference(&surface->texture, NULL);
FREE(surface);
}
/* destroy a buffer */
void rvid_destroy_buffer(struct rvid_buffer *buffer)
{
- pipe_resource_reference((struct pipe_resource **)&buffer->res, NULL);
+ r600_resource_reference(&buffer->res, NULL);
}
/* reallocate a buffer, preserving its content */
scratch_bo_size = sctx->compute_scratch_buffer->b.b.width0;
if (scratch_bo_size < scratch_needed) {
- pipe_resource_reference(
- (struct pipe_resource**)&sctx->compute_scratch_buffer,
- NULL);
+ r600_resource_reference(&sctx->compute_scratch_buffer, NULL);
sctx->compute_scratch_buffer =
si_resource_create_custom(&sctx->screen->b.b,
radeon_emit(cs, S_008F04_BASE_ADDRESS_HI (kernel_args_va >> 32) |
S_008F04_STRIDE(0));
- pipe_resource_reference((struct pipe_resource**)&input_buffer, NULL);
+ r600_resource_reference(&input_buffer, NULL);
}
static void si_setup_tgsi_grid(struct si_context *sctx,
static void si_release_descriptors(struct si_descriptors *desc)
{
- pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
+ r600_resource_reference(&desc->buffer, NULL);
FREE(desc->list);
}
if (scratch_needed_size > 0) {
if (scratch_needed_size > current_scratch_buffer_size) {
/* Create a bigger scratch buffer */
- pipe_resource_reference(
- (struct pipe_resource**)&sctx->scratch_buffer,
- NULL);
+ r600_resource_reference(&sctx->scratch_buffer, NULL);
sctx->scratch_buffer =
si_resource_create_custom(&sctx->screen->b.b,