decode->bsd_size = align(dec->bs_size, 128);
decode->dpb_size = dec->dpb.res->buf->size;
decode->dt_size =
- ((struct r600_resource *)((struct vl_video_buffer *)target)->resources[0])->buf->size +
- ((struct r600_resource *)((struct vl_video_buffer *)target)->resources[1])->buf->size;
+ r600_resource(((struct vl_video_buffer *)target)->resources[0])->buf->size +
+ r600_resource(((struct vl_video_buffer *)target)->resources[1])->buf->size;
decode->sct_size = 0;
decode->sc_coeff_size = 0;
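
Every hunk below swaps an open-coded "(struct r600_resource *)" cast for the
r600_resource() helper. For anyone reading hunk by hunk: this is the usual
static-inline downcast wrapper, which in this era of the tree lives in
r600_pipe_common.h. A minimal sketch of what the patch assumes:

    static inline struct r600_resource *r600_resource(struct pipe_resource *r)
    {
    	/* Every buffer and texture this driver creates embeds
    	 * struct r600_resource at offset 0, so the downcast is
    	 * always valid. */
    	return (struct r600_resource*)r;
    }

The win over a bare cast is type safety on the argument: the compiler now
rejects anything that is not a struct pipe_resource *.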
* able to move buffers around individually, so request a
* non-sub-allocated buffer.
*/
- buffer->res = (struct r600_resource *)
- pipe_buffer_create(screen, PIPE_BIND_SHARED,
- usage, size);
+ buffer->res = r600_resource(pipe_buffer_create(screen, PIPE_BIND_SHARED,
+ usage, size));
return buffer->res != NULL;
}
struct r600_resource *staging;
assert(!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC));
- staging = (struct r600_resource*) pipe_buffer_create(
+ staging = r600_resource(pipe_buffer_create(
ctx->screen, 0, PIPE_USAGE_STAGING,
- box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT));
+ box->width + (box->x % SI_MAP_BUFFER_ALIGNMENT)));
if (staging) {
/* Copy the VRAM buffer to the staging buffer. */
sctx->dma_copy(ctx, &staging->b.b, 0,
return &rbuffer->b.b;
}
-struct pipe_resource *si_aligned_buffer_create(struct pipe_screen *screen,
- unsigned flags,
- unsigned usage,
- unsigned size,
- unsigned alignment)
+struct pipe_resource *pipe_aligned_buffer_create(struct pipe_screen *screen,
+ unsigned flags, unsigned usage,
+ unsigned size, unsigned alignment)
{
	struct pipe_resource buffer;

	memset(&buffer, 0, sizeof buffer);
	buffer.target = PIPE_BUFFER;
	buffer.format = PIPE_FORMAT_R8_UNORM;
	buffer.bind = 0;
	buffer.usage = usage;
	buffer.flags = flags;
	buffer.width0 = size;
	buffer.height0 = 1;
	buffer.depth0 = 1;
	buffer.array_size = 1;
	return si_buffer_create(screen, &buffer, alignment);
}
+struct r600_resource *si_aligned_buffer_create(struct pipe_screen *screen,
+ unsigned flags, unsigned usage,
+ unsigned size, unsigned alignment)
+{
+ return r600_resource(pipe_aligned_buffer_create(screen, flags, usage,
+ size, alignment));
+}
+
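
The pair above is the heart of the patch: pipe_aligned_buffer_create() keeps
the old behaviour and pipe_resource return type for callers that store a
generic resource pointer (esgs_ring, gsvs_ring, tess_rings and
null_const_buf.buffer in later hunks), while si_aligned_buffer_create()
wraps it so r600_resource-typed callers can drop their casts. A caller-side
sketch of the split (argument names are placeholders; the real call sites
follow below):

    /* field of type struct pipe_resource * -> pipe_ variant */
    sctx->esgs_ring = pipe_aligned_buffer_create(screen, flags, usage,
                                                 size, alignment);

    /* field of type struct r600_resource * -> si_ variant, no cast */
    sctx->scratch_buffer = si_aligned_buffer_create(screen, flags, usage,
                                                    size, alignment);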
static struct pipe_resource *
si_buffer_from_user_memory(struct pipe_screen *screen,
const struct pipe_resource *templ,
if (!rtex->cmask.size)
return;
- rtex->cmask_buffer = (struct r600_resource *)
+ rtex->cmask_buffer =
si_aligned_buffer_create(&sscreen->b,
SI_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
if (scratch_bo_size < scratch_needed) {
r600_resource_reference(&sctx->compute_scratch_buffer, NULL);
- sctx->compute_scratch_buffer = (struct r600_resource*)
+ sctx->compute_scratch_buffer =
si_aligned_buffer_create(&sctx->screen->b,
- SI_RESOURCE_FLAG_UNMAPPABLE,
- PIPE_USAGE_DEFAULT,
- scratch_needed, 256);
+ SI_RESOURCE_FLAG_UNMAPPABLE,
+ PIPE_USAGE_DEFAULT,
+ scratch_needed, 256);
if (!sctx->compute_scratch_buffer)
return false;
int i;
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- (struct r600_resource *)info->indirect,
+ r600_resource(info->indirect),
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
for (i = 0; i < 3; ++i) {
uint64_t base_va = r600_resource(info->indirect)->gpu_address;
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- (struct r600_resource *)info->indirect,
+ r600_resource(info->indirect),
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
/* Global buffers */
for (i = 0; i < MAX_GLOBAL_BUFFERS; i++) {
struct r600_resource *buffer =
- (struct r600_resource*)program->global_buffers[i];
+ r600_resource(program->global_buffers[i]);
if (!buffer) {
continue;
}
/* This must be done after need_cs_space. */
if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- (struct r600_resource*)dst,
+ r600_resource(dst),
RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
if (src)
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- (struct r600_resource*)src,
+ r600_resource(src),
RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
}
if (!sctx->scratch_buffer ||
sctx->scratch_buffer->b.b.width0 < scratch_size) {
r600_resource_reference(&sctx->scratch_buffer, NULL);
- sctx->scratch_buffer = (struct r600_resource*)
+ sctx->scratch_buffer =
si_aligned_buffer_create(&sctx->screen->b,
SI_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
resource = &tex->flushed_depth_texture->resource.b.b;
}
- rres = (struct r600_resource*)resource;
+ rres = r600_resource(resource);
priority = si_get_sampler_view_priority(rres);
radeon_add_to_gfx_buffer_list_check_mem(sctx, rres, usage, priority,
static void
si_mark_image_range_valid(const struct pipe_image_view *view)
{
- struct r600_resource *res = (struct r600_resource *)view->resource;
+ struct r600_resource *res = r600_resource(view->resource);
assert(res && res->b.b.target == PIPE_BUFFER);
struct si_screen *screen = ctx->screen;
struct r600_resource *res;
- res = (struct r600_resource *)view->resource;
+ res = r600_resource(view->resource);
if (res->b.b.target == PIPE_BUFFER) {
if (view->access & PIPE_IMAGE_ACCESS_WRITE)
return;
}
- res = (struct r600_resource *)view->resource;
+ res = r600_resource(view->resource);
if (&images->views[slot] != view)
util_copy_image_view(&images->views[slot], view);
continue;
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- (struct r600_resource*)sctx->vertex_buffer[vb].buffer.resource,
+ r600_resource(sctx->vertex_buffer[vb].buffer.resource),
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
}
uint32_t *desc = &ptr[i*4];
vb = &sctx->vertex_buffer[vbo_index];
- rbuffer = (struct r600_resource*)vb->buffer.resource;
+ rbuffer = r600_resource(vb->buffer.resource);
if (!rbuffer) {
memset(desc, 0, 16);
continue;
if (first_vb_use_mask & (1 << i)) {
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- (struct r600_resource*)vb->buffer.resource,
+ r600_resource(vb->buffer.resource),
RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
}
}
buffers->buffers[slot] = buffer;
radeon_add_to_gfx_buffer_list_check_mem(sctx,
- (struct r600_resource*)buffer,
+ r600_resource(buffer),
buffers->shader_usage_constbuf,
buffers->priority_constbuf, true);
buffers->enabled_mask |= 1u << slot;
continue;
}
- buf = (struct r600_resource *)sbuffer->buffer;
+ buf = r600_resource(sbuffer->buffer);
va = buf->gpu_address + sbuffer->buffer_offset;
desc[0] = va;
pipe_resource_reference(&buffers->buffers[slot], buffer);
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- (struct r600_resource*)buffer,
+ r600_resource(buffer),
buffers->shader_usage, buffers->priority);
buffers->enabled_mask |= 1u << slot;
} else {
sctx->descriptors_dirty |= 1u << descriptors_idx;
radeon_add_to_gfx_buffer_list_check_mem(sctx,
- (struct r600_resource *)buf,
+ r600_resource(buf),
usage, priority, true);
}
}
img_handle = (struct si_image_handle *)entry->data;
view = &img_handle->view;
- res = (struct r600_resource *)view->resource;
+ res = r600_resource(view->resource);
if (resident) {
if (res->b.b.target != PIPE_BUFFER) {
{
struct radeon_winsys_cs *cs = ctx->dma_cs;
unsigned i, ncopy, count, max_size, sub_cmd, shift;
- struct r600_resource *rdst = (struct r600_resource*)dst;
- struct r600_resource *rsrc = (struct r600_resource*)src;
+ struct r600_resource *rdst = r600_resource(dst);
+ struct r600_resource *rsrc = r600_resource(src);
/* Mark the buffer range of destination as valid (initialized),
* so that transfer_map knows it should wait for the GPU when mapping
pipe_reference_init(&ctx->current_saved_cs->reference, 1);
- ctx->current_saved_cs->trace_buf = (struct r600_resource*)
- pipe_buffer_create(ctx->b.screen, 0,
- PIPE_USAGE_STAGING, 8);
+ ctx->current_saved_cs->trace_buf = r600_resource(
+ pipe_buffer_create(ctx->b.screen, 0, PIPE_USAGE_STAGING, 8));
if (!ctx->current_saved_cs->trace_buf) {
free(ctx->current_saved_cs);
ctx->current_saved_cs = NULL;
if (sctx->chip_class == CIK ||
sctx->chip_class == VI ||
sctx->chip_class == GFX9) {
- sctx->eop_bug_scratch = (struct r600_resource*)
- pipe_buffer_create(&sscreen->b, 0, PIPE_USAGE_DEFAULT,
- 16 * sscreen->info.num_render_backends);
+ sctx->eop_bug_scratch = r600_resource(
+ pipe_buffer_create(&sscreen->b, 0, PIPE_USAGE_DEFAULT,
+ 16 * sscreen->info.num_render_backends));
if (!sctx->eop_bug_scratch)
goto fail;
}
if (!sctx->border_color_table)
goto fail;
- sctx->border_color_buffer = (struct r600_resource*)
+ sctx->border_color_buffer = r600_resource(
pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT,
SI_MAX_BORDER_COLORS *
- sizeof(*sctx->border_color_table));
+ sizeof(*sctx->border_color_table)));
if (!sctx->border_color_buffer)
goto fail;
sctx->sample_mask = 0xffff;
if (sctx->chip_class >= GFX9) {
- sctx->wait_mem_scratch = (struct r600_resource*)
- pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT, 4);
+ sctx->wait_mem_scratch = r600_resource(
+ pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT, 4));
if (!sctx->wait_mem_scratch)
goto fail;
* if NUM_RECORDS == 0). We need to use a dummy buffer instead. */
if (sctx->chip_class == CIK) {
sctx->null_const_buf.buffer =
- si_aligned_buffer_create(screen,
- SI_RESOURCE_FLAG_32BIT,
+ pipe_aligned_buffer_create(screen,
+ SI_RESOURCE_FLAG_32BIT,
PIPE_USAGE_DEFAULT, 16,
sctx->screen->info.tcc_cache_line_size);
if (!sctx->null_const_buf.buffer)
uint64_t size, unsigned alignment);
bool si_alloc_resource(struct si_screen *sscreen,
struct r600_resource *res);
-struct pipe_resource *si_aligned_buffer_create(struct pipe_screen *screen,
- unsigned flags,
- unsigned usage,
- unsigned size,
- unsigned alignment);
+struct pipe_resource *pipe_aligned_buffer_create(struct pipe_screen *screen,
+ unsigned flags, unsigned usage,
+ unsigned size, unsigned alignment);
+struct r600_resource *si_aligned_buffer_create(struct pipe_screen *screen,
+ unsigned flags, unsigned usage,
+ unsigned size, unsigned alignment);
void si_replace_buffer_storage(struct pipe_context *ctx,
struct pipe_resource *dst,
struct pipe_resource *src);
static inline void
si_context_add_resource_size(struct si_context *sctx, struct pipe_resource *r)
{
- struct r600_resource *res = (struct r600_resource *)r;
-
- if (res) {
+ if (r) {
/* Add memory usage for need_gfx_cs_space */
- sctx->vram += res->vram_usage;
- sctx->gtt += res->gart_usage;
+ sctx->vram += r600_resource(r)->vram_usage;
+ sctx->gtt += r600_resource(r)->gart_usage;
}
}
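
Note on the hunk above: since r600_resource() is a static inline cast (see
the sketch near the top of the patch), calling it twice compiles to the same
code the old temporary produced; the rewrite only removes a redundant local.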
r600_resource_reference(&state->indirect_buffer, NULL);
/* TODO: this hangs with 1024 or higher alignment on GFX9. */
- state->indirect_buffer = (struct r600_resource*)
+ state->indirect_buffer =
si_aligned_buffer_create(screen, 0,
PIPE_USAGE_DEFAULT, aligned_ndw * 4,
256);
* being written by the gpu, hence staging is probably a good
* usage pattern.
*/
- struct r600_resource *buf = (struct r600_resource*)
+ struct r600_resource *buf = r600_resource(
pipe_buffer_create(&sscreen->b, 0,
- PIPE_USAGE_STAGING, buf_size);
+ PIPE_USAGE_STAGING, buf_size));
if (!buf)
return NULL;
ssbo[2].buffer_offset = offset;
ssbo[2].buffer_size = 8;
- ((struct r600_resource *)resource)->TC_L2_dirty = true;
+ r600_resource(resource)->TC_L2_dirty = true;
}
sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo);
assert(!epilog || !epilog->rodata_size);
r600_resource_reference(&shader->bo, NULL);
- shader->bo = (struct r600_resource*)
- si_aligned_buffer_create(&sscreen->b,
+ shader->bo = si_aligned_buffer_create(&sscreen->b,
sscreen->cpdma_prefetch_writes_memory ?
0 : SI_RESOURCE_FLAG_READ_ONLY,
PIPE_USAGE_IMMUTABLE,
/* Buffer resource. */
if (texture->target == PIPE_BUFFER) {
si_make_buffer_descriptor(sctx->screen,
- (struct r600_resource *)texture,
+ r600_resource(texture),
state->format,
state->u.buf.offset,
state->u.buf.size,
index_va = r600_resource(indexbuf)->gpu_address + index_offset;
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- (struct r600_resource *)indexbuf,
+ r600_resource(indexbuf),
RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
} else {
/* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
radeon_emit(cs, indirect_va >> 32);
radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
- (struct r600_resource *)indirect->buffer,
+ r600_resource(indirect->buffer),
RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
unsigned di_src_sel = index_size ? V_0287F0_DI_SRC_SEL_DMA
if (indirect->indirect_draw_count) {
struct r600_resource *params_buf =
- (struct r600_resource *)indirect->indirect_draw_count;
+ r600_resource(indirect->indirect_draw_count);
radeon_add_to_buffer_list(
sctx, sctx->gfx_cs, params_buf,
if (update_esgs) {
pipe_resource_reference(&sctx->esgs_ring, NULL);
sctx->esgs_ring =
- si_aligned_buffer_create(sctx->b.screen,
+ pipe_aligned_buffer_create(sctx->b.screen,
SI_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
esgs_ring_size, alignment);
if (update_gsvs) {
pipe_resource_reference(&sctx->gsvs_ring, NULL);
sctx->gsvs_ring =
- si_aligned_buffer_create(sctx->b.screen,
+ pipe_aligned_buffer_create(sctx->b.screen,
SI_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
gsvs_ring_size, alignment);
/* Create a bigger scratch buffer */
r600_resource_reference(&sctx->scratch_buffer, NULL);
- sctx->scratch_buffer = (struct r600_resource*)
+ sctx->scratch_buffer =
si_aligned_buffer_create(&sctx->screen->b,
SI_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,
/* The address must be aligned to 2^19, because the shader only
* receives the high 13 bits.
*/
- sctx->tess_rings = si_aligned_buffer_create(sctx->b.screen,
+ sctx->tess_rings = pipe_aligned_buffer_create(sctx->b.screen,
SI_RESOURCE_FLAG_32BIT,
PIPE_USAGE_DEFAULT,
sctx->screen->tess_offchip_ring_size +
{
struct si_context *sctx = (struct si_context *)ctx;
struct si_streamout_target *t;
- struct r600_resource *rbuffer = (struct r600_resource*)buffer;
+ struct r600_resource *rbuffer = r600_resource(buffer);
t = CALLOC_STRUCT(si_streamout_target);
if (!t) {
pipe_resource_reference(&buffers->buffers[bufidx],
buffer);
radeon_add_to_gfx_buffer_list_check_mem(sctx,
- (struct r600_resource*)buffer,
+ r600_resource(buffer),
buffers->shader_usage,
RADEON_PRIO_SHADER_RW_BUFFER,
true);
{
struct si_screen *sscreen = (struct si_screen*)screen;
struct si_context *sctx;
- struct r600_resource *res = (struct r600_resource*)resource;
+ struct r600_resource *res = r600_resource(resource);
struct r600_texture *rtex = (struct r600_texture*)resource;
struct radeon_bo_metadata metadata;
bool update_metadata = false;
&trans->b.b.layer_stride);
}
- trans->staging = (struct r600_resource*)staging_depth;
+ trans->staging = &staging_depth->resource;
buf = trans->staging;
} else if (use_staging_texture) {
struct pipe_resource resource;
tex->dcc_separate_buffer = tex->last_dcc_separate_buffer;
tex->last_dcc_separate_buffer = NULL;
} else {
- tex->dcc_separate_buffer = (struct r600_resource*)
+ tex->dcc_separate_buffer =
si_aligned_buffer_create(sctx->b.screen,
SI_RESOURCE_FLAG_UNMAPPABLE,
PIPE_USAGE_DEFAULT,