"radeon_winsys_cs_handle *cs_buf" is now equivalent to "pb_buffer *buf".
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Reviewed-by: Edward O'Callaghan <eocallaghan@alterapraxis.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
/* The buffer where query results are stored. */
struct pb_buffer *buf;
- struct radeon_winsys_cs_handle *cs_buf;
};
struct r300_surface {
/* Winsys buffer backing the texture. */
struct pb_buffer *buf;
- struct radeon_winsys_cs_handle *cs_buf;
enum radeon_bo_domain domain;
/* Winsys buffer backing this resource. */
struct pb_buffer *buf;
- struct radeon_winsys_cs_handle *cs_buf;
enum radeon_bo_domain domain;
/* Constant buffers and SWTCL vertex and index buffers are in user
struct draw_context* draw;
/* Vertex buffer for SW TCL. */
struct pb_buffer *vbo;
- struct radeon_winsys_cs_handle *vbo_cs;
/* Offset and size into the SW TCL VBO. */
size_t draw_vbo_offset;
#define OUT_CS_RELOC(r) do { \
assert((r)); \
- assert((r)->cs_buf); \
+ assert((r)->buf); \
OUT_CS(0xc0001000); /* PKT3_NOP */ \
- OUT_CS(cs_winsys->cs_lookup_buffer(cs_copy, (r)->cs_buf) * 4); \
+ OUT_CS(cs_winsys->cs_lookup_buffer(cs_copy, (r)->buf) * 4); \
} while (0)
OUT_CS(r300->draw_vbo_offset);
OUT_CS(0);
- assert(r300->vbo_cs);
+ assert(r300->vbo);
OUT_CS(0xc0001000); /* PKT3_NOP */
- OUT_CS(r300->rws->cs_lookup_buffer(r300->cs, r300->vbo_cs) * 4);
+ OUT_CS(r300->rws->cs_lookup_buffer(r300->cs, r300->vbo) * 4);
END_CS;
}
continue;
tex = r300_resource(fb->cbufs[i]->texture);
assert(tex && tex->buf && "cbuf is marked, but NULL!");
- r300->rws->cs_add_buffer(r300->cs, tex->cs_buf,
+ r300->rws->cs_add_buffer(r300->cs, tex->buf,
RADEON_USAGE_READWRITE,
r300_surface(fb->cbufs[i])->domain,
tex->b.b.nr_samples > 1 ?
if (fb->zsbuf) {
tex = r300_resource(fb->zsbuf->texture);
assert(tex && tex->buf && "zsbuf is marked, but NULL!");
- r300->rws->cs_add_buffer(r300->cs, tex->cs_buf,
+ r300->rws->cs_add_buffer(r300->cs, tex->buf,
RADEON_USAGE_READWRITE,
r300_surface(fb->zsbuf)->domain,
tex->b.b.nr_samples > 1 ?
/* The AA resolve buffer. */
if (r300->aa_state.dirty) {
if (aa->dest) {
- r300->rws->cs_add_buffer(r300->cs, aa->dest->cs_buf,
+ r300->rws->cs_add_buffer(r300->cs, aa->dest->buf,
RADEON_USAGE_WRITE,
aa->dest->domain,
RADEON_PRIO_COLOR_BUFFER);
}
tex = r300_resource(texstate->sampler_views[i]->base.texture);
- r300->rws->cs_add_buffer(r300->cs, tex->cs_buf, RADEON_USAGE_READ,
+ r300->rws->cs_add_buffer(r300->cs, tex->buf, RADEON_USAGE_READ,
tex->domain, RADEON_PRIO_SAMPLER_TEXTURE);
}
}
/* ...occlusion query buffer... */
if (r300->query_current)
- r300->rws->cs_add_buffer(r300->cs, r300->query_current->cs_buf,
+ r300->rws->cs_add_buffer(r300->cs, r300->query_current->buf,
RADEON_USAGE_WRITE, RADEON_DOMAIN_GTT,
RADEON_PRIO_QUERY);
/* ...vertex buffer for SWTCL path... */
- if (r300->vbo_cs)
- r300->rws->cs_add_buffer(r300->cs, r300->vbo_cs,
+ if (r300->vbo)
+ r300->rws->cs_add_buffer(r300->cs, r300->vbo,
RADEON_USAGE_READ, RADEON_DOMAIN_GTT,
RADEON_PRIO_VERTEX_BUFFER);
/* ...vertex buffers for HWTCL path... */
if (!buf)
continue;
- r300->rws->cs_add_buffer(r300->cs, r300_resource(buf)->cs_buf,
+ r300->rws->cs_add_buffer(r300->cs, r300_resource(buf)->buf,
RADEON_USAGE_READ,
r300_resource(buf)->domain,
RADEON_PRIO_SAMPLER_BUFFER);
}
/* ...and index buffer for HWTCL path. */
if (index_buffer)
- r300->rws->cs_add_buffer(r300->cs, r300_resource(index_buffer)->cs_buf,
+ r300->rws->cs_add_buffer(r300->cs, r300_resource(index_buffer)->buf,
RADEON_USAGE_READ,
r300_resource(index_buffer)->domain,
RADEON_PRIO_INDEX_BUFFER);
FREE(q);
return NULL;
}
- q->cs_buf = r300->rws->buffer_get_cs_handle(q->buf);
-
return (struct pipe_query*)q;
}
return vresult->b;
}
- map = r300->rws->buffer_map(q->cs_buf, r300->cs,
+ map = r300->rws->buffer_map(q->buf, r300->cs,
PIPE_TRANSFER_READ |
(!wait ? PIPE_TRANSFER_DONTBLOCK : 0));
if (!map)
/* Map the buffer. */
if (!map[vbi]) {
map[vbi] = (uint32_t*)r300->rws->buffer_map(
- r300_resource(vbuf->buffer)->cs_buf,
+ r300_resource(vbuf->buffer)->buf,
r300->cs, PIPE_TRANSFER_READ | PIPE_TRANSFER_UNSYNCHRONIZED);
map[vbi] += (vbuf->buffer_offset / 4) + stride[i] * info->start;
}
/* Fallback for misaligned ushort indices. */
if (indexSize == 2 && (start & 1) && indexBuffer) {
/* If we got here, then orgIndexBuffer == indexBuffer. */
- uint16_t *ptr = r300->rws->buffer_map(r300_resource(orgIndexBuffer)->cs_buf,
+ uint16_t *ptr = r300->rws->buffer_map(r300_resource(orgIndexBuffer)->buf,
r300->cs,
PIPE_TRANSFER_READ |
PIPE_TRANSFER_UNSYNCHRONIZED);
if (!r300->vbo || size + r300->draw_vbo_offset > r300->vbo->size) {
pb_reference(&r300->vbo, NULL);
- r300->vbo_cs = NULL;
r300render->vbo_ptr = NULL;
r300->vbo = rws->buffer_create(rws,
if (!r300->vbo) {
return FALSE;
}
- r300->vbo_cs = rws->buffer_get_cs_handle(r300->vbo);
r300->draw_vbo_offset = 0;
- r300render->vbo_ptr = rws->buffer_map(r300->vbo_cs, r300->cs,
+ r300render->vbo_ptr = rws->buffer_map(r300->vbo, r300->cs,
PIPE_TRANSFER_WRITE);
}
assert(usage & PIPE_TRANSFER_WRITE);
/* Check if mapping this buffer would cause waiting for the GPU. */
- if (r300->rws->cs_is_buffer_referenced(r300->cs, rbuf->cs_buf, RADEON_USAGE_READWRITE) ||
+ if (r300->rws->cs_is_buffer_referenced(r300->cs, rbuf->buf, RADEON_USAGE_READWRITE) ||
!r300->rws->buffer_wait(rbuf->buf, 0, RADEON_USAGE_READWRITE)) {
unsigned i;
struct pb_buffer *new_buf;
/* Discard the old buffer. */
pb_reference(&rbuf->buf, NULL);
rbuf->buf = new_buf;
- rbuf->cs_buf = r300->rws->buffer_get_cs_handle(rbuf->buf);
/* We changed the buffer, now we need to bind it where the old one was bound. */
for (i = 0; i < r300->nr_vertex_buffers; i++) {
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
}
- map = rws->buffer_map(rbuf->cs_buf, r300->cs, usage);
+ map = rws->buffer_map(rbuf->buf, r300->cs, usage);
if (!map) {
util_slab_free(&r300->pool_transfers, transfer);
FREE(rbuf);
return NULL;
}
-
- rbuf->cs_buf =
- r300screen->rws->buffer_get_cs_handle(rbuf->buf);
-
return &rbuf->b.b;
}
util_format_is_depth_or_stencil(base->format) ? "depth" : "color");
}
- tex->cs_buf = rws->buffer_get_cs_handle(tex->buf);
-
rws->buffer_set_tiling(tex->buf, NULL,
tex->tex.microtile, tex->tex.macrotile[0],
0, 0, 0, 0, 0, 0, 0,
surface->base.u.tex.last_layer = surf_tmpl->u.tex.last_layer;
surface->buf = tex->buf;
- surface->cs_buf = tex->cs_buf;
/* Prefer VRAM if there are multiple domains to choose from. */
surface->domain = tex->domain;
char *map;
referenced_cs =
- r300->rws->cs_is_buffer_referenced(r300->cs, tex->cs_buf, RADEON_USAGE_READWRITE);
+ r300->rws->cs_is_buffer_referenced(r300->cs, tex->buf, RADEON_USAGE_READWRITE);
if (referenced_cs) {
referenced_hw = TRUE;
} else {
if (trans->linear_texture) {
/* The detiled texture is of the same size as the region being mapped
* (no offset needed). */
- map = r300->rws->buffer_map(trans->linear_texture->cs_buf,
+ map = r300->rws->buffer_map(trans->linear_texture->buf,
r300->cs, usage);
if (!map) {
pipe_resource_reference(
return map;
} else {
/* Tiling is disabled. */
- map = r300->rws->buffer_map(tex->cs_buf, r300->cs, usage);
+ map = r300->rws->buffer_map(tex->buf, r300->cs, usage);
if (!map) {
FREE(trans);
return NULL;
shader->bc.ndw * 4);
p = r600_buffer_map_sync_with_rings(&ctx->b, shader->code_bo, PIPE_TRANSFER_WRITE);
memcpy(p, shader->bc.bytecode, shader->bc.ndw * 4);
- ctx->b.ws->buffer_unmap(shader->code_bo->cs_buf);
+ ctx->b.ws->buffer_unmap(shader->code_bo->buf);
#endif
#endif
kernel->bc.ndw * 4);
p = r600_buffer_map_sync_with_rings(&ctx->b, kernel->code_bo, PIPE_TRANSFER_WRITE);
memcpy(p, kernel->bc.bytecode, kernel->bc.ndw * 4);
- ctx->b.ws->buffer_unmap(kernel->code_bo->cs_buf);
+ ctx->b.ws->buffer_unmap(kernel->code_bo->buf);
}
shader->active_kernel = kernel;
ctx->cs_shader_state.kernel_index = pc;
} else {
memcpy(bytecode, bc.bytecode, fs_size);
}
- rctx->b.ws->buffer_unmap(shader->buffer->cs_buf);
+ rctx->b.ws->buffer_unmap(shader->buffer->buf);
r600_bytecode_clear(&bc);
return shader;
/**
* Global buffers are not really resources, they are actually offsets
* into a single global resource (r600_screen::global_pool). This means
- * they don't have their own cs_buf handle, so they cannot be passed
+ * they don't have their own buf handle, so they cannot be passed
* to r600_copy_buffer() and must be handled separately.
*/
static void r600_copy_global_buffer(struct pipe_context *ctx,
rctx->b.gfx.cs = ws->cs_create(rctx->b.ctx, RING_GFX,
r600_context_gfx_flush, rctx,
rscreen->b.trace_bo ?
- rscreen->b.trace_bo->cs_buf : NULL);
+ rscreen->b.trace_bo->buf : NULL);
rctx->b.gfx.flush = r600_context_gfx_flush;
rctx->allocator_fetch_shader = u_suballocator_create(&rctx->b.b, 64 * 1024, 256,
templ.usage = PIPE_USAGE_DEFAULT;
struct r600_resource *res = r600_resource(rscreen->screen.resource_create(&rscreen->screen, &templ));
- unsigned char *map = ws->buffer_map(res->cs_buf, NULL, PIPE_TRANSFER_WRITE);
+ unsigned char *map = ws->buffer_map(res->buf, NULL, PIPE_TRANSFER_WRITE);
memset(map, 0, 256);
} else {
memcpy(ptr, shader->shader.bc.bytecode, shader->shader.bc.ndw * sizeof(*ptr));
}
- rctx->b.ws->buffer_unmap(shader->bo->cs_buf);
+ rctx->b.ws->buffer_unmap(shader->bo->buf);
}
return 0;
if (!resources[i])
continue;
- /* recreate the CS handle */
- resources[i]->resource.cs_buf = ctx->b.ws->buffer_get_cs_handle(
- resources[i]->resource.buf);
+ /* reset the address */
resources[i]->resource.gpu_address = ctx->b.ws->buffer_get_virtual_address(
- resources[i]->resource.cs_buf);
+ resources[i]->resource.buf);
}
template.height *= array_size;
}
/* set the decoding target buffer offsets */
-static struct radeon_winsys_cs_handle* r600_uvd_set_dtb(struct ruvd_msg *msg, struct vl_video_buffer *buf)
+static struct pb_buffer* r600_uvd_set_dtb(struct ruvd_msg *msg, struct vl_video_buffer *buf)
{
struct r600_screen *rscreen = (struct r600_screen*)buf->base.context->screen;
struct r600_texture *luma = (struct r600_texture *)buf->resources[0];
ruvd_set_dt_surfaces(msg, &luma->surface, &chroma->surface);
- return luma->resource.cs_buf;
+ return luma->resource.buf;
}
/* get the radeon resources for VCE */
static void r600_vce_get_buffer(struct pipe_resource *resource,
- struct radeon_winsys_cs_handle **handle,
+ struct pb_buffer **handle,
struct radeon_surf **surface)
{
struct r600_texture *res = (struct r600_texture *)resource;
if (handle)
- *handle = res->resource.cs_buf;
+ *handle = res->resource.buf;
if (surface)
*surface = &res->surface;
#include <stdio.h>
boolean r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
- struct radeon_winsys_cs_handle *buf,
+ struct pb_buffer *buf,
enum radeon_bo_usage usage)
{
if (ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, buf, usage)) {
bool busy = false;
if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
- return ctx->ws->buffer_map(resource->cs_buf, NULL, usage);
+ return ctx->ws->buffer_map(resource->buf, NULL, usage);
}
if (!(usage & PIPE_TRANSFER_WRITE)) {
if (ctx->gfx.cs->cdw != ctx->initial_gfx_cs_size &&
ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
- resource->cs_buf, rusage)) {
+ resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
return NULL;
if (ctx->dma.cs &&
ctx->dma.cs->cdw &&
ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
- resource->cs_buf, rusage)) {
+ resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
return NULL;
}
/* Setting the CS to NULL will prevent doing checks we have done already. */
- return ctx->ws->buffer_map(resource->cs_buf, NULL, usage);
+ return ctx->ws->buffer_map(resource->buf, NULL, usage);
}
bool r600_init_resource(struct r600_common_screen *rscreen,
* the same buffer where one of the contexts invalidates it while
* the others are using it. */
old_buf = res->buf;
- res->cs_buf = rscreen->ws->buffer_get_cs_handle(new_buf); /* should be atomic */
res->buf = new_buf; /* should be atomic */
if (rscreen->info.r600_virtual_address)
- res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->cs_buf);
+ res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->buf);
else
res->gpu_address = 0;
assert(usage & PIPE_TRANSFER_WRITE);
/* Check if mapping this buffer would cause waiting for the GPU. */
- if (r600_rings_is_buffer_referenced(rctx, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
+ if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
!rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
}
assert(usage & PIPE_TRANSFER_WRITE);
/* Check if mapping this buffer would cause waiting for the GPU. */
- if (r600_rings_is_buffer_referenced(rctx, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
+ if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
!rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
/* Do a wait-free write-only transfer using a temporary buffer. */
unsigned offset;
return NULL;
}
- rbuffer->cs_buf = ws->buffer_get_cs_handle(rbuffer->buf);
-
if (rscreen->info.r600_virtual_address)
rbuffer->gpu_address =
- ws->buffer_get_virtual_address(rbuffer->cs_buf);
+ ws->buffer_get_virtual_address(rbuffer->buf);
else
rbuffer->gpu_address = 0;
enum radeon_bo_priority priority)
{
assert(usage);
- return rctx->ws->cs_add_buffer(ring->cs, rbo->cs_buf, usage,
+ return rctx->ws->cs_add_buffer(ring->cs, rbo->buf, usage,
rbo->domains, priority) * 4;
}
PIPE_USAGE_STAGING,
4096);
if (rscreen->trace_bo) {
- rscreen->trace_ptr = rscreen->ws->buffer_map(rscreen->trace_bo->cs_buf, NULL,
+ rscreen->trace_ptr = rscreen->ws->buffer_map(rscreen->trace_bo->buf, NULL,
PIPE_TRANSFER_UNSYNCHRONIZED);
}
}
/* Winsys objects. */
struct pb_buffer *buf;
- struct radeon_winsys_cs_handle *cs_buf;
uint64_t gpu_address;
/* Resource state. */
/* r600_buffer.c */
boolean r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
- struct radeon_winsys_cs_handle *buf,
+ struct pb_buffer *buf,
enum radeon_bo_usage usage);
void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
struct r600_resource *resource,
struct r600_resource *buffer)
{
/* Callers ensure that the buffer is currently unused by the GPU. */
- uint32_t *results = ctx->ws->buffer_map(buffer->cs_buf, NULL,
+ uint32_t *results = ctx->ws->buffer_map(buffer->buf, NULL,
PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_UNSYNCHRONIZED);
if (query->flags & R600_QUERY_HW_FLAG_PREDICATE) {
/* Obtain a new buffer if the current one can't be mapped without a stall. */
- if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
+ if (r600_rings_is_buffer_referenced(rctx, query->buffer.buf->buf, RADEON_USAGE_READWRITE) ||
!rctx->ws->buffer_wait(query->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
pipe_resource_reference((struct pipe_resource**)&query->buffer.buf, NULL);
query->buffer.buf = r600_new_query_buffer(rctx, query);
}
} else {
resource->buf = buf;
- resource->cs_buf = rscreen->ws->buffer_get_cs_handle(buf);
- resource->gpu_address = rscreen->ws->buffer_get_virtual_address(resource->cs_buf);
- resource->domains = rscreen->ws->buffer_get_initial_domain(resource->cs_buf);
+ resource->gpu_address = rscreen->ws->buffer_get_virtual_address(resource->buf);
+ resource->domains = rscreen->ws->buffer_get_initial_domain(resource->buf);
}
if (rtex->cmask.size) {
/* Untiled buffers in VRAM, which is slow for CPU reads */
use_staging_texture = TRUE;
} else if (!(usage & PIPE_TRANSFER_READ) &&
- (r600_rings_is_buffer_referenced(rctx, rtex->resource.cs_buf, RADEON_USAGE_READWRITE) ||
+ (r600_rings_is_buffer_referenced(rctx, rtex->resource.buf, RADEON_USAGE_READWRITE) ||
!rctx->ws->buffer_wait(rtex->resource.buf, 0, RADEON_USAGE_READWRITE))) {
/* Use a staging texture for uploads if the underlying BO is busy. */
use_staging_texture = TRUE;
/* send a command to the VCPU through the GPCOM registers */
static void send_cmd(struct ruvd_decoder *dec, unsigned cmd,
- struct radeon_winsys_cs_handle* cs_buf, uint32_t off,
+ struct pb_buffer* buf, uint32_t off,
enum radeon_bo_usage usage, enum radeon_bo_domain domain)
{
int reloc_idx;
- reloc_idx = dec->ws->cs_add_buffer(dec->cs, cs_buf, usage, domain,
+ reloc_idx = dec->ws->cs_add_buffer(dec->cs, buf, usage, domain,
RADEON_PRIO_UVD);
if (!dec->use_legacy) {
uint64_t addr;
- addr = dec->ws->buffer_get_virtual_address(cs_buf);
+ addr = dec->ws->buffer_get_virtual_address(buf);
addr = addr + off;
set_reg(dec, RUVD_GPCOM_VCPU_DATA0, addr);
set_reg(dec, RUVD_GPCOM_VCPU_DATA1, addr >> 32);
buf = &dec->msg_fb_it_buffers[dec->cur_buffer];
/* and map it for CPU access */
- ptr = dec->ws->buffer_map(buf->res->cs_buf, dec->cs, PIPE_TRANSFER_WRITE);
+ ptr = dec->ws->buffer_map(buf->res->buf, dec->cs, PIPE_TRANSFER_WRITE);
/* calc buffer offsets */
dec->msg = (struct ruvd_msg *)ptr;
buf = &dec->msg_fb_it_buffers[dec->cur_buffer];
/* unmap the buffer */
- dec->ws->buffer_unmap(buf->res->cs_buf);
+ dec->ws->buffer_unmap(buf->res->buf);
dec->msg = NULL;
dec->fb = NULL;
dec->it = NULL;
/* and send it to the hardware */
- send_cmd(dec, RUVD_CMD_MSG_BUFFER, buf->res->cs_buf, 0,
+ send_cmd(dec, RUVD_CMD_MSG_BUFFER, buf->res->buf, 0,
RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
}
dec->bs_size = 0;
dec->bs_ptr = dec->ws->buffer_map(
- dec->bs_buffers[dec->cur_buffer].res->cs_buf,
+ dec->bs_buffers[dec->cur_buffer].res->buf,
dec->cs, PIPE_TRANSFER_WRITE);
}
unsigned new_size = dec->bs_size + sizes[i];
if (new_size > buf->res->buf->size) {
- dec->ws->buffer_unmap(buf->res->cs_buf);
+ dec->ws->buffer_unmap(buf->res->buf);
if (!rvid_resize_buffer(dec->screen, dec->cs, buf, new_size)) {
RVID_ERR("Can't resize bitstream buffer!");
return;
}
- dec->bs_ptr = dec->ws->buffer_map(buf->res->cs_buf, dec->cs,
+ dec->bs_ptr = dec->ws->buffer_map(buf->res->buf, dec->cs,
PIPE_TRANSFER_WRITE);
if (!dec->bs_ptr)
return;
struct pipe_picture_desc *picture)
{
struct ruvd_decoder *dec = (struct ruvd_decoder*)decoder;
- struct radeon_winsys_cs_handle *dt;
+ struct pb_buffer *dt;
struct rvid_buffer *msg_fb_it_buf, *bs_buf;
unsigned bs_size;
bs_size = align(dec->bs_size, 128);
memset(dec->bs_ptr, 0, bs_size - dec->bs_size);
- dec->ws->buffer_unmap(bs_buf->res->cs_buf);
+ dec->ws->buffer_unmap(bs_buf->res->buf);
map_msg_fb_it_buf(dec);
dec->msg->size = sizeof(*dec->msg);
send_msg_buf(dec);
- send_cmd(dec, RUVD_CMD_DPB_BUFFER, dec->dpb.res->cs_buf, 0,
+ send_cmd(dec, RUVD_CMD_DPB_BUFFER, dec->dpb.res->buf, 0,
RADEON_USAGE_READWRITE, RADEON_DOMAIN_VRAM);
if (u_reduce_video_profile(picture->profile) == PIPE_VIDEO_FORMAT_HEVC) {
- send_cmd(dec, RUVD_CMD_CONTEXT_BUFFER, dec->ctx.res->cs_buf, 0,
+ send_cmd(dec, RUVD_CMD_CONTEXT_BUFFER, dec->ctx.res->buf, 0,
RADEON_USAGE_READWRITE, RADEON_DOMAIN_VRAM);
}
- send_cmd(dec, RUVD_CMD_BITSTREAM_BUFFER, bs_buf->res->cs_buf,
+ send_cmd(dec, RUVD_CMD_BITSTREAM_BUFFER, bs_buf->res->buf,
0, RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
send_cmd(dec, RUVD_CMD_DECODING_TARGET_BUFFER, dt, 0,
RADEON_USAGE_WRITE, RADEON_DOMAIN_VRAM);
- send_cmd(dec, RUVD_CMD_FEEDBACK_BUFFER, msg_fb_it_buf->res->cs_buf,
+ send_cmd(dec, RUVD_CMD_FEEDBACK_BUFFER, msg_fb_it_buf->res->buf,
FB_BUFFER_OFFSET, RADEON_USAGE_WRITE, RADEON_DOMAIN_GTT);
if (have_it(dec))
- send_cmd(dec, RUVD_CMD_ITSCALING_TABLE_BUFFER, msg_fb_it_buf->res->cs_buf,
+ send_cmd(dec, RUVD_CMD_ITSCALING_TABLE_BUFFER, msg_fb_it_buf->res->buf,
FB_BUFFER_OFFSET + FB_BUFFER_SIZE, RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
set_reg(dec, RUVD_ENGINE_CNTL, 1);
};
/* driver dependent callback */
-typedef struct radeon_winsys_cs_handle* (*ruvd_set_dtb)
+typedef struct pb_buffer* (*ruvd_set_dtb)
(struct ruvd_msg* msg, struct vl_video_buffer *vb);
/* create a UVD decoder */
#if 0
static void dump_feedback(struct rvce_encoder *enc, struct rvid_buffer *fb)
{
- uint32_t *ptr = enc->ws->buffer_map(fb->res->cs_buf, enc->cs, PIPE_TRANSFER_READ_WRITE);
+ uint32_t *ptr = enc->ws->buffer_map(fb->res->buf, enc->cs, PIPE_TRANSFER_READ_WRITE);
unsigned i = 0;
fprintf(stderr, "\n");
fprintf(stderr, "encStatus:\t\t\t%08x\n", ptr[i++]);
fprintf(stderr, "seiPrivatePackageOffset:\t%08x\n", ptr[i++]);
fprintf(stderr, "seiPrivatePackageSize:\t\t%08x\n", ptr[i++]);
fprintf(stderr, "\n");
- enc->ws->buffer_unmap(fb->res->cs_buf);
+ enc->ws->buffer_unmap(fb->res->buf);
}
#endif
struct rvid_buffer *fb = feedback;
if (size) {
- uint32_t *ptr = enc->ws->buffer_map(fb->res->cs_buf, enc->cs, PIPE_TRANSFER_READ_WRITE);
+ uint32_t *ptr = enc->ws->buffer_map(fb->res->buf, enc->cs, PIPE_TRANSFER_READ_WRITE);
if (ptr[1]) {
*size = ptr[4] - ptr[9];
*size = 0;
}
- enc->ws->buffer_unmap(fb->res->cs_buf);
+ enc->ws->buffer_unmap(fb->res->buf);
}
//dump_feedback(enc, fb);
rvid_destroy_buffer(fb);
/**
* Add the buffer as a relocation to the current command submission
*/
-void rvce_add_buffer(struct rvce_encoder *enc, struct radeon_winsys_cs_handle *buf,
+void rvce_add_buffer(struct rvce_encoder *enc, struct pb_buffer *buf,
enum radeon_bo_usage usage, enum radeon_bo_domain domain,
signed offset)
{
/* driver dependent callback */
typedef void (*rvce_get_buffer)(struct pipe_resource *resource,
- struct radeon_winsys_cs_handle **handle,
+ struct pb_buffer **handle,
struct radeon_surf **surface);
/* Coded picture buffer slot */
rvce_get_buffer get_buffer;
- struct radeon_winsys_cs_handle* handle;
+ struct pb_buffer* handle;
struct radeon_surf* luma;
struct radeon_surf* chroma;
- struct radeon_winsys_cs_handle* bs_handle;
+ struct pb_buffer* bs_handle;
unsigned bs_size;
struct rvce_cpb_slot *cpb_array;
bool rvce_is_fw_version_supported(struct r600_common_screen *rscreen);
-void rvce_add_buffer(struct rvce_encoder *enc, struct radeon_winsys_cs_handle *buf,
+void rvce_add_buffer(struct rvce_encoder *enc, struct pb_buffer *buf,
enum radeon_bo_usage usage, enum radeon_bo_domain domain,
signed offset);
static void feedback(struct rvce_encoder *enc)
{
RVCE_BEGIN(0x05000005); // feedback buffer
- RVCE_WRITE(enc->fb->res->cs_buf, enc->fb->res->domains, 0x0); // feedbackRingAddressHi/Lo
+ RVCE_WRITE(enc->fb->res->buf, enc->fb->res->domains, 0x0); // feedbackRingAddressHi/Lo
RVCE_CS(0x00000001); // feedbackRingSize
RVCE_END();
}
enc->task_info(enc, 0x00000003, 0, 0, 0);
RVCE_BEGIN(0x05000001); // context buffer
- RVCE_READWRITE(enc->cpb.res->cs_buf, enc->cpb.res->domains, 0x0); // encodeContextAddressHi/Lo
+ RVCE_READWRITE(enc->cpb.res->buf, enc->cpb.res->domains, 0x0); // encodeContextAddressHi/Lo
RVCE_END();
RVCE_BEGIN(0x05000004); // video bitstream buffer
enc->task_info(enc, 0x00000003, dep, 0, bs_idx);
RVCE_BEGIN(0x05000001); // context buffer
- RVCE_READWRITE(enc->cpb.res->cs_buf, enc->cpb.res->domains, 0); // encodeContextAddressHi/Lo
+ RVCE_READWRITE(enc->cpb.res->buf, enc->cpb.res->domains, 0); // encodeContextAddressHi/Lo
RVCE_END();
bs_offset = -(signed)(bs_idx * enc->bs_size);
enc->task_info(enc, 0x00000003, dep, 0, bs_idx);
RVCE_BEGIN(0x05000001); // context buffer
- RVCE_READWRITE(enc->cpb.res->cs_buf, enc->cpb.res->domains, 0); // encodeContextAddressHi/Lo
+ RVCE_READWRITE(enc->cpb.res->buf, enc->cpb.res->domains, 0); // encodeContextAddressHi/Lo
RVCE_END();
bs_offset = -(signed)(bs_idx * enc->bs_size);
if (!rvid_create_buffer(screen, new_buf, new_size, new_buf->usage))
goto error;
- src = ws->buffer_map(old_buf.res->cs_buf, cs, PIPE_TRANSFER_READ);
+ src = ws->buffer_map(old_buf.res->buf, cs, PIPE_TRANSFER_READ);
if (!src)
goto error;
- dst = ws->buffer_map(new_buf->res->cs_buf, cs, PIPE_TRANSFER_WRITE);
+ dst = ws->buffer_map(new_buf->res->buf, cs, PIPE_TRANSFER_WRITE);
if (!dst)
goto error;
dst += bytes;
memset(dst, 0, new_size);
}
- ws->buffer_unmap(new_buf->res->cs_buf);
- ws->buffer_unmap(old_buf.res->cs_buf);
+ ws->buffer_unmap(new_buf->res->buf);
+ ws->buffer_unmap(old_buf.res->buf);
rvid_destroy_buffer(&old_buf);
return true;
error:
if (src)
- ws->buffer_unmap(old_buf.res->cs_buf);
+ ws->buffer_unmap(old_buf.res->buf);
rvid_destroy_buffer(new_buf);
*new_buf = old_buf;
return false;
};
struct winsys_handle;
-struct radeon_winsys_cs_handle;
struct radeon_winsys_ctx;
struct radeon_winsys_cs {
enum radeon_bo_domain domain,
enum radeon_bo_flag flags);
- struct radeon_winsys_cs_handle *(*buffer_get_cs_handle)(
- struct pb_buffer *buf);
-
/**
* Map the entire data store of a buffer object into the client's address
* space.
* \param usage A bitmask of the PIPE_TRANSFER_* flags.
* \return The pointer at the beginning of the buffer.
*/
- void *(*buffer_map)(struct radeon_winsys_cs_handle *buf,
+ void *(*buffer_map)(struct pb_buffer *buf,
struct radeon_winsys_cs *cs,
enum pipe_transfer_usage usage);
*
* \param buf A winsys buffer object to unmap.
*/
- void (*buffer_unmap)(struct radeon_winsys_cs_handle *buf);
+ void (*buffer_unmap)(struct pb_buffer *buf);
/**
* Wait for the buffer and return true if the buffer is not used
* \param buf A winsys buffer object
* \return virtual address
*/
- uint64_t (*buffer_get_virtual_address)(struct radeon_winsys_cs_handle *buf);
+ uint64_t (*buffer_get_virtual_address)(struct pb_buffer *buf);
/**
* Query the initial placement of the buffer from the kernel driver.
*/
- enum radeon_bo_domain (*buffer_get_initial_domain)(struct radeon_winsys_cs_handle *buf);
+ enum radeon_bo_domain (*buffer_get_initial_domain)(struct pb_buffer *buf);
/**************************************************************************
* Command submission.
void (*flush)(void *ctx, unsigned flags,
struct pipe_fence_handle **fence),
void *flush_ctx,
- struct radeon_winsys_cs_handle *trace_buf);
+ struct pb_buffer *trace_buf);
/**
* Destroy a command stream.
* \return Buffer index.
*/
unsigned (*cs_add_buffer)(struct radeon_winsys_cs *cs,
- struct radeon_winsys_cs_handle *buf,
+ struct pb_buffer *buf,
enum radeon_bo_usage usage,
enum radeon_bo_domain domain,
enum radeon_bo_priority priority);
* \return The buffer index, or -1 if the buffer has not been added.
*/
int (*cs_lookup_buffer)(struct radeon_winsys_cs *cs,
- struct radeon_winsys_cs_handle *buf);
+ struct pb_buffer *buf);
/**
* Return TRUE if there is enough memory in VRAM and GTT for the buffers
* \param buf A winsys buffer.
*/
boolean (*cs_is_buffer_referenced)(struct radeon_winsys_cs *cs,
- struct radeon_winsys_cs_handle *buf,
+ struct pb_buffer *buf,
enum radeon_bo_usage usage);
/**
/* The extra num_work_size_bytes are for work group / work item size information */
kernel_args_size = program->input_size + num_work_size_bytes + 8 /* For scratch va */;
- kernel_args = sctx->b.ws->buffer_map(input_buffer->cs_buf,
+ kernel_args = sctx->b.ws->buffer_map(input_buffer->buf,
sctx->b.gfx.cs, PIPE_TRANSFER_WRITE);
for (i = 0; i < 3; i++) {
kernel_args[i] = grid_layout[i];
/* Fallback for unaligned clears. */
if (offset % 4 != 0 || size % 4 != 0) {
- uint8_t *map = sctx->b.ws->buffer_map(r600_resource(dst)->cs_buf,
+ uint8_t *map = sctx->b.ws->buffer_map(r600_resource(dst)->buf,
sctx->b.gfx.cs,
PIPE_TRANSFER_WRITE);
map += offset;
* waited for the context, so this buffer should be idle.
* If the GPU is hung, there is no point in waiting for it.
*/
- uint32_t *map = sctx->b.ws->buffer_map(sctx->last_trace_buf->cs_buf,
+ uint32_t *map = sctx->b.ws->buffer_map(sctx->last_trace_buf->buf,
NULL,
PIPE_TRANSFER_UNSYNCHRONIZED |
PIPE_TRANSFER_READ);
sctx->b.gfx.cs = ws->cs_create(sctx->b.ctx, RING_GFX, si_context_gfx_flush,
sctx, sscreen->b.trace_bo ?
- sscreen->b.trace_bo->cs_buf : NULL);
+ sscreen->b.trace_bo->buf : NULL);
sctx->b.gfx.flush = si_context_gfx_flush;
/* Border colors. */
goto fail;
sctx->border_color_map =
- ws->buffer_map(sctx->border_color_buffer->cs_buf,
+ ws->buffer_map(sctx->border_color_buffer->buf,
NULL, PIPE_TRANSFER_WRITE);
if (!sctx->border_color_map)
goto fail;
if (!shader->bo)
return -ENOMEM;
- ptr = sscreen->b.ws->buffer_map(shader->bo->cs_buf, NULL,
+ ptr = sscreen->b.ws->buffer_map(shader->bo->buf, NULL,
PIPE_TRANSFER_READ_WRITE);
util_memcpy_cpu_to_le32(ptr, binary->code, binary->code_size);
if (binary->rodata_size > 0) {
binary->rodata_size);
}
- sscreen->b.ws->buffer_unmap(shader->bo->cs_buf);
+ sscreen->b.ws->buffer_unmap(shader->bo->buf);
return 0;
}
if (!resources[i])
continue;
- /* recreate the CS handle */
- resources[i]->resource.cs_buf = ctx->b.ws->buffer_get_cs_handle(
- resources[i]->resource.buf);
+ /* reset the address */
resources[i]->resource.gpu_address = ctx->b.ws->buffer_get_virtual_address(
- resources[i]->resource.cs_buf);
+ resources[i]->resource.buf);
}
template.height *= array_size;
}
/* set the decoding target buffer offsets */
-static struct radeon_winsys_cs_handle* si_uvd_set_dtb(struct ruvd_msg *msg, struct vl_video_buffer *buf)
+static struct pb_buffer* si_uvd_set_dtb(struct ruvd_msg *msg, struct vl_video_buffer *buf)
{
struct r600_texture *luma = (struct r600_texture *)buf->resources[0];
struct r600_texture *chroma = (struct r600_texture *)buf->resources[1];
ruvd_set_dt_surfaces(msg, &luma->surface, &chroma->surface);
- return luma->resource.cs_buf;
+ return luma->resource.buf;
}
/* get the radeon resources for VCE */
static void si_vce_get_buffer(struct pipe_resource *resource,
- struct radeon_winsys_cs_handle **handle,
+ struct pb_buffer **handle,
struct radeon_surf **surface)
{
struct r600_texture *res = (struct r600_texture *)resource;
if (handle)
- *handle = res->resource.cs_buf;
+ *handle = res->resource.buf;
if (surface)
*surface = &res->surface;
}
static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
- struct radeon_winsys_cs_handle *buf)
+ struct pb_buffer *buf)
{
return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
}
amdgpu_bo_destroy(_buf);
}
-static void *amdgpu_bo_map(struct radeon_winsys_cs_handle *buf,
+static void *amdgpu_bo_map(struct pb_buffer *buf,
struct radeon_winsys_cs *rcs,
enum pipe_transfer_usage usage)
{
return r ? NULL : cpu;
}
-static void amdgpu_bo_unmap(struct radeon_winsys_cs_handle *buf)
+static void amdgpu_bo_unmap(struct pb_buffer *buf)
{
struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
amdgpu_bo_set_metadata(bo->bo, &metadata);
}
-static struct radeon_winsys_cs_handle *amdgpu_get_cs_handle(struct pb_buffer *_buf)
-{
- /* return a direct pointer to amdgpu_winsys_bo. */
- return (struct radeon_winsys_cs_handle*)_buf;
-}
-
static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
unsigned size,
return NULL;
}
-static uint64_t amdgpu_bo_get_va(struct radeon_winsys_cs_handle *buf)
+static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
{
return ((struct amdgpu_winsys_bo*)buf)->va;
}
void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
{
- ws->base.buffer_get_cs_handle = amdgpu_get_cs_handle;
ws->base.buffer_set_tiling = amdgpu_bo_set_tiling;
ws->base.buffer_get_tiling = amdgpu_bo_get_tiling;
ws->base.buffer_map = amdgpu_bo_map;
if (!cs->big_ib_buffer ||
cs->used_ib_space + ib_size > cs->big_ib_buffer->size) {
struct radeon_winsys *ws = &cs->ctx->ws->base;
- struct radeon_winsys_cs_handle *winsys_bo;
pb_reference(&cs->big_ib_buffer, NULL);
cs->big_ib_winsys_buffer = NULL;
if (!cs->big_ib_buffer)
return false;
- winsys_bo = ws->buffer_get_cs_handle(cs->big_ib_buffer);
-
- cs->ib_mapped = ws->buffer_map(winsys_bo, NULL, PIPE_TRANSFER_WRITE);
+ cs->ib_mapped = ws->buffer_map(cs->big_ib_buffer, NULL,
+ PIPE_TRANSFER_WRITE);
if (!cs->ib_mapped) {
pb_reference(&cs->big_ib_buffer, NULL);
return false;
}
- cs->big_ib_winsys_buffer = (struct amdgpu_winsys_bo*)winsys_bo;
+ cs->big_ib_winsys_buffer = (struct amdgpu_winsys_bo*)cs->big_ib_buffer;
}
cs->ib.ib_mc_address = cs->big_ib_winsys_buffer->va + cs->used_ib_space;
void (*flush)(void *ctx, unsigned flags,
struct pipe_fence_handle **fence),
void *flush_ctx,
- struct radeon_winsys_cs_handle *trace_buf)
+ struct pb_buffer *trace_buf)
{
struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
struct amdgpu_cs *cs;
}
static unsigned amdgpu_cs_add_buffer(struct radeon_winsys_cs *rcs,
- struct radeon_winsys_cs_handle *buf,
+ struct pb_buffer *buf,
enum radeon_bo_usage usage,
enum radeon_bo_domain domains,
enum radeon_bo_priority priority)
}
static int amdgpu_cs_lookup_buffer(struct radeon_winsys_cs *rcs,
- struct radeon_winsys_cs_handle *buf)
+ struct pb_buffer *buf)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
}
static boolean amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
- struct radeon_winsys_cs_handle *_buf,
+ struct pb_buffer *_buf,
enum radeon_bo_usage usage)
{
struct amdgpu_cs *cs = amdgpu_cs(rcs);
}
static enum radeon_bo_domain radeon_bo_get_initial_domain(
- struct radeon_winsys_cs_handle *buf)
+ struct pb_buffer *buf)
{
struct radeon_bo *bo = (struct radeon_bo*)buf;
struct drm_radeon_gem_op args;
return bo->ptr;
}
-static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
+static void *radeon_bo_map(struct pb_buffer *buf,
struct radeon_winsys_cs *rcs,
enum pipe_transfer_usage usage)
{
return radeon_bo_do_map(bo);
}
-static void radeon_bo_unmap(struct radeon_winsys_cs_handle *_buf)
+static void radeon_bo_unmap(struct pb_buffer *_buf)
{
struct radeon_bo *bo = (struct radeon_bo*)_buf;
sizeof(args));
}
-static struct radeon_winsys_cs_handle *radeon_drm_get_cs_handle(struct pb_buffer *_buf)
-{
- /* return radeon_bo. */
- return (struct radeon_winsys_cs_handle*)radeon_bo(_buf);
-}
-
static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
unsigned size,
return TRUE;
}
-static uint64_t radeon_winsys_bo_va(struct radeon_winsys_cs_handle *buf)
+static uint64_t radeon_winsys_bo_va(struct pb_buffer *buf)
{
return ((struct radeon_bo*)buf)->va;
}
void radeon_drm_bo_init_functions(struct radeon_drm_winsys *ws)
{
- ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
ws->base.buffer_set_tiling = radeon_bo_set_tiling;
ws->base.buffer_get_tiling = radeon_bo_get_tiling;
ws->base.buffer_map = radeon_bo_map;
void (*flush)(void *ctx, unsigned flags,
struct pipe_fence_handle **fence),
void *flush_ctx,
- struct radeon_winsys_cs_handle *trace_buf)
+ struct pb_buffer *trace_buf)
{
struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)ctx;
struct radeon_drm_cs *cs;
}
static unsigned radeon_drm_cs_add_buffer(struct radeon_winsys_cs *rcs,
- struct radeon_winsys_cs_handle *buf,
+ struct pb_buffer *buf,
enum radeon_bo_usage usage,
enum radeon_bo_domain domains,
enum radeon_bo_priority priority)
}
static int radeon_drm_cs_lookup_buffer(struct radeon_winsys_cs *rcs,
- struct radeon_winsys_cs_handle *buf)
+ struct pb_buffer *buf)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
}
static boolean radeon_bo_is_referenced(struct radeon_winsys_cs *rcs,
- struct radeon_winsys_cs_handle *_buf,
+ struct pb_buffer *_buf,
enum radeon_bo_usage usage)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
fence = cs->ws->base.buffer_create(&cs->ws->base, 1, 1, TRUE,
RADEON_DOMAIN_GTT, 0);
/* Add the fence as a dummy relocation. */
- cs->ws->base.cs_add_buffer(rcs, cs->ws->base.buffer_get_cs_handle(fence),
+ cs->ws->base.cs_add_buffer(rcs, fence,
RADEON_USAGE_READWRITE, RADEON_DOMAIN_GTT,
RADEON_PRIO_FENCE);
return (struct pipe_fence_handle*)fence;