return vresult->b;
}
- map = r300->rws->buffer_map(q->buf, r300->cs,
+ map = r300->rws->buffer_map(q->cs_buf, r300->cs,
PIPE_TRANSFER_READ |
(!wait ? PIPE_TRANSFER_DONTBLOCK : 0));
if (!map)
    return FALSE;
}
- r300->rws->buffer_unmap(q->buf);
+ r300->rws->buffer_unmap(q->cs_buf);
if (q->type == PIPE_QUERY_OCCLUSION_PREDICATE) {
vresult->b = temp != 0;
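All of the hunks in this series make the same substitution, so it is worth stating once: map/unmap (and everything else the CS layer consumes) now takes the winsys CS handle, while the pb_buffer pointer is kept for size and lifetime management (see the surviving `t->filled_size->buf->size` use further down). A minimal sketch of the assumed resource layout, inferred from the call sites in this patch rather than copied from a header:

    /* Sketch only -- field names inferred from the call sites above,
     * not a complete definition of the real resource struct. */
    struct resource_sketch {
        struct pb_buffer *buf;                  /* size, lifetime */
        struct radeon_winsys_cs_handle *cs_buf; /* map/unmap, CS relocs */
    };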
/* Map the buffer. */
if (!map[vbi]) {
map[vbi] = (uint32_t*)r300->rws->buffer_map(
- r300_resource(vbuf->buffer)->buf,
+ r300_resource(vbuf->buffer)->cs_buf,
r300->cs, PIPE_TRANSFER_READ | PIPE_TRANSFER_UNSYNCHRONIZED);
map[vbi] += (vbuf->buffer_offset / 4) + stride[i] * info->start;
}
vbi = r300->velems->velem[i].vertex_buffer_index;
if (map[vbi]) {
- r300->rws->buffer_unmap(r300_resource(r300->vertex_buffer[vbi].buffer)->buf);
+ r300->rws->buffer_unmap(r300_resource(r300->vertex_buffer[vbi].buffer)->cs_buf);
map[vbi] = NULL;
}
}
if (indexSize == 2 && (start & 1) &&
!indexBuffer->user_ptr) {
/* If we got here, then orgIndexBuffer == indexBuffer. */
- uint16_t *ptr = r300->rws->buffer_map(r300_resource(orgIndexBuffer)->buf,
+ uint16_t *ptr = r300->rws->buffer_map(r300_resource(orgIndexBuffer)->cs_buf,
r300->cs,
PIPE_TRANSFER_READ |
PIPE_TRANSFER_UNSYNCHRONIZED);
r300_upload_index_buffer(r300, &indexBuffer, indexSize, &start,
count, (uint8_t*)ptr);
}
- r300->rws->buffer_unmap(r300_resource(orgIndexBuffer)->buf);
+ r300->rws->buffer_unmap(r300_resource(orgIndexBuffer)->cs_buf);
} else {
if (indexBuffer->user_ptr)
r300_upload_index_buffer(r300, &indexBuffer, indexSize,
                         &start, count, indexBuffer->user_ptr);
usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
}
- map = rws->buffer_map(rbuf->buf, r300->cs, usage);
+ map = rws->buffer_map(rbuf->cs_buf, r300->cs, usage);
if (map == NULL)
return NULL;
struct radeon_winsys *rws = r300screen->rws;
struct r300_resource *rbuf = r300_resource(transfer->resource);
- if (rbuf->buf) {
- rws->buffer_unmap(rbuf->buf);
+ if (rbuf->cs_buf) {
+ rws->buffer_unmap(rbuf->cs_buf);
}
}
}
assert(rbuf->b.b.user_ptr == NULL);
- map = rws->buffer_map(rbuf->buf, r300->cs,
+ map = rws->buffer_map(rbuf->cs_buf, r300->cs,
PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE | usage);
memcpy(map + box->x, data, box->width);
- rws->buffer_unmap(rbuf->buf);
+ rws->buffer_unmap(rbuf->cs_buf);
}
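The hunk above is the buffer transfer_inline_write path (map with DISCARD_RANGE, memcpy, unmap). For orientation, a caller-side sketch of what reaches it through the gallium API; `pipe` and `resource` are assumed to be in scope and the values are hypothetical:

    /* Hypothetical caller: a small buffer update that lands in the
     * DISCARD_RANGE map/memcpy/unmap sequence above. */
    uint32_t data[4] = {0, 1, 2, 3};
    struct pipe_box box;
    u_box_1d(0, sizeof(data), &box); /* offset 0, width = 16 bytes */
    pipe->transfer_inline_write(pipe, resource, 0, PIPE_TRANSFER_WRITE,
                                &box, data, 0, 0);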
static const struct u_resource_vtbl r300_buffer_vtbl =
if (r300transfer->linear_texture) {
/* The detiled texture is of the same size as the region being mapped
* (no offset needed). */
- return r300->rws->buffer_map(r300transfer->linear_texture->buf,
+ return r300->rws->buffer_map(r300transfer->linear_texture->cs_buf,
r300->cs, transfer->usage);
} else {
/* Tiling is disabled. */
- map = r300->rws->buffer_map(tex->buf, r300->cs, transfer->usage);
+ map = r300->rws->buffer_map(tex->cs_buf, r300->cs, transfer->usage);
if (!map) {
    return NULL;
}
struct r300_resource *tex = r300_resource(transfer->resource);
if (r300transfer->linear_texture) {
- rws->buffer_unmap(r300transfer->linear_texture->buf);
+ rws->buffer_unmap(r300transfer->linear_texture->cs_buf);
} else {
- rws->buffer_unmap(tex->buf);
+ rws->buffer_unmap(tex->cs_buf);
}
}
return -ENOMEM;
}
- bytecode = rctx->ws->buffer_map(ve->fetch_shader->buf, rctx->cs, PIPE_TRANSFER_WRITE);
+ bytecode = rctx->ws->buffer_map(ve->fetch_shader->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
if (bytecode == NULL) {
r600_bytecode_clear(&bc);
pipe_resource_reference((struct pipe_resource**)&ve->fetch_shader, NULL);
memcpy(bytecode, bc.bytecode, ve->fs_size);
}
- rctx->ws->buffer_unmap(ve->fetch_shader->buf);
+ rctx->ws->buffer_unmap(ve->fetch_shader->cs_buf);
r600_bytecode_clear(&bc);
if (rctx->chip_class >= EVERGREEN)
if (rbuffer->b.b.user_ptr)
return rbuffer->b.b.user_ptr + transfer->box.x;
- data = rctx->ws->buffer_map(rbuffer->buf, rctx->cs, transfer->usage);
+ data = rctx->ws->buffer_map(rbuffer->cs_buf, rctx->cs, transfer->usage);
if (!data)
return NULL;
if (rbuffer->b.b.user_ptr)
return;
- rctx->ws->buffer_unmap(rbuffer->buf);
+ rctx->ws->buffer_unmap(rbuffer->cs_buf);
}
static void r600_transfer_destroy(struct pipe_context *ctx,
va = r600_resource_va(&ctx->screen->screen, (void*)buffer);
/* initialize buffer with zeroes */
- results = ctx->ws->buffer_map(buffer->buf, ctx->cs, PIPE_TRANSFER_WRITE);
+ results = ctx->ws->buffer_map(buffer->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
if (results) {
memset(results, 0, ctx->max_db * 4 * 4);
- ctx->ws->buffer_unmap(buffer->buf);
+ ctx->ws->buffer_unmap(buffer->cs_buf);
/* emit EVENT_WRITE for ZPASS_DONE */
cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, buffer, RADEON_USAGE_WRITE);
/* analyze results */
- results = ctx->ws->buffer_map(buffer->buf, ctx->cs, PIPE_TRANSFER_READ);
+ results = ctx->ws->buffer_map(buffer->cs_buf, ctx->cs, PIPE_TRANSFER_READ);
if (results) {
for(i = 0; i < ctx->max_db; i++) {
/* at least highest bit will be set if backend is used */
if (results[i*4 + 1])
mask |= (1<<i);
}
- ctx->ws->buffer_unmap(buffer->buf);
+ ctx->ws->buffer_unmap(buffer->cs_buf);
}
}
R600_ERR("r600: failed to create bo for fence objects\n");
goto out;
}
- rscreen->fences.data = rctx->ws->buffer_map(rscreen->fences.bo->buf,
+ rscreen->fences.data = rctx->ws->buffer_map(rscreen->fences.bo->cs_buf,
rctx->cs,
PIPE_TRANSFER_READ_WRITE);
}
FREE(entry);
}
- rscreen->ws->buffer_unmap(rscreen->fences.bo->buf);
+ rscreen->ws->buffer_unmap(rscreen->fences.bo->cs_buf);
pipe_resource_reference((struct pipe_resource**)&rscreen->fences.bo, NULL);
}
pipe_mutex_destroy(rscreen->fences.mutex);
switch (type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
- results = ctx->ws->buffer_map(buf->buf, ctx->cs, PIPE_TRANSFER_WRITE);
+ results = ctx->ws->buffer_map(buf->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
memset(results, 0, buf_size);
/* Set top bits for unused backends. */
}
results += 4 * ctx->max_db;
}
- ctx->ws->buffer_unmap(buf->buf);
+ ctx->ws->buffer_unmap(buf->cs_buf);
break;
case PIPE_QUERY_TIME_ELAPSED:
break;
case PIPE_QUERY_PRIMITIVES_GENERATED:
case PIPE_QUERY_SO_STATISTICS:
case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
- results = ctx->ws->buffer_map(buf->buf, ctx->cs, PIPE_TRANSFER_WRITE);
+ results = ctx->ws->buffer_map(buf->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
memset(results, 0, buf_size);
- ctx->ws->buffer_unmap(buf->buf);
+ ctx->ws->buffer_unmap(buf->cs_buf);
break;
default:
assert(0);
unsigned results_base = 0;
char *map;
- map = ctx->ws->buffer_map(qbuf->buf->buf, ctx->cs,
+ map = ctx->ws->buffer_map(qbuf->buf->cs_buf, ctx->cs,
PIPE_TRANSFER_READ |
(wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
if (!map)
    return FALSE;
assert(0);
}
- ctx->ws->buffer_unmap(qbuf->buf->buf);
+ ctx->ws->buffer_unmap(qbuf->buf->cs_buf);
return TRUE;
}
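The query paths above all follow the same non-blocking readback idiom with the new signature: buffer_map returns NULL rather than stalling when PIPE_TRANSFER_DONTBLOCK is set and the BO is still busy. A self-contained sketch of that idiom; this helper is hypothetical, not code from this patch:

    /* Hypothetical helper illustrating the idiom; res->cs_buf is the
     * winsys CS handle, as in the hunks above. */
    static boolean read_first_dword(struct r600_context *ctx,
                                    struct r600_resource *res,
                                    boolean wait, uint32_t *out)
    {
        uint32_t *map = ctx->ws->buffer_map(res->cs_buf, ctx->cs,
                                            PIPE_TRANSFER_READ |
                                            (wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
        if (!map)
            return FALSE; /* still busy; caller retries later */
        *out = map[0];
        ctx->ws->buffer_unmap(res->cs_buf);
        return TRUE;
    }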
if (shader->bo == NULL) {
return -ENOMEM;
}
- ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->buf, rctx->cs, PIPE_TRANSFER_WRITE);
+ ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
if (R600_BIG_ENDIAN) {
for (i = 0; i < rshader->bc.ndw; ++i) {
ptr[i] = bswap_32(rshader->bc.bytecode[i]);
}
} else {
memcpy(ptr, rshader->bc.bytecode, rshader->bc.ndw * sizeof(*ptr));
}
- rctx->ws->buffer_unmap(shader->bo->buf);
+ rctx->ws->buffer_unmap(shader->bo->cs_buf);
}
/* build state */
switch (rshader->processor_type) {
t->filled_size = (struct r600_resource*)
pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_STATIC, 4);
- ptr = rctx->ws->buffer_map(t->filled_size->buf, rctx->cs, PIPE_TRANSFER_WRITE);
+ ptr = rctx->ws->buffer_map(t->filled_size->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
memset(ptr, 0, t->filled_size->buf->size);
- rctx->ws->buffer_unmap(t->filled_size->buf);
+ rctx->ws->buffer_unmap(t->filled_size->cs_buf);
return &t->b;
}
{
struct r600_context *rctx = (struct r600_context *)ctx;
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
- struct pb_buffer *buf;
+ struct radeon_winsys_cs_handle *buf;
enum pipe_format format = transfer->resource->format;
unsigned offset = 0;
char *map;
if (rtransfer->staging) {
- buf = ((struct r600_resource *)rtransfer->staging)->buf;
+ buf = ((struct r600_resource *)rtransfer->staging)->cs_buf;
} else {
struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;
if (rtex->flushed_depth_texture)
- buf = ((struct r600_resource *)rtex->flushed_depth_texture)->buf;
+ buf = ((struct r600_resource *)rtex->flushed_depth_texture)->cs_buf;
else
- buf = ((struct r600_resource *)transfer->resource)->buf;
+ buf = ((struct r600_resource *)transfer->resource)->cs_buf;
offset = rtransfer->offset +
transfer->box.y / util_format_get_blockheight(format) * transfer->stride +
{
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
struct r600_context *rctx = (struct r600_context*)ctx;
- struct pb_buffer *buf;
+ struct radeon_winsys_cs_handle *buf;
if (rtransfer->staging) {
- buf = ((struct r600_resource *)rtransfer->staging)->buf;
+ buf = ((struct r600_resource *)rtransfer->staging)->cs_buf;
} else {
struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;
if (rtex->flushed_depth_texture) {
- buf = ((struct r600_resource *)rtex->flushed_depth_texture)->buf;
+ buf = ((struct r600_resource *)rtex->flushed_depth_texture)->cs_buf;
} else {
- buf = ((struct r600_resource *)transfer->resource)->buf;
+ buf = ((struct r600_resource *)transfer->resource)->cs_buf;
}
}
rctx->ws->buffer_unmap(buf);
bo = (struct r600_resource*)
pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE,
count * sizeof(resource[0]->state));
- ptr = rctx->ws->buffer_map(bo->buf, rctx->cs, PIPE_TRANSFER_WRITE);
+ ptr = rctx->ws->buffer_map(bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
for (i = 0; i < count; i++, ptr += sizeof(resource[0]->state)) {
pipe_sampler_view_reference(
    (struct pipe_sampler_view **)&rctx->ps_samplers.views[i], views[i]);
memset(ptr, 0, sizeof(resource[0]->state));
}
- rctx->ws->buffer_unmap(bo->buf);
+ rctx->ws->buffer_unmap(bo->cs_buf);
for (i = count; i < NUM_TEX_UNITS; i++) {
if (rctx->ps_samplers.views[i])
bo = (struct r600_resource*)
pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_IMMUTABLE,
count * sizeof(rstates[0]->val));
- ptr = rctx->ws->buffer_map(bo->buf, rctx->cs, PIPE_TRANSFER_WRITE);
+ ptr = rctx->ws->buffer_map(bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
for (i = 0; i < count; i++, ptr += sizeof(rstates[0]->val)) {
memcpy(ptr, rstates[i]->val, sizeof(rstates[0]->val));
}
- rctx->ws->buffer_unmap(bo->buf);
+ rctx->ws->buffer_unmap(bo->cs_buf);
va = r600_resource_va(ctx->screen, (void *)bo);
r600_pipe_state_add_reg(rstate, R_00B038_SPI_SHADER_USER_DATA_PS_2, va, bo, RADEON_USAGE_READ);
if (rbuffer->b.b.user_ptr)
return (uint8_t*)rbuffer->b.b.user_ptr + transfer->box.x;
- data = rctx->ws->buffer_map(rbuffer->buf, rctx->cs, transfer->usage);
+ data = rctx->ws->buffer_map(rbuffer->cs_buf, rctx->cs, transfer->usage);
if (!data)
return NULL;
if (rbuffer->b.b.user_ptr)
return;
- rctx->ws->buffer_unmap(rbuffer->buf);
+ rctx->ws->buffer_unmap(rbuffer->cs_buf);
}
static void r600_buffer_transfer_flush_region(struct pipe_context *pipe,
assert(rbuffer->b.b.user_ptr == NULL);
- map = rctx->ws->buffer_map(rbuffer->buf, rctx->cs,
+ map = rctx->ws->buffer_map(rbuffer->cs_buf, rctx->cs,
PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD_RANGE | usage);
memcpy(map + box->x, data, box->width);
- rctx->ws->buffer_unmap(rbuffer->buf);
+ rctx->ws->buffer_unmap(rbuffer->cs_buf);
}
static const struct u_resource_vtbl r600_buffer_vtbl =
goto err;
/* initialize buffer with zeroes */
- results = ctx->ws->buffer_map(buffer->buf, ctx->cs, PIPE_TRANSFER_WRITE);
+ results = ctx->ws->buffer_map(buffer->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
if (results) {
uint64_t va = 0;
memset(results, 0, ctx->max_db * 4 * 4);
- ctx->ws->buffer_unmap(buffer->buf);
+ ctx->ws->buffer_unmap(buffer->cs_buf);
/* emit EVENT_WRITE for ZPASS_DONE */
va = r600_resource_va(&ctx->screen->screen, (void *)buffer);
cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, buffer, RADEON_USAGE_WRITE);
/* analyze results */
- results = ctx->ws->buffer_map(buffer->buf, ctx->cs, PIPE_TRANSFER_READ);
+ results = ctx->ws->buffer_map(buffer->cs_buf, ctx->cs, PIPE_TRANSFER_READ);
if (results) {
for(i = 0; i < ctx->max_db; i++) {
/* at least highest bit will be set if backend is used */
if (results[i*4 + 1])
mask |= (1<<i);
}
- ctx->ws->buffer_unmap(buffer->buf);
+ ctx->ws->buffer_unmap(buffer->cs_buf);
}
}
unsigned results_base = query->results_start;
char *map;
- map = ctx->ws->buffer_map(query->buffer->buf, ctx->cs,
+ map = ctx->ws->buffer_map(query->buffer->cs_buf, ctx->cs,
PIPE_TRANSFER_READ |
(wait ? 0 : PIPE_TRANSFER_DONTBLOCK));
if (!map)
    return FALSE;
}
query->results_start = query->results_end;
- ctx->ws->buffer_unmap(query->buffer->buf);
+ ctx->ws->buffer_unmap(query->buffer->cs_buf);
return TRUE;
}
switch (query->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
case PIPE_QUERY_OCCLUSION_PREDICATE:
- results = ctx->ws->buffer_map(query->buffer->buf, ctx->cs, PIPE_TRANSFER_WRITE);
+ results = ctx->ws->buffer_map(query->buffer->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
if (results) {
results = (uint32_t*)((char*)results + query->results_end);
memset(results, 0, query->result_size);
results[(i * 4)+3] = 0x80000000;
}
}
- ctx->ws->buffer_unmap(query->buffer->buf);
+ ctx->ws->buffer_unmap(query->buffer->cs_buf);
}
break;
case PIPE_QUERY_TIME_ELAPSED:
case PIPE_QUERY_PRIMITIVES_GENERATED:
case PIPE_QUERY_SO_STATISTICS:
case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
- results = ctx->ws->buffer_map(query->buffer->buf, ctx->cs, PIPE_TRANSFER_WRITE);
+ results = ctx->ws->buffer_map(query->buffer->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
results = (uint32_t*)((char*)results + query->results_end);
memset(results, 0, query->result_size);
- ctx->ws->buffer_unmap(query->buffer->buf);
+ ctx->ws->buffer_unmap(query->buffer->cs_buf);
break;
default:
assert(0);
if (!t[i])
continue;
- uint32_t *ptr = ctx->ws->buffer_map(t[i]->filled_size->buf, ctx->cs, RADEON_USAGE_READ);
+ uint32_t *ptr = ctx->ws->buffer_map(t[i]->filled_size->cs_buf, ctx->cs, PIPE_TRANSFER_READ);
printf("FILLED_SIZE%i: %u\n", i, *ptr);
- ctx->ws->buffer_unmap(t[i]->filled_size->buf);
+ ctx->ws->buffer_unmap(t[i]->filled_size->cs_buf);
}
}
t->filled_size = (struct r600_resource*)
pipe_buffer_create(ctx->screen, PIPE_BIND_CUSTOM, PIPE_USAGE_STATIC, 4);
- ptr = rctx->ws->buffer_map(t->filled_size->buf, rctx->cs, PIPE_TRANSFER_WRITE);
+ ptr = rctx->ws->buffer_map(t->filled_size->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
memset(ptr, 0, t->filled_size->buf->size);
- rctx->ws->buffer_unmap(t->filled_size->buf);
+ rctx->ws->buffer_unmap(t->filled_size->cs_buf);
return &t->b;
}
if (t_list_buffer == NULL)
return;
- ptr = (uint32_t*)rctx->ws->buffer_map(t_list_buffer->buf,
+ ptr = (uint32_t*)rctx->ws->buffer_map(t_list_buffer->cs_buf,
rctx->cs,
PIPE_TRANSFER_WRITE);
{
struct r600_context *rctx = (struct r600_context *)ctx;
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
- struct pb_buffer *buf;
+ struct radeon_winsys_cs_handle *buf;
enum pipe_format format = transfer->resource->format;
unsigned offset = 0;
char *map;
if (rtransfer->staging_texture) {
- buf = ((struct r600_resource *)rtransfer->staging_texture)->buf;
+ buf = ((struct r600_resource *)rtransfer->staging_texture)->cs_buf;
} else {
struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;
if (rtex->flushed_depth_texture)
- buf = ((struct r600_resource *)rtex->flushed_depth_texture)->buf;
+ buf = ((struct r600_resource *)rtex->flushed_depth_texture)->cs_buf;
else
- buf = ((struct r600_resource *)transfer->resource)->buf;
+ buf = ((struct r600_resource *)transfer->resource)->cs_buf;
offset = rtransfer->offset +
transfer->box.y / util_format_get_blockheight(format) * transfer->stride +
{
struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
struct r600_context *rctx = (struct r600_context*)ctx;
- struct pb_buffer *buf;
+ struct radeon_winsys_cs_handle *buf;
if (rtransfer->staging_texture) {
- buf = ((struct r600_resource *)rtransfer->staging_texture)->buf;
+ buf = ((struct r600_resource *)rtransfer->staging_texture)->cs_buf;
} else {
struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource;
if (rtex->flushed_depth_texture) {
- buf = ((struct r600_resource *)rtex->flushed_depth_texture)->buf;
+ buf = ((struct r600_resource *)rtex->flushed_depth_texture)->cs_buf;
} else {
- buf = ((struct r600_resource *)transfer->resource)->buf;
+ buf = ((struct r600_resource *)transfer->resource)->cs_buf;
}
}
rctx->ws->buffer_unmap(buf);
R600_ERR("r600: failed to create bo for fence objects\n");
goto out;
}
- rscreen->fences.data = rctx->ws->buffer_map(rscreen->fences.bo->buf,
+ rscreen->fences.data = rctx->ws->buffer_map(rscreen->fences.bo->cs_buf,
rctx->cs,
PIPE_TRANSFER_READ_WRITE);
}
FREE(entry);
}
- rscreen->ws->buffer_unmap(rscreen->fences.bo->buf);
+ rscreen->ws->buffer_unmap(rscreen->fences.bo->cs_buf);
pipe_resource_reference((struct pipe_resource**)&rscreen->fences.bo, NULL);
}
pipe_mutex_destroy(rscreen->fences.mutex);
if (shader->bo == NULL) {
return -ENOMEM;
}
- ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->buf, rctx->cs, PIPE_TRANSFER_WRITE);
+ ptr = (uint32_t*)rctx->ws->buffer_map(shader->bo->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
if (0 /*R600_BIG_ENDIAN*/) {
for (i = 0; i < (inst_byte_count-12)/4; ++i) {
ptr[i] = util_bswap32(*(uint32_t*)(inst_bytes+12 + i*4));
}
} else {
memcpy(ptr, inst_bytes + 12, inst_byte_count - 12);
}
- rctx->ws->buffer_unmap(shader->bo->buf);
+ rctx->ws->buffer_unmap(shader->bo->cs_buf);
}
free(inst_bytes);
FREE(bo);
}
-static unsigned get_pb_usage_from_transfer_flags(enum pipe_transfer_usage usage)
-{
- unsigned res = 0;
-
- if (usage & PIPE_TRANSFER_WRITE)
- res |= PB_USAGE_CPU_WRITE;
-
- if (usage & PIPE_TRANSFER_DONTBLOCK)
- res |= PB_USAGE_DONTBLOCK;
-
- if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
- res |= PB_USAGE_UNSYNCHRONIZED;
-
- return res;
-}
-
-static void *radeon_bo_map_internal(struct pb_buffer *_buf,
- unsigned flags, void *flush_ctx)
+static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
+ struct radeon_winsys_cs *rcs,
+ enum pipe_transfer_usage usage)
{
- struct radeon_bo *bo = radeon_bo(_buf);
- struct radeon_drm_cs *cs = flush_ctx;
- struct drm_radeon_gem_mmap args;
+ struct radeon_bo *bo = (struct radeon_bo*)buf;
+ struct radeon_drm_cs *cs = (struct radeon_drm_cs*)rcs;
+ struct drm_radeon_gem_mmap args = {0};
void *ptr;
- memset(&args, 0, sizeof(args));
-
/* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
- if (!(flags & PB_USAGE_UNSYNCHRONIZED)) {
+ if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
/* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
- if (flags & PB_USAGE_DONTBLOCK) {
- if (!(flags & PB_USAGE_CPU_WRITE)) {
+ if (usage & PIPE_TRANSFER_DONTBLOCK) {
+ if (!(usage & PIPE_TRANSFER_WRITE)) {
/* Mapping for read.
*
* Since we are mapping for read, we don't need to wait
}
}
} else {
- if (!(flags & PB_USAGE_CPU_WRITE)) {
+ if (!(usage & PIPE_TRANSFER_WRITE)) {
/* Mapping for read.
*
* Since we are mapping for read, we don't need to wait
return bo->ptr;
}
-static void radeon_bo_unmap_internal(struct pb_buffer *_buf)
+static void radeon_bo_unmap(struct radeon_winsys_cs_handle *_buf)
{
/* NOP */
}
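unmap can be a NOP here because this winsys caches the CPU mapping on the BO (the bo->ptr returned above) and only munmaps when the BO is destroyed; repeated maps reuse the cached pointer. A sketch of that idiom, with locking and the real mmap path elided and the helper name hypothetical:

    /* Idiom sketch, not the real function: the first map populates the
     * cache, later maps reuse it, and radeon_bo_destroy() does the
     * actual munmap. */
    static void *bo_map_cached(struct radeon_bo *bo)
    {
        if (!bo->ptr)
            bo->ptr = mmap_gem_bo(bo); /* hypothetical helper */
        return bo->ptr;
    }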
const struct pb_vtbl radeon_bo_vtbl = {
radeon_bo_destroy,
- radeon_bo_map_internal,
- radeon_bo_unmap_internal,
+ NULL, /* never called */
+ NULL, /* never called */
radeon_bo_validate,
radeon_bo_fence,
radeon_bo_get_base_buffer,
return &mgr->base;
}
-static void *radeon_bo_map(struct pb_buffer *buf,
- struct radeon_winsys_cs *cs,
- enum pipe_transfer_usage usage)
-{
- return pb_map(buf, get_pb_usage_from_transfer_flags(usage), cs);
-}
-
static unsigned eg_tile_split(unsigned tile_split)
{
switch (tile_split) {
ws->base.buffer_set_tiling = radeon_bo_set_tiling;
ws->base.buffer_get_tiling = radeon_bo_get_tiling;
ws->base.buffer_map = radeon_bo_map;
- ws->base.buffer_unmap = pb_unmap;
+ ws->base.buffer_unmap = radeon_bo_unmap;
ws->base.buffer_wait = radeon_bo_wait;
ws->base.buffer_is_busy = radeon_bo_is_busy;
ws->base.buffer_create = radeon_winsys_bo_create;
* \param usage A bitmask of the PIPE_TRANSFER_* flags.
* \return The pointer at the beginning of the buffer.
*/
- void *(*buffer_map)(struct pb_buffer *buf,
+ void *(*buffer_map)(struct radeon_winsys_cs_handle *buf,
struct radeon_winsys_cs *cs,
enum pipe_transfer_usage usage);
*
* \param buf A winsys buffer object to unmap.
*/
- void (*buffer_unmap)(struct pb_buffer *buf);
+ void (*buffer_unmap)(struct radeon_winsys_cs_handle *buf);
/**
* Return TRUE if a buffer object is being used by the GPU.
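Taken together, driver-side use of the updated interface looks like this; a sketch with `ws`, `cs`, and `res` assumed in scope and the size hypothetical:

    /* Map for write through the new entry points, zero 256 bytes,
     * unmap. A NULL return means the map failed (e.g. DONTBLOCK was
     * set and the buffer is still busy). */
    void *map = ws->buffer_map(res->cs_buf, cs, PIPE_TRANSFER_WRITE);
    if (map) {
        memset(map, 0, 256);
        ws->buffer_unmap(res->cs_buf);
    }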