buffer_unmap is currently a no-op on radeon and done correctly on amdgpu.
I plan to fix it for radeon, but before that, all occurrences of buffer_unmap
that can negatively affect performance in the future must be removed.
There are two reasons for removing buffer_unmap calls:
- buffer_map is likely to be called again, so we don't want to unmap yet.
- The buffer is being released, which automatically unmaps it.
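
For context, here is a minimal sketch of the mapping behavior both reasons
rely on. The names (ws_buffer, ws_buffer_map, ws_buffer_destroy) are
hypothetical stand-ins for a winsys buffer, not the real radeon_winsys API,
and malloc/free stand in for the actual mmap/munmap of GPU memory:

    #include <stdlib.h>

    struct ws_buffer {
        void *cpu_ptr;   /* cached CPU mapping; NULL while unmapped */
        size_t size;
    };

    /* Mapping an already-mapped buffer just returns the cached pointer,
     * which is why unmapping "too early" only forces a needless remap
     * the next time buffer_map is called. */
    void *ws_buffer_map(struct ws_buffer *buf)
    {
        if (!buf->cpu_ptr)
            buf->cpu_ptr = malloc(buf->size); /* stands in for the real mmap */
        return buf->cpu_ptr;
    }

    /* Releasing the buffer drops the mapping implicitly, so callers on
     * the release path never need an explicit buffer_unmap. */
    void ws_buffer_destroy(struct ws_buffer *buf)
    {
        free(buf->cpu_ptr); /* implicit unmap on release */
        free(buf);
    }
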
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
        map++;
    }
-    r300->rws->buffer_unmap(q->cs_buf);
-
    if (q->type == PIPE_QUERY_OCCLUSION_PREDICATE) {
        vresult->b = temp != 0;
    } else {
    struct r300_resource *tex = r300_resource(transfer->resource);
    if (trans->linear_texture) {
-        rws->buffer_unmap(trans->linear_texture->cs_buf);
-
        if (transfer->usage & PIPE_TRANSFER_WRITE) {
            r300_copy_into_tiled_texture(ctx, trans);
        }
        pipe_resource_reference(
            (struct pipe_resource**)&trans->linear_texture, NULL);
-    } else {
-        rws->buffer_unmap(tex->cs_buf);
    }
    FREE(transfer);
}
        data += info.indirect_offset / sizeof(unsigned);
        start = data[2] * ib.index_size;
        count = data[0];
-        rctx->b.ws->buffer_unmap(indirect_resource->cs_buf);
    }
    else {
        start = 0;
    pipe_mutex_destroy(rscreen->aux_context_lock);
    rscreen->aux_context->destroy(rscreen->aux_context);
-    if (rscreen->trace_bo) {
-        rscreen->ws->buffer_unmap(rscreen->trace_bo->cs_buf);
+    if (rscreen->trace_bo)
        pipe_resource_reference((struct pipe_resource**)&rscreen->trace_bo, NULL);
-    }
    rscreen->ws->destroy(rscreen->ws);
    FREE(rscreen);
}
            results += 4 * ctx->max_db;
        }
-        ctx->ws->buffer_unmap(buf->cs_buf);
        break;
    case PIPE_QUERY_TIME_ELAPSED:
    case PIPE_QUERY_TIMESTAMP:
    case PIPE_QUERY_PIPELINE_STATISTICS:
        results = r600_buffer_map_sync_with_rings(ctx, buf, PIPE_TRANSFER_WRITE);
        memset(results, 0, buf_size);
-        ctx->ws->buffer_unmap(buf->cs_buf);
        break;
    default:
        assert(0);
        assert(0);
    }
-    ctx->ws->buffer_unmap(qbuf->buf->cs_buf);
    return TRUE;
}
    results = r600_buffer_map_sync_with_rings(ctx, buffer, PIPE_TRANSFER_WRITE);
    if (results) {
        memset(results, 0, ctx->max_db * 4 * 4);
-        ctx->ws->buffer_unmap(buffer->cs_buf);
        /* emit EVENT_WRITE for ZPASS_DONE */
        radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
            if (results[i*4 + 1])
                mask |= (1<<i);
        }
-        ctx->ws->buffer_unmap(buffer->cs_buf);
    }
}
                                         struct pipe_transfer* transfer)
{
    struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
-    struct r600_common_context *rctx = (struct r600_common_context*)ctx;
-    struct radeon_winsys_cs_handle *buf;
    struct pipe_resource *texture = transfer->resource;
    struct r600_texture *rtex = (struct r600_texture*)texture;
-    if (rtransfer->staging) {
-        buf = rtransfer->staging->cs_buf;
-    } else {
-        buf = r600_resource(transfer->resource)->cs_buf;
-    }
-    rctx->ws->buffer_unmap(buf);
-
    if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
        if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) {
            ctx->resource_copy_region(ctx, texture, transfer->level,
                    kernel_args[i]);
    }
-    sctx->b.ws->buffer_unmap(input_buffer->cs_buf);
-
    kernel_args_va = input_buffer->gpu_address;
    kernel_args_va += kernel_args_offset;