X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fr600%2Fr600_buffer.c;h=0b0ac3460e1bfb3fec66028a2fa3d7ea9e2fe5d9;hb=36ea744f58e5b436c45ed857bd92bbb472e641d5;hp=a17c54d6eeb8958635fb0b4251a1ce1419541a21;hpb=e179a8bf34022c200deee92d328cb1bae3c3aa4a;p=mesa.git

diff --git a/src/gallium/drivers/r600/r600_buffer.c b/src/gallium/drivers/r600/r600_buffer.c
index a17c54d6eeb..0b0ac3460e1 100644
--- a/src/gallium/drivers/r600/r600_buffer.c
+++ b/src/gallium/drivers/r600/r600_buffer.c
@@ -24,109 +24,138 @@
  * Jerome Glisse
  * Corbin Simpson
  */
-#include 
-#include 
-#include 
-#include 
-#include 
-#include "state_tracker/drm_driver.h"
-#include 
-#include "radeon_drm.h"
-#include "r600.h"
 #include "r600_pipe.h"
+#include "util/u_upload_mgr.h"
+#include "util/u_memory.h"
 
-extern struct u_resource_vtbl r600_buffer_vtbl;
-
-
-struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
-					 const struct pipe_resource *templ)
+static void r600_buffer_destroy(struct pipe_screen *screen,
+				struct pipe_resource *buf)
 {
-	struct r600_resource_buffer *rbuffer;
-	struct r600_bo *bo;
-	/* XXX We probably want a different alignment for buffers and textures. */
-	unsigned alignment = 4096;
-
-	rbuffer = CALLOC_STRUCT(r600_resource_buffer);
-	if (rbuffer == NULL)
-		return NULL;
+	struct r600_resource *rbuffer = r600_resource(buf);
 
-	rbuffer->magic = R600_BUFFER_MAGIC;
-	rbuffer->user_buffer = NULL;
-	rbuffer->r.base.b = *templ;
-	pipe_reference_init(&rbuffer->r.base.b.reference, 1);
-	rbuffer->r.base.b.screen = screen;
-	rbuffer->r.base.vtbl = &r600_buffer_vtbl;
-	rbuffer->r.size = rbuffer->r.base.b.width0;
-	rbuffer->r.bo_size = rbuffer->r.size;
-	rbuffer->uploaded = FALSE;
-	bo = r600_bo((struct radeon*)screen->winsys, rbuffer->r.base.b.width0, alignment, rbuffer->r.base.b.bind, rbuffer->r.base.b.usage);
-	if (bo == NULL) {
-		FREE(rbuffer);
-		return NULL;
-	}
-	rbuffer->r.bo = bo;
-	return &rbuffer->r.base.b;
+	pb_reference(&rbuffer->buf, NULL);
+	FREE(rbuffer);
 }
 
-struct pipe_resource *r600_user_buffer_create(struct pipe_screen *screen,
-					      void *ptr, unsigned bytes,
-					      unsigned bind)
+static struct pipe_transfer *r600_get_transfer(struct pipe_context *ctx,
+					       struct pipe_resource *resource,
+					       unsigned level,
+					       unsigned usage,
+					       const struct pipe_box *box)
 {
-	struct r600_resource_buffer *rbuffer;
-
-	rbuffer = CALLOC_STRUCT(r600_resource_buffer);
-	if (rbuffer == NULL)
-		return NULL;
-
-	rbuffer->magic = R600_BUFFER_MAGIC;
-	pipe_reference_init(&rbuffer->r.base.b.reference, 1);
-	rbuffer->r.base.vtbl = &r600_buffer_vtbl;
-	rbuffer->r.base.b.screen = screen;
-	rbuffer->r.base.b.target = PIPE_BUFFER;
-	rbuffer->r.base.b.format = PIPE_FORMAT_R8_UNORM;
-	rbuffer->r.base.b.usage = PIPE_USAGE_IMMUTABLE;
-	rbuffer->r.base.b.bind = bind;
-	rbuffer->r.base.b.width0 = bytes;
-	rbuffer->r.base.b.height0 = 1;
-	rbuffer->r.base.b.depth0 = 1;
-	rbuffer->r.base.b.array_size = 1;
-	rbuffer->r.base.b.flags = 0;
-	rbuffer->r.bo = NULL;
-	rbuffer->r.bo_size = 0;
-	rbuffer->user_buffer = ptr;
-	rbuffer->uploaded = FALSE;
-	return &rbuffer->r.base.b;
+	struct r600_context *rctx = (struct r600_context*)ctx;
+	struct r600_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
+
+	assert(box->x + box->width <= resource->width0);
+
+	transfer->transfer.resource = resource;
+	transfer->transfer.level = level;
+	transfer->transfer.usage = usage;
+	transfer->transfer.box = *box;
+	transfer->transfer.stride = 0;
+	transfer->transfer.layer_stride = 0;
+	transfer->transfer.data = NULL;
+	transfer->staging = NULL;
+	transfer->offset = 0;
+
+	/* Note strides are zero, this is ok for buffers, but not for
+	 * textures 2d & higher at least.
+	 */
+	return &transfer->transfer;
 }
 
-static void r600_buffer_destroy(struct pipe_screen *screen,
-				struct pipe_resource *buf)
+static void r600_set_constants_dirty_if_bound(struct r600_context *rctx,
+					      struct r600_resource *rbuffer)
 {
-	struct r600_resource_buffer *rbuffer = r600_buffer(buf);
-
-	if (rbuffer->r.bo) {
-		r600_bo_reference((struct radeon*)screen->winsys, &rbuffer->r.bo, NULL);
+	unsigned shader;
+
+	for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
+		struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
+		bool found = false;
+		uint32_t mask = state->enabled_mask;
+
+		while (mask) {
+			unsigned i = u_bit_scan(&mask);
+			if (state->cb[i].buffer == &rbuffer->b.b) {
+				found = true;
+				state->dirty_mask |= 1 << i;
+			}
+		}
+		if (found) {
+			r600_constant_buffers_dirty(rctx, state);
+		}
 	}
-	rbuffer->r.bo = NULL;
-	FREE(rbuffer);
 }
 
 static void *r600_buffer_transfer_map(struct pipe_context *pipe,
 				      struct pipe_transfer *transfer)
 {
-	struct r600_resource_buffer *rbuffer = r600_buffer(transfer->resource);
-	int write = 0;
+	struct r600_resource *rbuffer = r600_resource(transfer->resource);
+	struct r600_context *rctx = (struct r600_context*)pipe;
 	uint8_t *data;
 
-	if (rbuffer->user_buffer)
-		return (uint8_t*)rbuffer->user_buffer + transfer->box.x;
-
-	if (transfer->usage & PIPE_TRANSFER_DONTBLOCK) {
-		/* FIXME */
+	if (transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
+	    !(transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+		assert(transfer->usage & PIPE_TRANSFER_WRITE);
+
+		/* Check if mapping this buffer would cause waiting for the GPU. */
+		if (rctx->ws->cs_is_buffer_referenced(rctx->cs, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
+		    rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
+			unsigned i, mask;
+
+			/* Discard the buffer. */
+			pb_reference(&rbuffer->buf, NULL);
+
+			/* Create a new one in the same pipe_resource. */
+			/* XXX We probably want a different alignment for buffers and textures. */
+			r600_init_resource(rctx->screen, rbuffer, rbuffer->b.b.width0, 4096,
+					   rbuffer->b.b.bind, rbuffer->b.b.usage);
+
+			/* We changed the buffer, now we need to bind it where the old one was bound. */
+			/* Vertex buffers. */
+			mask = rctx->vertex_buffer_state.enabled_mask;
+			while (mask) {
+				i = u_bit_scan(&mask);
+				if (rctx->vertex_buffer_state.vb[i].buffer == &rbuffer->b.b) {
+					rctx->vertex_buffer_state.dirty_mask |= 1 << i;
+					r600_vertex_buffers_dirty(rctx);
+				}
+			}
+			/* Streamout buffers. */
+			for (i = 0; i < rctx->num_so_targets; i++) {
+				if (rctx->so_targets[i]->b.buffer == &rbuffer->b.b) {
+					r600_context_streamout_end(rctx);
+					rctx->streamout_start = TRUE;
+					rctx->streamout_append_bitmask = ~0;
+				}
+			}
+			/* Constant buffers. */
+			r600_set_constants_dirty_if_bound(rctx, rbuffer);
+		}
 	}
-	if (transfer->usage & PIPE_TRANSFER_WRITE) {
-		write = 1;
+#if 0 /* this is broken (see Bug 53130) */
+	else if ((transfer->usage & PIPE_TRANSFER_DISCARD_RANGE) &&
+		 !(transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
+		 rctx->screen->has_streamout &&
+		 /* The buffer range must be aligned to 4. */
+		 transfer->box.x % 4 == 0 && transfer->box.width % 4 == 0) {
+		assert(transfer->usage & PIPE_TRANSFER_WRITE);
+
+		/* Check if mapping this buffer would cause waiting for the GPU. */
+		if (rctx->ws->cs_is_buffer_referenced(rctx->cs, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
+		    rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
+			/* Do a wait-free write-only transfer using a temporary buffer. */
+			struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
+
+			rtransfer->staging = (struct r600_resource*)
+				pipe_buffer_create(pipe->screen, PIPE_BIND_VERTEX_BUFFER,
+						   PIPE_USAGE_STAGING, transfer->box.width);
+			return rctx->ws->buffer_map(rtransfer->staging->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
+		}
 	}
-	data = r600_bo_map((struct radeon*)pipe->winsys, rbuffer->r.bo, transfer->usage, pipe);
+#endif
+
+	data = rctx->ws->buffer_map(rbuffer->cs_buf, rctx->cs, transfer->usage);
 	if (!data)
 		return NULL;
 
@@ -136,117 +165,97 @@ static void *r600_buffer_transfer_map(struct pipe_context *pipe,
 static void r600_buffer_transfer_unmap(struct pipe_context *pipe,
 					struct pipe_transfer *transfer)
 {
-	struct r600_resource_buffer *rbuffer = r600_buffer(transfer->resource);
-
-	if (rbuffer->user_buffer)
-		return;
+	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
 
-	if (rbuffer->r.bo)
-		r600_bo_unmap((struct radeon*)pipe->winsys, rbuffer->r.bo);
-}
+	if (rtransfer->staging) {
+		struct pipe_box box;
+		u_box_1d(0, transfer->box.width, &box);
 
-static void r600_buffer_transfer_flush_region(struct pipe_context *pipe,
-					      struct pipe_transfer *transfer,
-					      const struct pipe_box *box)
-{
+		/* Copy the staging buffer into the original one. */
+		r600_copy_buffer(pipe, transfer->resource, transfer->box.x,
+				 &rtransfer->staging->b.b, &box);
+		pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
+	}
 }
 
-unsigned r600_buffer_is_referenced_by_cs(struct pipe_context *context,
-					 struct pipe_resource *buf,
-					 unsigned level, int layer)
+static void r600_transfer_destroy(struct pipe_context *ctx,
+				  struct pipe_transfer *transfer)
 {
-	/* FIXME */
-	return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+	struct r600_context *rctx = (struct r600_context*)ctx;
+	util_slab_free(&rctx->pool_transfers, transfer);
 }
 
-struct pipe_resource *r600_buffer_from_handle(struct pipe_screen *screen,
-					      struct winsys_handle *whandle)
-{
-	struct radeon *rw = (struct radeon*)screen->winsys;
-	struct r600_resource *rbuffer;
-	struct r600_bo *bo = NULL;
-
-	bo = r600_bo_handle(rw, whandle->handle, NULL);
-	if (bo == NULL) {
-		return NULL;
-	}
-
-	rbuffer = CALLOC_STRUCT(r600_resource);
-	if (rbuffer == NULL) {
-		r600_bo_reference(rw, &bo, NULL);
-		return NULL;
-	}
-
-	pipe_reference_init(&rbuffer->base.b.reference, 1);
-	rbuffer->base.b.target = PIPE_BUFFER;
-	rbuffer->base.b.screen = screen;
-	rbuffer->base.vtbl = &r600_buffer_vtbl;
-	rbuffer->bo = bo;
-	return &rbuffer->base.b;
-}
-
-struct u_resource_vtbl r600_buffer_vtbl =
+static const struct u_resource_vtbl r600_buffer_vtbl =
 {
 	u_default_resource_get_handle,		/* get_handle */
 	r600_buffer_destroy,			/* resource_destroy */
-	r600_buffer_is_referenced_by_cs,	/* is_buffer_referenced */
-	u_default_get_transfer,			/* get_transfer */
-	u_default_transfer_destroy,		/* transfer_destroy */
+	r600_get_transfer,			/* get_transfer */
+	r600_transfer_destroy,			/* transfer_destroy */
 	r600_buffer_transfer_map,		/* transfer_map */
-	r600_buffer_transfer_flush_region,	/* transfer_flush_region */
+	NULL,					/* transfer_flush_region */
 	r600_buffer_transfer_unmap,		/* transfer_unmap */
-	u_default_transfer_inline_write		/* transfer_inline_write */
+	NULL					/* transfer_inline_write */
 };
 
-int r600_upload_index_buffer(struct r600_pipe_context *rctx, struct r600_drawl *draw)
+bool r600_init_resource(struct r600_screen *rscreen,
+			struct r600_resource *res,
+			unsigned size, unsigned alignment,
+			unsigned bind, unsigned usage)
 {
-	if (r600_buffer_is_user_buffer(draw->index_buffer)) {
-		struct r600_resource_buffer *rbuffer = r600_buffer(draw->index_buffer);
-		unsigned upload_offset;
-		int ret = 0;
-
-		ret = r600_upload_buffer(rctx->rupload_vb,
-					 draw->index_buffer_offset,
-					 draw->count * draw->index_size,
-					 rbuffer,
-					 &upload_offset,
-					 &rbuffer->r.bo_size,
-					 &rbuffer->r.bo);
-		if (ret)
-			return ret;
-		rbuffer->uploaded = TRUE;
-		draw->index_buffer_offset = upload_offset;
+	uint32_t initial_domain, domains;
+
+	/* Staging resources participate in transfers and blits only
+	 * and are used for uploads and downloads from regular
+	 * resources. We generate them internally for some transfers.
+	 */
+	if (usage == PIPE_USAGE_STAGING) {
+		domains = RADEON_DOMAIN_GTT;
+		initial_domain = RADEON_DOMAIN_GTT;
+	} else {
+		domains = RADEON_DOMAIN_GTT | RADEON_DOMAIN_VRAM;
+
+		switch(usage) {
+		case PIPE_USAGE_DYNAMIC:
+		case PIPE_USAGE_STREAM:
+		case PIPE_USAGE_STAGING:
+			initial_domain = RADEON_DOMAIN_GTT;
+			break;
+		case PIPE_USAGE_DEFAULT:
+		case PIPE_USAGE_STATIC:
+		case PIPE_USAGE_IMMUTABLE:
+		default:
+			initial_domain = RADEON_DOMAIN_VRAM;
+			break;
+		}
+	}
+
+	res->buf = rscreen->ws->buffer_create(rscreen->ws, size, alignment, bind, initial_domain);
+	if (!res->buf) {
+		return false;
 	}
-	return 0;
+
+	res->cs_buf = rscreen->ws->buffer_get_cs_handle(res->buf);
+	res->domains = domains;
+	return true;
 }
 
-int r600_upload_user_buffers(struct r600_pipe_context *rctx)
+struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
+					 const struct pipe_resource *templ,
+					 unsigned alignment)
 {
-	enum pipe_error ret = PIPE_OK;
-	int i, nr;
-
-	nr = rctx->vertex_elements->count;
-	nr = rctx->nvertex_buffer;
-
-	for (i = 0; i < nr; i++) {
-		struct pipe_vertex_buffer *vb = &rctx->vertex_buffer[i];
-
-		if (r600_buffer_is_user_buffer(vb->buffer)) {
-			struct r600_resource_buffer *rbuffer = r600_buffer(vb->buffer);
-			unsigned upload_offset;
-
-			ret = r600_upload_buffer(rctx->rupload_vb,
-						 0, vb->buffer->width0,
-						 rbuffer,
-						 &upload_offset,
-						 &rbuffer->r.bo_size,
-						 &rbuffer->r.bo);
-			if (ret)
-				return ret;
-			rbuffer->uploaded = TRUE;
-			vb->buffer_offset = upload_offset;
-		}
+	struct r600_screen *rscreen = (struct r600_screen*)screen;
+	struct r600_resource *rbuffer;
+
+	rbuffer = MALLOC_STRUCT(r600_resource);
+
+	rbuffer->b.b = *templ;
+	pipe_reference_init(&rbuffer->b.b.reference, 1);
+	rbuffer->b.b.screen = screen;
+	rbuffer->b.vtbl = &r600_buffer_vtbl;
+
+	if (!r600_init_resource(rscreen, rbuffer, templ->width0, alignment, templ->bind, templ->usage)) {
+		FREE(rbuffer);
+		return NULL;
+	}
-	return ret;
+	return &rbuffer->b.b;
 }
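The PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE branch added in r600_buffer_transfer_map above is a form of buffer renaming: when the GPU still references the old storage, the driver drops its own reference and allocates fresh storage under the same pipe_resource, so a write-only map never has to stall; the price is that every binding of the old buffer (vertex buffers, streamout targets, constant buffers) must be marked dirty and re-emitted. Below is a minimal standalone sketch of that idea. All types and helpers in it (struct storage, struct gpu_buffer, storage_alloc, storage_release, gpu_busy, map_discard_whole) are illustrative stand-ins, not the Gallium or radeon winsys API used in the patch.

/* Buffer-renaming sketch; every name here is a hypothetical stand-in. */
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

struct storage {
	void *cpu_ptr;   /* CPU-visible mapping of the allocation */
	int refcount;    /* >1 means the GPU still holds a reference */
};

struct gpu_buffer {
	struct storage *buf;   /* current backing storage; may be renamed */
	size_t size;
};

static struct storage *storage_alloc(size_t size)
{
	struct storage *s = calloc(1, sizeof(*s));
	if (!s)
		return NULL;
	s->cpu_ptr = malloc(size);
	s->refcount = 1;
	return s;
}

static void storage_release(struct storage **s)
{
	if (*s && --(*s)->refcount == 0) {
		free((*s)->cpu_ptr);
		free(*s);
	}
	*s = NULL;
}

/* Stand-in for the cs_is_buffer_referenced()/buffer_is_busy() checks above. */
static bool gpu_busy(const struct storage *s)
{
	return s->refcount > 1;
}

/* Write-only map that discards the whole buffer: if the GPU still uses
 * the old storage, swap in fresh storage instead of waiting for it. */
static void *map_discard_whole(struct gpu_buffer *b)
{
	if (gpu_busy(b->buf)) {
		storage_release(&b->buf);         /* old storage lives until the GPU drops it */
		b->buf = storage_alloc(b->size);  /* same gpu_buffer, new storage */
		/* A real driver must now re-dirty every binding of this buffer,
		 * as the vertex/streamout/constant-buffer loops in the patch do. */
	}
	return b->buf ? b->buf->cpu_ptr : NULL;
}

int main(void)
{
	struct gpu_buffer b = { storage_alloc(256), 256 };
	struct storage *gpu_ref = b.buf;

	if (!gpu_ref)
		return 1;
	gpu_ref->refcount++;                 /* pretend the GPU queued a read */

	char *p = map_discard_whole(&b);     /* returns without stalling */
	if (p)
		memset(p, 0, b.size);

	storage_release(&gpu_ref);           /* GPU finishes with the old storage */
	storage_release(&b.buf);
	return 0;
}

In the real driver the replacement storage is created by r600_init_resource on the existing r600_resource, which is why the rename is invisible to state trackers: the pipe_resource pointer they hold never changes, only its backing buffer does.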