* Jerome Glisse
* Corbin Simpson <MostAwesomeDude@gmail.com>
*/
-#include <pipe/p_screen.h>
-#include <util/u_format.h>
-#include <util/u_math.h>
-#include <util/u_inlines.h>
-#include <util/u_memory.h>
-#include <util/u_upload_mgr.h>
-#include "state_tracker/drm_driver.h"
-#include <xf86drm.h>
-#include "radeon_drm.h"
-#include "r600.h"
#include "r600_pipe.h"
+#include "util/u_upload_mgr.h"
+#include "util/u_memory.h"
-extern struct u_resource_vtbl r600_buffer_vtbl;
-
-u32 r600_domain_from_usage(unsigned usage)
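+/* Release the winsys buffer backing this resource and free the wrapper. */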
+static void r600_buffer_destroy(struct pipe_screen *screen,
+ struct pipe_resource *buf)
{
- u32 domain = RADEON_GEM_DOMAIN_GTT;
+ struct r600_resource *rbuffer = r600_resource(buf);
- if (usage & PIPE_BIND_RENDER_TARGET) {
- domain |= RADEON_GEM_DOMAIN_VRAM;
- }
- if (usage & PIPE_BIND_DEPTH_STENCIL) {
- domain |= RADEON_GEM_DOMAIN_VRAM;
- }
- if (usage & PIPE_BIND_SAMPLER_VIEW) {
- domain |= RADEON_GEM_DOMAIN_VRAM;
- }
- /* also need BIND_BLIT_SOURCE/DESTINATION ? */
- if (usage & PIPE_BIND_VERTEX_BUFFER) {
- domain |= RADEON_GEM_DOMAIN_GTT;
- }
- if (usage & PIPE_BIND_INDEX_BUFFER) {
- domain |= RADEON_GEM_DOMAIN_GTT;
- }
- if (usage & PIPE_BIND_CONSTANT_BUFFER) {
- domain |= RADEON_GEM_DOMAIN_VRAM;
- }
-
- return domain;
+ pb_reference(&rbuffer->buf, NULL);
+ FREE(rbuffer);
}
-struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
- const struct pipe_resource *templ)
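+/* Allocate a transfer object for a buffer mapping from the per-context slab. */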
+static struct pipe_transfer *r600_get_transfer(struct pipe_context *ctx,
+ struct pipe_resource *resource,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box)
{
- struct r600_resource_buffer *rbuffer;
- struct radeon_ws_bo *bo;
- /* XXX We probably want a different alignment for buffers and textures. */
- unsigned alignment = 4096;
-
- rbuffer = CALLOC_STRUCT(r600_resource_buffer);
- if (rbuffer == NULL)
- return NULL;
-
- rbuffer->magic = R600_BUFFER_MAGIC;
- rbuffer->user_buffer = NULL;
- rbuffer->num_ranges = 0;
- rbuffer->r.base.b = *templ;
- pipe_reference_init(&rbuffer->r.base.b.reference, 1);
- rbuffer->r.base.b.screen = screen;
- rbuffer->r.base.vtbl = &r600_buffer_vtbl;
- rbuffer->r.size = rbuffer->r.base.b.width0;
- rbuffer->r.domain = r600_domain_from_usage(rbuffer->r.base.b.bind);
- bo = radeon_ws_bo((struct radeon*)screen->winsys, rbuffer->r.base.b.width0, alignment, rbuffer->r.base.b.bind);
- if (bo == NULL) {
- FREE(rbuffer);
- return NULL;
- }
- rbuffer->r.bo = bo;
- return &rbuffer->r.base.b;
-}
+ struct r600_context *rctx = (struct r600_context*)ctx;
+ struct r600_transfer *transfer = util_slab_alloc(&rctx->pool_transfers);
-struct pipe_resource *r600_user_buffer_create(struct pipe_screen *screen,
- void *ptr, unsigned bytes,
- unsigned bind)
-{
- struct r600_resource_buffer *rbuffer;
+ assert(box->x + box->width <= resource->width0);
- rbuffer = CALLOC_STRUCT(r600_resource_buffer);
- if (rbuffer == NULL)
- return NULL;
+ transfer->transfer.resource = resource;
+ transfer->transfer.level = level;
+ transfer->transfer.usage = usage;
+ transfer->transfer.box = *box;
+ transfer->transfer.stride = 0;
+ transfer->transfer.layer_stride = 0;
+ transfer->transfer.data = NULL;
+ transfer->staging = NULL;
+ transfer->offset = 0;
- rbuffer->magic = R600_BUFFER_MAGIC;
- pipe_reference_init(&rbuffer->r.base.b.reference, 1);
- rbuffer->r.base.vtbl = &r600_buffer_vtbl;
- rbuffer->r.base.b.screen = screen;
- rbuffer->r.base.b.target = PIPE_BUFFER;
- rbuffer->r.base.b.format = PIPE_FORMAT_R8_UNORM;
- rbuffer->r.base.b.usage = PIPE_USAGE_IMMUTABLE;
- rbuffer->r.base.b.bind = bind;
- rbuffer->r.base.b.width0 = bytes;
- rbuffer->r.base.b.height0 = 1;
- rbuffer->r.base.b.depth0 = 1;
- rbuffer->r.base.b.flags = 0;
- rbuffer->num_ranges = 0;
- rbuffer->r.bo = NULL;
- rbuffer->user_buffer = ptr;
- return &rbuffer->r.base.b;
+ /* Strides are left at zero; that is fine for buffers, but would not be
+ * for 2D and higher-dimensional textures.
+ */
+ return &transfer->transfer;
}
-static void r600_buffer_destroy(struct pipe_screen *screen,
- struct pipe_resource *buf)
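+/* Mark every bound constant buffer slot that references rbuffer as dirty so it is re-emitted. */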
+static void r600_set_constants_dirty_if_bound(struct r600_context *rctx,
+ struct r600_resource *rbuffer)
{
- struct r600_resource_buffer *rbuffer = r600_buffer(buf);
+ unsigned shader;
+
+ for (shader = 0; shader < PIPE_SHADER_TYPES; shader++) {
+ struct r600_constbuf_state *state = &rctx->constbuf_state[shader];
+ bool found = false;
+ uint32_t mask = state->enabled_mask;
- if (rbuffer->r.bo) {
- radeon_ws_bo_reference((struct radeon*)screen->winsys, &rbuffer->r.bo, NULL);
+ while (mask) {
+ unsigned i = u_bit_scan(&mask);
+ if (state->cb[i].buffer == &rbuffer->b.b) {
+ found = true;
+ state->dirty_mask |= 1 << i;
+ }
+ }
+ if (found) {
+ r600_constant_buffers_dirty(rctx, state);
+ }
}
- FREE(rbuffer);
}
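+/* Map a buffer for CPU access, renaming a busy buffer on whole-resource discard to avoid GPU stalls. */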
static void *r600_buffer_transfer_map(struct pipe_context *pipe,
struct pipe_transfer *transfer)
{
- struct r600_resource_buffer *rbuffer = r600_buffer(transfer->resource);
- int write = 0;
+ struct r600_resource *rbuffer = r600_resource(transfer->resource);
+ struct r600_context *rctx = (struct r600_context*)pipe;
uint8_t *data;
- int i;
- boolean flush = FALSE;
-
- if (rbuffer->user_buffer)
- return (uint8_t*)rbuffer->user_buffer + transfer->box.x;
-
- if (transfer->usage & PIPE_TRANSFER_DISCARD) {
- for (i = 0; i < rbuffer->num_ranges; i++) {
- if ((transfer->box.x >= rbuffer->ranges[i].start) &&
- (transfer->box.x < rbuffer->ranges[i].end))
- flush = TRUE;
-
- if (flush) {
- radeon_ws_bo_reference((struct radeon*)pipe->winsys, &rbuffer->r.bo, NULL);
- rbuffer->num_ranges = 0;
- rbuffer->r.bo = radeon_ws_bo((struct radeon*)pipe->winsys,
- rbuffer->r.base.b.width0, 0,
- rbuffer->r.base.b.bind);
- break;
+
+ if (transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
+ !(transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
+ assert(transfer->usage & PIPE_TRANSFER_WRITE);
+
+ /* Check whether mapping this buffer would stall waiting on the GPU. */
+ if (rctx->ws->cs_is_buffer_referenced(rctx->cs, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
+ rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
+ unsigned i, mask;
+
+ /* Discard the buffer. */
+ pb_reference(&rbuffer->buf, NULL);
+
+ /* Create a new one in the same pipe_resource. */
+ /* XXX We probably want a different alignment for buffers and textures. */
+ r600_init_resource(rctx->screen, rbuffer, rbuffer->b.b.width0, 4096,
+ rbuffer->b.b.bind, rbuffer->b.b.usage);
+
+ /* The buffer has been replaced; rebind it everywhere the old one was bound. */
+ /* Vertex buffers. */
+ mask = rctx->vertex_buffer_state.enabled_mask;
+ while (mask) {
+ i = u_bit_scan(&mask);
+ if (rctx->vertex_buffer_state.vb[i].buffer == &rbuffer->b.b) {
+ rctx->vertex_buffer_state.dirty_mask |= 1 << i;
+ r600_vertex_buffers_dirty(rctx);
+ }
+ }
+ /* Streamout buffers. */
+ for (i = 0; i < rctx->num_so_targets; i++) {
+ if (rctx->so_targets[i]->b.buffer == &rbuffer->b.b) {
+ r600_context_streamout_end(rctx);
+ rctx->streamout_start = TRUE;
+ rctx->streamout_append_bitmask = ~0;
+ }
}
+ /* Constant buffers. */
+ r600_set_constants_dirty_if_bound(rctx, rbuffer);
}
}
- if (transfer->usage & PIPE_TRANSFER_DONTBLOCK) {
- /* FIXME */
- }
- if (transfer->usage & PIPE_TRANSFER_WRITE) {
- write = 1;
+#if 0 /* this is broken (see Bug 53130) */
+ else if ((transfer->usage & PIPE_TRANSFER_DISCARD_RANGE) &&
+ !(transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
+ rctx->screen->has_streamout &&
+ /* The buffer range must be aligned to 4. */
+ transfer->box.x % 4 == 0 && transfer->box.width % 4 == 0) {
+ assert(transfer->usage & PIPE_TRANSFER_WRITE);
+
+ /* Check whether mapping this buffer would stall waiting on the GPU. */
+ if (rctx->ws->cs_is_buffer_referenced(rctx->cs, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
+ rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
+ /* Do a wait-free write-only transfer using a temporary buffer. */
+ struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
+
+ rtransfer->staging = (struct r600_resource*)
+ pipe_buffer_create(pipe->screen, PIPE_BIND_VERTEX_BUFFER,
+ PIPE_USAGE_STAGING, transfer->box.width);
+ return rctx->ws->buffer_map(rtransfer->staging->cs_buf, rctx->cs, PIPE_TRANSFER_WRITE);
+ }
}
- data = radeon_ws_bo_map((struct radeon*)pipe->winsys, rbuffer->r.bo, transfer->usage, pipe);
+#endif
+
+ data = rctx->ws->buffer_map(rbuffer->cs_buf, rctx->cs, transfer->usage);
if (!data)
return NULL;
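+/* Unmap a buffer; if a staging buffer was used, copy its contents back into the real resource. */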
static void r600_buffer_transfer_unmap(struct pipe_context *pipe,
struct pipe_transfer *transfer)
{
- struct r600_resource_buffer *rbuffer = r600_buffer(transfer->resource);
+ struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
- if (rbuffer->r.bo)
- radeon_ws_bo_unmap((struct radeon*)pipe->winsys, rbuffer->r.bo);
-}
+ if (rtransfer->staging) {
+ struct pipe_box box;
+ u_box_1d(0, transfer->box.width, &box);
-static void r600_buffer_transfer_flush_region(struct pipe_context *pipe,
- struct pipe_transfer *transfer,
- const struct pipe_box *box)
-{
- struct r600_resource_buffer *rbuffer = r600_buffer(transfer->resource);
- unsigned i;
- unsigned offset = transfer->box.x + box->x;
- unsigned length = box->width;
-
- assert(box->x + box->width <= transfer->box.width);
-
- if (rbuffer->user_buffer)
- return;
-
- /* mark the range as used */
- for(i = 0; i < rbuffer->num_ranges; ++i) {
- if(offset <= rbuffer->ranges[i].end && rbuffer->ranges[i].start <= (offset+box->width)) {
- rbuffer->ranges[i].start = MIN2(rbuffer->ranges[i].start, offset);
- rbuffer->ranges[i].end = MAX2(rbuffer->ranges[i].end, (offset+length));
- return;
- }
+ /* Copy the staging buffer into the original one. */
+ r600_copy_buffer(pipe, transfer->resource, transfer->box.x,
+ &rtransfer->staging->b.b, &box);
+ pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
}
-
- rbuffer->ranges[rbuffer->num_ranges].start = offset;
- rbuffer->ranges[rbuffer->num_ranges].end = offset+length;
- rbuffer->num_ranges++;
}
-unsigned r600_buffer_is_referenced_by_cs(struct pipe_context *context,
- struct pipe_resource *buf,
- unsigned face, unsigned level)
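+/* Return the transfer object to the per-context slab. */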
+static void r600_transfer_destroy(struct pipe_context *ctx,
+ struct pipe_transfer *transfer)
{
- /* FIXME */
- return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE;
+ struct r600_context *rctx = (struct r600_context*)ctx;
+ util_slab_free(&rctx->pool_transfers, transfer);
}
-struct pipe_resource *r600_buffer_from_handle(struct pipe_screen *screen,
- struct winsys_handle *whandle)
+static const struct u_resource_vtbl r600_buffer_vtbl =
{
- struct radeon *rw = (struct radeon*)screen->winsys;
- struct r600_resource *rbuffer;
- struct radeon_ws_bo *bo = NULL;
+ u_default_resource_get_handle, /* get_handle */
+ r600_buffer_destroy, /* resource_destroy */
+ r600_get_transfer, /* get_transfer */
+ r600_transfer_destroy, /* transfer_destroy */
+ r600_buffer_transfer_map, /* transfer_map */
+ NULL, /* transfer_flush_region */
+ r600_buffer_transfer_unmap, /* transfer_unmap */
+ NULL /* transfer_inline_write */
+};
- bo = radeon_ws_bo_handle(rw, whandle->handle);
- if (bo == NULL) {
- return NULL;
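+/* Allocate the winsys buffer that backs a resource and pick its memory domains based on usage. */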
+bool r600_init_resource(struct r600_screen *rscreen,
+ struct r600_resource *res,
+ unsigned size, unsigned alignment,
+ unsigned bind, unsigned usage)
+{
+ uint32_t initial_domain, domains;
+
+ /* Staging resources participate only in transfers and blits; they are
+ * used for uploads to and downloads from regular resources. We generate
+ * them internally for some transfers.
+ */
+ if (usage == PIPE_USAGE_STAGING) {
+ domains = RADEON_DOMAIN_GTT;
+ initial_domain = RADEON_DOMAIN_GTT;
+ } else {
+ domains = RADEON_DOMAIN_GTT | RADEON_DOMAIN_VRAM;
+
+ switch(usage) {
+ case PIPE_USAGE_DYNAMIC:
+ case PIPE_USAGE_STREAM:
+ case PIPE_USAGE_STAGING:
+ initial_domain = RADEON_DOMAIN_GTT;
+ break;
+ case PIPE_USAGE_DEFAULT:
+ case PIPE_USAGE_STATIC:
+ case PIPE_USAGE_IMMUTABLE:
+ default:
+ initial_domain = RADEON_DOMAIN_VRAM;
+ break;
+ }
}
- rbuffer = CALLOC_STRUCT(r600_resource);
- if (rbuffer == NULL) {
- radeon_ws_bo_reference(rw, &bo, NULL);
- return NULL;
+ res->buf = rscreen->ws->buffer_create(rscreen->ws, size, alignment, bind, initial_domain);
+ if (!res->buf) {
+ return false;
}
- pipe_reference_init(&rbuffer->base.b.reference, 1);
- rbuffer->base.b.target = PIPE_BUFFER;
- rbuffer->base.b.screen = screen;
- rbuffer->base.vtbl = &r600_buffer_vtbl;
- rbuffer->bo = bo;
- return &rbuffer->base.b;
+ res->cs_buf = rscreen->ws->buffer_get_cs_handle(res->buf);
+ res->domains = domains;
+ return true;
}
-struct u_resource_vtbl r600_buffer_vtbl =
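+/* Create a buffer resource described by templ, backed by a new winsys buffer. */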
+struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
+ const struct pipe_resource *templ,
+ unsigned alignment)
{
- u_default_resource_get_handle, /* get_handle */
- r600_buffer_destroy, /* resource_destroy */
- r600_buffer_is_referenced_by_cs, /* is_buffer_referenced */
- u_default_get_transfer, /* get_transfer */
- u_default_transfer_destroy, /* transfer_destroy */
- r600_buffer_transfer_map, /* transfer_map */
- r600_buffer_transfer_flush_region, /* transfer_flush_region */
- r600_buffer_transfer_unmap, /* transfer_unmap */
- u_default_transfer_inline_write /* transfer_inline_write */
-};
+ struct r600_screen *rscreen = (struct r600_screen*)screen;
+ struct r600_resource *rbuffer;
+
+ rbuffer = MALLOC_STRUCT(r600_resource);
+ if (rbuffer == NULL)
+ return NULL;
+
+ rbuffer->b.b = *templ;
+ pipe_reference_init(&rbuffer->b.b.reference, 1);
+ rbuffer->b.b.screen = screen;
+ rbuffer->b.vtbl = &r600_buffer_vtbl;
+
+ if (!r600_init_resource(rscreen, rbuffer, templ->width0, alignment, templ->bind, templ->usage)) {
+ FREE(rbuffer);
+ return NULL;
+ }
+ return &rbuffer->b.b;
+}