This moves to using a pb bufmgr instead of kernel bos directly: radeon_ws_bo now always wraps a pb_buffer, allocated from either the existing malloc manager (mman) or the new kernel bo manager (kman) added in radeon_bo_pb.c.
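
A condensed sketch of the new allocation path (names and control flow taken from the radeon_ws_bo() hunk below; error handling and pipe_reference setup omitted):

    struct pb_desc desc;
    struct pb_manager *man;

    desc.alignment = alignment;
    desc.usage = usage;
    /* constant buffers stay in the malloc manager on chips that use
     * memory constants; everything else becomes a kernel bo via kman */
    if (radeon->use_mem_constant && (usage & PIPE_BIND_CONSTANT_BUFFER))
        man = radeon->mman;
    else
        man = radeon->kman;
    ws_bo->pb = man->create_buffer(man, size, &desc);

Code that still needs the raw GEM handle or size now goes through the radeon_ws_bo_get_handle()/radeon_ws_bo_get_size() accessors, which reach the underlying kernel bo via radeon_bo_pb_get_bo().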
r600_queries_resume(ctx);
}
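+/* Flush hook for the winsys: radeon_bo_pb_map_internal() calls this through
+ * the ctx pointer passed down from radeon_ws_bo_map()/pb_map(), so queued
+ * commands are submitted before mapping a bo that is still referenced. */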
+void r600_flush_ctx(void *data)
+{
+ struct r600_context *rctx = data;
+
+ rctx->context.flush(&rctx->context, 0, NULL);
+}
+
struct pipe_context *r600_create_context(struct pipe_screen *screen, void *priv)
{
struct r600_context *rctx = CALLOC_STRUCT(r600_context);
#define R600_QUERY_SIZE 1
#define R600_QUERY_PM4 128
+void r600_flush_ctx(void *data);
#endif
radeon_pciid.c \
radeon.c \
r600_drm.c \
- radeon_ws_bo.c
+ radeon_ws_bo.c \
+ radeon_bo_pb.c
LIBRARY_INCLUDES = -I$(TOP)/src/gallium/drivers/r600 \
$(shell pkg-config libdrm --cflags-only-I)
r = radeon_state_reloc(state, state->cpm4, regs[id + i].bo_id);
if (r)
return r;
- state->pm4[state->cpm4++] = state->bo[regs[id + i].bo_id]->bo->handle;
+ state->pm4[state->cpm4++] = radeon_ws_bo_get_handle(state->bo[regs[id + i].bo_id]);
}
}
return 0;
r = radeon_state_reloc(state, state->cpm4, regs[id + i].bo_id);
if (r)
return r;
- state->pm4[state->cpm4++] = state->bo[regs[id + i].bo_id]->bo->handle;
+ state->pm4[state->cpm4++] = radeon_ws_bo_get_handle(state->bo[regs[id + i].bo_id]);
}
}
return 0;
r = radeon_state_reloc(state, state->cpm4, regs[id + i].bo_id);
if (r)
return r;
- state->pm4[state->cpm4++] = state->bo[regs[id + i].bo_id]->bo->handle;
+ state->pm4[state->cpm4++] = radeon_ws_bo_get_handle(state->bo[regs[id + i].bo_id]);
}
}
return 0;
r = radeon_state_reloc(state, state->cpm4, regs[id + i].bo_id);
if (r)
return r;
- state->pm4[state->cpm4++] = state->bo[regs[id + i].bo_id]->bo->handle;
+ state->pm4[state->cpm4++] = radeon_ws_bo_get_handle(state->bo[regs[id + i].bo_id]);
}
}
return 0;
}
}
for (i = 0; i < state->nreloc; i++) {
- size = (state->bo[state->reloc_bo_id[i]]->bo->size + 255) >> 8;
+ size = (radeon_ws_bo_get_size(state->bo[state->reloc_bo_id[i]]) + 255) >> 8;
state->pm4[state->cpm4++] = PKT3(PKT3_SURFACE_SYNC, 3);
if (bufs_are_cbs)
flags |= S_0085F0_CB0_DEST_BASE_ENA(1 << i);
state->pm4[state->cpm4++] = 0x0000000A;
state->pm4[state->cpm4++] = PKT3(PKT3_NOP, 0);
state->reloc_pm4_id[i] = state->cpm4;
- state->pm4[state->cpm4++] = state->bo[state->reloc_bo_id[i]]->bo->handle;
+ state->pm4[state->cpm4++] = radeon_ws_bo_get_handle(state->bo[state->reloc_bo_id[i]]);
}
}
r = radeon_state_reloc(state, state->cpm4, 0);
if (r)
return r;
- state->pm4[state->cpm4++] = state->bo[0]->bo->handle;
+ state->pm4[state->cpm4++] = radeon_ws_bo_get_handle(state->bo[0]);
return 0;
}
r = radeon_state_reloc(state, state->cpm4, 0);
if (r)
return r;
- state->pm4[state->cpm4++] = state->bo[0]->bo->handle;
+ state->pm4[state->cpm4++] = radeon_ws_bo_get_handle(state->bo[0]);
return 0;
}
r = radeon_state_reloc(state, state->cpm4, 0);
if (r)
return r;
- state->pm4[state->cpm4++] = state->bo[0]->bo->handle;
+ state->pm4[state->cpm4++] = radeon_ws_bo_get_handle(state->bo[0]);
} else {
state->pm4[state->cpm4++] = PKT3(PKT3_DRAW_INDEX_AUTO, 1);
state->pm4[state->cpm4++] = state->states[R600_DRAW__VGT_NUM_INDICES];
r = radeon_state_reloc(state, state->cpm4, 0);
if (r)
return r;
- state->pm4[state->cpm4++] = state->bo[0]->bo->handle;
+ state->pm4[state->cpm4++] = radeon_ws_bo_get_handle(state->bo[0]);
if (type == 2) {
state->pm4[state->cpm4++] = PKT3(PKT3_NOP, 0);
r = radeon_state_reloc(state, state->cpm4, 1);
if (r)
return r;
- state->pm4[state->cpm4++] = state->bo[1]->bo->handle;
+ state->pm4[state->cpm4++] = radeon_ws_bo_get_handle(state->bo[1]);
}
return 0;
}
radeon->mman = pb_malloc_bufmgr_create();
if (!radeon->mman)
return NULL;
+ radeon->kman = radeon_bo_pbmgr_create(radeon);
+ if (!radeon->kman)
+ return NULL;
return radeon;
}
}
radeon->mman->destroy(radeon->mman);
+ radeon->kman->destroy(radeon->kman);
drmClose(radeon->fd);
free(radeon);
return NULL;
} while (ret == -EBUSY);
return ret;
}
+
+int radeon_bo_busy(struct radeon *radeon, struct radeon_bo *bo, uint32_t *domain)
+{
+ struct drm_radeon_gem_busy args;
+ int ret;
+
+ memset(&args, 0, sizeof(args));
+ args.handle = bo->handle;
+ args.domain = 0;
+
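+ /* DRM_RADEON_GEM_BUSY returns non-zero while the bo is still busy on the
+ * GPU and reports its current placement in args.domain. */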
+ ret = drmCommandWriteRead(radeon->fd, DRM_RADEON_GEM_BUSY,
+ &args, sizeof(args));
+
+ *domain = args.domain;
+ return ret;
+}
--- /dev/null
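+/*
+ * radeon_bo_pb: exposes kernel radeon bos (struct radeon_bo) through the
+ * pipebuffer pb_buffer/pb_manager interface, so the winsys can treat
+ * kernel-backed and malloc-backed buffers uniformly via radeon_ws_bo.
+ */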
+#include "radeon_priv.h"
+
+#include "util/u_inlines.h"
+#include "util/u_memory.h"
+#include "util/u_double_list.h"
+#include "pipebuffer/pb_buffer.h"
+#include "pipebuffer/pb_bufmgr.h"
+
+struct radeon_bo_pb {
+ struct pb_buffer b;
+ struct radeon_bo *bo;
+
+ struct radeon_bo_pbmgr *mgr;
+ struct list_head maplist;
+};
+
+extern const struct pb_vtbl radeon_bo_pb_vtbl;
+
+static INLINE struct radeon_bo_pb *radeon_bo_pb(struct pb_buffer *buf)
+{
+ assert(buf);
+ assert(buf->vtbl == &radeon_bo_pb_vtbl);
+ return (struct radeon_bo_pb *)buf;
+}
+
+struct radeon_bo_pbmgr {
+ struct pb_manager b;
+ struct radeon *radeon;
+ struct list_head buffer_map_list;
+};
+
+static INLINE struct radeon_bo_pbmgr *radeon_bo_pbmgr(struct pb_manager *mgr)
+{
+ assert(mgr);
+ return (struct radeon_bo_pbmgr *)mgr;
+}
+
+static void radeon_bo_pb_destroy(struct pb_buffer *_buf)
+{
+ struct radeon_bo_pb *buf = radeon_bo_pb(_buf);
+
+ if (buf->bo->data != NULL) {
+ LIST_DEL(&buf->maplist);
+ radeon_bo_unmap(buf->mgr->radeon, buf->bo);
+ }
+ radeon_bo_reference(buf->mgr->radeon, &buf->bo, NULL);
+ FREE(buf);
+}
+
+static void *
+radeon_bo_pb_map_internal(struct pb_buffer *_buf,
+ unsigned flags, void *ctx)
+{
+ struct radeon_bo_pb *buf = radeon_bo_pb(_buf);
+
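+ /* Non-blocking map: refuse if something else (typically a queued command
+ * stream) still holds a reference to the bo. */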
+ if (flags & PB_USAGE_DONTBLOCK) {
+ if (p_atomic_read(&buf->bo->reference.count) > 1)
+ return NULL;
+ }
+ if (buf->bo->data != NULL)
+ return buf->bo->data;
+
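+ /* Still non-blocking: ask the kernel whether the bo is busy on the GPU. */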
+ if (flags & PB_USAGE_DONTBLOCK) {
+ uint32_t domain;
+ if (radeon_bo_busy(buf->mgr->radeon, buf->bo, &domain))
+ return NULL;
+ }
+
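+ /* The bo is referenced elsewhere, most likely by the current command
+ * stream; flush so pending commands that use it are submitted before the
+ * blocking map below. */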
+ if (p_atomic_read(&buf->bo->reference.count) > 1 && ctx) {
+ r600_flush_ctx(ctx);
+ }
+ if (radeon_bo_map(buf->mgr->radeon, buf->bo)) {
+ return NULL;
+ }
+ LIST_ADDTAIL(&buf->maplist, &buf->mgr->buffer_map_list);
+ return buf->bo->data;
+}
+
+static void radeon_bo_pb_unmap_internal(struct pb_buffer *_buf)
+{
+ (void)_buf;
+}
+
+static void
+radeon_bo_pb_get_base_buffer(struct pb_buffer *buf,
+ struct pb_buffer **base_buf,
+ unsigned *offset)
+{
+ *base_buf = buf;
+ *offset = 0;
+}
+
+static enum pipe_error
+radeon_bo_pb_validate(struct pb_buffer *_buf,
+ struct pb_validate *vl,
+ unsigned flags)
+{
+ /* Always pinned */
+ return PIPE_OK;
+}
+
+static void
+radeon_bo_pb_fence(struct pb_buffer *buf,
+ struct pipe_fence_handle *fence)
+{
+}
+
+const struct pb_vtbl radeon_bo_pb_vtbl = {
+ radeon_bo_pb_destroy,
+ radeon_bo_pb_map_internal,
+ radeon_bo_pb_unmap_internal,
+ radeon_bo_pb_validate,
+ radeon_bo_pb_fence,
+ radeon_bo_pb_get_base_buffer,
+};
+
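+/* Wrap an already-existing kernel bo, looked up by its GEM handle, in a
+ * pb_buffer. */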
+struct pb_buffer *
+radeon_bo_pb_create_buffer_from_handle(struct pb_manager *_mgr,
+ uint32_t handle)
+{
+ struct radeon_bo_pbmgr *mgr = radeon_bo_pbmgr(_mgr);
+ struct radeon *radeon = mgr->radeon;
+ struct radeon_bo_pb *bo;
+ struct radeon_bo *hw_bo;
+
+ hw_bo = radeon_bo(radeon, handle, 0, 0, NULL);
+ if (hw_bo == NULL)
+ return NULL;
+
+ bo = CALLOC_STRUCT(radeon_bo_pb);
+ if (!bo) {
+ radeon_bo_reference(radeon, &hw_bo, NULL);
+ return NULL;
+ }
+
+ LIST_INITHEAD(&bo->maplist);
+ pipe_reference_init(&bo->b.base.reference, 1);
+ bo->b.base.alignment = 0;
+ bo->b.base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
+ bo->b.base.size = hw_bo->size;
+ bo->b.vtbl = &radeon_bo_pb_vtbl;
+ bo->mgr = mgr;
+
+ bo->bo = hw_bo;
+
+ return &bo->b;
+}
+
+static struct pb_buffer *
+radeon_bo_pb_create_buffer(struct pb_manager *_mgr,
+ pb_size size,
+ const struct pb_desc *desc)
+{
+ struct radeon_bo_pbmgr *mgr = radeon_bo_pbmgr(_mgr);
+ struct radeon *radeon = mgr->radeon;
+ struct radeon_bo_pb *bo;
+ uint32_t domain;
+
+ bo = CALLOC_STRUCT(radeon_bo_pb);
+ if (!bo)
+ goto error1;
+
+ pipe_reference_init(&bo->b.base.reference, 1);
+ bo->b.base.alignment = desc->alignment;
+ bo->b.base.usage = desc->usage;
+ bo->b.base.size = size;
+ bo->b.vtbl = &radeon_bo_pb_vtbl;
+ bo->mgr = mgr;
+
+ LIST_INITHEAD(&bo->maplist);
+
+ bo->bo = radeon_bo(radeon, 0, size,
+ desc->alignment, NULL);
+ if (bo->bo == NULL)
+ goto error2;
+ return &bo->b;
+
+error2:
+ FREE(bo);
+error1:
+ return NULL;
+}
+
+static void
+radeon_bo_pbmgr_flush(struct pb_manager *mgr)
+{
+ /* NOP */
+}
+
+static void
+radeon_bo_pbmgr_destroy(struct pb_manager *_mgr)
+{
+ struct radeon_bo_pbmgr *mgr = radeon_bo_pbmgr(_mgr);
+ FREE(mgr);
+}
+
+struct pb_manager *radeon_bo_pbmgr_create(struct radeon *radeon)
+{
+ struct radeon_bo_pbmgr *mgr;
+
+ mgr = CALLOC_STRUCT(radeon_bo_pbmgr);
+ if (!mgr)
+ return NULL;
+
+ mgr->b.destroy = radeon_bo_pbmgr_destroy;
+ mgr->b.create_buffer = radeon_bo_pb_create_buffer;
+ mgr->b.flush = radeon_bo_pbmgr_flush;
+
+ mgr->radeon = radeon;
+ LIST_INITHEAD(&mgr->buffer_map_list);
+ return &mgr->b;
+}
+
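+/* Drop every cpu mapping taken through this manager's map path and reset
+ * the buffer_map_list. */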
+void radeon_bo_pbmgr_flush_maps(struct pb_manager *_mgr)
+{
+ struct radeon_bo_pbmgr *mgr = radeon_bo_pbmgr(_mgr);
+ struct radeon_bo_pb *rpb, *t_rpb;
+
+ LIST_FOR_EACH_ENTRY_SAFE(rpb, t_rpb, &mgr->buffer_map_list, maplist) {
+ radeon_bo_unmap(mgr->radeon, rpb->bo);
+ LIST_DEL(&rpb->maplist);
+ }
+
+ LIST_INITHEAD(&mgr->buffer_map_list);
+}
+
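+/* Recover the underlying kernel bo from a pb_buffer, following sub-allocated
+ * buffers down to their base buffer when necessary. */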
+struct radeon_bo *radeon_bo_pb_get_bo(struct pb_buffer *_buf)
+{
+ struct radeon_bo_pb *buf;
+ if (_buf->vtbl == &radeon_bo_pb_vtbl) {
+ buf = radeon_bo_pb(_buf);
+ return buf->bo;
+ } else {
+ struct pb_buffer *base_buf;
+ pb_size offset;
+ pb_get_base_buffer(_buf, &base_buf, &offset);
+ if (base_buf->vtbl == &radeon_bo_pb_vtbl) {
+ buf = radeon_bo_pb(base_buf);
+ return buf->bo;
+ }
+ }
+ return NULL;
+}
greloc = (void *)(((u8 *)ctx->reloc) + reloc * 4);
for (i = 0; i < ctx->nbo; i++) {
- if (ctx->bo[i]->bo->handle == greloc->handle) {
+ if (radeon_ws_bo_get_handle(ctx->bo[i]) == greloc->handle) {
radeon_ws_bo_reference(ctx->radeon, &bo, ctx->bo[i]);
return bo;
}
placement[1] = 0;
greloc = (void *)(((u8 *)ctx->reloc) + reloc * 4);
for (i = 0; i < ctx->nbo; i++) {
- if (ctx->bo[i]->bo->handle == greloc->handle) {
+ if (radeon_ws_bo_get_handle(ctx->bo[i]) == greloc->handle) {
placement[0] = greloc->read_domain | greloc->write_domain;
placement[1] = placement[0];
return;
unsigned id, unsigned *placement)
{
unsigned i;
+ unsigned bo_handle = radeon_ws_bo_get_handle(bo);
for (i = 0; i < ctx->nreloc; i++) {
- if (ctx->reloc[i].handle == bo->bo->handle) {
+ if (ctx->reloc[i].handle == bo_handle) {
ctx->pm4[id] = i * sizeof(struct radeon_cs_reloc) / 4;
return 0;
}
if (ctx->nreloc >= RADEON_CTX_MAX_PM4) {
return -EBUSY;
}
- ctx->reloc[ctx->nreloc].handle = bo->bo->handle;
+ ctx->reloc[ctx->nreloc].handle = bo_handle;
ctx->reloc[ctx->nreloc].read_domain = placement[0] | placement [1];
ctx->reloc[ctx->nreloc].write_domain = placement[0] | placement [1];
ctx->reloc[ctx->nreloc].flags = 0;
bof_t *bcs, *blob, *array, *bo, *size, *handle, *device_id, *root;
unsigned i;
void *data;
-
+ unsigned bo_size;
root = device_id = bcs = blob = array = bo = size = handle = NULL;
root = bof_object();
if (root == NULL)
bo = bof_object();
if (bo == NULL)
goto out_err;
- size = bof_int32(ctx->bo[i]->bo->size);
+ bo_size = radeon_ws_bo_get_size(ctx->bo[i]);
+ size = bof_int32(bo_size);
if (size == NULL)
goto out_err;
if (bof_object_set(bo, "size", size))
goto out_err;
bof_decref(size);
size = NULL;
- handle = bof_int32(ctx->bo[i]->bo->handle);
+ handle = bof_int32(radeon_ws_bo_get_handle(ctx->bo[i]));
if (handle == NULL)
goto out_err;
if (bof_object_set(bo, "handle", handle))
bof_decref(handle);
handle = NULL;
data = radeon_ws_bo_map(ctx->radeon, ctx->bo[i], 0, NULL);
- blob = bof_blob(ctx->bo[i]->bo->size, data);
+ blob = bof_blob(bo_size, data);
radeon_ws_bo_unmap(ctx->radeon, ctx->bo[i]);
if (blob == NULL)
goto out_err;
unsigned max_states;
boolean use_mem_constant; /* true for evergreen */
struct pb_manager *mman; /* malloc manager */
+ struct pb_manager *kman; /* kernel bo manager */
};
struct radeon_ws_bo {
struct pipe_reference reference;
- struct radeon_bo *bo;
struct pb_buffer *pb;
};
*/
extern int radeon_draw_pm4(struct radeon_draw *draw);
+/* ws bo winsys only */
+unsigned radeon_ws_bo_get_handle(struct radeon_ws_bo *bo);
+unsigned radeon_ws_bo_get_size(struct radeon_ws_bo *bo);
+
/* bo */
struct radeon_bo *radeon_bo(struct radeon *radeon, unsigned handle,
unsigned size, unsigned alignment, void *ptr);
void radeon_bo_reference(struct radeon *radeon, struct radeon_bo **dst,
struct radeon_bo *src);
int radeon_bo_wait(struct radeon *radeon, struct radeon_bo *bo);
+int radeon_bo_busy(struct radeon *radeon, struct radeon_bo *bo, uint32_t *domain);
+
+/* pipebuffer kernel bo manager */
+struct pb_manager *radeon_bo_pbmgr_create(struct radeon *radeon);
+struct radeon_bo *radeon_bo_pb_get_bo(struct pb_buffer *_buf);
+void radeon_bo_pbmgr_flush_maps(struct pb_manager *_mgr);
+struct pb_buffer *radeon_bo_pb_create_buffer_from_handle(struct pb_manager *_mgr,
+ uint32_t handle);
#endif
{
struct radeon_ws_bo *ws_bo = calloc(1, sizeof(struct radeon_ws_bo));
struct pb_desc desc;
+ struct pb_manager *man;
+
+ desc.alignment = alignment;
+ desc.usage = usage;
if (radeon->use_mem_constant && (usage & PIPE_BIND_CONSTANT_BUFFER)) {
- desc.alignment = alignment;
- desc.usage = usage;
- ws_bo->pb = radeon->mman->create_buffer(radeon->mman, size, &desc);
- if (ws_bo->pb == NULL) {
- free(ws_bo);
- return NULL;
- }
- } else {
- ws_bo->bo = radeon_bo(radeon, 0, size, alignment, NULL);
- if (!ws_bo->bo) {
- free(ws_bo);
- return NULL;
- }
+ man = radeon->mman;
+ } else
+ man = radeon->kman;
+
+ ws_bo->pb = man->create_buffer(man, size, &desc);
+ if (ws_bo->pb == NULL) {
+ free(ws_bo);
+ return NULL;
}
pipe_reference_init(&ws_bo->reference, 1);
{
struct radeon_ws_bo *ws_bo = calloc(1, sizeof(struct radeon_ws_bo));
- ws_bo->bo = radeon_bo(radeon, handle, 0, 0, NULL);
- if (!ws_bo->bo) {
+ ws_bo->pb = radeon_bo_pb_create_buffer_from_handle(radeon->kman, handle);
+ if (!ws_bo->pb) {
free(ws_bo);
return NULL;
}
void *radeon_ws_bo_map(struct radeon *radeon, struct radeon_ws_bo *bo, unsigned usage, void *ctx)
{
- if (bo->pb)
- return pb_map(bo->pb, usage, ctx);
- radeon_bo_map(radeon, bo->bo);
- return bo->bo->data;
+ return pb_map(bo->pb, usage, ctx);
}
void radeon_ws_bo_unmap(struct radeon *radeon, struct radeon_ws_bo *bo)
{
- if (bo->pb)
- pb_unmap(bo->pb);
- else
- radeon_bo_unmap(radeon, bo->bo);
+ pb_unmap(bo->pb);
}
static void radeon_ws_bo_destroy(struct radeon *radeon, struct radeon_ws_bo *bo)
{
if (bo->pb)
pb_reference(&bo->pb, NULL);
- else
- radeon_bo_reference(radeon, &bo->bo, NULL);
free(bo);
}
*dst = src;
}
-int radeon_ws_bo_wait(struct radeon *radeon, struct radeon_ws_bo *bo)
+int radeon_ws_bo_wait(struct radeon *radeon, struct radeon_ws_bo *pb_bo)
{
- if (bo->pb)
+ /* TODO */
+ struct radeon_bo *bo;
+ bo = radeon_bo_pb_get_bo(pb_bo->pb);
+ if (!bo)
return 0;
- else
- return radeon_bo_wait(radeon, bo->bo);
+ radeon_bo_wait(radeon, bo);
+ return 0;
+}
+
+unsigned radeon_ws_bo_get_handle(struct radeon_ws_bo *pb_bo)
+{
+ struct radeon_bo *bo;
+
+ bo = radeon_bo_pb_get_bo(pb_bo->pb);
+ if (!bo)
+ return 0;
+
+ return bo->handle;
+}
+
+unsigned radeon_ws_bo_get_size(struct radeon_ws_bo *pb_bo)
+{
+ struct radeon_bo *bo;
+
+ bo = radeon_bo_pb_get_bo(pb_bo->pb);
+ if (!bo)
+ return 0;
+
+ return bo->size;
}