FREE(rbuffer);
}
-void r600_invalidate_resource(struct pipe_context *ctx,
- struct pipe_resource *resource)
+static bool
+r600_do_invalidate_resource(struct r600_common_context *rctx,
+ struct r600_resource *rbuffer)
{
- struct r600_common_context *rctx = (struct r600_common_context*)ctx;
- struct r600_resource *rbuffer = r600_resource(resource);
+ /* In AMD_pinned_memory, the user pointer association only gets
+ * broken when the buffer is explicitly re-allocated.
+ */
+ if (rctx->ws->buffer_is_user_ptr(rbuffer->buf))
+ return false;
/* Check if mapping this buffer would cause waiting for the GPU. */
if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
    !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
	rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
} else {
util_range_set_empty(&rbuffer->valid_buffer_range);
}
+
+ return true;
+}
+
+void r600_invalidate_resource(struct pipe_context *ctx,
+ struct pipe_resource *resource)
+{
+ struct r600_common_context *rctx = (struct r600_common_context*)ctx;
+ struct r600_resource *rbuffer = r600_resource(resource);
+
+ (void)r600_do_invalidate_resource(rctx, rbuffer);
}
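
For context on the AMD_pinned_memory comment above: a user-pointer buffer of this kind is what a GL client gets by binding its own memory through GL_AMD_pinned_memory, so reallocating the storage on invalidation would silently detach the client's pages. A minimal, hypothetical client-side sketch (the enum guard, page size and usage hint are illustrative and not taken from this patch):

/* Hypothetical GL client sketch, not part of this patch: create a
 * pinned (user-pointer) buffer via GL_AMD_pinned_memory. */
#include <stdlib.h>
#include <GL/gl.h>

#ifndef GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD
#define GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD 0x9160
#endif

static GLuint create_pinned_buffer(void **mem_out, size_t size)
{
	GLuint buf;
	void *mem = NULL;

	/* The client memory must stay valid (and page-aligned) for the
	 * whole lifetime of the buffer object. */
	if (posix_memalign(&mem, 4096, size) != 0)
		return 0;

	glGenBuffers(1, &buf);
	glBindBuffer(GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD, buf);
	glBufferData(GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD, size, mem, GL_STREAM_COPY);

	*mem_out = mem;
	return buf;
}
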
static void *r600_buffer_get_transfer(struct pipe_context *ctx,
!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
assert(usage & PIPE_TRANSFER_WRITE);
- r600_invalidate_resource(ctx, resource);
-
- /* At this point, the buffer is always idle. */
- usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ if (r600_do_invalidate_resource(rctx, rbuffer)) {
+ /* At this point, the buffer is always idle. */
+ usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
+ }
}
else if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
!(usage & PIPE_TRANSFER_UNSYNCHRONIZED) &&
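
The hunk above only changes behaviour when the invalidation is skipped: a whole-resource discard map of a user-ptr buffer now takes the normal synchronizing path instead of being promoted to PIPE_TRANSFER_UNSYNCHRONIZED. A hypothetical caller sketch of that path (the helper name and the clear are illustrative):

/* Hypothetical gallium caller, not part of this patch: a whole-buffer
 * discard map, i.e. the path changed above. */
#include <string.h>
#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "util/u_box.h"

static void discard_and_clear(struct pipe_context *pipe,
			      struct pipe_resource *buf, unsigned size)
{
	struct pipe_transfer *transfer;
	struct pipe_box box;
	void *map;

	u_box_1d(0, size, &box);
	map = pipe->transfer_map(pipe, buf, 0,
				 PIPE_TRANSFER_WRITE |
				 PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE,
				 &box, &transfer);
	memset(map, 0, size);
	pipe->transfer_unmap(pipe, transfer);
}
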
struct pb_buffer *(*buffer_from_ptr)(struct radeon_winsys *ws,
void *pointer, unsigned size);
+ /**
+ * Whether the buffer was created from a user pointer.
+ *
+ * \param buf A winsys buffer object
+ * \return whether \p buf was created via buffer_from_ptr
+ */
+ bool (*buffer_is_user_ptr)(struct pb_buffer *buf);
+
/**
* Get a winsys handle from a winsys buffer. The internal structure
* of the handle is platform-specific and only a winsys should access it.
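
A hypothetical sanity check of the new query's contract, using only the two entry points declared above; the helper name, sizes and include paths are illustrative:

/* Hypothetical check, not part of this patch: buffer_is_user_ptr should
 * report true exactly for buffers wrapped around user memory. */
#include <assert.h>
#include "os/os_memory.h"
#include "pipebuffer/pb_buffer.h"
#include "radeon_winsys.h"

static void check_user_ptr_query(struct radeon_winsys *ws)
{
	unsigned size = 64 * 1024;                 /* multiple of the page size */
	void *mem = os_malloc_aligned(size, 4096); /* page-aligned user memory */
	struct pb_buffer *buf = ws->buffer_from_ptr(ws, mem, size);

	assert(buf && ws->buffer_is_user_ptr(buf));

	pb_reference(&buf, NULL);                  /* release the winsys buffer */
	os_free_aligned(mem);
}
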
return NULL;
}
+static bool amdgpu_bo_is_user_ptr(struct pb_buffer *buf)
+{
+ return ((struct amdgpu_winsys_bo*)buf)->user_ptr != NULL;
+}
+
static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
{
return ((struct amdgpu_winsys_bo*)buf)->va;
ws->base.buffer_create = amdgpu_bo_create;
ws->base.buffer_from_handle = amdgpu_bo_from_handle;
ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
+ ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
ws->base.buffer_get_handle = amdgpu_bo_get_handle;
ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
return TRUE;
}
+static bool radeon_winsys_bo_is_user_ptr(struct pb_buffer *buf)
+{
+ return ((struct radeon_bo*)buf)->user_ptr != NULL;
+}
+
static uint64_t radeon_winsys_bo_va(struct pb_buffer *buf)
{
return ((struct radeon_bo*)buf)->va;
ws->base.buffer_create = radeon_winsys_bo_create;
ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
ws->base.buffer_from_ptr = radeon_winsys_bo_from_ptr;
+ ws->base.buffer_is_user_ptr = radeon_winsys_bo_is_user_ptr;
ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
ws->base.buffer_get_initial_domain = radeon_bo_get_initial_domain;