The timeout parameter covers both cases: a timeout of 0 replaces buffer_is_busy (it only queries the current status), and PIPE_TIMEOUT_INFINITE replaces the old unconditional buffer_wait.
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
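To illustrate the unified interface, here is a minimal sketch of the two call patterns (rws and buf stand in for whichever winsys and buffer the caller holds; not part of the patch):

    /* Timeout 0 replaces buffer_is_busy(): returns immediately,
     * and true means the buffer is idle for the given usage. */
    if (!rws->buffer_wait(buf, 0, RADEON_USAGE_READWRITE)) {
        /* Buffer is busy; e.g. fall back to a staging copy. */
    }

    /* PIPE_TIMEOUT_INFINITE replaces the old blocking buffer_wait(). */
    rws->buffer_wait(buf, PIPE_TIMEOUT_INFINITE, RADEON_USAGE_READWRITE);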
    if (q->type == PIPE_QUERY_GPU_FINISHED) {
        if (wait) {
-            r300->rws->buffer_wait(q->buf, RADEON_USAGE_READWRITE);
+            r300->rws->buffer_wait(q->buf, PIPE_TIMEOUT_INFINITE,
+                                   RADEON_USAGE_READWRITE);
            vresult->b = TRUE;
        } else {
-            vresult->b = !r300->rws->buffer_is_busy(q->buf, RADEON_USAGE_READWRITE);
+            vresult->b = r300->rws->buffer_wait(q->buf, 0, RADEON_USAGE_READWRITE);
        }
        return vresult->b;
    }
    /* Check if mapping this buffer would cause waiting for the GPU. */
    if (r300->rws->cs_is_buffer_referenced(r300->cs, rbuf->cs_buf, RADEON_USAGE_READWRITE) ||
-        r300->rws->buffer_is_busy(rbuf->buf, RADEON_USAGE_READWRITE)) {
+        !r300->rws->buffer_wait(rbuf->buf, 0, RADEON_USAGE_READWRITE)) {
        unsigned i;
        struct pb_buffer *new_buf;
        referenced_hw = TRUE;
    } else {
        referenced_hw =
-            r300->rws->buffer_is_busy(tex->buf, RADEON_USAGE_READWRITE);
+            !r300->rws->buffer_wait(tex->buf, 0, RADEON_USAGE_READWRITE);
    }

    trans = CALLOC_STRUCT(r300_transfer);
}
}
-    if (busy || ctx->ws->buffer_is_busy(resource->buf, rusage)) {
+    if (busy || !ctx->ws->buffer_wait(resource->buf, 0, rusage)) {
        if (usage & PIPE_TRANSFER_DONTBLOCK) {
            return NULL;
        } else {
    /* Check if mapping this buffer would cause waiting for the GPU. */
    if (r600_rings_is_buffer_referenced(rctx, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
-        rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
+        !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
        rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
    }
    /* At this point, the buffer is always idle. */
    /* Check if mapping this buffer would cause waiting for the GPU. */
    if (r600_rings_is_buffer_referenced(rctx, rbuffer->cs_buf, RADEON_USAGE_READWRITE) ||
-        rctx->ws->buffer_is_busy(rbuffer->buf, RADEON_USAGE_READWRITE)) {
+        !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
        /* Do a wait-free write-only transfer using a temporary buffer. */
        unsigned offset;
        struct r600_resource *staging = NULL;
    /* Obtain a new buffer if the current one can't be mapped without a stall. */
    if (r600_rings_is_buffer_referenced(rctx, rquery->buffer.buf->cs_buf, RADEON_USAGE_READWRITE) ||
-        rctx->ws->buffer_is_busy(rquery->buffer.buf->buf, RADEON_USAGE_READWRITE)) {
+        !rctx->ws->buffer_wait(rquery->buffer.buf->buf, 0, RADEON_USAGE_READWRITE)) {
        pipe_resource_reference((struct pipe_resource**)&rquery->buffer.buf, NULL);
        rquery->buffer.buf = r600_new_query_buffer(rctx, rquery->type);
    }
        use_staging_texture = TRUE;
    } else if (!(usage & PIPE_TRANSFER_READ) &&
               (r600_rings_is_buffer_referenced(rctx, rtex->resource.cs_buf, RADEON_USAGE_READWRITE) ||
-                rctx->ws->buffer_is_busy(rtex->resource.buf, RADEON_USAGE_READWRITE))) {
+                !rctx->ws->buffer_wait(rtex->resource.buf, 0, RADEON_USAGE_READWRITE))) {
        /* Use a staging texture for uploads if the underlying BO is busy. */
        use_staging_texture = TRUE;
    }
    void (*buffer_unmap)(struct radeon_winsys_cs_handle *buf);

    /**
-    * Return TRUE if a buffer object is being used by the GPU.
+    * Wait for the buffer and return true if the buffer is no longer used
+    * by the GPU.
     *
-    * \param buf     A winsys buffer object.
-    * \param usage   Only check whether the buffer is busy for the given usage.
-    */
-   boolean (*buffer_is_busy)(struct pb_buffer *buf,
-                             enum radeon_bo_usage usage);
-
-   /**
-    * Wait for a buffer object until it is not used by a GPU. This is
-    * equivalent to a fence placed after the last command using the buffer,
-    * and synchronizing to the fence.
-    *
-    * \param buf     A winsys buffer object to wait for.
-    * \param usage   Only wait until the buffer is idle for the given usage,
-    *                but may still be busy for some other usage.
+    * A timeout of 0 only returns the current busy status without waiting.
+    * A timeout of PIPE_TIMEOUT_INFINITE always waits until the buffer
+    * is idle.
     */
-   void (*buffer_wait)(struct pb_buffer *buf, enum radeon_bo_usage usage);
+   bool (*buffer_wait)(struct pb_buffer *buf, uint64_t timeout,
+                       enum radeon_bo_usage usage);

    /**
     * Return tiling flags describing a memory layout of a buffer object.
return bo;
}
-static void radeon_bo_wait(struct pb_buffer *_buf, enum radeon_bo_usage usage)
-{
-    struct radeon_bo *bo = get_radeon_bo(_buf);
-    struct drm_radeon_gem_wait_idle args = {0};
-
-    while (p_atomic_read(&bo->num_active_ioctls)) {
-        sched_yield();
-    }
-
-    args.handle = bo->handle;
-    while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
-                           &args, sizeof(args)) == -EBUSY);
-}
-
-static boolean radeon_bo_is_busy(struct pb_buffer *_buf,
-                                 enum radeon_bo_usage usage)
-{
-    struct radeon_bo *bo = get_radeon_bo(_buf);
-    struct drm_radeon_gem_busy args = {0};
-
-    if (p_atomic_read(&bo->num_active_ioctls)) {
-        return TRUE;
-    }
-
-    args.handle = bo->handle;
-    return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
-                               &args, sizeof(args)) != 0;
-}
+static bool radeon_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
+                           enum radeon_bo_usage usage)
+{
+    struct radeon_bo *bo = get_radeon_bo(_buf);
+
+    /* Wait if any ioctl is being submitted with this buffer. */
+    if (!os_wait_until_zero(&bo->num_active_ioctls, timeout))
+        return false;
+
+    /* TODO: handle arbitrary timeout */
+    if (!timeout) {
+        /* Timeout 0: just query the busy status via GEM_BUSY.
+         * The ioctl succeeds (returns 0) only if the buffer is idle. */
+        struct drm_radeon_gem_busy args = {0};
+
+        args.handle = bo->handle;
+        return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
+                                   &args, sizeof(args)) == 0;
+    } else {
+        /* Any other timeout: block until idle via GEM_WAIT_IDLE. */
+        struct drm_radeon_gem_wait_idle args = {0};
+
+        args.handle = bo->handle;
+        while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
+                               &args, sizeof(args)) == -EBUSY);
+        return true;
+    }
+}
static enum radeon_bo_domain get_valid_domain(enum radeon_bo_domain domain)
                return NULL;
            }
-            if (radeon_bo_is_busy((struct pb_buffer*)bo,
-                                  RADEON_USAGE_WRITE)) {
+            if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
+                                RADEON_USAGE_WRITE)) {
                return NULL;
            }
        } else {
                return NULL;
            }
-            if (radeon_bo_is_busy((struct pb_buffer*)bo,
-                                  RADEON_USAGE_READWRITE)) {
+            if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
+                                RADEON_USAGE_READWRITE)) {
                return NULL;
            }
        }
            if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                cs->flush_cs(cs->flush_data, 0, NULL);
            }
-            radeon_bo_wait((struct pb_buffer*)bo,
+            radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                            RADEON_USAGE_WRITE);
        } else {
            /* Mapping for write. */
                }
            }
-            radeon_bo_wait((struct pb_buffer*)bo, RADEON_USAGE_READWRITE);
+            radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
+                           RADEON_USAGE_READWRITE);
        }
bo->mgr->rws->buffer_wait_time += os_time_get_nano() - time;
return TRUE;
}
-    if (radeon_bo_is_busy((struct pb_buffer*)bo, RADEON_USAGE_READWRITE)) {
+    if (!radeon_bo_wait((struct pb_buffer*)bo, 0, RADEON_USAGE_READWRITE)) {
        return TRUE;
    }
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = radeon_bo_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
-    ws->base.buffer_is_busy = radeon_bo_is_busy;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_from_ptr = radeon_winsys_bo_from_ptr;
    struct pb_buffer *rfence = (struct pb_buffer*)fence;

    if (timeout == 0)
-        return !ws->buffer_is_busy(rfence, RADEON_USAGE_READWRITE);
+        return ws->buffer_wait(rfence, 0, RADEON_USAGE_READWRITE);

    if (timeout != PIPE_TIMEOUT_INFINITE) {
        int64_t start_time = os_time_get();

        /* Convert nanoseconds to the microseconds used by os_time_get(). */
        timeout /= 1000;

        /* Wait in a loop. */
-        while (ws->buffer_is_busy(rfence, RADEON_USAGE_READWRITE)) {
+        while (!ws->buffer_wait(rfence, 0, RADEON_USAGE_READWRITE)) {
            if (os_time_get() - start_time >= timeout) {
                return FALSE;
            }
        }
        return TRUE;
    }

-    ws->buffer_wait(rfence, RADEON_USAGE_READWRITE);
+    ws->buffer_wait(rfence, PIPE_TIMEOUT_INFINITE, RADEON_USAGE_READWRITE);
    return TRUE;
}
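Until the TODO in radeon_bo_wait() above is resolved, an arbitrary finite timeout can be emulated on top of the 0/PIPE_TIMEOUT_INFINITE primitives, in the same way the fence code above does it. A rough sketch (not part of the patch; variable names are illustrative, and the timeout is assumed to be in nanoseconds as in the fence path):

    int64_t start_time = os_time_get();  /* microseconds */
    int64_t timeout_us = timeout / 1000;

    /* Poll with a zero timeout until the deadline passes. */
    while (!ws->buffer_wait(buf, 0, RADEON_USAGE_READWRITE)) {
        if (os_time_get() - start_time >= timeout_us)
            return false;    /* still busy at the deadline */
        os_time_sleep(10);   /* back off for 10 us between polls */
    }
    return true;             /* the buffer became idle in time */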