and handle PIPE_FLUSH_HINT_FINISH in r300.
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
---
/* Reserve CS space. */
if (!r300->rws->cs_check_space(r300->cs, dwords)) {
- r300_flush(&r300->context, RADEON_FLUSH_ASYNC, NULL);
+ r300_flush(&r300->context, PIPE_FLUSH_ASYNC, NULL);
}
/* Emit clear packets. */
struct pipe_fence_handle **fence,
unsigned flags)
{
- r300_flush(pipe,
- flags & PIPE_FLUSH_END_OF_FRAME ? RADEON_FLUSH_END_OF_FRAME : 0,
- fence);
+ if (flags & PIPE_FLUSH_HINT_FINISH)
+ flags &= ~PIPE_FLUSH_ASYNC;
+
+ r300_flush(pipe, flags, fence);
}
void r300_init_flush_functions(struct r300_context* r300)
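For readability, the r300 flush wrapper reads as follows after this hunk (reconstructed from the lines above; only the wrapper's parameter list appears in this excerpt, so the function name below is illustrative):

/* Reconstructed sketch: PIPE_FLUSH_* bits from the state tracker are now
 * forwarded to r300_flush() unchanged, except that a "finish" hint cancels
 * the async request, turning the flush into a synchronous one. */
static void r300_flush_wrapped(struct pipe_context *pipe,
                               struct pipe_fence_handle **fence,
                               unsigned flags)
{
    if (flags & PIPE_FLUSH_HINT_FINISH)
        flags &= ~PIPE_FLUSH_ASYNC;

    r300_flush(pipe, flags, fence);
}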
if (q->type == PIPE_QUERY_GPU_FINISHED) {
pb_reference(&q->buf, NULL);
- r300_flush(pipe, RADEON_FLUSH_ASYNC,
+ r300_flush(pipe, PIPE_FLUSH_ASYNC,
(struct pipe_fence_handle**)&q->buf);
return true;
}
/* Reserve requested CS space. */
if (!r300->rws->cs_check_space(r300->cs, cs_dwords)) {
- r300_flush(&r300->context, RADEON_FLUSH_ASYNC, NULL);
+ r300_flush(&r300->context, PIPE_FLUSH_ASYNC, NULL);
flushed = TRUE;
}
/* make sure that the gfx ring is the only one active */
if (radeon_emitted(rctx->b.dma.cs, 0)) {
- rctx->b.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+ rctx->b.dma.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
}
/* Initialize all the compute-related registers.
ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
- ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
return NULL;
} else {
ctx->gfx.flush(ctx, 0, NULL);
ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
- ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
return NULL;
} else {
ctx->dma.flush(ctx, 0, NULL);
!radeon_cs_memory_below_limit(rctx->screen, ring->cs,
rctx->vram + rbo->vram_usage,
rctx->gtt + rbo->gart_usage))
- ring->flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+ ring->flush(rctx, PIPE_FLUSH_ASYNC, NULL);
return radeon_add_to_buffer_list(rctx, ring, rbo, usage, priority);
}
{
/* Flush the DMA IB if it's not empty. */
if (radeon_emitted(ctx->b.dma.cs, 0))
- ctx->b.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->b.dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
if (!radeon_cs_memory_below_limit(ctx->b.screen, ctx->b.gfx.cs,
ctx->b.vram, ctx->b.gtt)) {
ctx->b.gtt = 0;
ctx->b.vram = 0;
- ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
return;
}
/* all will be accounted for once the relocations are emitted */
/* Flush if there's not enough space. */
if (!ctx->b.ws->cs_check_space(ctx->b.gfx.cs, num_dw)) {
- ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
}
}
&offset, (struct pipe_resource**)&buf);
if (!buf) {
/* This is too heavyweight, but will work. */
- rctx->b.gfx.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+ rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
return;
}
(src &&
ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, src->buf,
RADEON_USAGE_WRITE))))
- ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
/* Flush if there's not enough space, or if the memory usage per IB
* is too large.
if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
ctx->dma.cs->used_vram + ctx->dma.cs->used_gart > 64 * 1024 * 1024 ||
!radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
- ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
assert((num_dw + ctx->dma.cs->current.cdw) <= ctx->dma.cs->current.max_dw);
}
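A note on why PIPE_FLUSH_ASYNC is sufficient in these space checks: cs_flush() switches the context to a fresh IB before it returns, and the async bit only moves the kernel submission of the old IB onto the winsys thread. A hedged sketch of the pattern, using the names from the hunk above:

/* Sketch: reserve DMA CS space. The async flush swaps in an empty IB
 * immediately, so the size assertion holds even while the previous IB is
 * still being submitted on the winsys thread. */
if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
    !radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
    ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
    assert(num_dw + ctx->dma.cs->current.cdw <= ctx->dma.cs->current.max_dw);
}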
struct pipe_fence_handle *gfx_fence = NULL;
struct pipe_fence_handle *sdma_fence = NULL;
bool deferred_fence = false;
- unsigned rflags = RADEON_FLUSH_ASYNC;
+ unsigned rflags = PIPE_FLUSH_ASYNC;
if (flags & PIPE_FLUSH_END_OF_FRAME)
- rflags |= RADEON_FLUSH_END_OF_FRAME;
+ rflags |= PIPE_FLUSH_END_OF_FRAME;
/* DMA IBs are preambles to gfx IBs, therefore must be flushed first. */
if (rctx->dma.cs)
if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
res->buf, RADEON_USAGE_READWRITE)) {
- ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
}
if (radeon_emitted(ctx->dma.cs, 0) &&
ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
res->buf, RADEON_USAGE_READWRITE)) {
- ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
}
ctx->ws->cs_sync_flush(ctx->dma.cs);
if (rctx &&
rfence->gfx_unflushed.ctx == rctx &&
rfence->gfx_unflushed.ib_index == rctx->num_gfx_cs_flushes) {
- rctx->gfx.flush(rctx, timeout ? 0 : RADEON_FLUSH_ASYNC, NULL);
+ rctx->gfx.flush(rctx, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
rfence->gfx_unflushed.ctx = NULL;
if (!timeout)
/* make sure that the gfx ring is the only one active */
if (radeon_emitted(rctx->b.dma.cs, 0)) {
- rctx->b.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+ rctx->b.dma.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
}
/* Re-emit the framebuffer state if needed. */
* The result is that the kernel memory manager is never a bottleneck.
*/
if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
- rctx->gfx.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+ rctx->gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
rctx->num_alloc_tex_transfer_bytes = 0;
}
FB_BUFFER_OFFSET + dec->fb_size, RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
set_reg(dec, dec->reg.cntl, 1);
- flush(dec, RADEON_FLUSH_ASYNC);
+ flush(dec, PIPE_FLUSH_ASYNC);
next_buffer(dec);
}
*/
static void flush(struct rvce_encoder *enc)
{
- enc->ws->cs_flush(enc->cs, RADEON_FLUSH_ASYNC, NULL);
+ enc->ws->cs_flush(enc->cs, PIPE_FLUSH_ASYNC, NULL);
enc->task_info_idx = 0;
enc->bs_idx = 0;
}
ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
- ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
return NULL;
} else {
ctx->gfx.flush(ctx, 0, NULL);
ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
resource->buf, rusage)) {
if (usage & PIPE_TRANSFER_DONTBLOCK) {
- ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
return NULL;
} else {
ctx->dma.flush(ctx, 0, NULL);
!radeon_cs_memory_below_limit(rctx->screen, ring->cs,
rctx->vram + rbo->vram_usage,
rctx->gtt + rbo->gart_usage))
- ring->flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+ ring->flush(rctx, PIPE_FLUSH_ASYNC, NULL);
return radeon_add_to_buffer_list(rctx, ring, rbo, usage, priority);
}
(src &&
ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, src->buf,
RADEON_USAGE_WRITE))))
- ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
/* Flush if there's not enough space, or if the memory usage per IB
* is too large.
if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
ctx->dma.cs->used_vram + ctx->dma.cs->used_gart > 64 * 1024 * 1024 ||
!radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
- ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
assert((num_dw + ctx->dma.cs->current.cdw) <= ctx->dma.cs->current.max_dw);
}
if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
res->buf, RADEON_USAGE_READWRITE)) {
- ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
}
if (radeon_emitted(ctx->dma.cs, 0) &&
ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
res->buf, RADEON_USAGE_READWRITE)) {
- ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
}
ctx->ws->cs_sync_flush(ctx->dma.cs);
* The result is that the kernel memory manager is never a bottleneck.
*/
if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
- rctx->gfx.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+ rctx->gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
rctx->num_alloc_tex_transfer_bytes = 0;
}
FB_BUFFER_OFFSET + dec->fb_size, RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
set_reg(dec, dec->reg.cntl, 1);
- flush(dec, RADEON_FLUSH_ASYNC);
+ flush(dec, PIPE_FLUSH_ASYNC);
next_buffer(dec);
}
*/
static void flush(struct rvce_encoder *enc)
{
- enc->ws->cs_flush(enc->cs, RADEON_FLUSH_ASYNC, NULL);
+ enc->ws->cs_flush(enc->cs, PIPE_FLUSH_ASYNC, NULL);
enc->task_info_idx = 0;
enc->bs_idx = 0;
}
FB_BUFFER_OFFSET + FB_BUFFER_SIZE, RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
set_reg(dec, RDECODE_ENGINE_CNTL, 1);
- flush(dec, RADEON_FLUSH_ASYNC);
+ flush(dec, PIPE_FLUSH_ASYNC);
next_buffer(dec);
}
static void flush(struct radeon_encoder *enc)
{
- enc->ws->cs_flush(enc->cs, RADEON_FLUSH_ASYNC, NULL);
+ enc->ws->cs_flush(enc->cs, PIPE_FLUSH_ASYNC, NULL);
}
static void radeon_enc_flush(struct pipe_video_codec *encoder)
#include "amd/common/ac_gpu_info.h"
#include "amd/common/ac_surface.h"
-#define RADEON_FLUSH_ASYNC (1 << 0)
-#define RADEON_FLUSH_END_OF_FRAME (1 << 1)
-
/* Tiling flags. */
enum radeon_bo_layout {
RADEON_LAYOUT_LINEAR = 0,
* Flush a command stream.
*
* \param cs A command stream to flush.
- * \param flags, RADEON_FLUSH_ASYNC or 0.
+ * \param flags, PIPE_FLUSH_* flags.
* \param fence Pointer to a fence. If non-NULL, a fence is inserted
* after the CS and is returned through this parameter.
* \return Negative POSIX error code or 0 for success.
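Since the winsys now consumes PIPE_FLUSH_* directly, a driver's pipe_context::flush callback needs no flag translation before calling cs_flush(). A minimal sketch of that contract; drv_flush, struct drv_context and its ws/cs members are hypothetical stand-ins, not code from this patch:

/* Illustrative flush callback: state-tracker flags can be passed to the
 * winsys with at most a mask, because both layers now share the
 * PIPE_FLUSH_* namespace. */
static void drv_flush(struct pipe_context *pipe,
                      struct pipe_fence_handle **fence, unsigned flags)
{
    struct drv_context *ctx = (struct drv_context *)pipe;
    unsigned rflags = flags & (PIPE_FLUSH_ASYNC | PIPE_FLUSH_END_OF_FRAME);

    ctx->ws->cs_flush(ctx->cs, rflags, fence);
}

A caller that flushed with PIPE_FLUSH_ASYNC can still wait for the submission later via ws->cs_sync_flush(cs).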
* not going to wait.
*/
threaded_context_unwrap_sync(ctx);
- sctx->b.gfx.flush(&sctx->b, timeout ? 0 : RADEON_FLUSH_ASYNC, NULL);
+ sctx->b.gfx.flush(&sctx->b, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
rfence->gfx_unflushed.ctx = NULL;
if (!timeout)
struct pipe_fence_handle *sdma_fence = NULL;
bool deferred_fence = false;
struct si_fine_fence fine = {};
- unsigned rflags = RADEON_FLUSH_ASYNC;
+ unsigned rflags = PIPE_FLUSH_ASYNC;
if (flags & PIPE_FLUSH_END_OF_FRAME)
- rflags |= RADEON_FLUSH_END_OF_FRAME;
+ rflags |= PIPE_FLUSH_END_OF_FRAME;
if (flags & (PIPE_FLUSH_TOP_OF_PIPE | PIPE_FLUSH_BOTTOM_OF_PIPE)) {
assert(flags & PIPE_FLUSH_DEFERRED);
ctx->b.vram, ctx->b.gtt))) {
ctx->b.gtt = 0;
ctx->b.vram = 0;
- ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
return;
}
ctx->b.gtt = 0;
* and just flush if there is not enough space left.
*/
if (!ctx->b.ws->cs_check_space(cs, 2048))
- ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+ ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
}
void si_context_gfx_flush(void *context, unsigned flags,
return;
if (ctx->screen->debug_flags & DBG(CHECK_VM))
- flags &= ~RADEON_FLUSH_ASYNC;
+ flags &= ~PIPE_FLUSH_ASYNC;
/* If the state tracker is flushing the GFX IB, r600_flush_from_st is
* responsible for flushing the DMA IB and merging the fences from both.
/* Flush the context to re-emit both init_config states. */
sctx->b.initial_gfx_cs_size = 0; /* force flush */
- si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
+ si_context_gfx_flush(sctx, PIPE_FLUSH_ASYNC, NULL);
/* Set ring bindings. */
if (sctx->esgs_ring) {
*/
si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
sctx->b.initial_gfx_cs_size = 0; /* force flush */
- si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
+ si_context_gfx_flush(sctx, PIPE_FLUSH_ASYNC, NULL);
}
/**
* Only check whether the buffer is being used for write. */
if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
RADEON_USAGE_WRITE)) {
- cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+ cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
return NULL;
}
}
} else {
if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
- cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+ cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
return NULL;
}
/* The submission has been queued, unlock the fence now. */
simple_mtx_unlock(&ws->bo_fence_lock);
- if (!(flags & RADEON_FLUSH_ASYNC)) {
+ if (!(flags & PIPE_FLUSH_ASYNC)) {
amdgpu_cs_sync_flush(rcs);
error_code = cur->error_code;
}
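As this amdgpu hunk shows, cs_flush() can only report the submission result when the flush is synchronous; with PIPE_FLUSH_ASYNC it returns before the queued ioctl has run. A caller-side sketch (the helper name is hypothetical):

/* Illustrative: drop PIPE_FLUSH_ASYNC when the submit error code is needed
 * immediately; cs_flush() then waits for the queued submission and returns
 * cur->error_code instead of an optimistic 0. */
static bool submit_and_check(struct radeon_winsys *ws,
                             struct radeon_winsys_cs *cs)
{
    int err = ws->cs_flush(cs, 0 /* synchronous */, NULL);

    /* err is a negative POSIX error code on failure, 0 on success. */
    return err == 0;
}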
*
* Only check whether the buffer is being used for write. */
if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
- cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+ cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
return NULL;
}
}
} else {
if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
- cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+ cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
return NULL;
}
/* Flush if there are any relocs. Clean up otherwise. */
if (cs->csc->num_relocs) {
- cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+ cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
} else {
radeon_cs_context_cleanup(cs->csc);
cs->base.used_vram = 0;
cs->cst->flags[0] |= RADEON_CS_USE_VM;
cs->cst->cs.num_chunks = 3;
}
- if (flags & RADEON_FLUSH_END_OF_FRAME) {
+ if (flags & PIPE_FLUSH_END_OF_FRAME) {
cs->cst->flags[0] |= RADEON_CS_END_OF_FRAME;
cs->cst->cs.num_chunks = 3;
}
if (util_queue_is_initialized(&cs->ws->cs_queue)) {
util_queue_add_job(&cs->ws->cs_queue, cs, &cs->flush_completed,
radeon_drm_cs_emit_ioctl_oneshot, NULL);
- if (!(flags & RADEON_FLUSH_ASYNC))
+ if (!(flags & PIPE_FLUSH_ASYNC))
radeon_drm_cs_sync_flush(rcs);
} else {
radeon_drm_cs_emit_ioctl_oneshot(cs, 0);