r300,r600,radeonsi: replace RADEON_FLUSH_* with PIPE_FLUSH_*
author    Marek Olšák <marek.olsak@amd.com>
          Tue, 28 Nov 2017 16:54:55 +0000 (17:54 +0100)
committer Marek Olšák <marek.olsak@amd.com>
          Wed, 29 Nov 2017 17:21:30 +0000 (18:21 +0100)
and handle PIPE_FLUSH_HINT_FINISH in r300.

Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
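As a standalone illustration of the new r300 behaviour mentioned above (this is not the driver code itself, and the PIPE_FLUSH_* bit values below are placeholders rather than the definitions in Gallium's p_defines.h): when the state tracker passes PIPE_FLUSH_HINT_FINISH, the async bit is dropped so the flush completes synchronously, exactly as the r300_flush.c hunk does.

    #include <stdio.h>

    /* Placeholder bit values, for illustration only. */
    #define PIPE_FLUSH_ASYNC        (1u << 0)
    #define PIPE_FLUSH_HINT_FINISH  (1u << 1)

    /* Mirrors the r300_flush_wrapped() change: a finish hint means the
     * caller is about to wait, so clear the async bit and flush
     * synchronously. */
    static unsigned adjust_flush_flags(unsigned flags)
    {
        if (flags & PIPE_FLUSH_HINT_FINISH)
            flags &= ~PIPE_FLUSH_ASYNC;
        return flags;
    }

    int main(void)
    {
        unsigned flags = PIPE_FLUSH_ASYNC | PIPE_FLUSH_HINT_FINISH;
        printf("0x%x -> 0x%x\n", flags, adjust_flush_flags(flags));
        return 0;
    }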
29 files changed:
src/gallium/drivers/r300/r300_blit.c
src/gallium/drivers/r300/r300_flush.c
src/gallium/drivers/r300/r300_query.c
src/gallium/drivers/r300/r300_render.c
src/gallium/drivers/r600/evergreen_compute.c
src/gallium/drivers/r600/r600_buffer_common.c
src/gallium/drivers/r600/r600_cs.h
src/gallium/drivers/r600/r600_hw_context.c
src/gallium/drivers/r600/r600_pipe_common.c
src/gallium/drivers/r600/r600_state_common.c
src/gallium/drivers/r600/r600_texture.c
src/gallium/drivers/r600/radeon_uvd.c
src/gallium/drivers/r600/radeon_vce.c
src/gallium/drivers/radeon/r600_buffer_common.c
src/gallium/drivers/radeon/r600_cs.h
src/gallium/drivers/radeon/r600_pipe_common.c
src/gallium/drivers/radeon/r600_texture.c
src/gallium/drivers/radeon/radeon_uvd.c
src/gallium/drivers/radeon/radeon_vce.c
src/gallium/drivers/radeon/radeon_vcn_dec.c
src/gallium/drivers/radeon/radeon_vcn_enc.c
src/gallium/drivers/radeon/radeon_winsys.h
src/gallium/drivers/radeonsi/si_fence.c
src/gallium/drivers/radeonsi/si_hw_context.c
src/gallium/drivers/radeonsi/si_state_shaders.c
src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
src/gallium/winsys/radeon/drm/radeon_drm_bo.c
src/gallium/winsys/radeon/drm/radeon_drm_cs.c

diff --git a/src/gallium/drivers/r300/r300_blit.c b/src/gallium/drivers/r300/r300_blit.c
index 8fda727f3515d2618dd958a65ede0753e7e7f4cf..bc497757a93076eb7f61109d969a472555c73f41 100644
@@ -383,7 +383,7 @@ static void r300_clear(struct pipe_context* pipe,
 
         /* Reserve CS space. */
         if (!r300->rws->cs_check_space(r300->cs, dwords)) {
-            r300_flush(&r300->context, RADEON_FLUSH_ASYNC, NULL);
+            r300_flush(&r300->context, PIPE_FLUSH_ASYNC, NULL);
         }
 
         /* Emit clear packets. */
diff --git a/src/gallium/drivers/r300/r300_flush.c b/src/gallium/drivers/r300/r300_flush.c
index 7fabd13f3d7943b07c141d33967e5c0da72f351a..f6c1bf32ca2db24fbd6b0af8183d823893df69a3 100644
@@ -129,9 +129,10 @@ static void r300_flush_wrapped(struct pipe_context *pipe,
                                struct pipe_fence_handle **fence,
                                unsigned flags)
 {
-    r300_flush(pipe,
-               flags & PIPE_FLUSH_END_OF_FRAME ? RADEON_FLUSH_END_OF_FRAME : 0,
-               fence);
+    if (flags & PIPE_FLUSH_HINT_FINISH)
+        flags &= ~PIPE_FLUSH_ASYNC;
+
+    r300_flush(pipe, flags, fence);
 }
 
 void r300_init_flush_functions(struct r300_context* r300)
diff --git a/src/gallium/drivers/r300/r300_query.c b/src/gallium/drivers/r300/r300_query.c
index a84c941768fcb3016203b4bc456aca3981e6125d..014055b221e061a2eb27cb64a5fd6fc24ab37366 100644
@@ -121,7 +121,7 @@ static bool r300_end_query(struct pipe_context* pipe,
 
     if (q->type == PIPE_QUERY_GPU_FINISHED) {
         pb_reference(&q->buf, NULL);
-        r300_flush(pipe, RADEON_FLUSH_ASYNC,
+        r300_flush(pipe, PIPE_FLUSH_ASYNC,
                    (struct pipe_fence_handle**)&q->buf);
         return true;
     }
diff --git a/src/gallium/drivers/r300/r300_render.c b/src/gallium/drivers/r300/r300_render.c
index 9397aaeba7c7f17bae62efc8d072c4434d2db6c6..211d35d06077bb9b57597ad8167d7b816477dee3 100644
@@ -216,7 +216,7 @@ static boolean r300_reserve_cs_dwords(struct r300_context *r300,
 
     /* Reserve requested CS space. */
     if (!r300->rws->cs_check_space(r300->cs, cs_dwords)) {
-        r300_flush(&r300->context, RADEON_FLUSH_ASYNC, NULL);
+        r300_flush(&r300->context, PIPE_FLUSH_ASYNC, NULL);
         flushed = TRUE;
     }
 
diff --git a/src/gallium/drivers/r600/evergreen_compute.c b/src/gallium/drivers/r600/evergreen_compute.c
index 48c4a9ca459b5556bc880242be683944cfb609ca..3985e7098cfb4100b59ca72d0cb4fffc8eb19336 100644
@@ -623,7 +623,7 @@ static void compute_emit_cs(struct r600_context *rctx,
 
        /* make sure that the gfx ring is only one active */
        if (radeon_emitted(rctx->b.dma.cs, 0)) {
-               rctx->b.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+               rctx->b.dma.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
        }
 
        /* Initialize all the compute-related registers.
diff --git a/src/gallium/drivers/r600/r600_buffer_common.c b/src/gallium/drivers/r600/r600_buffer_common.c
index 35a702341ab5e0c9645ac1b538c4a17053b57410..5ff25ae4c5c54c59ea3d9a4b10c92b6608089a7d 100644
@@ -66,7 +66,7 @@ void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
            ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
                                             resource->buf, rusage)) {
                if (usage & PIPE_TRANSFER_DONTBLOCK) {
-                       ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+                       ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
                        return NULL;
                } else {
                        ctx->gfx.flush(ctx, 0, NULL);
@@ -77,7 +77,7 @@ void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
            ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
                                             resource->buf, rusage)) {
                if (usage & PIPE_TRANSFER_DONTBLOCK) {
-                       ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+                       ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
                        return NULL;
                } else {
                        ctx->dma.flush(ctx, 0, NULL);
diff --git a/src/gallium/drivers/r600/r600_cs.h b/src/gallium/drivers/r600/r600_cs.h
index 0efae09f386f40fded1b436dbee089e9c50b34ee..9c8298a846d1eb82a45b0de3d7f168ae05ebbb85 100644
@@ -108,7 +108,7 @@ radeon_add_to_buffer_list_check_mem(struct r600_common_context *rctx,
            !radeon_cs_memory_below_limit(rctx->screen, ring->cs,
                                          rctx->vram + rbo->vram_usage,
                                          rctx->gtt + rbo->gart_usage))
-               ring->flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+               ring->flush(rctx, PIPE_FLUSH_ASYNC, NULL);
 
        return radeon_add_to_buffer_list(rctx, ring, rbo, usage, priority);
 }
diff --git a/src/gallium/drivers/r600/r600_hw_context.c b/src/gallium/drivers/r600/r600_hw_context.c
index 727ad9c9a3c4c3783c23440758fdba8539bb8050..8ffd02b5ba7df2076083ac63b965f6dc9dc90be2 100644
@@ -35,13 +35,13 @@ void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
 {
        /* Flush the DMA IB if it's not empty. */
        if (radeon_emitted(ctx->b.dma.cs, 0))
-               ctx->b.dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->b.dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
 
        if (!radeon_cs_memory_below_limit(ctx->b.screen, ctx->b.gfx.cs,
                                          ctx->b.vram, ctx->b.gtt)) {
                ctx->b.gtt = 0;
                ctx->b.vram = 0;
-               ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
                return;
        }
        /* all will be accounted once relocation are emited */
@@ -82,7 +82,7 @@ void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
 
        /* Flush if there's not enough space. */
        if (!ctx->b.ws->cs_check_space(ctx->b.gfx.cs, num_dw)) {
-               ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
        }
 }
 
@@ -439,7 +439,7 @@ void r600_emit_pfp_sync_me(struct r600_context *rctx)
                                     &offset, (struct pipe_resource**)&buf);
                if (!buf) {
                        /* This is too heavyweight, but will work. */
-                       rctx->b.gfx.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+                       rctx->b.gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
                        return;
                }
 
diff --git a/src/gallium/drivers/r600/r600_pipe_common.c b/src/gallium/drivers/r600/r600_pipe_common.c
index 23f7d74ce05d794ef4ac6a63c293b73385695082..d44860a2d715da8c43fec347f8971e15d6d841b2 100644
@@ -270,7 +270,7 @@ void r600_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
             (src &&
              ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, src->buf,
                                               RADEON_USAGE_WRITE))))
-               ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
 
        /* Flush if there's not enough space, or if the memory usage per IB
         * is too large.
@@ -288,7 +288,7 @@ void r600_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
        if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
            ctx->dma.cs->used_vram + ctx->dma.cs->used_gart > 64 * 1024 * 1024 ||
            !radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
-               ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
                assert((num_dw + ctx->dma.cs->current.cdw) <= ctx->dma.cs->current.max_dw);
        }
 
@@ -400,10 +400,10 @@ static void r600_flush_from_st(struct pipe_context *ctx,
        struct pipe_fence_handle *gfx_fence = NULL;
        struct pipe_fence_handle *sdma_fence = NULL;
        bool deferred_fence = false;
-       unsigned rflags = RADEON_FLUSH_ASYNC;
+       unsigned rflags = PIPE_FLUSH_ASYNC;
 
        if (flags & PIPE_FLUSH_END_OF_FRAME)
-               rflags |= RADEON_FLUSH_END_OF_FRAME;
+               rflags |= PIPE_FLUSH_END_OF_FRAME;
 
        /* DMA IBs are preambles to gfx IBs, therefore must be flushed first. */
        if (rctx->dma.cs)
@@ -626,12 +626,12 @@ static bool r600_resource_commit(struct pipe_context *pctx,
        if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
            ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
                                             res->buf, RADEON_USAGE_READWRITE)) {
-               ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
        }
        if (radeon_emitted(ctx->dma.cs, 0) &&
            ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
                                             res->buf, RADEON_USAGE_READWRITE)) {
-               ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
        }
 
        ctx->ws->cs_sync_flush(ctx->dma.cs);
@@ -1194,7 +1194,7 @@ static boolean r600_fence_finish(struct pipe_screen *screen,
        if (rctx &&
            rfence->gfx_unflushed.ctx == rctx &&
            rfence->gfx_unflushed.ib_index == rctx->num_gfx_cs_flushes) {
-               rctx->gfx.flush(rctx, timeout ? 0 : RADEON_FLUSH_ASYNC, NULL);
+               rctx->gfx.flush(rctx, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
                rfence->gfx_unflushed.ctx = NULL;
 
                if (!timeout)
diff --git a/src/gallium/drivers/r600/r600_state_common.c b/src/gallium/drivers/r600/r600_state_common.c
index a977cdc30d24d28f403221a03e37a408778ee4de..d9b15929852b7be9c1e9b4a3a8dc106f6fa2b397 100644
@@ -1835,7 +1835,7 @@ static void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info
 
        /* make sure that the gfx ring is only one active */
        if (radeon_emitted(rctx->b.dma.cs, 0)) {
-               rctx->b.dma.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+               rctx->b.dma.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
        }
 
        /* Re-emit the framebuffer state if needed. */
diff --git a/src/gallium/drivers/r600/r600_texture.c b/src/gallium/drivers/r600/r600_texture.c
index 07782ff8ce955840a2b7dff3a7642354a637b501..4042b70a9bf9dc041b1e84b776491a9d4dbddf49 100644
@@ -1500,7 +1500,7 @@ static void r600_texture_transfer_unmap(struct pipe_context *ctx,
         * The result is that the kernel memory manager is never a bottleneck.
         */
        if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
-               rctx->gfx.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+               rctx->gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
                rctx->num_alloc_tex_transfer_bytes = 0;
        }
 
diff --git a/src/gallium/drivers/r600/radeon_uvd.c b/src/gallium/drivers/r600/radeon_uvd.c
index 69bba8cf6c6f53b29b9802e3f4e81d54b5958681..17ff3d5d72aeed498cc26654d43c75bf7a316dd0 100644
@@ -1259,7 +1259,7 @@ static void ruvd_end_frame(struct pipe_video_codec *decoder,
                         FB_BUFFER_OFFSET + dec->fb_size, RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
        set_reg(dec, dec->reg.cntl, 1);
 
-       flush(dec, RADEON_FLUSH_ASYNC);
+       flush(dec, PIPE_FLUSH_ASYNC);
        next_buffer(dec);
 }
 
diff --git a/src/gallium/drivers/r600/radeon_vce.c b/src/gallium/drivers/r600/radeon_vce.c
index 16a0127f31968399ae1a25f59d8815162d691736..533bc183f06191f723527b62ff0c474d3247e49c 100644
@@ -59,7 +59,7 @@
  */
 static void flush(struct rvce_encoder *enc)
 {
-       enc->ws->cs_flush(enc->cs, RADEON_FLUSH_ASYNC, NULL);
+       enc->ws->cs_flush(enc->cs, PIPE_FLUSH_ASYNC, NULL);
        enc->task_info_idx = 0;
        enc->bs_idx = 0;
 }
diff --git a/src/gallium/drivers/radeon/r600_buffer_common.c b/src/gallium/drivers/radeon/r600_buffer_common.c
index 2992455d098ca2462f34b6caa1b214910f52eac0..d162eeae662e3fc94086eab7c3e885631adc8851 100644
@@ -64,7 +64,7 @@ void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
            ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
                                             resource->buf, rusage)) {
                if (usage & PIPE_TRANSFER_DONTBLOCK) {
-                       ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+                       ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
                        return NULL;
                } else {
                        ctx->gfx.flush(ctx, 0, NULL);
@@ -75,7 +75,7 @@ void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
            ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
                                             resource->buf, rusage)) {
                if (usage & PIPE_TRANSFER_DONTBLOCK) {
-                       ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+                       ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
                        return NULL;
                } else {
                        ctx->dma.flush(ctx, 0, NULL);
diff --git a/src/gallium/drivers/radeon/r600_cs.h b/src/gallium/drivers/radeon/r600_cs.h
index 2f555dca2ed160a7a9b379b79a1db8503bc848c0..89d6c7c16a111c41dae993900e90da5b6879114b 100644
@@ -106,7 +106,7 @@ radeon_add_to_buffer_list_check_mem(struct r600_common_context *rctx,
            !radeon_cs_memory_below_limit(rctx->screen, ring->cs,
                                          rctx->vram + rbo->vram_usage,
                                          rctx->gtt + rbo->gart_usage))
-               ring->flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+               ring->flush(rctx, PIPE_FLUSH_ASYNC, NULL);
 
        return radeon_add_to_buffer_list(rctx, ring, rbo, usage, priority);
 }
diff --git a/src/gallium/drivers/radeon/r600_pipe_common.c b/src/gallium/drivers/radeon/r600_pipe_common.c
index 08eb40675a6843de97d27ee79dc717815ef6abc1..7e7e42f3d6c4629af4219a9e4fd3ead7ad652b83 100644
@@ -189,7 +189,7 @@ void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
             (src &&
              ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, src->buf,
                                               RADEON_USAGE_WRITE))))
-               ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
 
        /* Flush if there's not enough space, or if the memory usage per IB
         * is too large.
@@ -207,7 +207,7 @@ void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
        if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
            ctx->dma.cs->used_vram + ctx->dma.cs->used_gart > 64 * 1024 * 1024 ||
            !radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
-               ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
                assert((num_dw + ctx->dma.cs->current.cdw) <= ctx->dma.cs->current.max_dw);
        }
 
@@ -386,12 +386,12 @@ static bool r600_resource_commit(struct pipe_context *pctx,
        if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
            ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
                                             res->buf, RADEON_USAGE_READWRITE)) {
-               ctx->gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
        }
        if (radeon_emitted(ctx->dma.cs, 0) &&
            ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
                                             res->buf, RADEON_USAGE_READWRITE)) {
-               ctx->dma.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
        }
 
        ctx->ws->cs_sync_flush(ctx->dma.cs);
diff --git a/src/gallium/drivers/radeon/r600_texture.c b/src/gallium/drivers/radeon/r600_texture.c
index bc72e73823be195fe43290e7a8d4f0153ba8d412..1a0503bec6ee14a0fd166f505424b066be2dd229 100644
@@ -1834,7 +1834,7 @@ static void r600_texture_transfer_unmap(struct pipe_context *ctx,
         * The result is that the kernel memory manager is never a bottleneck.
         */
        if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) {
-               rctx->gfx.flush(rctx, RADEON_FLUSH_ASYNC, NULL);
+               rctx->gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL);
                rctx->num_alloc_tex_transfer_bytes = 0;
        }
 
diff --git a/src/gallium/drivers/radeon/radeon_uvd.c b/src/gallium/drivers/radeon/radeon_uvd.c
index afa8836c369f5e736a1dbe287e2aa228040ef7d1..ee76e748b087401ac3bc70e2686fde90407c30fe 100644
@@ -1321,7 +1321,7 @@ static void ruvd_end_frame(struct pipe_video_codec *decoder,
                         FB_BUFFER_OFFSET + dec->fb_size, RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
        set_reg(dec, dec->reg.cntl, 1);
 
-       flush(dec, RADEON_FLUSH_ASYNC);
+       flush(dec, PIPE_FLUSH_ASYNC);
        next_buffer(dec);
 }
 
diff --git a/src/gallium/drivers/radeon/radeon_vce.c b/src/gallium/drivers/radeon/radeon_vce.c
index 7594421d0e7008d2f25bdde57b0cd2d242ae0c1e..69e602210b33769357659c4e72681791fb3b8497 100644
@@ -53,7 +53,7 @@
  */
 static void flush(struct rvce_encoder *enc)
 {
-       enc->ws->cs_flush(enc->cs, RADEON_FLUSH_ASYNC, NULL);
+       enc->ws->cs_flush(enc->cs, PIPE_FLUSH_ASYNC, NULL);
        enc->task_info_idx = 0;
        enc->bs_idx = 0;
 }
diff --git a/src/gallium/drivers/radeon/radeon_vcn_dec.c b/src/gallium/drivers/radeon/radeon_vcn_dec.c
index 30cd607789d2fdf0f2b7f1dbc6430f84e5c0cc27..fa62155991dca58bfd250e34ffd91e4400696481 100644
@@ -1158,7 +1158,7 @@ static void radeon_dec_end_frame(struct pipe_video_codec *decoder,
                         FB_BUFFER_OFFSET + FB_BUFFER_SIZE, RADEON_USAGE_READ, RADEON_DOMAIN_GTT);
        set_reg(dec, RDECODE_ENGINE_CNTL, 1);
 
-       flush(dec, RADEON_FLUSH_ASYNC);
+       flush(dec, PIPE_FLUSH_ASYNC);
        next_buffer(dec);
 }
 
diff --git a/src/gallium/drivers/radeon/radeon_vcn_enc.c b/src/gallium/drivers/radeon/radeon_vcn_enc.c
index 4972d11062daceda6c7c15d2bf720caa82b69c08..abc89a7c5972ffd0ead70720f3af6abdcd347bfe 100644
@@ -56,7 +56,7 @@ static void radeon_vcn_enc_get_param(struct radeon_encoder *enc, struct pipe_h26
 
 static void flush(struct radeon_encoder *enc)
 {
-       enc->ws->cs_flush(enc->cs, RADEON_FLUSH_ASYNC, NULL);
+       enc->ws->cs_flush(enc->cs, PIPE_FLUSH_ASYNC, NULL);
 }
 
 static void radeon_enc_flush(struct pipe_video_codec *encoder)
diff --git a/src/gallium/drivers/radeon/radeon_winsys.h b/src/gallium/drivers/radeon/radeon_winsys.h
index c03090b2ad4caf73e6cc3c2f1d602654c7ab535b..7ab110a4cfd3a9ee40c89c1eb9415cd70dbdb775 100644
@@ -31,9 +31,6 @@
 #include "amd/common/ac_gpu_info.h"
 #include "amd/common/ac_surface.h"
 
-#define RADEON_FLUSH_ASYNC             (1 << 0)
-#define RADEON_FLUSH_END_OF_FRAME       (1 << 1)
-
 /* Tiling flags. */
 enum radeon_bo_layout {
     RADEON_LAYOUT_LINEAR = 0,
@@ -531,7 +528,7 @@ struct radeon_winsys {
      * Flush a command stream.
      *
      * \param cs          A command stream to flush.
-     * \param flags,      RADEON_FLUSH_ASYNC or 0.
+     * \param flags,      PIPE_FLUSH_* flags.
      * \param fence       Pointer to a fence. If non-NULL, a fence is inserted
      *                    after the CS and is returned through this parameter.
      * \return Negative POSIX error code or 0 for success.
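As a sketch of what the reworded \param flags documentation means in practice (a toy stand-in rather than the real winsys hook, and the PIPE_FLUSH_ASYNC value below is a placeholder, not Gallium's): flushing without PIPE_FLUSH_ASYNC waits for the submission, while passing it only queues the IB, which matches the amdgpu/radeon cs_flush hunks later in this patch.

    #include <stdio.h>

    #define PIPE_FLUSH_ASYNC (1u << 0)   /* placeholder value */

    /* Toy stand-in for radeon_winsys::cs_flush(): submit, then wait only
     * when the caller did not ask for an asynchronous flush. */
    static int stub_cs_flush(unsigned flags, int *fence)
    {
        puts("IB queued for submission");
        if (!(flags & PIPE_FLUSH_ASYNC))
            puts("waiting for submission (synchronous flush)");
        if (fence)
            *fence = 1;                  /* pretend a fence was created */
        return 0;                        /* 0 for success */
    }

    int main(void)
    {
        int fence = 0;
        stub_cs_flush(PIPE_FLUSH_ASYNC, NULL); /* fire-and-forget */
        stub_cs_flush(0, &fence);              /* flush, wait, keep the fence */
        return fence ? 0 : 1;
    }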
diff --git a/src/gallium/drivers/radeonsi/si_fence.c b/src/gallium/drivers/radeonsi/si_fence.c
index 5f478afaf638d8df8e19b5a94077ea8c693215e5..0d165a14b5478ffa79aece3c3164be585fe64749 100644
@@ -271,7 +271,7 @@ static boolean si_fence_finish(struct pipe_screen *screen,
                         * not going to wait.
                         */
                        threaded_context_unwrap_sync(ctx);
-                       sctx->b.gfx.flush(&sctx->b, timeout ? 0 : RADEON_FLUSH_ASYNC, NULL);
+                       sctx->b.gfx.flush(&sctx->b, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
                        rfence->gfx_unflushed.ctx = NULL;
 
                        if (!timeout)
@@ -378,10 +378,10 @@ static void si_flush_from_st(struct pipe_context *ctx,
        struct pipe_fence_handle *sdma_fence = NULL;
        bool deferred_fence = false;
        struct si_fine_fence fine = {};
-       unsigned rflags = RADEON_FLUSH_ASYNC;
+       unsigned rflags = PIPE_FLUSH_ASYNC;
 
        if (flags & PIPE_FLUSH_END_OF_FRAME)
-               rflags |= RADEON_FLUSH_END_OF_FRAME;
+               rflags |= PIPE_FLUSH_END_OF_FRAME;
 
        if (flags & (PIPE_FLUSH_TOP_OF_PIPE | PIPE_FLUSH_BOTTOM_OF_PIPE)) {
                assert(flags & PIPE_FLUSH_DEFERRED);
diff --git a/src/gallium/drivers/radeonsi/si_hw_context.c b/src/gallium/drivers/radeonsi/si_hw_context.c
index d46c1093f24df99b78a4b0c9781178dbb1a64def..3823be056f377cf05f1bd121001d3fa20d86080c 100644
@@ -53,7 +53,7 @@ void si_need_cs_space(struct si_context *ctx)
                                                   ctx->b.vram, ctx->b.gtt))) {
                ctx->b.gtt = 0;
                ctx->b.vram = 0;
-               ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
                return;
        }
        ctx->b.gtt = 0;
@@ -63,7 +63,7 @@ void si_need_cs_space(struct si_context *ctx)
         * and just flush if there is not enough space left.
         */
        if (!ctx->b.ws->cs_check_space(cs, 2048))
-               ctx->b.gfx.flush(ctx, RADEON_FLUSH_ASYNC, NULL);
+               ctx->b.gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
 }
 
 void si_context_gfx_flush(void *context, unsigned flags,
@@ -83,7 +83,7 @@ void si_context_gfx_flush(void *context, unsigned flags,
                return;
 
        if (ctx->screen->debug_flags & DBG(CHECK_VM))
-               flags &= ~RADEON_FLUSH_ASYNC;
+               flags &= ~PIPE_FLUSH_ASYNC;
 
        /* If the state tracker is flushing the GFX IB, r600_flush_from_st is
         * responsible for flushing the DMA IB and merging the fences from both.
diff --git a/src/gallium/drivers/radeonsi/si_state_shaders.c b/src/gallium/drivers/radeonsi/si_state_shaders.c
index 461760f580df087d8faf273fb551d46784937d1a..4f683b85144152b7cfaf889cbfe5fceb852acb20 100644
@@ -2822,7 +2822,7 @@ static bool si_update_gs_ring_buffers(struct si_context *sctx)
 
        /* Flush the context to re-emit both init_config states. */
        sctx->b.initial_gfx_cs_size = 0; /* force flush */
-       si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
+       si_context_gfx_flush(sctx, PIPE_FLUSH_ASYNC, NULL);
 
        /* Set ring bindings. */
        if (sctx->esgs_ring) {
@@ -3161,7 +3161,7 @@ static void si_init_tess_factor_ring(struct si_context *sctx)
         */
        si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
        sctx->b.initial_gfx_cs_size = 0; /* force flush */
-       si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
+       si_context_gfx_flush(sctx, PIPE_FLUSH_ASYNC, NULL);
 }
 
 /**
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index c3e97c2286180b09b0f838fbb2c901a09a69e19a..6ec7cb79043058332608baa0cfed03952f152ffc 100644
@@ -235,7 +235,7 @@ static void *amdgpu_bo_map(struct pb_buffer *buf,
              * Only check whether the buffer is being used for write. */
             if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                                RADEON_USAGE_WRITE)) {
-               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+               cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
                return NULL;
             }
 
@@ -245,7 +245,7 @@ static void *amdgpu_bo_map(struct pb_buffer *buf,
             }
          } else {
             if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
-               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+               cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
                return NULL;
             }
 
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c b/src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
index 2c6856b8cae4fb3c2d4d31a98603e6c7952957af..089a3585912dacce313612bc0be97f7dbd29f2f4 100644
@@ -1493,7 +1493,7 @@ static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
       /* The submission has been queued, unlock the fence now. */
       simple_mtx_unlock(&ws->bo_fence_lock);
 
-      if (!(flags & RADEON_FLUSH_ASYNC)) {
+      if (!(flags & PIPE_FLUSH_ASYNC)) {
          amdgpu_cs_sync_flush(rcs);
          error_code = cur->error_code;
       }
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_bo.c b/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
index b4e501c817fad91b5afd17fbcd63bfcf892e4c65..87c3f1e1006ef600d7e0eb1c24a9422bd531b75c 100644
@@ -490,7 +490,7 @@ static void *radeon_bo_map(struct pb_buffer *buf,
                  *
                  * Only check whether the buffer is being used for write. */
                 if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
-                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+                    cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
                     return NULL;
                 }
 
@@ -500,7 +500,7 @@ static void *radeon_bo_map(struct pb_buffer *buf,
                 }
             } else {
                 if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
-                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+                    cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
                     return NULL;
                 }
 
diff --git a/src/gallium/winsys/radeon/drm/radeon_drm_cs.c b/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
index add88f80aae4961714bea361b7e605a709885380..c3398d0f589acab390829bc660ebb86155a84220 100644
@@ -399,7 +399,7 @@ static bool radeon_drm_cs_validate(struct radeon_winsys_cs *rcs)
 
         /* Flush if there are any relocs. Clean up otherwise. */
         if (cs->csc->num_relocs) {
-            cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
+            cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
         } else {
             radeon_cs_context_cleanup(cs->csc);
             cs->base.used_vram = 0;
@@ -655,7 +655,7 @@ static int radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
                 cs->cst->flags[0] |= RADEON_CS_USE_VM;
                 cs->cst->cs.num_chunks = 3;
             }
-            if (flags & RADEON_FLUSH_END_OF_FRAME) {
+            if (flags & PIPE_FLUSH_END_OF_FRAME) {
                 cs->cst->flags[0] |= RADEON_CS_END_OF_FRAME;
                 cs->cst->cs.num_chunks = 3;
             }
@@ -669,7 +669,7 @@ static int radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
         if (util_queue_is_initialized(&cs->ws->cs_queue)) {
             util_queue_add_job(&cs->ws->cs_queue, cs, &cs->flush_completed,
                                radeon_drm_cs_emit_ioctl_oneshot, NULL);
-            if (!(flags & RADEON_FLUSH_ASYNC))
+            if (!(flags & PIPE_FLUSH_ASYNC))
                 radeon_drm_cs_sync_flush(rcs);
         } else {
             radeon_drm_cs_emit_ioctl_oneshot(cs, 0);