radeonsi: update copyrights
[mesa.git] / src / gallium / drivers / radeon / r600_pipe_common.c
index 7e7e42f3d6c4629af4219a9e4fd3ead7ad652b83..493b6f56e54e8bf476e0e45cf27cd3a27af326fc 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright 2013 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * pipe_context
  */
 
-/**
- * Write an EOP (end-of-pipe) event.
- *
- * \param event        EVENT_TYPE_*
- * \param event_flags  Optional cache flush flags (TC)
- * \param data_sel     1 = fence, 3 = timestamp
- * \param buf          Buffer
- * \param va           GPU address
- * \param new_fence    Fence value to write for this event
- * \param query_type   PIPE_QUERY_* (used by the GFX9 EOP bug workaround)
- */
-void si_gfx_write_event_eop(struct r600_common_context *ctx,
-                           unsigned event, unsigned event_flags,
-                           unsigned data_sel,
-                           struct r600_resource *buf, uint64_t va,
-                           uint32_t new_fence, unsigned query_type)
-{
-       struct radeon_winsys_cs *cs = ctx->gfx.cs;
-       unsigned op = EVENT_TYPE(event) |
-                     EVENT_INDEX(5) |
-                     event_flags;
-       unsigned sel = EOP_DATA_SEL(data_sel);
-
-       /* Wait for write confirmation before writing data, but don't send
-        * an interrupt. */
-       if (data_sel != EOP_DATA_SEL_DISCARD)
-               sel |= EOP_INT_SEL(EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM);
-
-       if (ctx->chip_class >= GFX9) {
-               /* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
-                * counters) must immediately precede every timestamp event to
-                * prevent a GPU hang on GFX9.
-                *
-                * Occlusion queries don't need to do it here, because they
-                * always do ZPASS_DONE before the timestamp.
-                */
-               if (ctx->chip_class == GFX9 &&
-                   query_type != PIPE_QUERY_OCCLUSION_COUNTER &&
-                   query_type != PIPE_QUERY_OCCLUSION_PREDICATE &&
-                   query_type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
-                       struct r600_resource *scratch = ctx->eop_bug_scratch;
-
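-                       /* ZPASS_DONE writes 16 bytes per render backend,
-                        * hence the scratch size check below. */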
-                       assert(16 * ctx->screen->info.num_render_backends <=
-                              scratch->b.b.width0);
-                       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
-                       radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
-                       radeon_emit(cs, scratch->gpu_address);
-                       radeon_emit(cs, scratch->gpu_address >> 32);
-
-                       radeon_add_to_buffer_list(ctx, &ctx->gfx, scratch,
-                                                 RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
-               }
-
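-               /* GFX9 uses the 8-dword RELEASE_MEM packet for EOP events
-                * instead of EVENT_WRITE_EOP. */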
-               radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 6, 0));
-               radeon_emit(cs, op);
-               radeon_emit(cs, sel);
-               radeon_emit(cs, va);            /* address lo */
-               radeon_emit(cs, va >> 32);      /* address hi */
-               radeon_emit(cs, new_fence);     /* immediate data lo */
-               radeon_emit(cs, 0); /* immediate data hi */
-               radeon_emit(cs, 0); /* unused */
-       } else {
-               if (ctx->chip_class == CIK ||
-                   ctx->chip_class == VI) {
-                       struct r600_resource *scratch = ctx->eop_bug_scratch;
-                       uint64_t va = scratch->gpu_address;
-
-                       /* Two EOP events are required to make all engines go idle
-                        * (and optional cache flushes executed) before the timestamp
-                        * is written. The first event writes to the scratch buffer;
-                        * only the second one writes the caller's fence.
-                        */
-                       radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
-                       radeon_emit(cs, op);
-                       radeon_emit(cs, va);
-                       radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
-                       radeon_emit(cs, 0); /* immediate data */
-                       radeon_emit(cs, 0); /* unused */
-
-                       radeon_add_to_buffer_list(ctx, &ctx->gfx, scratch,
-                                                 RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
-               }
-
-               radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
-               radeon_emit(cs, op);
-               radeon_emit(cs, va);
-               radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
-               radeon_emit(cs, new_fence); /* immediate data */
-               radeon_emit(cs, 0); /* unused */
-       }
-
-       if (buf) {
-               radeon_add_to_buffer_list(ctx, &ctx->gfx, buf, RADEON_USAGE_WRITE,
-                                         RADEON_PRIO_QUERY);
-       }
-}
-
-unsigned si_gfx_write_fence_dwords(struct si_screen *screen)
-{
-       unsigned dwords = 6;
-
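-       /* CIK and VI emit two EOP events (see si_gfx_write_event_eop). */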
-       if (screen->info.chip_class == CIK ||
-           screen->info.chip_class == VI)
-               dwords *= 2;
-
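-       /* Without GPUVM, 2 extra dwords are needed for the relocation. */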
-       if (!screen->info.has_virtual_memory)
-               dwords += 2;
-
-       return dwords;
-}
-
-void si_gfx_wait_fence(struct r600_common_context *ctx,
-                      uint64_t va, uint32_t ref, uint32_t mask)
-{
-       struct radeon_winsys_cs *cs = ctx->gfx.cs;
-
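-       /* Make the GPU poll memory until (*va & mask) == ref. */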
-       radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
-       radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
-       radeon_emit(cs, va);
-       radeon_emit(cs, va >> 32);
-       radeon_emit(cs, ref); /* reference value */
-       radeon_emit(cs, mask); /* mask */
-       radeon_emit(cs, 4); /* poll interval */
-}
-
-static void r600_dma_emit_wait_idle(struct r600_common_context *rctx)
-{
-       struct radeon_winsys_cs *cs = rctx->dma.cs;
-
-       /* NOP waits for idle; only the packet encoding differs between
-        * the SI and CIK+ DMA engines. */
-       if (rctx->chip_class >= CIK)
-               radeon_emit(cs, 0x00000000); /* NOP (SDMA, CIK+) */
-       else
-               radeon_emit(cs, 0xf0000000); /* NOP (SI DMA) */
-}
-
-void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
-                      struct r600_resource *dst, struct r600_resource *src)
-{
-       uint64_t vram = ctx->dma.cs->used_vram;
-       uint64_t gtt = ctx->dma.cs->used_gart;
-
-       if (dst) {
-               vram += dst->vram_usage;
-               gtt += dst->gart_usage;
-       }
-       if (src) {
-               vram += src->vram_usage;
-               gtt += src->gart_usage;
-       }
-
-       /* Flush the GFX IB if the DMA engine depends on it: if the GFX IB
-        * references the destination or may write the source. */
-       if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
-           ((dst &&
-             ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, dst->buf,
-                                              RADEON_USAGE_READWRITE)) ||
-            (src &&
-             ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, src->buf,
-                                              RADEON_USAGE_WRITE))))
-               ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
-
-       /* Flush if there's not enough space, or if the memory usage per IB
-        * is too large.
-        *
-        * IBs using too little memory are limited by the IB submission overhead.
-        * IBs using too much memory are limited by the kernel/TTM overhead.
-        * Too long IBs create CPU-GPU pipeline bubbles and add latency.
-        *
-        * This heuristic makes sure that DMA requests are executed
-        * very soon after the call is made and lowers memory usage.
-        * It improves texture upload performance by keeping the DMA
-        * engine busy while uploads are being submitted.
-        */
-       num_dw++; /* for emit_wait_idle below */
-       if (!ctx->ws->cs_check_space(ctx->dma.cs, num_dw) ||
-           ctx->dma.cs->used_vram + ctx->dma.cs->used_gart > 64 * 1024 * 1024 ||
-           !radeon_cs_memory_below_limit(ctx->screen, ctx->dma.cs, vram, gtt)) {
-               ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
-               assert((num_dw + ctx->dma.cs->current.cdw) <= ctx->dma.cs->current.max_dw);
-       }
-
-       /* Wait for idle if either buffer has been used in the IB before to
-        * prevent read-after-write hazards.
-        */
-       if ((dst &&
-            ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, dst->buf,
-                                             RADEON_USAGE_READWRITE)) ||
-           (src &&
-            ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, src->buf,
-                                             RADEON_USAGE_WRITE)))
-               r600_dma_emit_wait_idle(ctx);
-
-       /* Add the buffers to the buffer list. If GPUVM is not supported,
-        * the CS checker needs 2 entries in the buffer list per packet,
-        * which are emitted manually elsewhere with each packet.
-        */
-       if (ctx->screen->info.has_virtual_memory) {
-               if (dst)
-                       radeon_add_to_buffer_list(ctx, &ctx->dma, dst,
-                                                 RADEON_USAGE_WRITE,
-                                                 RADEON_PRIO_SDMA_BUFFER);
-               if (src)
-                       radeon_add_to_buffer_list(ctx, &ctx->dma, src,
-                                                 RADEON_USAGE_READ,
-                                                 RADEON_PRIO_SDMA_BUFFER);
-       }
-
-       /* This function is called before every DMA call, so count them here. */
-       ctx->num_dma_calls++;
-}
-
-static void r600_flush_dma_ring(void *ctx, unsigned flags,
-                               struct pipe_fence_handle **fence)
-{
-       struct r600_common_context *rctx = (struct r600_common_context *)ctx;
-       struct radeon_winsys_cs *cs = rctx->dma.cs;
-       struct radeon_saved_cs saved;
-       bool check_vm =
-               (rctx->screen->debug_flags & DBG(CHECK_VM)) &&
-               rctx->check_vm_faults;
-
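-       /* Nothing was emitted; just return the last SDMA fence. */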
-       if (!radeon_emitted(cs, 0)) {
-               if (fence)
-                       rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
-               return;
-       }
-
-       if (check_vm)
-               si_save_cs(rctx->ws, cs, &saved, true);
-
-       rctx->ws->cs_flush(cs, flags, &rctx->last_sdma_fence);
-       if (fence)
-               rctx->ws->fence_reference(fence, rctx->last_sdma_fence);
-
-       if (check_vm) {
-               /* Use a conservative timeout of 800 ms, after which we stop
-                * waiting and assume the GPU is hung.
-                */
-               rctx->ws->fence_wait(rctx->ws, rctx->last_sdma_fence, 800*1000*1000);
-
-               rctx->check_vm_faults(rctx, &saved, RING_DMA);
-               si_clear_saved_cs(&saved);
-       }
-}
-
-/**
- * Store a linearized copy of all chunks of \p cs together with the buffer
- * list in \p saved.
- */
-void si_save_cs(struct radeon_winsys *ws, struct radeon_winsys_cs *cs,
-               struct radeon_saved_cs *saved, bool get_buffer_list)
-{
-       uint32_t *buf;
-       unsigned i;
-
-       /* Save the IB chunks. */
-       saved->num_dw = cs->prev_dw + cs->current.cdw;
-       saved->ib = MALLOC(4 * saved->num_dw);
-       if (!saved->ib)
-               goto oom;
-
-       buf = saved->ib;
-       for (i = 0; i < cs->num_prev; ++i) {
-               memcpy(buf, cs->prev[i].buf, cs->prev[i].cdw * 4);
-               buf += cs->prev[i].cdw;
-       }
-       memcpy(buf, cs->current.buf, cs->current.cdw * 4);
-
-       if (!get_buffer_list)
-               return;
-
-       /* Save the buffer list. */
-       saved->bo_count = ws->cs_get_buffer_list(cs, NULL);
-       saved->bo_list = CALLOC(saved->bo_count,
-                               sizeof(saved->bo_list[0]));
-       if (!saved->bo_list) {
-               FREE(saved->ib);
-               goto oom;
-       }
-       ws->cs_get_buffer_list(cs, saved->bo_list);
-
-       return;
-
-oom:
-       fprintf(stderr, "%s: out of memory\n", __func__);
-       memset(saved, 0, sizeof(*saved));
-}
-
-void si_clear_saved_cs(struct radeon_saved_cs *saved)
-{
-       FREE(saved->ib);
-       FREE(saved->bo_list);
-
-       memset(saved, 0, sizeof(*saved));
-}
-
 static enum pipe_reset_status r600_get_reset_status(struct pipe_context *ctx)
 {
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
@@ -373,7 +80,7 @@ static bool r600_resource_commit(struct pipe_context *pctx,
                                 unsigned level, struct pipe_box *box,
                                 bool commit)
 {
-       struct r600_common_context *ctx = (struct r600_common_context *)pctx;
+       struct si_context *ctx = (struct si_context *)pctx;
        struct r600_resource *res = r600_resource(resource);
 
        /*
@@ -383,29 +90,31 @@ static bool r600_resource_commit(struct pipe_context *pctx,
         * (b) wait for threaded submit to finish, including those that were
         *     triggered by some other, earlier operation.
         */
-       if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
-           ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
-                                            res->buf, RADEON_USAGE_READWRITE)) {
-               ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
+       if (radeon_emitted(ctx->b.gfx_cs, ctx->b.initial_gfx_cs_size) &&
+           ctx->b.ws->cs_is_buffer_referenced(ctx->b.gfx_cs,
+                                              res->buf, RADEON_USAGE_READWRITE)) {
+               si_flush_gfx_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
        }
-       if (radeon_emitted(ctx->dma.cs, 0) &&
-           ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
-                                            res->buf, RADEON_USAGE_READWRITE)) {
-               ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
+       if (radeon_emitted(ctx->b.dma_cs, 0) &&
+           ctx->b.ws->cs_is_buffer_referenced(ctx->b.dma_cs,
+                                              res->buf, RADEON_USAGE_READWRITE)) {
+               si_flush_dma_cs(ctx, PIPE_FLUSH_ASYNC, NULL);
        }
 
-       ctx->ws->cs_sync_flush(ctx->dma.cs);
-       ctx->ws->cs_sync_flush(ctx->gfx.cs);
+       ctx->b.ws->cs_sync_flush(ctx->b.dma_cs);
+       ctx->b.ws->cs_sync_flush(ctx->b.gfx_cs);
 
        assert(resource->target == PIPE_BUFFER);
 
-       return ctx->ws->buffer_commit(res->buf, box->x, box->width, commit);
+       return ctx->b.ws->buffer_commit(res->buf, box->x, box->width, commit);
 }
 
 bool si_common_context_init(struct r600_common_context *rctx,
                            struct si_screen *sscreen,
                            unsigned context_flags)
 {
+       struct si_context *sctx = (struct si_context *)rctx;
+
        slab_create_child(&rctx->pool_transfers, &sscreen->pool_transfers);
        slab_create_child(&rctx->pool_transfers_unsync, &sscreen->pool_transfers);
 
@@ -425,8 +134,8 @@ bool si_common_context_init(struct r600_common_context *rctx,
 
        rctx->b.set_device_reset_callback = r600_set_device_reset_callback;
 
-       si_init_context_texture_functions(rctx);
-       si_init_query_functions(rctx);
+       si_init_context_texture_functions(sctx);
+       si_init_query_functions(sctx);
 
        if (rctx->chip_class == CIK ||
            rctx->chip_class == VI ||
@@ -445,24 +154,32 @@ bool si_common_context_init(struct r600_common_context *rctx,
                return false;
 
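+       /* Streamed data is written by the CPU and only read by the GPU,
+        * hence the read-only flag. */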
        rctx->b.stream_uploader = u_upload_create(&rctx->b, 1024 * 1024,
-                                                 0, PIPE_USAGE_STREAM);
+                                                 0, PIPE_USAGE_STREAM,
+                                                 R600_RESOURCE_FLAG_READ_ONLY);
        if (!rctx->b.stream_uploader)
                return false;
 
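+       /* Constants are also GPU-read-only, but CP DMA prefetches write to
+        * memory on some chips (cpdma_prefetch_writes_memory), so the
+        * read-only flag can only be set when they don't. */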
        rctx->b.const_uploader = u_upload_create(&rctx->b, 128 * 1024,
-                                                0, PIPE_USAGE_DEFAULT);
+                                                0, PIPE_USAGE_DEFAULT,
+                                                R600_RESOURCE_FLAG_32BIT |
+                                                (sscreen->cpdma_prefetch_writes_memory ?
+                                                       0 : R600_RESOURCE_FLAG_READ_ONLY));
        if (!rctx->b.const_uploader)
                return false;
 
+       rctx->cached_gtt_allocator = u_upload_create(&rctx->b, 16 * 1024,
+                                                    0, PIPE_USAGE_STAGING, 0);
+       if (!rctx->cached_gtt_allocator)
+               return false;
+
        rctx->ctx = rctx->ws->ctx_create(rctx->ws);
        if (!rctx->ctx)
                return false;
 
        if (sscreen->info.num_sdma_rings && !(sscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
-               rctx->dma.cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
-                                                  r600_flush_dma_ring,
+               rctx->dma_cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
+                                                  (void*)si_flush_dma_cs,
                                                   rctx);
-               rctx->dma.flush = r600_flush_dma_ring;
        }
 
        return true;
@@ -487,10 +204,10 @@ void si_common_context_cleanup(struct r600_common_context *rctx)
        if (rctx->query_result_shader)
                rctx->b.delete_compute_state(&rctx->b, rctx->query_result_shader);
 
-       if (rctx->gfx.cs)
-               rctx->ws->cs_destroy(rctx->gfx.cs);
-       if (rctx->dma.cs)
-               rctx->ws->cs_destroy(rctx->dma.cs);
+       if (rctx->gfx_cs)
+               rctx->ws->cs_destroy(rctx->gfx_cs);
+       if (rctx->dma_cs)
+               rctx->ws->cs_destroy(rctx->dma_cs);
        if (rctx->ctx)
                rctx->ws->ctx_destroy(rctx->ctx);
 
@@ -498,6 +215,8 @@ void si_common_context_cleanup(struct r600_common_context *rctx)
                u_upload_destroy(rctx->b.stream_uploader);
        if (rctx->b.const_uploader)
                u_upload_destroy(rctx->b.const_uploader);
+       if (rctx->cached_gtt_allocator)
+               u_upload_destroy(rctx->cached_gtt_allocator);
 
        slab_destroy_child(&rctx->pool_transfers);
        slab_destroy_child(&rctx->pool_transfers_unsync);
@@ -509,15 +228,3 @@ void si_common_context_cleanup(struct r600_common_context *rctx)
        rctx->ws->fence_reference(&rctx->last_sdma_fence, NULL);
        r600_resource_reference(&rctx->eop_bug_scratch, NULL);
 }
-
-
-void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst,
-                           uint64_t offset, uint64_t size, unsigned value)
-{
-       struct r600_common_context *rctx = (struct r600_common_context*)sscreen->aux_context;
-
-       mtx_lock(&sscreen->aux_context_lock);
-       rctx->dma_clear_buffer(&rctx->b, dst, offset, size, value);
-       sscreen->aux_context->flush(sscreen->aux_context, NULL, 0);
-       mtx_unlock(&sscreen->aux_context_lock);
-}