radv: add support for local bos. (v3)
author    Dave Airlie <airlied@redhat.com>
Wed, 25 Oct 2017 06:12:13 +0000 (07:12 +0100)
committer Dave Airlie <airlied@redhat.com>
Thu, 26 Oct 2017 22:59:28 +0000 (23:59 +0100)
This uses the new kernel interfaces for reduced CS overhead. We only set
the local flag for memory allocations that don't have a dedicated
allocation and that aren't imports.

v2: add to all the internal buffer creation paths.
v3: cover command submission paths missed in v2; handle 0/empty bo lists.

Reviewed-by: Bas Nieuwenhuizen <bas@basnieuwenhuizen.nl>
Signed-off-by: Dave Airlie <airlied@redhat.com>
src/amd/vulkan/radv_cmd_buffer.c
src/amd/vulkan/radv_debug.c
src/amd/vulkan/radv_descriptor_set.c
src/amd/vulkan/radv_device.c
src/amd/vulkan/radv_query.c
src/amd/vulkan/radv_radeon_winsys.h
src/amd/vulkan/radv_shader.c
src/amd/vulkan/si_cmd_buffer.c
src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.c
src/amd/vulkan/winsys/amdgpu/radv_amdgpu_bo.h
src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c

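A minimal sketch (not part of the patch) of the allocation-side rule applied in
radv_alloc_memory() below: only memory that is neither a dedicated allocation
nor an import gets the new RADEON_FLAG_NO_INTERPROCESS_SHARING bit, because
those are the only allocations guaranteed never to be handed to another
process. The choose_local_flags() helper and its boolean parameters are
illustrative stand-ins, not RADV API.

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the bit this patch adds to enum radeon_bo_flag (1 << 6). */
#define RADEON_FLAG_NO_INTERPROCESS_SHARING (1u << 6)

/* Illustrative helper: has_dedicated_info / has_import_info stand in for the
 * dedicate_info / import_info pointers checked in radv_alloc_memory(). */
static uint32_t
choose_local_flags(uint32_t flags, bool has_dedicated_info, bool has_import_info)
{
        /* Dedicated allocations may be exported and imports already belong to
         * another process, so both must stay shareable; everything else can
         * be made local and skipped during per-submit bo-list construction. */
        if (!has_dedicated_info && !has_import_info)
                flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;
        return flags;
}

The same flag is also ORed into every purely internal allocation touched below
(upload buffers, rings, descriptor/query pools, shader slabs, IBs, fence BOs),
since none of those are ever exported.
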
index 08a05277fa54e8c52cc449b4a64a3645a0b29441..1c276168f832e089b4ca8dc3c11b3271b06b3727 100644 (file)
@@ -313,7 +313,8 @@ radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer,
        bo = device->ws->buffer_create(device->ws,
                                       new_size, 4096,
                                       RADEON_DOMAIN_GTT,
-                                      RADEON_FLAG_CPU_ACCESS);
+                                      RADEON_FLAG_CPU_ACCESS|
+                                      RADEON_FLAG_NO_INTERPROCESS_SHARING);
 
        if (!bo) {
                cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
index b69c05b64f363e1278710f81cbe9efd2f0deda87..cdf8e7a11453d377f5523e3df127bac7cec190b2 100644 (file)
@@ -61,7 +61,8 @@ radv_init_trace(struct radv_device *device)
 
        device->trace_bo = ws->buffer_create(ws, TRACE_BO_SIZE, 8,
                                             RADEON_DOMAIN_VRAM,
-                                            RADEON_FLAG_CPU_ACCESS);
+                                            RADEON_FLAG_CPU_ACCESS|
+                                            RADEON_FLAG_NO_INTERPROCESS_SHARING);
        if (!device->trace_bo)
                return false;
 
index c6b736bb6893e9f6f943df12b8a1d433efe827d0..167944f4e2f245276cd7ca9f53bdb511ecf7224a 100644 (file)
@@ -431,7 +431,7 @@ VkResult radv_CreateDescriptorPool(
 
        if (bo_size) {
                pool->bo = device->ws->buffer_create(device->ws, bo_size,
-                                                       32, RADEON_DOMAIN_VRAM, 0);
+                                                       32, RADEON_DOMAIN_VRAM, RADEON_FLAG_NO_INTERPROCESS_SHARING);
                pool->mapped_ptr = (uint8_t*)device->ws->buffer_map(pool->bo);
        }
        pool->size = bo_size;
index 19ff8fec64797124b7173ef7e611f71134015ead..d25e9c97ba8c735e4b3659a849ad07070a03d07e 100644 (file)
@@ -1394,6 +1394,7 @@ radv_get_preamble_cs(struct radv_queue *queue,
        unsigned tess_factor_ring_size = 0, tess_offchip_ring_size = 0;
        unsigned max_offchip_buffers;
        unsigned hs_offchip_param = 0;
+       uint32_t ring_bo_flags = RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING;
        if (!queue->has_tess_rings) {
                if (needs_tess_rings)
                        add_tess_rings = true;
@@ -1427,7 +1428,7 @@ radv_get_preamble_cs(struct radv_queue *queue,
                                                              scratch_size,
                                                              4096,
                                                              RADEON_DOMAIN_VRAM,
-                                                             RADEON_FLAG_NO_CPU_ACCESS);
+                                                             ring_bo_flags);
                if (!scratch_bo)
                        goto fail;
        } else
@@ -1438,7 +1439,7 @@ radv_get_preamble_cs(struct radv_queue *queue,
                                                                      compute_scratch_size,
                                                                      4096,
                                                                      RADEON_DOMAIN_VRAM,
-                                                                     RADEON_FLAG_NO_CPU_ACCESS);
+                                                                     ring_bo_flags);
                if (!compute_scratch_bo)
                        goto fail;
 
@@ -1450,7 +1451,7 @@ radv_get_preamble_cs(struct radv_queue *queue,
                                                                esgs_ring_size,
                                                                4096,
                                                                RADEON_DOMAIN_VRAM,
-                                                               RADEON_FLAG_NO_CPU_ACCESS);
+                                                               ring_bo_flags);
                if (!esgs_ring_bo)
                        goto fail;
        } else {
@@ -1463,7 +1464,7 @@ radv_get_preamble_cs(struct radv_queue *queue,
                                                                gsvs_ring_size,
                                                                4096,
                                                                RADEON_DOMAIN_VRAM,
-                                                               RADEON_FLAG_NO_CPU_ACCESS);
+                                                               ring_bo_flags);
                if (!gsvs_ring_bo)
                        goto fail;
        } else {
@@ -1476,14 +1477,14 @@ radv_get_preamble_cs(struct radv_queue *queue,
                                                                       tess_factor_ring_size,
                                                                       256,
                                                                       RADEON_DOMAIN_VRAM,
-                                                                      RADEON_FLAG_NO_CPU_ACCESS);
+                                                                      ring_bo_flags);
                if (!tess_factor_ring_bo)
                        goto fail;
                tess_offchip_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
                                                                       tess_offchip_ring_size,
                                                                       256,
                                                                       RADEON_DOMAIN_VRAM,
-                                                                      RADEON_FLAG_NO_CPU_ACCESS);
+                                                                       ring_bo_flags);
                if (!tess_offchip_ring_bo)
                        goto fail;
        } else {
@@ -1510,7 +1511,7 @@ radv_get_preamble_cs(struct radv_queue *queue,
                                                                 size,
                                                                 4096,
                                                                 RADEON_DOMAIN_VRAM,
-                                                                RADEON_FLAG_CPU_ACCESS);
+                                                                RADEON_FLAG_CPU_ACCESS|RADEON_FLAG_NO_INTERPROCESS_SHARING);
                if (!descriptor_bo)
                        goto fail;
        } else
@@ -2119,6 +2120,9 @@ VkResult radv_alloc_memory(VkDevice                        _device,
        if (mem_flags & RADV_MEM_IMPLICIT_SYNC)
                flags |= RADEON_FLAG_IMPLICIT_SYNC;
 
+       if (!dedicate_info && !import_info)
+               flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;
+
        mem->bo = device->ws->buffer_create(device->ws, alloc_size, device->physical_device->rad_info.max_alignment,
                                               domain, flags);
 
@@ -2682,7 +2686,7 @@ VkResult radv_CreateEvent(
 
        event->bo = device->ws->buffer_create(device->ws, 8, 8,
                                              RADEON_DOMAIN_GTT,
-                                             RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_CPU_ACCESS);
+                                             RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING);
        if (!event->bo) {
                vk_free2(&device->alloc, pAllocator, event);
                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
index 06045d6b41bfa9e542fa0018b44102eb63e6c0f6..06bf14ab6806de2c2e1af53be722d762c13a6d40 100644 (file)
@@ -780,7 +780,7 @@ VkResult radv_CreateQueryPool(
                size += 4 * pCreateInfo->queryCount;
 
        pool->bo = device->ws->buffer_create(device->ws, size,
-                                            64, RADEON_DOMAIN_GTT, 0);
+                                            64, RADEON_DOMAIN_GTT, RADEON_FLAG_NO_INTERPROCESS_SHARING);
 
        if (!pool->bo) {
                vk_free2(&device->alloc, pAllocator, pool);
index cf5a9e8f069469d875e5d40cb3561d3dcadc013c..395c8499b3de77ed485e68ae3ea8bc244ba9ea31 100644 (file)
@@ -54,6 +54,7 @@ enum radeon_bo_flag { /* bitfield */
        RADEON_FLAG_VIRTUAL =       (1 << 3),
        RADEON_FLAG_VA_UNCACHED =   (1 << 4),
        RADEON_FLAG_IMPLICIT_SYNC = (1 << 5),
+       RADEON_FLAG_NO_INTERPROCESS_SHARING = (1 << 6),
 };
 
 enum radeon_bo_usage { /* bitfield */
index 5903917068747b4790395a5d54b62543b22ea444..7f10798fdf435ab60160f5fd51f66e1508eebff7 100644 (file)
@@ -325,7 +325,7 @@ radv_alloc_shader_memory(struct radv_device *device,
 
        slab->size = 256 * 1024;
        slab->bo = device->ws->buffer_create(device->ws, slab->size, 256,
-                                            RADEON_DOMAIN_VRAM, 0);
+                                            RADEON_DOMAIN_VRAM, RADEON_FLAG_NO_INTERPROCESS_SHARING);
        slab->ptr = (char*)device->ws->buffer_map(slab->bo);
        list_inithead(&slab->shaders);
 
index 20144d39ea33976e7ad3e933fcf4da300c627892..89ee399817db79b1630cc027662ab255833aeb84 100644 (file)
@@ -571,7 +571,8 @@ cik_create_gfx_config(struct radv_device *device)
        device->gfx_init = device->ws->buffer_create(device->ws,
                                                     cs->cdw * 4, 4096,
                                                     RADEON_DOMAIN_GTT,
-                                                    RADEON_FLAG_CPU_ACCESS);
+                                                    RADEON_FLAG_CPU_ACCESS|
+                                                    RADEON_FLAG_NO_INTERPROCESS_SHARING);
        if (!device->gfx_init)
                goto fail;
 
index 15099b318e75353ea37aab61364cfef3876d9ddb..dac549a20ad92d363623cde7f18ca49ff78d1571 100644 (file)
@@ -332,6 +332,10 @@ radv_amdgpu_winsys_bo_create(struct radeon_winsys *_ws,
                request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
        if (!(flags & RADEON_FLAG_IMPLICIT_SYNC) && ws->info.drm_minor >= 22)
                request.flags |= AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
+       if (flags & RADEON_FLAG_NO_INTERPROCESS_SHARING && ws->info.drm_minor >= 20) {
+               bo->is_local = true;
+               request.flags |= AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;
+       }
 
        /* this won't do anything on pre 4.9 kernels */
        if (ws->zero_all_vram_allocs && (initial_domain & RADEON_DOMAIN_VRAM))
index f32e43083860f2e3baebe6a7d5169c5bc39bb17b..f9aac9451c0585741e0c4e4712413d2f70e613dd 100644 (file)
@@ -45,6 +45,7 @@ struct radv_amdgpu_winsys_bo {
        uint64_t size;
        struct radv_amdgpu_winsys *ws;
        bool is_virtual;
+       bool is_local;
        int ref_count;
 
        union {
index 46e5b7670331b640af0a76b87e9fc59ac8378bde..939c221e0c8c0b5c3587896c214f7607fa6a4810 100644 (file)
@@ -202,7 +202,8 @@ radv_amdgpu_cs_create(struct radeon_winsys *ws,
        if (cs->ws->use_ib_bos) {
                cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
                                                RADEON_DOMAIN_GTT,
-                                               RADEON_FLAG_CPU_ACCESS);
+                                               RADEON_FLAG_CPU_ACCESS|
+                                                 RADEON_FLAG_NO_INTERPROCESS_SHARING);
                if (!cs->ib_buffer) {
                        free(cs);
                        return NULL;
@@ -287,7 +288,8 @@ static void radv_amdgpu_cs_grow(struct radeon_winsys_cs *_cs, size_t min_size)
 
        cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
                                                   RADEON_DOMAIN_GTT,
-                                                  RADEON_FLAG_CPU_ACCESS);
+                                                  RADEON_FLAG_CPU_ACCESS|
+                                                  RADEON_FLAG_NO_INTERPROCESS_SHARING);
 
        if (!cs->ib_buffer) {
                cs->base.cdw = 0;
@@ -471,6 +473,9 @@ static void radv_amdgpu_cs_add_buffer(struct radeon_winsys_cs *_cs,
                return;
        }
 
+       if (bo->is_local)
+               return;
+
        radv_amdgpu_cs_add_buffer_internal(cs, bo->bo, priority);
 }
 
@@ -541,6 +546,10 @@ static int radv_amdgpu_create_bo_list(struct radv_amdgpu_winsys *ws,
        } else if (count == 1 && !extra_bo && !extra_cs &&
                   !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
                struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
+               if (cs->num_buffers == 0) {
+                       *bo_list = 0;
+                       return 0;
+               }
                r = amdgpu_bo_list_create(ws->dev, cs->num_buffers, cs->handles,
                                          cs->priorities, bo_list);
        } else {
@@ -556,7 +565,10 @@ static int radv_amdgpu_create_bo_list(struct radv_amdgpu_winsys *ws,
                if (extra_cs) {
                        total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;
                }
-
+               if (total_buffer_count == 0) {
+                       *bo_list = 0;
+                       return 0;
+               }
                amdgpu_bo_handle *handles = malloc(sizeof(amdgpu_bo_handle) * total_buffer_count);
                uint8_t *priorities = malloc(sizeof(uint8_t) * total_buffer_count);
                if (!handles || !priorities) {
@@ -721,7 +733,8 @@ static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
                                        "see dmesg for more information.\n");
        }
 
-       amdgpu_bo_list_destroy(bo_list);
+       if (bo_list)
+               amdgpu_bo_list_destroy(bo_list);
 
        if (fence)
                radv_amdgpu_request_to_fence(ctx, fence, &request);
@@ -795,7 +808,8 @@ static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
                                                "see dmesg for more information.\n");
                }
 
-               amdgpu_bo_list_destroy(bo_list);
+               if (bo_list)
+                       amdgpu_bo_list_destroy(bo_list);
 
                if (r)
                        return r;
@@ -856,7 +870,7 @@ static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
                }
                assert(cnt);
 
-               bo = ws->buffer_create(ws, 4 * size, 4096, RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS);
+               bo = ws->buffer_create(ws, 4 * size, 4096, RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS|RADEON_FLAG_NO_INTERPROCESS_SHARING);
                ptr = ws->buffer_map(bo);
 
                if (preamble_cs) {
@@ -905,7 +919,8 @@ static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
                                                "see dmesg for more information.\n");
                }
 
-               amdgpu_bo_list_destroy(bo_list);
+               if (bo_list)
+                       amdgpu_bo_list_destroy(bo_list);
 
                ws->buffer_destroy(bo);
                if (r)
@@ -1038,7 +1053,8 @@ static struct radeon_winsys_ctx *radv_amdgpu_ctx_create(struct radeon_winsys *_w
        assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
        ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
                                              RADEON_DOMAIN_GTT,
-                                             RADEON_FLAG_CPU_ACCESS);
+                                             RADEON_FLAG_CPU_ACCESS|
+                                              RADEON_FLAG_NO_INTERPROCESS_SHARING);
        if (ctx->fence_bo)
                ctx->fence_map = (uint64_t*)ws->base.buffer_map(ctx->fence_bo);
        if (ctx->fence_map)
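
For context, a standalone sketch (not part of the patch; the helper name and
parameters are assumptions) of what the winsys change boils down to at the
libdrm level. When the amdgpu DRM minor version is at least 20,
AMDGPU_GEM_CREATE_VM_ALWAYS_VALID asks the kernel to keep the buffer's VM
mapping valid for the allocating process at all times; that is why such
"local" BOs can be skipped in radv_amdgpu_cs_add_buffer() and why a submission
whose buffers are all local may legitimately end up with an empty (0) bo list.

#include <amdgpu.h>
#include <amdgpu_drm.h>
#include <stdint.h>

/* Hypothetical helper: allocate a GTT buffer that is local to the allocating
 * process when the kernel supports it, mirroring the drm_minor >= 20 gate in
 * radv_amdgpu_winsys_bo_create().  Alignment and heap are placeholder
 * choices; error handling is trimmed. */
static int
alloc_local_gtt_bo(amdgpu_device_handle dev, uint32_t drm_minor,
                   uint64_t size, amdgpu_bo_handle *out_bo)
{
        struct amdgpu_bo_alloc_request request = {
                .alloc_size = size,
                .phys_alignment = 4096,
                .preferred_heap = AMDGPU_GEM_DOMAIN_GTT,
        };

        if (drm_minor >= 20) {
                /* The VM mapping stays valid without a per-submit bo-list
                 * entry; the trade-off is that this BO must never be
                 * exported to another process. */
                request.flags |= AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;
        }

        return amdgpu_bo_alloc(dev, &request, out_bo);
}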