#include <stdio.h>
#include <inttypes.h>
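+/* Fallback for builds against an older amdgpu_drm.h that doesn't define
+ * the flag yet; the value matches the kernel's definition. */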
+#ifndef AMDGPU_GEM_CREATE_VM_ALWAYS_VALID
+#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
+#endif
+
/* Set to 1 for verbose output showing committed sparse buffer ranges. */
#define DEBUG_SPARSE_COMMITS 0
request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
if (flags & RADEON_FLAG_GTT_WC)
request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
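+ /* A buffer that is never shared with another process can be created as
+  * "always valid": the kernel keeps its VM mapping permanently resident,
+  * so the buffer no longer has to be listed in every command submission.
+  * Only kernels with DRM 3.20+ (drm_minor >= 20) accept the flag. */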
+ if (flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
+ ws->info.drm_minor >= 20)
+ request.flags |= AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;
r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
if (r) {
bo->u.real.va_handle = va_handle;
bo->initial_domain = initial_domain;
bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
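+ /* Remember whether the BO was created as always-valid ("local") so the
+  * CS code below can leave it out of per-submission BO lists. */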
+ bo->is_local = !!(request.flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
if (initial_domain & RADEON_DOMAIN_VRAM)
ws->allocated_vram += align64(size, ws->info.gart_page_size);
free(handles);
mtx_unlock(&ws->global_bo_list_lock);
} else {
+ unsigned num_handles;
+
if (!amdgpu_add_sparse_backing_buffers(cs)) {
r = -ENOMEM;
goto bo_list_error;
}
}
+ num_handles = 0;
for (i = 0; i < cs->num_real_buffers; ++i) {
struct amdgpu_cs_buffer *buffer = &cs->real_buffers[i];
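+ /* Local BOs are permanently resident in the VM and need no BO-list
+  * entry; compact the remaining handles instead of indexing by i. */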
+ if (buffer->bo->is_local)
+ continue;
+
assert(buffer->u.real.priority_usage != 0);
- cs->handles[i] = buffer->bo->bo;
- cs->flags[i] = (util_last_bit64(buffer->u.real.priority_usage) - 1) / 4;
+ cs->handles[num_handles] = buffer->bo->bo;
+ cs->flags[num_handles] = (util_last_bit64(buffer->u.real.priority_usage) - 1) / 4;
+ ++num_handles;
}
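+ /* Note that the counter below still includes local BOs; they are real
+  * buffers even though they are omitted from the BO list. */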
if (acs->ring_type == RING_GFX)
ws->gfx_bo_list_counter += cs->num_real_buffers;
- r = amdgpu_bo_list_create(ws->dev, cs->num_real_buffers,
- cs->handles, cs->flags,
- &cs->request.resources);
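+ /* If every buffer was local, there is nothing to put into the BO list;
+  * skip amdgpu_bo_list_create and submit without a list. */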
+ if (num_handles) {
+ r = amdgpu_bo_list_create(ws->dev, num_handles,
+ cs->handles, cs->flags,
+ &cs->request.resources);
+ } else {
+ r = 0;
+ cs->request.resources = NULL;
+ }
}
bo_list_error: