}
}
+static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
+				    uint32_t ip_type,
+				    uint32_t ring,
+				    struct radv_winsys_sem_info *sem_info);
+static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
+				  struct amdgpu_cs_request *request,
+				  struct radv_winsys_sem_info *sem_info);
+
static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
struct radv_amdgpu_fence *fence,
struct amdgpu_cs_request *req)
if (cs->ws->use_ib_bos) {
cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
RADEON_DOMAIN_GTT,
- RADEON_FLAG_CPU_ACCESS);
+ RADEON_FLAG_CPU_ACCESS|
+ RADEON_FLAG_NO_INTERPROCESS_SHARING);
if (!cs->ib_buffer) {
free(cs);
return NULL;
}
- cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
+ cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
cs->base.buf = (uint32_t *)cs->ib_mapped;
cs->base.max_dw = ib_size / 4 - 4;
cs->ib_size_ptr = &cs->ib.size;
cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
RADEON_DOMAIN_GTT,
- RADEON_FLAG_CPU_ACCESS);
+ RADEON_FLAG_CPU_ACCESS|
+ RADEON_FLAG_NO_INTERPROCESS_SHARING);
if (!cs->ib_buffer) {
cs->base.cdw = 0;
cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);
cs->base.buf[cs->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
- cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
- cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->va >> 32;
+ cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
+ cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32;
cs->ib_size_ptr = cs->base.buf + cs->base.cdw;
cs->base.buf[cs->base.cdw++] = S_3F2_CHAIN(1) | S_3F2_VALID(1);
cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
cs->num_old_ib_buffers = 0;
- cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
+ cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
cs->ib_size_ptr = &cs->ib.size;
cs->ib.size = 0;
}
return;
}
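+ /* Local BOs (created always-valid on the kernel's per-VM list) never
+ * need to be added to the per-submission buffer list. */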
+ if (bo->is_local)
+ return;
+
radv_amdgpu_cs_add_buffer_internal(cs, bo->bo, priority);
}
} else if (count == 1 && !extra_bo && !extra_cs &&
!radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
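+ /* An empty buffer list is legal: hand back a 0 handle so the submit
+ * paths skip amdgpu_bo_list_create()/amdgpu_bo_list_destroy(). */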
+ if (cs->num_buffers == 0) {
+ *bo_list = 0;
+ return 0;
+ }
r = amdgpu_bo_list_create(ws->dev, cs->num_buffers, cs->handles,
cs->priorities, bo_list);
} else {
if (extra_cs) {
total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;
}
-
+ if (total_buffer_count == 0) {
+ *bo_list = 0;
+ return 0;
+ }
amdgpu_bo_handle *handles = malloc(sizeof(amdgpu_bo_handle) * total_buffer_count);
uint8_t *priorities = malloc(sizeof(uint8_t) * total_buffer_count);
if (!handles || !priorities) {
static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
int queue_idx,
+ struct radv_winsys_sem_info *sem_info,
struct radeon_winsys_cs **cs_array,
unsigned cs_count,
struct radeon_winsys_cs *initial_preamble_cs,
ibs[0] = ((struct radv_amdgpu_cs*)initial_preamble_cs)->ib;
}
- r = amdgpu_cs_submit(ctx->ctx, 0, &request, 1);
+ r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
if (r) {
if (r == -ENOMEM)
fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
"see dmesg for more information.\n");
}
- amdgpu_bo_list_destroy(bo_list);
+ if (bo_list)
+ amdgpu_bo_list_destroy(bo_list);
if (fence)
radv_amdgpu_request_to_fence(ctx, fence, &request);
static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
int queue_idx,
+ struct radv_winsys_sem_info *sem_info,
struct radeon_winsys_cs **cs_array,
unsigned cs_count,
struct radeon_winsys_cs *initial_preamble_cs,
struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
amdgpu_bo_list_handle bo_list;
struct amdgpu_cs_request request;
-
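+ /* Only the final chunk of a split submission may signal the
+ * semaphores the caller asked for. */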
+ bool emit_signal_sem = sem_info->cs_emit_signal;
assert(cs_count);
for (unsigned i = 0; i < cs_count;) {
}
}
- r = amdgpu_cs_submit(ctx->ctx, 0, &request, 1);
+ sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
+ r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
if (r) {
if (r == -ENOMEM)
fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
"see dmesg for more information.\n");
}
- amdgpu_bo_list_destroy(bo_list);
+ if (bo_list)
+ amdgpu_bo_list_destroy(bo_list);
if (r)
return r;
static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
int queue_idx,
+ struct radv_winsys_sem_info *sem_info,
struct radeon_winsys_cs **cs_array,
unsigned cs_count,
struct radeon_winsys_cs *initial_preamble_cs,
amdgpu_bo_list_handle bo_list;
struct amdgpu_cs_request request;
uint32_t pad_word = 0xffff1000U;
+ bool emit_signal_sem = sem_info->cs_emit_signal;
if (radv_amdgpu_winsys(ws)->info.chip_class == SI)
pad_word = 0x80000000;
uint32_t *ptr;
unsigned cnt = 0;
unsigned size = 0;
-
+ unsigned pad_words = 0;
if (preamble_cs)
size += preamble_cs->cdw;
++cnt;
}
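+ /* Keep the merged IB a non-empty multiple of 8 dwords; the pad words
+ * are written out after the CS contents have been copied in. */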
+ while (!size || (size & 7)) {
+ size++;
+ pad_words++;
+ }
assert(cnt);
- bo = ws->buffer_create(ws, 4 * size, 4096, RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS);
+ bo = ws->buffer_create(ws, 4 * size, 4096, RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS|RADEON_FLAG_NO_INTERPROCESS_SHARING);
ptr = ws->buffer_map(bo);
if (preamble_cs) {
}
- while(!size || (size & 7)) {
+ for (unsigned j = 0; j < pad_words; ++j)
*ptr++ = pad_word;
- ++size;
- }
memset(&request, 0, sizeof(request));
}
ib.size = size;
- ib.ib_mc_address = ws->buffer_get_va(bo);
+ ib.ib_mc_address = radv_buffer_get_va(bo);
request.ip_type = cs0->hw_ip;
request.ring = queue_idx;
request.ibs = &ib;
request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
- r = amdgpu_cs_submit(ctx->ctx, 0, &request, 1);
+ sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
+ r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
if (r) {
if (r == -ENOMEM)
fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
"see dmesg for more information.\n");
}
- amdgpu_bo_list_destroy(bo_list);
+ if (bo_list)
+ amdgpu_bo_list_destroy(bo_list);
ws->buffer_destroy(bo);
if (r)
unsigned cs_count,
struct radeon_winsys_cs *initial_preamble_cs,
struct radeon_winsys_cs *continue_preamble_cs,
- struct radeon_winsys_sem **wait_sem,
- unsigned wait_sem_count,
- struct radeon_winsys_sem **signal_sem,
- unsigned signal_sem_count,
+ struct radv_winsys_sem_info *sem_info,
bool can_patch,
struct radeon_winsys_fence *_fence)
{
struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
int ret;
- int i;
-
- for (i = 0; i < wait_sem_count; i++) {
- amdgpu_semaphore_handle sem = (amdgpu_semaphore_handle)wait_sem[i];
- amdgpu_cs_wait_semaphore(ctx->ctx, cs->hw_ip, 0, queue_idx,
- sem);
- }
+
+ assert(sem_info);
if (!cs->ws->use_ib_bos) {
- ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, cs_array,
+ ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, cs_array,
cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
- } else if (can_patch && cs_count > AMDGPU_CS_MAX_IBS_PER_SUBMIT && false) {
- ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, cs_array,
+ } else if (can_patch && cs_count > AMDGPU_CS_MAX_IBS_PER_SUBMIT && cs->ws->batchchain) {
+ ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, cs_array,
cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
} else {
- ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, cs_array,
+ ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, cs_array,
cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
}
- for (i = 0; i < signal_sem_count; i++) {
- amdgpu_semaphore_handle sem = (amdgpu_semaphore_handle)signal_sem[i];
- amdgpu_cs_signal_semaphore(ctx->ctx, cs->hw_ip, 0, queue_idx,
- sem);
- }
+ radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, sem_info);
return ret;
}
-
static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
{
struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
void *ret = NULL;
+
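+ /* Without IB BOs (the sysmem path) there is nothing to map here. */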
+ if (!cs->ib_buffer)
+ return NULL;
for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
struct radv_amdgpu_winsys_bo *bo;
bo = (struct radv_amdgpu_winsys_bo*)
(i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
- if (addr >= bo->va && addr - bo->va < bo->size) {
+ if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
- return (char *)ret + (addr - bo->va);
+ return (char *)ret + (addr - bo->base.va);
+ }
+ }
+ if (cs->ws->debug_all_bos) {
+ pthread_mutex_lock(&cs->ws->global_bo_list_lock);
+ list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
+ &cs->ws->global_bo_list, global_list_item) {
+ if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
+ if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
+ pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
+ return (char *)ret + (addr - bo->base.va);
+ }
+ }
}
+ pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
}
return ret;
}
static void radv_amdgpu_winsys_cs_dump(struct radeon_winsys_cs *_cs,
FILE* file,
- uint32_t trace_id)
+ const int *trace_ids, int trace_id_count)
{
struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
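+ /* Default to the CPU copy of the CS; with IB BOs, map and parse the
+ * GPU IB instead. */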
+ void *ib = cs->base.buf;
+ int num_dw = cs->base.cdw;
- ac_parse_ib(file,
- radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address),
- cs->ib.size, trace_id, "main IB", cs->ws->info.chip_class,
- radv_amdgpu_winsys_get_cpu_addr, cs);
+ if (cs->ws->use_ib_bos) {
+ ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
+ num_dw = cs->ib.size;
+ }
+ assert(ib);
+ ac_parse_ib(file, ib, num_dw, trace_ids, trace_id_count, "main IB",
+ cs->ws->info.chip_class, radv_amdgpu_winsys_get_cpu_addr, cs);
}
-static struct radeon_winsys_ctx *radv_amdgpu_ctx_create(struct radeon_winsys *_ws)
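+/* Map winsys context priorities onto the kernel's AMDGPU_CTX_PRIORITY_*
+ * levels; REALTIME maps to VERY_HIGH, the highest level the kernel
+ * exposes here. */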
+static uint32_t radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
+{
+	switch (radv_priority) {
+	case RADEON_CTX_PRIORITY_REALTIME:
+		return AMDGPU_CTX_PRIORITY_VERY_HIGH;
+	case RADEON_CTX_PRIORITY_HIGH:
+		return AMDGPU_CTX_PRIORITY_HIGH;
+	case RADEON_CTX_PRIORITY_MEDIUM:
+		return AMDGPU_CTX_PRIORITY_NORMAL;
+	case RADEON_CTX_PRIORITY_LOW:
+		return AMDGPU_CTX_PRIORITY_LOW;
+	default:
+		unreachable("Invalid context priority");
+	}
+}
+
+static struct radeon_winsys_ctx *radv_amdgpu_ctx_create(struct radeon_winsys *_ws,
+							 enum radeon_ctx_priority priority)
{
struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
+ uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
int r;
if (!ctx)
return NULL;
- r = amdgpu_cs_ctx_create(ws->dev, &ctx->ctx);
+
+ r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
if (r) {
- fprintf(stderr, "amdgpu: radv_amdgpu_cs_ctx_create failed. (%i)\n", r);
+ fprintf(stderr, "amdgpu: radv_amdgpu_cs_ctx_create2 failed. (%i)\n", r);
goto error_create;
}
ctx->ws = ws;
assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
RADEON_DOMAIN_GTT,
- RADEON_FLAG_CPU_ACCESS);
+ RADEON_FLAG_CPU_ACCESS|
+ RADEON_FLAG_NO_INTERPROCESS_SHARING);
if (ctx->fence_bo)
ctx->fence_map = (uint64_t*)ws->base.buffer_map(ctx->fence_bo);
if (ctx->fence_map)
static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
{
- int ret;
- amdgpu_semaphore_handle sem;
-
- ret = amdgpu_cs_create_semaphore(&sem);
- if (ret)
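+ /* A winsys semaphore is now just a zeroed amdgpu_cs_fence; its context
+ * stays NULL until radv_amdgpu_signal_sems() fills it in. */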
+ struct amdgpu_cs_fence *sem = CALLOC_STRUCT(amdgpu_cs_fence);
+ if (!sem)
return NULL;
+
return (struct radeon_winsys_sem *)sem;
}
static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
{
- amdgpu_semaphore_handle sem = (amdgpu_semaphore_handle)_sem;
- amdgpu_cs_destroy_semaphore(sem);
+ struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)_sem;
+ FREE(sem);
+}
+
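+/* "Signal" each legacy winsys semaphore by storing the fence of the last
+ * submission on this ring; a later wait on the semaphore is then turned
+ * into a dependency on that fence. */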
+static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
+				    uint32_t ip_type,
+				    uint32_t ring,
+				    struct radv_winsys_sem_info *sem_info)
+{
+	for (unsigned i = 0; i < sem_info->signal.sem_count; i++) {
+		struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)(sem_info->signal.sem)[i];
+
+		if (sem->context)
+			return -EINVAL;
+
+		*sem = ctx->last_submission[ip_type][ring].fence;
+	}
+	return 0;
+}
+
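+/* Build a syncobj chunk (SYNCOBJ_IN or SYNCOBJ_OUT) from the winsys
+ * syncobj handles; the caller frees the returned array after
+ * amdgpu_cs_submit_raw(). */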
+static struct drm_amdgpu_cs_chunk_sem *radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts,
+									   struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
+{
+	struct drm_amdgpu_cs_chunk_sem *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * counts->syncobj_count);
+	if (!syncobj)
+		return NULL;
+
+	for (unsigned i = 0; i < counts->syncobj_count; i++) {
+		struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
+		sem->handle = counts->syncobj[i];
+	}
+
+	chunk->chunk_id = chunk_id;
+	chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * counts->syncobj_count;
+	chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
+	return syncobj;
+}
+
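+/* Submit via the raw CS ioctl, building the chunk array by hand so that
+ * semaphore and syncobj chunks can ride along with the IBs in a single
+ * submission. */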
+static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
+				  struct amdgpu_cs_request *request,
+				  struct radv_winsys_sem_info *sem_info)
+{
+	int r;
+	int num_chunks;
+	int size;
+	bool user_fence;
+	struct drm_amdgpu_cs_chunk *chunks;
+	struct drm_amdgpu_cs_chunk_data *chunk_data;
+	struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
+	struct drm_amdgpu_cs_chunk_sem *wait_syncobj = NULL, *signal_syncobj = NULL;
+	int i;
+	struct amdgpu_cs_fence *sem;
+
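+	/* Reserve chunk slots for the IBs plus, at most, a user-fence chunk,
+	 * a dependencies chunk and the syncobj in/out chunks. */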
+	user_fence = (request->fence_info.handle != NULL);
+	size = request->number_of_ibs + (user_fence ? 2 : 1) + 3;
+
+	chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);
+
+	size = request->number_of_ibs + (user_fence ? 1 : 0);
+
+	chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);
+
+	num_chunks = request->number_of_ibs;
+	for (i = 0; i < request->number_of_ibs; i++) {
+		struct amdgpu_cs_ib_info *ib;
+		chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
+		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
+		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
+
+		ib = &request->ibs[i];
+
+		chunk_data[i].ib_data._pad = 0;
+		chunk_data[i].ib_data.va_start = ib->ib_mc_address;
+		chunk_data[i].ib_data.ib_bytes = ib->size * 4;
+		chunk_data[i].ib_data.ip_type = request->ip_type;
+		chunk_data[i].ib_data.ip_instance = request->ip_instance;
+		chunk_data[i].ib_data.ring = request->ring;
+		chunk_data[i].ib_data.flags = ib->flags;
+	}
+
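+	/* The fence chunk tells the kernel where in the fence BO to write
+	 * this submission's fence value. */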
+	if (user_fence) {
+		i = num_chunks++;
+
+		chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
+		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
+		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
+
+		amdgpu_cs_chunk_fence_info_to_data(&request->fence_info,
+						   &chunk_data[i]);
+	}
+
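+	/* Syncobj waits become an AMDGPU_CHUNK_ID_SYNCOBJ_IN chunk; legacy
+	 * semaphore waits are expressed as fence dependencies below. */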
+	if (sem_info->wait.syncobj_count && sem_info->cs_emit_wait) {
+		wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->wait,
+								  &chunks[num_chunks],
+								  AMDGPU_CHUNK_ID_SYNCOBJ_IN);
+		if (!wait_syncobj) {
+			r = -ENOMEM;
+			goto error_out;
+		}
+		num_chunks++;
+
+		if (sem_info->wait.sem_count == 0)
+			sem_info->cs_emit_wait = false;
+
+	}
+
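+	/* Legacy semaphores: each sem whose context was set by
+	 * radv_amdgpu_signal_sems() becomes a fence dependency, and is
+	 * cleared again so it is only waited on once. */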
+	if (sem_info->wait.sem_count && sem_info->cs_emit_wait) {
+		sem_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_info->wait.sem_count);
+		if (!sem_dependencies) {
+			r = -ENOMEM;
+			goto error_out;
+		}
+		int sem_count = 0;
+		for (unsigned j = 0; j < sem_info->wait.sem_count; j++) {
+			sem = (struct amdgpu_cs_fence *)sem_info->wait.sem[j];
+			if (!sem->context)
+				continue;
+			struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
+
+			amdgpu_cs_chunk_fence_to_dep(sem, dep);
+
+			sem->context = NULL;
+		}
+		i = num_chunks++;
+
+		/* dependencies chunk */
+		chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
+		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
+		chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
+
+		sem_info->cs_emit_wait = false;
+	}
+
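+	/* Syncobj signals become an AMDGPU_CHUNK_ID_SYNCOBJ_OUT chunk. */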
+	if (sem_info->signal.syncobj_count && sem_info->cs_emit_signal) {
+		signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->signal,
+								    &chunks[num_chunks],
+								    AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
+		if (!signal_syncobj) {
+			r = -ENOMEM;
+			goto error_out;
+		}
+		num_chunks++;
+	}
+
+	r = amdgpu_cs_submit_raw(ctx->ws->dev,
+				 ctx->ctx,
+				 request->resources,
+				 num_chunks,
+				 chunks,
+				 &request->seq_no);
+error_out:
+	free(sem_dependencies);
+	free(wait_syncobj);
+	free(signal_syncobj);
+	return r;
+}
+
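+/* Thin winsys wrappers around the kernel DRM syncobj API. */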
+static int radv_amdgpu_create_syncobj(struct radeon_winsys *_ws,
+				       uint32_t *handle)
+{
+	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
+	return amdgpu_cs_create_syncobj(ws->dev, handle);
+}
+
+static void radv_amdgpu_destroy_syncobj(struct radeon_winsys *_ws,
+					 uint32_t handle)
+{
+	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
+	amdgpu_cs_destroy_syncobj(ws->dev, handle);
+}
+
+static int radv_amdgpu_export_syncobj(struct radeon_winsys *_ws,
+				       uint32_t syncobj,
+				       int *fd)
+{
+	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
+
+	return amdgpu_cs_export_syncobj(ws->dev, syncobj, fd);
+}
+
+static int radv_amdgpu_import_syncobj(struct radeon_winsys *_ws,
+				       int fd,
+				       uint32_t *syncobj)
+{
+	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
+
+	return amdgpu_cs_import_syncobj(ws->dev, fd, syncobj);
}
void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
ws->base.destroy_fence = radv_amdgpu_destroy_fence;
ws->base.create_sem = radv_amdgpu_create_sem;
ws->base.destroy_sem = radv_amdgpu_destroy_sem;
+ ws->base.create_syncobj = radv_amdgpu_create_syncobj;
+ ws->base.destroy_syncobj = radv_amdgpu_destroy_syncobj;
+ ws->base.export_syncobj = radv_amdgpu_export_syncobj;
+ ws->base.import_syncobj = radv_amdgpu_import_syncobj;
ws->base.fence_wait = radv_amdgpu_fence_wait;
}