#include <linux/filter.h>
#include <linux/seccomp.h>
#include <linux/unistd.h>
+#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
void radv_destroy_semaphore_part(struct radv_device *device,
struct radv_semaphore_part *part);
+static VkResult
+radv_create_pthread_cond(pthread_cond_t *cond);
+
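+/* Return the current CLOCK_MONOTONIC time in nanoseconds. */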
+uint64_t radv_get_current_time(void)
+{
+ struct timespec tv;
+ clock_gettime(CLOCK_MONOTONIC, &tv);
+ return tv.tv_nsec + tv.tv_sec*1000000000ull;
+}
+
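+/* Convert a relative timeout to an absolute CLOCK_MONOTONIC deadline,
+ * clamping so the addition cannot wrap past UINT64_MAX. */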
+static uint64_t radv_get_absolute_timeout(uint64_t timeout)
+{
+ uint64_t current_time = radv_get_current_time();
+
+ timeout = MIN2(UINT64_MAX - current_time, timeout);
+
+ return current_time + timeout;
+}
+
static int
radv_device_get_cache_uuid(enum radeon_family family, void *uuid)
{
list_inithead(&queue->pending_submissions);
pthread_mutex_init(&queue->pending_mutex, NULL);
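+ /* State for the on-demand submission thread used by timeline waits. */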
+ pthread_mutex_init(&queue->thread_mutex, NULL);
+ queue->thread_submission = NULL;
+ queue->thread_running = false;
+ queue->thread_exit = false;
+ result = radv_create_pthread_cond(&queue->thread_cond);
+ if (result != VK_SUCCESS)
+ return vk_error(device->instance, result);
+
return VK_SUCCESS;
}
static void
radv_queue_finish(struct radv_queue *queue)
{
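+ /* Tell the submission thread to exit, wake it, and wait for it. */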
+ if (queue->thread_running) {
+ p_atomic_set(&queue->thread_exit, true);
+ pthread_cond_broadcast(&queue->thread_cond);
+ pthread_join(queue->submission_thread, NULL);
+ }
+ pthread_cond_destroy(&queue->thread_cond);
pthread_mutex_destroy(&queue->pending_mutex);
+ pthread_mutex_destroy(&queue->thread_mutex);
if (queue->hw_ctx)
queue->device->ws->ctx_destroy(queue->hw_ctx);
VkFence _fence,
bool is_signal)
{
- int syncobj_idx = 0, sem_idx = 0;
+ int syncobj_idx = 0, non_reset_idx = 0, sem_idx = 0, timeline_idx = 0;
if (num_sems == 0 && _fence == VK_NULL_HANDLE)
return VK_SUCCESS;
switch(sems[i]->kind) {
case RADV_SEMAPHORE_SYNCOBJ:
counts->syncobj_count++;
+ counts->syncobj_reset_count++;
break;
case RADV_SEMAPHORE_WINSYS:
counts->sem_count++;
case RADV_SEMAPHORE_TIMELINE:
counts->syncobj_count++;
break;
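+ /* Timeline syncobjs carry a payload and are counted separately. */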
+ case RADV_SEMAPHORE_TIMELINE_SYNCOBJ:
+ counts->timeline_syncobj_count++;
+ break;
}
}
counts->syncobj_count++;
}
- if (counts->syncobj_count) {
- counts->syncobj = (uint32_t *)malloc(sizeof(uint32_t) * counts->syncobj_count);
- if (!counts->syncobj)
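+ /* Single allocation: timeline_syncobj_count points first, then all
+ * syncobj handles (binary + timeline). */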
+ if (counts->syncobj_count || counts->timeline_syncobj_count) {
+ counts->points = (uint64_t *)malloc(
+ sizeof(*counts->syncobj) * counts->syncobj_count +
+ (sizeof(*counts->syncobj) + sizeof(*counts->points)) * counts->timeline_syncobj_count);
+ if (!counts->points)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+ counts->syncobj = (uint32_t*)(counts->points + counts->timeline_syncobj_count);
}
if (counts->sem_count) {
}
}
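+ /* Binary syncobjs that get reset after the wait fill [0, syncobj_reset_count);
+ * timeline points and the fence are appended from non_reset_idx on. */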
+ non_reset_idx = counts->syncobj_reset_count;
+
for (uint32_t i = 0; i < num_sems; i++) {
switch(sems[i]->kind) {
case RADV_SEMAPHORE_NONE:
pthread_mutex_unlock(&sems[i]->timeline.mutex);
if (point) {
- counts->syncobj[syncobj_idx++] = point->syncobj;
+ counts->syncobj[non_reset_idx++] = point->syncobj;
} else {
/* Explicitly remove the semaphore so we don't go looking for
* a point for it again after the submission. */
}
break;
}
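+ /* Timeline syncobj handles are stored after all binary handles;
+ * points[] is indexed in parallel with them. */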
+ case RADV_SEMAPHORE_TIMELINE_SYNCOBJ:
+ counts->syncobj[counts->syncobj_count + timeline_idx] = sems[i]->syncobj;
+ counts->points[timeline_idx] = timeline_values[i];
+ ++timeline_idx;
+ break;
}
}
fence->temporary.kind != RADV_FENCE_NONE ?
&fence->temporary : &fence->permanent;
if (part->kind == RADV_FENCE_SYNCOBJ)
- counts->syncobj[syncobj_idx++] = part->syncobj;
+ counts->syncobj[non_reset_idx++] = part->syncobj;
}
- assert(syncobj_idx <= counts->syncobj_count);
- counts->syncobj_count = syncobj_idx;
+ assert(MAX2(syncobj_idx, non_reset_idx) <= counts->syncobj_count);
+ counts->syncobj_count = MAX2(syncobj_idx, non_reset_idx);
return VK_SUCCESS;
}
static void
radv_free_sem_info(struct radv_winsys_sem_info *sem_info)
{
- free(sem_info->wait.syncobj);
+ free(sem_info->wait.points);
free(sem_info->wait.sem);
- free(sem_info->signal.syncobj);
+ free(sem_info->signal.points);
free(sem_info->signal.sem);
}
point->wait_count -= 2;
radv_timeline_trigger_waiters_locked(&signal_sems[i]->timeline, processing_list);
pthread_mutex_unlock(&signal_sems[i]->timeline.mutex);
+ } else if (signal_sems[i] && signal_sems[i]->kind == RADV_SEMAPHORE_TIMELINE_SYNCOBJ) {
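+ /* Remember the highest value submitted for signaling, so later
+ * waits on values <= max_point know the point will materialize. */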
+ signal_sems[i]->timeline_syncobj.max_point =
+ MAX2(signal_sems[i]->timeline_syncobj.max_point, signal_values[i]);
}
}
}
-static void
+static VkResult
radv_sparse_buffer_bind_memory(struct radv_device *device,
const VkSparseBufferMemoryBindInfo *bind)
{
RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
+ VkResult result;
for (uint32_t i = 0; i < bind->bindCount; ++i) {
struct radv_device_memory *mem = NULL;
if (bind->pBinds[i].memory != VK_NULL_HANDLE)
mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
- device->ws->buffer_virtual_bind(buffer->bo,
- bind->pBinds[i].resourceOffset,
- bind->pBinds[i].size,
- mem ? mem->bo : NULL,
- bind->pBinds[i].memoryOffset);
+ result = device->ws->buffer_virtual_bind(buffer->bo,
+ bind->pBinds[i].resourceOffset,
+ bind->pBinds[i].size,
+ mem ? mem->bo : NULL,
+ bind->pBinds[i].memoryOffset);
+ if (result != VK_SUCCESS)
+ return result;
}
+
+ return VK_SUCCESS;
}
-static void
+static VkResult
radv_sparse_image_opaque_bind_memory(struct radv_device *device,
const VkSparseImageOpaqueMemoryBindInfo *bind)
{
RADV_FROM_HANDLE(radv_image, image, bind->image);
+ VkResult result;
for (uint32_t i = 0; i < bind->bindCount; ++i) {
struct radv_device_memory *mem = NULL;
if (bind->pBinds[i].memory != VK_NULL_HANDLE)
mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
- device->ws->buffer_virtual_bind(image->bo,
- bind->pBinds[i].resourceOffset,
- bind->pBinds[i].size,
- mem ? mem->bo : NULL,
- bind->pBinds[i].memoryOffset);
+ result = device->ws->buffer_virtual_bind(image->bo,
+ bind->pBinds[i].resourceOffset,
+ bind->pBinds[i].size,
+ mem ? mem->bo : NULL,
+ bind->pBinds[i].memoryOffset);
+ if (result != VK_SUCCESS)
+ return result;
}
+
+ return VK_SUCCESS;
}
static VkResult
uint32_t signal_value_count;
};
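+/* Decrements the submission's wait count by 'decrement' and, once it reaches
+ * zero, either queues the submission for processing or hands it to the
+ * per-queue submission thread (see below). */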
+static VkResult
+radv_queue_trigger_submission(struct radv_deferred_queue_submission *submission,
+ uint32_t decrement,
+ struct list_head *processing_list);
+
static VkResult
radv_create_deferred_submission(struct radv_queue *queue,
const struct radv_queue_submission *submission,
return VK_SUCCESS;
}
-static void
+static VkResult
radv_queue_enqueue_submission(struct radv_deferred_queue_submission *submission,
struct list_head *processing_list)
{
* submitted, but if the queue was empty, we decrement ourselves as there is no previous
* submission. */
uint32_t decrement = submission->wait_semaphore_count - wait_cnt + (is_first ? 1 : 0);
- if (__atomic_sub_fetch(&submission->submission_wait_count, decrement, __ATOMIC_ACQ_REL) == 0) {
- list_addtail(&submission->processing_list, processing_list);
- }
+ return radv_queue_trigger_submission(submission, decrement, processing_list);
}
static void
list_first_entry(&submission->queue->pending_submissions,
struct radv_deferred_queue_submission,
queue_pending_list);
- if (p_atomic_dec_zero(&next_submission->submission_wait_count)) {
- list_addtail(&next_submission->processing_list, processing_list);
- }
+ radv_queue_trigger_submission(next_submission, 1, processing_list);
}
pthread_mutex_unlock(&submission->queue->pending_mutex);
goto fail;
for (uint32_t i = 0; i < submission->buffer_bind_count; ++i) {
- radv_sparse_buffer_bind_memory(queue->device,
- submission->buffer_binds + i);
+ result = radv_sparse_buffer_bind_memory(queue->device,
+ submission->buffer_binds + i);
+ if (result != VK_SUCCESS)
+ goto fail;
}
for (uint32_t i = 0; i < submission->image_opaque_bind_count; ++i) {
- radv_sparse_image_opaque_bind_memory(queue->device,
- submission->image_opaque_binds + i);
+ result = radv_sparse_image_opaque_bind_memory(queue->device,
+ submission->image_opaque_binds + i);
+ if (result != VK_SUCCESS)
+ goto fail;
}
if (!submission->cmd_buffer_count) {
return VK_SUCCESS;
}
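+/* Wait until all timeline-syncobj wait points of the submission have
+ * materialized (become available), or until the timeout expires. Points
+ * already known via max_point are skipped. */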
+static VkResult
+wait_for_submission_timelines_available(struct radv_deferred_queue_submission *submission,
+ uint64_t timeout)
+{
+ struct radv_device *device = submission->queue->device;
+ uint32_t syncobj_count = 0;
+ uint32_t syncobj_idx = 0;
+
+ for (uint32_t i = 0; i < submission->wait_semaphore_count; ++i) {
+ if (submission->wait_semaphores[i]->kind != RADV_SEMAPHORE_TIMELINE_SYNCOBJ)
+ continue;
+
+ if (submission->wait_semaphores[i]->timeline_syncobj.max_point >= submission->wait_values[i])
+ continue;
+ ++syncobj_count;
+ }
+
+ if (!syncobj_count)
+ return VK_SUCCESS;
+
+ uint64_t *points = malloc((sizeof(uint64_t) + sizeof(uint32_t)) * syncobj_count);
+ if (!points)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ uint32_t *syncobj = (uint32_t*)(points + syncobj_count);
+
+ for (uint32_t i = 0; i < submission->wait_semaphore_count; ++i) {
+ if (submission->wait_semaphores[i]->kind != RADV_SEMAPHORE_TIMELINE_SYNCOBJ)
+ continue;
+
+ if (submission->wait_semaphores[i]->timeline_syncobj.max_point >= submission->wait_values[i])
+ continue;
+
+ syncobj[syncobj_idx] = submission->wait_semaphores[i]->syncobj;
+ points[syncobj_idx] = submission->wait_values[i];
+ ++syncobj_idx;
+ }
+ bool success = device->ws->wait_timeline_syncobj(device->ws, syncobj, points, syncobj_idx, true, true, timeout);
+
+ free(points);
+ return success ? VK_SUCCESS : VK_TIMEOUT;
+}
+
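+/* Queue worker: sleeps until it is handed a submission whose timeline wait
+ * points are not available yet, waits for them, then processes it. */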
+static void* radv_queue_submission_thread_run(void *q)
+{
+ struct radv_queue *queue = q;
+
+ pthread_mutex_lock(&queue->thread_mutex);
+ while (!p_atomic_read(&queue->thread_exit)) {
+ struct radv_deferred_queue_submission *submission = queue->thread_submission;
+ struct list_head processing_list;
+ VkResult result = VK_SUCCESS;
+ if (!submission) {
+ pthread_cond_wait(&queue->thread_cond, &queue->thread_mutex);
+ continue;
+ }
+ pthread_mutex_unlock(&queue->thread_mutex);
+
+ /* Wait at most 5 seconds so we have a chance to notice shutdown when
+ * a semaphore never gets signaled. If it takes longer we just retry
+ * the wait next iteration. */
+ result = wait_for_submission_timelines_available(submission,
+ radv_get_absolute_timeout(5000000000));
+ if (result != VK_SUCCESS) {
+ pthread_mutex_lock(&queue->thread_mutex);
+ continue;
+ }
+
+ /* The lock isn't held here, but nobody can hand the thread a new
+ * submission until we finish the current one, so a plain write is safe. */
+ p_atomic_set(&queue->thread_submission, NULL);
+
+ list_inithead(&processing_list);
+ list_addtail(&submission->processing_list, &processing_list);
+ result = radv_process_submissions(&processing_list);
+
+ pthread_mutex_lock(&queue->thread_mutex);
+ }
+ pthread_mutex_unlock(&queue->thread_mutex);
+ return NULL;
+}
+
+static VkResult
+radv_queue_trigger_submission(struct radv_deferred_queue_submission *submission,
+ uint32_t decrement,
+ struct list_head *processing_list)
+{
+ struct radv_queue *queue = submission->queue;
+ int ret;
+ if (p_atomic_add_return(&submission->submission_wait_count, -decrement))
+ return VK_SUCCESS;
+
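+ /* Fast path: if every timeline wait point is already available, process
+ * the submission on this thread instead of waking the worker. */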
+ if (wait_for_submission_timelines_available(submission, radv_get_absolute_timeout(0)) == VK_SUCCESS) {
+ list_addtail(&submission->processing_list, processing_list);
+ return VK_SUCCESS;
+ }
+
+ pthread_mutex_lock(&queue->thread_mutex);
+
+ /* A submission can only be ready for the thread if it doesn't have
+ * any predecessors in the same queue, so there can only be one such
+ * submission at a time. */
+ assert(queue->thread_submission == NULL);
+
+ /* Only start the thread on demand to save resources for the many games
+ * which only use binary semaphores. */
+ if (!queue->thread_running) {
+ ret = pthread_create(&queue->submission_thread, NULL,
+ radv_queue_submission_thread_run, queue);
+ if (ret) {
+ pthread_mutex_unlock(&queue->thread_mutex);
+ return vk_errorf(queue->device->instance,
+ VK_ERROR_DEVICE_LOST,
+ "Failed to start submission thread");
+ }
+ queue->thread_running = true;
+ }
+
+ queue->thread_submission = submission;
+ pthread_mutex_unlock(&queue->thread_mutex);
+
+ pthread_cond_signal(&queue->thread_cond);
+ return VK_SUCCESS;
+}
+
static VkResult radv_queue_submit(struct radv_queue *queue,
const struct radv_queue_submission *submission)
{
struct list_head processing_list;
list_inithead(&processing_list);
- radv_queue_enqueue_submission(deferred, &processing_list);
+ result = radv_queue_enqueue_submission(deferred, &processing_list);
+ if (result != VK_SUCCESS) {
+ /* If anything had already been added to the processing list we would leak it. */
+ assert(list_is_empty(&processing_list));
+ return result;
+ }
return radv_process_submissions(&processing_list);
}
radv_destroy_fence(device, pAllocator, fence);
}
-
-uint64_t radv_get_current_time(void)
-{
- struct timespec tv;
- clock_gettime(CLOCK_MONOTONIC, &tv);
- return tv.tv_nsec + tv.tv_sec*1000000000ull;
-}
-
-static uint64_t radv_get_absolute_timeout(uint64_t timeout)
-{
- uint64_t current_time = radv_get_current_time();
-
- timeout = MIN2(UINT64_MAX - current_time, timeout);
-
- return current_time + timeout;
-}
-
-
static bool radv_all_fences_plain_and_submitted(struct radv_device *device,
uint32_t fenceCount, const VkFence *pFences)
{
static VkResult
-radv_timeline_wait_locked(struct radv_device *device,
- struct radv_timeline *timeline,
- uint64_t value,
- uint64_t abs_timeout)
+radv_timeline_wait(struct radv_device *device,
+ struct radv_timeline *timeline,
+ uint64_t value,
+ uint64_t abs_timeout)
{
+ pthread_mutex_lock(&timeline->mutex);
+
while(timeline->highest_submitted < value) {
struct timespec abstime;
timespec_from_nsec(&abstime, abs_timeout);
pthread_cond_timedwait(&device->timeline_cond, &timeline->mutex, &abstime);
- if (radv_get_current_time() >= abs_timeout && timeline->highest_submitted < value)
+ if (radv_get_current_time() >= abs_timeout && timeline->highest_submitted < value) {
+ pthread_mutex_unlock(&timeline->mutex);
return VK_TIMEOUT;
+ }
}
struct radv_timeline_point *point = radv_timeline_find_point_at_least_locked(device, timeline, value);
+ pthread_mutex_unlock(&timeline->mutex);
if (!point)
return VK_SUCCESS;
- pthread_mutex_unlock(&timeline->mutex);
-
bool success = device->ws->wait_syncobj(device->ws, &point->syncobj, 1, true, abs_timeout);
pthread_mutex_lock(&timeline->mutex);
point->wait_count--;
+ pthread_mutex_unlock(&timeline->mutex);
return success ? VK_SUCCESS : VK_TIMEOUT;
}
if (waiter->value > timeline->highest_submitted)
continue;
- if (p_atomic_dec_zero(&waiter->submission->submission_wait_count)) {
- list_addtail(&waiter->submission->processing_list, processing_list);
- }
+ radv_queue_trigger_submission(waiter->submission, 1, processing_list);
list_del(&waiter->list);
}
}
radv_destroy_timeline(device, &part->timeline);
break;
case RADV_SEMAPHORE_SYNCOBJ:
+ case RADV_SEMAPHORE_TIMELINE_SYNCOBJ:
device->ws->destroy_syncobj(device->ws, part->syncobj);
break;
}
sem->temporary.kind = RADV_SEMAPHORE_NONE;
sem->permanent.kind = RADV_SEMAPHORE_NONE;
- if (type == VK_SEMAPHORE_TYPE_TIMELINE) {
+ if (type == VK_SEMAPHORE_TYPE_TIMELINE &&
+ device->physical_device->rad_info.has_timeline_syncobj) {
+ int ret = device->ws->create_syncobj(device->ws, false, &sem->permanent.syncobj);
+ if (ret) {
+ radv_destroy_semaphore(device, pAllocator, sem);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+ device->ws->signal_syncobj(device->ws, sem->permanent.syncobj, initial_value);
+ sem->permanent.timeline_syncobj.max_point = initial_value;
+ sem->permanent.kind = RADV_SEMAPHORE_TIMELINE_SYNCOBJ;
+ } else if (type == VK_SEMAPHORE_TYPE_TIMELINE) {
radv_create_timeline(&sem->permanent.timeline, initial_value);
sem->permanent.kind = RADV_SEMAPHORE_TIMELINE;
} else if (device->always_use_syncobj || handleTypes) {
pthread_mutex_unlock(&part->timeline.mutex);
return VK_SUCCESS;
}
+ case RADV_SEMAPHORE_TIMELINE_SYNCOBJ: {
+ return device->ws->query_syncobj(device->ws, part->syncobj, pValue);
+ }
case RADV_SEMAPHORE_NONE:
case RADV_SEMAPHORE_SYNCOBJ:
case RADV_SEMAPHORE_WINSYS:
for (;;) {
for(uint32_t i = 0; i < pWaitInfo->semaphoreCount; ++i) {
RADV_FROM_HANDLE(radv_semaphore, semaphore, pWaitInfo->pSemaphores[i]);
- pthread_mutex_lock(&semaphore->permanent.timeline.mutex);
- VkResult result = radv_timeline_wait_locked(device, &semaphore->permanent.timeline, pWaitInfo->pValues[i], 0);
- pthread_mutex_unlock(&semaphore->permanent.timeline.mutex);
+ VkResult result = radv_timeline_wait(device, &semaphore->permanent.timeline, pWaitInfo->pValues[i], 0);
if (result == VK_SUCCESS)
return VK_SUCCESS;
for(uint32_t i = 0; i < pWaitInfo->semaphoreCount; ++i) {
RADV_FROM_HANDLE(radv_semaphore, semaphore, pWaitInfo->pSemaphores[i]);
- pthread_mutex_lock(&semaphore->permanent.timeline.mutex);
- VkResult result = radv_timeline_wait_locked(device, &semaphore->permanent.timeline, pWaitInfo->pValues[i], abs_timeout);
- pthread_mutex_unlock(&semaphore->permanent.timeline.mutex);
+ VkResult result = radv_timeline_wait(device, &semaphore->permanent.timeline, pWaitInfo->pValues[i], abs_timeout);
if (result != VK_SUCCESS)
return result;
{
RADV_FROM_HANDLE(radv_device, device, _device);
uint64_t abs_timeout = radv_get_absolute_timeout(timeout);
- return radv_wait_timelines(device, pWaitInfo, abs_timeout);
+
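+ /* A device uses either emulated timelines or timeline syncobjs for all
+ * of its timeline semaphores, so checking the first one suffices. */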
+ if (radv_semaphore_from_handle(pWaitInfo->pSemaphores[0])->permanent.kind == RADV_SEMAPHORE_TIMELINE)
+ return radv_wait_timelines(device, pWaitInfo, abs_timeout);
+
+ if (pWaitInfo->semaphoreCount > UINT32_MAX / sizeof(uint32_t))
+ return vk_errorf(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY, "semaphoreCount integer overflow");
+
+ bool wait_all = !(pWaitInfo->flags & VK_SEMAPHORE_WAIT_ANY_BIT_KHR);
+ uint32_t *handles = malloc(sizeof(*handles) * pWaitInfo->semaphoreCount);
+ if (!handles)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ for (uint32_t i = 0; i < pWaitInfo->semaphoreCount; ++i) {
+ RADV_FROM_HANDLE(radv_semaphore, semaphore, pWaitInfo->pSemaphores[i]);
+ handles[i] = semaphore->permanent.syncobj;
+ }
+
+ bool success = device->ws->wait_timeline_syncobj(device->ws, handles, pWaitInfo->pValues,
+ pWaitInfo->semaphoreCount, wait_all, false,
+ abs_timeout);
+ free(handles);
+ return success ? VK_SUCCESS : VK_TIMEOUT;
}
VkResult
radv_timeline_trigger_waiters_locked(&part->timeline, &processing_list);
pthread_mutex_unlock(&part->timeline.mutex);
- return radv_process_submissions(&processing_list);
+ VkResult result = radv_process_submissions(&processing_list);
+
+ /* This needs to happen after radv_process_submissions, so
+ * that any submitted submissions that are now unblocked get
+ * processed before we wake the application. This way we
+ * ensure that any binary semaphores that are now unblocked
+ * are usable by the application. */
+ pthread_cond_broadcast(&device->timeline_cond);
+
+ return result;
+ }
+ case RADV_SEMAPHORE_TIMELINE_SYNCOBJ: {
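+ /* Host-side signal: track the new payload and let the kernel signal
+ * the timeline point. */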
+ part->timeline_syncobj.max_point = MAX2(part->timeline_syncobj.max_point, pSignalInfo->value);
+ device->ws->signal_syncobj(device->ws, part->syncobj, pSignalInfo->value);
+ break;
}
case RADV_SEMAPHORE_NONE:
case RADV_SEMAPHORE_SYNCOBJ:
}
} else {
if (fd == -1)
- device->ws->signal_syncobj(device->ws, syncobj_handle);
+ device->ws->signal_syncobj(device->ws, syncobj_handle, 0);
}
if (fd != -1) {
RADV_FROM_HANDLE(radv_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
VkResult result;
struct radv_semaphore_part *dst = NULL;
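+ /* Timeline syncobjs can only be imported permanently and only as
+ * opaque FDs; the asserts below encode both restrictions. */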
+ bool timeline = sem->permanent.kind == RADV_SEMAPHORE_TIMELINE_SYNCOBJ;
if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
+ assert(!timeline);
dst = &sem->temporary;
} else {
dst = &sem->permanent;
}
- uint32_t syncobj = dst->kind == RADV_SEMAPHORE_SYNCOBJ ? dst->syncobj : 0;
+ uint32_t syncobj = (dst->kind == RADV_SEMAPHORE_SYNCOBJ ||
+ dst->kind == RADV_SEMAPHORE_TIMELINE_SYNCOBJ) ? dst->syncobj : 0;
switch(pImportSemaphoreFdInfo->handleType) {
case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
result = radv_import_opaque_fd(device, pImportSemaphoreFdInfo->fd, &syncobj);
break;
case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
+ assert(!timeline);
result = radv_import_sync_fd(device, pImportSemaphoreFdInfo->fd, &syncobj);
break;
default:
if (result == VK_SUCCESS) {
dst->syncobj = syncobj;
dst->kind = RADV_SEMAPHORE_SYNCOBJ;
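+ /* Re-tag the part as a timeline and reset max_point: we know nothing
+ * about the imported payload yet. */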
+ if (timeline) {
+ dst->kind = RADV_SEMAPHORE_TIMELINE_SYNCOBJ;
+ dst->timeline_syncobj.max_point = 0;
+ }
}
return result;
uint32_t syncobj_handle;
if (sem->temporary.kind != RADV_SEMAPHORE_NONE) {
- assert(sem->temporary.kind == RADV_SEMAPHORE_SYNCOBJ);
+ assert(sem->temporary.kind == RADV_SEMAPHORE_SYNCOBJ ||
+ sem->temporary.kind == RADV_SEMAPHORE_TIMELINE_SYNCOBJ);
syncobj_handle = sem->temporary.syncobj;
} else {
- assert(sem->permanent.kind == RADV_SEMAPHORE_SYNCOBJ);
+ assert(sem->permanent.kind == RADV_SEMAPHORE_SYNCOBJ ||
+ sem->permanent.kind == RADV_SEMAPHORE_TIMELINE_SYNCOBJ);
syncobj_handle = sem->permanent.syncobj;
}
RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
VkSemaphoreTypeKHR type = radv_get_semaphore_type(pExternalSemaphoreInfo->pNext, NULL);
- if (type == VK_SEMAPHORE_TYPE_TIMELINE) {
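+ /* Timeline semaphores are only shareable (as opaque FDs) when backed
+ * by kernel timeline syncobjs. */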
+ if (type == VK_SEMAPHORE_TYPE_TIMELINE && pdevice->rad_info.has_timeline_syncobj &&
+ pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
+ pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
+ pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
+ pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
+ VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
+ } else if (type == VK_SEMAPHORE_TYPE_TIMELINE) {
pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
pExternalSemaphoreProperties->compatibleHandleTypes = 0;
pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;