instance->debug_flags |= RADV_DEBUG_ZERO_VRAM;
} else if (!strcmp(engine_name, "Quantic Dream Engine")) {
/* Fix various artifacts in Detroit: Become Human */
- instance->debug_flags |= RADV_DEBUG_ZERO_VRAM;
+ instance->debug_flags |= RADV_DEBUG_ZERO_VRAM |
+ RADV_DEBUG_DISCARD_TO_DEMOTE;
}
}
features->extendedDynamicState = true;
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT: {
+ VkPhysicalDeviceImageRobustnessFeaturesEXT *features =
+ (VkPhysicalDeviceImageRobustnessFeaturesEXT *)ext;
+ features->robustImageAccess = true;
+ break;
+ }
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT: {
+ VkPhysicalDeviceShaderAtomicFloatFeaturesEXT *features =
+ (VkPhysicalDeviceShaderAtomicFloatFeaturesEXT *)ext;
+ features->shaderBufferFloat32Atomics = true;
+ features->shaderBufferFloat32AtomicAdd = false;
+ features->shaderBufferFloat64Atomics = true;
+ features->shaderBufferFloat64AtomicAdd = false;
+ features->shaderSharedFloat32Atomics = true;
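+ /* Shared FP32 atomic adds need hardware support (GFX8+)
+ * and, with the LLVM backend, LLVM 10 or newer. */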
+ features->shaderSharedFloat32AtomicAdd = pdevice->rad_info.chip_class >= GFX8 &&
+ (!pdevice->use_llvm || LLVM_VERSION_MAJOR >= 10);
+ features->shaderSharedFloat64Atomics = true;
+ features->shaderSharedFloat64AtomicAdd = false;
+ features->shaderImageFloat32Atomics = true;
+ features->shaderImageFloat32AtomicAdd = false;
+ features->sparseImageFloat32Atomics = false;
+ features->sparseImageFloat32AtomicAdd = false;
+ break;
+ }
default:
break;
}
p->roundingModeIndependence = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR;
}
- /* Do not allow both preserving and flushing denorms because different
- * shaders in the same pipeline can have different settings and this
- * won't work for merged shaders. To make it work, this requires LLVM
+ /* With LLVM, do not allow both preserving and flushing denorms because
+ * different shaders in the same pipeline can have different settings and
+ * this won't work for merged shaders. To make it work, this requires LLVM
* support for changing the register. The same logic applies for the
* rounding modes because they are configured with the same config
- * register. TODO: we can enable a lot of these for ACO when it
- * supports all stages.
+ * register.
*/
p->shaderDenormFlushToZeroFloat32 = true;
- p->shaderDenormPreserveFloat32 = false;
+ p->shaderDenormPreserveFloat32 = !pdevice->use_llvm;
p->shaderRoundingModeRTEFloat32 = true;
- p->shaderRoundingModeRTZFloat32 = false;
+ p->shaderRoundingModeRTZFloat32 = !pdevice->use_llvm;
p->shaderSignedZeroInfNanPreserveFloat32 = true;
- p->shaderDenormFlushToZeroFloat16 = false;
+ p->shaderDenormFlushToZeroFloat16 = pdevice->rad_info.has_packed_math_16bit && !pdevice->use_llvm;
p->shaderDenormPreserveFloat16 = pdevice->rad_info.has_packed_math_16bit;
p->shaderRoundingModeRTEFloat16 = pdevice->rad_info.has_packed_math_16bit;
- p->shaderRoundingModeRTZFloat16 = false;
+ p->shaderRoundingModeRTZFloat16 = pdevice->rad_info.has_packed_math_16bit && !pdevice->use_llvm;
p->shaderSignedZeroInfNanPreserveFloat16 = pdevice->rad_info.has_packed_math_16bit;
- p->shaderDenormFlushToZeroFloat64 = false;
+ p->shaderDenormFlushToZeroFloat64 = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_llvm;
p->shaderDenormPreserveFloat64 = pdevice->rad_info.chip_class >= GFX8;
p->shaderRoundingModeRTEFloat64 = pdevice->rad_info.chip_class >= GFX8;
- p->shaderRoundingModeRTZFloat64 = false;
+ p->shaderRoundingModeRTZFloat64 = pdevice->rad_info.chip_class >= GFX8 && !pdevice->use_llvm;
p->shaderSignedZeroInfNanPreserveFloat64 = pdevice->rad_info.chip_class >= GFX8;
p->maxUpdateAfterBindDescriptorsInAllPools = UINT32_MAX / 64;
for (int family = 0; family < RADV_MAX_QUEUE_FAMILIES; ++family) {
device->empty_cs[family] = device->ws->cs_create(device->ws, family);
+ if (!device->empty_cs[family])
+ goto fail;
+
switch (family) {
case RADV_QUEUE_GENERAL:
radeon_emit(device->empty_cs[family], PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
radeon_emit(device->empty_cs[family], 0);
break;
}
- device->ws->cs_finalize(device->empty_cs[family]);
+
+ result = device->ws->cs_finalize(device->empty_cs[family]);
+ if (result != VK_SUCCESS)
+ goto fail;
}
if (device->physical_device->rad_info.chip_class >= GFX7)
VkFence _fence,
bool is_signal)
{
- int syncobj_idx = 0, sem_idx = 0;
+ int syncobj_idx = 0, non_reset_idx = 0, sem_idx = 0;
if (num_sems == 0 && _fence == VK_NULL_HANDLE)
return VK_SUCCESS;
switch(sems[i]->kind) {
case RADV_SEMAPHORE_SYNCOBJ:
counts->syncobj_count++;
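+ /* Binary-semaphore syncobjs are reset after the wait
+ * completes; counting them separately lets them be grouped
+ * at the front of the syncobj array. */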
+ counts->syncobj_reset_count++;
break;
case RADV_SEMAPHORE_WINSYS:
counts->sem_count++;
if (_fence != VK_NULL_HANDLE) {
RADV_FROM_HANDLE(radv_fence, fence, _fence);
- if (fence->temp_syncobj || fence->syncobj)
+
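+ /* A temporary payload, when present, takes precedence over
+ * the permanent one. */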
+ struct radv_fence_part *part =
+ fence->temporary.kind != RADV_FENCE_NONE ?
+ &fence->temporary : &fence->permanent;
+ if (part->kind == RADV_FENCE_SYNCOBJ)
counts->syncobj_count++;
}
}
}
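+ /* Syncobjs that need a post-submit reset occupy indices
+ * [0, syncobj_reset_count); timeline points and fence
+ * syncobjs are appended after them. */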
+ non_reset_idx = counts->syncobj_reset_count;
+
for (uint32_t i = 0; i < num_sems; i++) {
switch(sems[i]->kind) {
case RADV_SEMAPHORE_NONE:
pthread_mutex_unlock(&sems[i]->timeline.mutex);
if (point) {
- counts->syncobj[syncobj_idx++] = point->syncobj;
+ counts->syncobj[non_reset_idx++] = point->syncobj;
} else {
/* Explicitly remove the semaphore so that post-submit
* processing doesn't look for a point that doesn't exist. */
if (_fence != VK_NULL_HANDLE) {
RADV_FROM_HANDLE(radv_fence, fence, _fence);
- if (fence->temp_syncobj)
- counts->syncobj[syncobj_idx++] = fence->temp_syncobj;
- else if (fence->syncobj)
- counts->syncobj[syncobj_idx++] = fence->syncobj;
+
+ struct radv_fence_part *part =
+ fence->temporary.kind != RADV_FENCE_NONE ?
+ &fence->temporary : &fence->permanent;
+ if (part->kind == RADV_FENCE_SYNCOBJ)
+ counts->syncobj[non_reset_idx++] = part->syncobj;
}
- assert(syncobj_idx <= counts->syncobj_count);
- counts->syncobj_count = syncobj_idx;
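+ /* The syncobj array may be over-allocated when timeline
+ * points were missing; trim the count to the highest slot
+ * actually filled. */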
+ assert(MAX2(syncobj_idx, non_reset_idx) <= counts->syncobj_count);
+ counts->syncobj_count = MAX2(syncobj_idx, non_reset_idx);
return VK_SUCCESS;
}
}
}
-static void
+static VkResult
radv_sparse_buffer_bind_memory(struct radv_device *device,
const VkSparseBufferMemoryBindInfo *bind)
{
RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
+ VkResult result;
for (uint32_t i = 0; i < bind->bindCount; ++i) {
struct radv_device_memory *mem = NULL;
if (bind->pBinds[i].memory != VK_NULL_HANDLE)
mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
- device->ws->buffer_virtual_bind(buffer->bo,
- bind->pBinds[i].resourceOffset,
- bind->pBinds[i].size,
- mem ? mem->bo : NULL,
- bind->pBinds[i].memoryOffset);
+ result = device->ws->buffer_virtual_bind(buffer->bo,
+ bind->pBinds[i].resourceOffset,
+ bind->pBinds[i].size,
+ mem ? mem->bo : NULL,
+ bind->pBinds[i].memoryOffset);
+ if (result != VK_SUCCESS)
+ return result;
}
+
+ return VK_SUCCESS;
}
-static void
+static VkResult
radv_sparse_image_opaque_bind_memory(struct radv_device *device,
const VkSparseImageOpaqueMemoryBindInfo *bind)
{
RADV_FROM_HANDLE(radv_image, image, bind->image);
+ VkResult result;
for (uint32_t i = 0; i < bind->bindCount; ++i) {
struct radv_device_memory *mem = NULL;
if (bind->pBinds[i].memory != VK_NULL_HANDLE)
mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
- device->ws->buffer_virtual_bind(image->bo,
- bind->pBinds[i].resourceOffset,
- bind->pBinds[i].size,
- mem ? mem->bo : NULL,
- bind->pBinds[i].memoryOffset);
+ result = device->ws->buffer_virtual_bind(image->bo,
+ bind->pBinds[i].resourceOffset,
+ bind->pBinds[i].size,
+ mem ? mem->bo : NULL,
+ bind->pBinds[i].memoryOffset);
+ if (result != VK_SUCCESS)
+ return result;
}
+
+ return VK_SUCCESS;
}
static VkResult
struct radv_queue *queue = submission->queue;
struct radeon_winsys_ctx *ctx = queue->hw_ctx;
uint32_t max_cs_submission = queue->device->trace_bo ? 1 : RADV_MAX_IBS_PER_SUBMIT;
- struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
+ struct radeon_winsys_fence *base_fence = NULL;
bool do_flush = submission->flush_caches || submission->wait_dst_stage_mask;
bool can_patch = true;
uint32_t advance;
struct radv_winsys_sem_info sem_info;
VkResult result;
- int ret;
struct radeon_cmdbuf *initial_preamble_cs = NULL;
struct radeon_cmdbuf *initial_flush_preamble_cs = NULL;
struct radeon_cmdbuf *continue_preamble_cs = NULL;
+ if (fence) {
+ /* Under most circumstances, out fences won't be temporary.
+ * However, the spec does allow it for opaque_fd.
+ *
+ * From the Vulkan 1.0.53 spec:
+ *
+ * "If the import is temporary, the implementation must
+ * restore the semaphore to its prior permanent state after
+ * submitting the next semaphore wait operation."
+ */
+ struct radv_fence_part *part =
+ fence->temporary.kind != RADV_FENCE_NONE ?
+ &fence->temporary : &fence->permanent;
+ if (part->kind == RADV_FENCE_WINSYS)
+ base_fence = part->fence;
+ }
+
result = radv_get_preambles(queue, submission->cmd_buffers,
submission->cmd_buffer_count,
&initial_preamble_cs,
goto fail;
for (uint32_t i = 0; i < submission->buffer_bind_count; ++i) {
- radv_sparse_buffer_bind_memory(queue->device,
- submission->buffer_binds + i);
+ result = radv_sparse_buffer_bind_memory(queue->device,
+ submission->buffer_binds + i);
+ if (result != VK_SUCCESS)
+ goto fail;
}
for (uint32_t i = 0; i < submission->image_opaque_bind_count; ++i) {
- radv_sparse_image_opaque_bind_memory(queue->device,
- submission->image_opaque_binds + i);
+ result = radv_sparse_image_opaque_bind_memory(queue->device,
+ submission->image_opaque_binds + i);
+ if (result != VK_SUCCESS)
+ goto fail;
}
if (!submission->cmd_buffer_count) {
- ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
- &queue->device->empty_cs[queue->queue_family_index],
- 1, NULL, NULL,
- &sem_info, NULL,
- false, base_fence);
- if (ret) {
- radv_loge("failed to submit CS\n");
- abort();
- }
-
- goto success;
+ result = queue->device->ws->cs_submit(ctx, queue->queue_idx,
+ &queue->device->empty_cs[queue->queue_family_index],
+ 1, NULL, NULL,
+ &sem_info, NULL,
+ false, base_fence);
+ if (result != VK_SUCCESS)
+ goto fail;
} else {
struct radeon_cmdbuf **cs_array = malloc(sizeof(struct radeon_cmdbuf *) *
(submission->cmd_buffer_count));
bo_list = &queue->device->bo_list.list;
}
- ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, cs_array + j,
- advance, initial_preamble, continue_preamble_cs,
- &sem_info, bo_list,
- can_patch, base_fence);
+ result = queue->device->ws->cs_submit(ctx, queue->queue_idx, cs_array + j,
+ advance, initial_preamble, continue_preamble_cs,
+ &sem_info, bo_list,
+ can_patch, base_fence);
if (unlikely(queue->device->use_global_bo_list))
pthread_mutex_unlock(&queue->device->bo_list.mutex);
- if (ret) {
- radv_loge("failed to submit CS\n");
- abort();
- }
+ if (result != VK_SUCCESS)
+ goto fail;
+
if (queue->device->trace_bo) {
radv_check_gpu_hangs(queue, cs_array[j]);
}
free(cs_array);
}
-success:
radv_free_temp_syncobjs(queue->device,
submission->temporary_semaphore_part_count,
submission->temporary_semaphore_parts);
return VK_SUCCESS;
fail:
+ if (result != VK_SUCCESS && result != VK_ERROR_DEVICE_LOST) {
+ /* When something bad happened during the submission, such as
+ * an out of memory issue, it might be hard to recover from
+ * this inconsistent state. To avoid this sort of problem, we
+ * assume that we are in a really bad situation and return
+ * VK_ERROR_DEVICE_LOST to ensure the clients do not attempt
+ * to submit the same job again to this device.
+ */
+ result = VK_ERROR_DEVICE_LOST;
+ }
+
radv_free_temp_syncobjs(queue->device,
submission->temporary_semaphore_part_count,
submission->temporary_semaphore_parts);
free(submission);
- return VK_ERROR_DEVICE_LOST;
+ return result;
}
static VkResult
struct radeon_winsys_ctx *ctx = queue->hw_ctx;
struct radv_winsys_sem_info sem_info;
VkResult result;
- int ret;
result = radv_alloc_sem_info(queue->device, &sem_info, 0, NULL, 0, 0,
0, NULL, VK_NULL_HANDLE);
if (result != VK_SUCCESS)
return false;
- ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, &cs, 1, NULL,
- NULL, &sem_info, NULL, false, NULL);
+ result = queue->device->ws->cs_submit(ctx, queue->queue_idx, &cs, 1,
+ NULL, NULL, &sem_info, NULL,
+ false, NULL);
radv_free_sem_info(&sem_info);
- return !ret;
+ if (result != VK_SUCCESS)
+ return false;
+
+ return true;
}
/* Signals fence as soon as all the work currently put on queue is done. */
return VK_SUCCESS;
}
+static void
+radv_destroy_fence_part(struct radv_device *device,
+ struct radv_fence_part *part)
+{
+ switch (part->kind) {
+ case RADV_FENCE_NONE:
+ break;
+ case RADV_FENCE_WINSYS:
+ device->ws->destroy_fence(part->fence);
+ break;
+ case RADV_FENCE_SYNCOBJ:
+ device->ws->destroy_syncobj(device->ws, part->syncobj);
+ break;
+ case RADV_FENCE_WSI:
+ part->fence_wsi->destroy(part->fence_wsi);
+ break;
+ default:
+ unreachable("Invalid fence type");
+ }
+
+ part->kind = RADV_FENCE_NONE;
+}
+
static void
radv_destroy_fence(struct radv_device *device,
const VkAllocationCallbacks *pAllocator,
struct radv_fence *fence)
{
- if (fence->temp_syncobj)
- device->ws->destroy_syncobj(device->ws, fence->temp_syncobj);
- if (fence->syncobj)
- device->ws->destroy_syncobj(device->ws, fence->syncobj);
- if (fence->fence)
- device->ws->destroy_fence(fence->fence);
- if (fence->fence_wsi)
- fence->fence_wsi->destroy(fence->fence_wsi);
+ radv_destroy_fence_part(device, &fence->temporary);
+ radv_destroy_fence_part(device, &fence->permanent);
vk_object_base_finish(&fence->base);
vk_free2(&device->vk.alloc, pAllocator, fence);
vk_find_struct_const(pCreateInfo->pNext, EXPORT_FENCE_CREATE_INFO);
VkExternalFenceHandleTypeFlags handleTypes =
export ? export->handleTypes : 0;
+ struct radv_fence *fence;
- struct radv_fence *fence = vk_alloc2(&device->vk.alloc, pAllocator,
- sizeof(*fence), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-
+ fence = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*fence), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!fence)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
vk_object_base_init(&device->vk, &fence->base, VK_OBJECT_TYPE_FENCE);
- fence->fence_wsi = NULL;
- fence->temp_syncobj = 0;
if (device->always_use_syncobj || handleTypes) {
- int ret = device->ws->create_syncobj(device->ws, &fence->syncobj);
+ fence->permanent.kind = RADV_FENCE_SYNCOBJ;
+
+ bool create_signaled = false;
+ if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT)
+ create_signaled = true;
+
+ int ret = device->ws->create_syncobj(device->ws, create_signaled,
+ &fence->permanent.syncobj);
if (ret) {
radv_destroy_fence(device, pAllocator, fence);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
- if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
- device->ws->signal_syncobj(device->ws, fence->syncobj);
- }
- fence->fence = NULL;
} else {
- fence->fence = device->ws->create_fence();
- if (!fence->fence) {
+ fence->permanent.kind = RADV_FENCE_WINSYS;
+
+ fence->permanent.fence = device->ws->create_fence();
+ if (!fence->permanent.fence) {
radv_destroy_fence(device, pAllocator, fence);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
- fence->syncobj = 0;
if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT)
- device->ws->signal_fence(fence->fence);
+ device->ws->signal_fence(fence->permanent.fence);
}
*pFence = radv_fence_to_handle(fence);
return VK_SUCCESS;
}
+
void radv_DestroyFence(
VkDevice _device,
VkFence _fence,
{
for (uint32_t i = 0; i < fenceCount; ++i) {
RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
- if (fence->fence == NULL || fence->syncobj ||
- fence->temp_syncobj || fence->fence_wsi ||
- (!device->ws->is_fence_waitable(fence->fence)))
+
+ struct radv_fence_part *part =
+ fence->temporary.kind != RADV_FENCE_NONE ?
+ &fence->temporary : &fence->permanent;
+ if (part->kind != RADV_FENCE_WINSYS ||
+ !device->ws->is_fence_waitable(part->fence))
return false;
}
return true;
{
for (uint32_t i = 0; i < fenceCount; ++i) {
RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
- if (fence->syncobj == 0 && fence->temp_syncobj == 0)
+
+ struct radv_fence_part *part =
+ fence->temporary.kind != RADV_FENCE_NONE ?
+ &fence->temporary : &fence->permanent;
+ if (part->kind != RADV_FENCE_SYNCOBJ)
return false;
}
return true;
for (uint32_t i = 0; i < fenceCount; ++i) {
RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
- handles[i] = fence->temp_syncobj ? fence->temp_syncobj : fence->syncobj;
+
+ struct radv_fence_part *part =
+ fence->temporary.kind != RADV_FENCE_NONE ?
+ &fence->temporary : &fence->permanent;
+
+ assert(part->kind == RADV_FENCE_SYNCOBJ);
+ handles[i] = part->syncobj;
}
bool success = device->ws->wait_syncobj(device->ws, handles, fenceCount, waitAll, timeout);
for (uint32_t i = 0; i < fenceCount; ++i) {
RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
- if (device->ws->fence_wait(device->ws, fence->fence, false, 0)) {
+ struct radv_fence_part *part =
+ fence->temporary.kind != RADV_FENCE_NONE ?
+ &fence->temporary : &fence->permanent;
+ assert(part->kind == RADV_FENCE_WINSYS);
+
+ if (device->ws->fence_wait(device->ws, part->fence, false, 0)) {
free(fences);
return VK_SUCCESS;
}
- fences[wait_count++] = fence->fence;
+ fences[wait_count++] = part->fence;
}
bool success = device->ws->fences_wait(device->ws, fences, wait_count,
RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
bool expired = false;
- if (fence->temp_syncobj) {
- if (!device->ws->wait_syncobj(device->ws, &fence->temp_syncobj, 1, true, timeout))
- return VK_TIMEOUT;
- continue;
- }
+ struct radv_fence_part *part =
+ fence->temporary.kind != RADV_FENCE_NONE ?
+ &fence->temporary : &fence->permanent;
- if (fence->syncobj) {
- if (!device->ws->wait_syncobj(device->ws, &fence->syncobj, 1, true, timeout))
- return VK_TIMEOUT;
- continue;
- }
-
- if (fence->fence) {
- if (!device->ws->is_fence_waitable(fence->fence)) {
- while(!device->ws->is_fence_waitable(fence->fence) &&
+ switch (part->kind) {
+ case RADV_FENCE_NONE:
+ break;
+ case RADV_FENCE_WINSYS:
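+ /* The winsys fence only becomes waitable once its
+ * submission reaches the kernel; busy-wait for that,
+ * bounded by the timeout. */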
+ if (!device->ws->is_fence_waitable(part->fence)) {
+ while (!device->ws->is_fence_waitable(part->fence) &&
radv_get_current_time() <= timeout)
/* Do nothing */;
}
expired = device->ws->fence_wait(device->ws,
- fence->fence,
+ part->fence,
true, timeout);
if (!expired)
return VK_TIMEOUT;
- }
-
- if (fence->fence_wsi) {
- VkResult result = fence->fence_wsi->wait(fence->fence_wsi, timeout);
+ break;
+ case RADV_FENCE_SYNCOBJ:
+ if (!device->ws->wait_syncobj(device->ws,
+ &part->syncobj, 1, true,
+ timeout))
+ return VK_TIMEOUT;
+ break;
+ case RADV_FENCE_WSI: {
+ VkResult result = part->fence_wsi->wait(part->fence_wsi, timeout);
if (result != VK_SUCCESS)
return result;
+ break;
+ }
+ default:
+ unreachable("Invalid fence type");
}
}
for (unsigned i = 0; i < fenceCount; ++i) {
RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
- if (fence->fence)
- device->ws->reset_fence(fence->fence);
- /* Per spec, we first restore the permanent payload, and then reset, so
- * having a temp syncobj should not skip resetting the permanent syncobj. */
- if (fence->temp_syncobj) {
- device->ws->destroy_syncobj(device->ws, fence->temp_syncobj);
- fence->temp_syncobj = 0;
- }
+ /* From the Vulkan 1.0.53 spec:
+ *
+ * "If any member of pFences currently has its payload
+ * imported with temporary permanence, that fence’s prior
+ * permanent payload is first restored. The remaining
+ * operations described therefore operate on the restored
+ * payload."
+ */
+ if (fence->temporary.kind != RADV_FENCE_NONE)
+ radv_destroy_fence_part(device, &fence->temporary);
- if (fence->syncobj) {
- device->ws->reset_syncobj(device->ws, fence->syncobj);
+ struct radv_fence_part *part = &fence->permanent;
+
+ switch (part->kind) {
+ case RADV_FENCE_WINSYS:
+ device->ws->reset_fence(part->fence);
+ break;
+ case RADV_FENCE_SYNCOBJ:
+ device->ws->reset_syncobj(device->ws, part->syncobj);
+ break;
+ default:
+ unreachable("Invalid fence type");
}
}
RADV_FROM_HANDLE(radv_device, device, _device);
RADV_FROM_HANDLE(radv_fence, fence, _fence);
- if (fence->temp_syncobj) {
- bool success = device->ws->wait_syncobj(device->ws, &fence->temp_syncobj, 1, true, 0);
- return success ? VK_SUCCESS : VK_NOT_READY;
- }
+ struct radv_fence_part *part =
+ fence->temporary.kind != RADV_FENCE_NONE ?
+ &fence->temporary : &fence->permanent;
- if (fence->syncobj) {
- bool success = device->ws->wait_syncobj(device->ws, &fence->syncobj, 1, true, 0);
- return success ? VK_SUCCESS : VK_NOT_READY;
- }
-
- if (fence->fence) {
- if (!device->ws->fence_wait(device->ws, fence->fence, false, 0))
+ switch (part->kind) {
+ case RADV_FENCE_NONE:
+ break;
+ case RADV_FENCE_WINSYS:
+ if (!device->ws->fence_wait(device->ws, part->fence, false, 0))
return VK_NOT_READY;
+ break;
+ case RADV_FENCE_SYNCOBJ: {
+ bool success = device->ws->wait_syncobj(device->ws,
+ &part->syncobj, 1, true, 0);
+ if (!success)
+ return VK_NOT_READY;
+ break;
}
- if (fence->fence_wsi) {
- VkResult result = fence->fence_wsi->wait(fence->fence_wsi, 0);
-
+ case RADV_FENCE_WSI: {
+ VkResult result = part->fence_wsi->wait(part->fence_wsi, 0);
if (result != VK_SUCCESS) {
if (result == VK_TIMEOUT)
return VK_NOT_READY;
return result;
}
+ break;
}
+ default:
+ unreachable("Invalid fence type");
+ }
+
return VK_SUCCESS;
}
struct radv_timeline_point *ret = NULL;
struct radv_timeline_point *prev = NULL;
+ int r;
if (p <= timeline->highest_signaled)
return NULL;
if (list_is_empty(&timeline->free_points)) {
ret = malloc(sizeof(struct radv_timeline_point));
- device->ws->create_syncobj(device->ws, &ret->syncobj);
+ if (!ret)
+ return NULL;
+
+ r = device->ws->create_syncobj(device->ws, false, &ret->syncobj);
+ if (r) {
+ free(ret);
+ return NULL;
+ }
} else {
ret = list_first_entry(&timeline->free_points, struct radv_timeline_point, list);
list_del(&ret->list);
sem->permanent.kind = RADV_SEMAPHORE_TIMELINE;
} else if (device->always_use_syncobj || handleTypes) {
assert (device->physical_device->rad_info.has_syncobj);
- int ret = device->ws->create_syncobj(device->ws, &sem->permanent.syncobj);
+ int ret = device->ws->create_syncobj(device->ws, false,
+ &sem->permanent.syncobj);
if (ret) {
radv_destroy_semaphore(device, pAllocator, sem);
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
* leave a syncobj in an undetermined state in the fence. */
uint32_t syncobj_handle = *syncobj;
if (!syncobj_handle) {
- int ret = device->ws->create_syncobj(device->ws, &syncobj_handle);
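+ /* Per the spec, importing a sync FD of -1 is equivalent to
+ * importing an already-signaled payload, so create the
+ * syncobj pre-signaled in that case. */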
+ bool create_signaled = fd == -1;
+
+ int ret = device->ws->create_syncobj(device->ws, create_signaled,
+ &syncobj_handle);
if (ret) {
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
+ } else {
+ if (fd == -1)
+ device->ws->signal_syncobj(device->ws, syncobj_handle);
}
- if (fd == -1) {
- device->ws->signal_syncobj(device->ws, syncobj_handle);
- } else {
+ if (fd != -1) {
int ret = device->ws->import_syncobj_from_sync_file(device->ws, syncobj_handle, fd);
- if (ret != 0)
- return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ if (ret)
+ return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
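+ /* A successful import transfers ownership of the FD to the
+ * driver, so it must be closed here; on failure the
+ * application retains the FD. */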
+ close(fd);
}
*syncobj = syncobj_handle;
- if (fd != -1)
- close(fd);
return VK_SUCCESS;
}
{
RADV_FROM_HANDLE(radv_device, device, _device);
RADV_FROM_HANDLE(radv_fence, fence, pImportFenceFdInfo->fence);
- uint32_t *syncobj_dst = NULL;
-
+ struct radv_fence_part *dst = NULL;
+ VkResult result;
if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT) {
- syncobj_dst = &fence->temp_syncobj;
+ dst = &fence->temporary;
} else {
- syncobj_dst = &fence->syncobj;
+ dst = &fence->permanent;
}
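+ /* Reuse the destination's existing syncobj handle if it has
+ * one; otherwise the import helpers create a new one. */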
+ uint32_t syncobj = dst->kind == RADV_FENCE_SYNCOBJ ? dst->syncobj : 0;
+
switch(pImportFenceFdInfo->handleType) {
case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
- return radv_import_opaque_fd(device, pImportFenceFdInfo->fd, syncobj_dst);
+ result = radv_import_opaque_fd(device, pImportFenceFdInfo->fd, &syncobj);
+ break;
case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
- return radv_import_sync_fd(device, pImportFenceFdInfo->fd, syncobj_dst);
+ result = radv_import_sync_fd(device, pImportFenceFdInfo->fd, &syncobj);
+ break;
default:
unreachable("Unhandled fence handle type");
}
+
+ if (result == VK_SUCCESS) {
+ dst->syncobj = syncobj;
+ dst->kind = RADV_FENCE_SYNCOBJ;
+ }
+
+ return result;
}
VkResult radv_GetFenceFdKHR(VkDevice _device,
RADV_FROM_HANDLE(radv_device, device, _device);
RADV_FROM_HANDLE(radv_fence, fence, pGetFdInfo->fence);
int ret;
- uint32_t syncobj_handle;
- if (fence->temp_syncobj)
- syncobj_handle = fence->temp_syncobj;
- else
- syncobj_handle = fence->syncobj;
+ struct radv_fence_part *part =
+ fence->temporary.kind != RADV_FENCE_NONE ?
+ &fence->temporary : &fence->permanent;
switch(pGetFdInfo->handleType) {
case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT:
- ret = device->ws->export_syncobj(device->ws, syncobj_handle, pFd);
+ ret = device->ws->export_syncobj(device->ws, part->syncobj, pFd);
if (ret)
return vk_error(device->instance, VK_ERROR_TOO_MANY_OBJECTS);
break;
case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT:
- ret = device->ws->export_syncobj_to_sync_file(device->ws, syncobj_handle, pFd);
+ ret = device->ws->export_syncobj_to_sync_file(device->ws,
+ part->syncobj, pFd);
if (ret)
return vk_error(device->instance, VK_ERROR_TOO_MANY_OBJECTS);
- if (fence->temp_syncobj) {
- device->ws->destroy_syncobj(device->ws, fence->temp_syncobj);
- fence->temp_syncobj = 0;
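+ /* Per the spec, exporting a payload with copy transference
+ * (sync FD) has the same side effects as a fence reset, so
+ * reset the permanent payload or drop the temporary one. */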
+ if (part == &fence->temporary) {
+ radv_destroy_fence_part(device, part);
} else {
- device->ws->reset_syncobj(device->ws, syncobj_handle);
+ device->ws->reset_syncobj(device->ws, part->syncobj);
}
break;
default: