X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Famd%2Fvulkan%2Fradv_device.c;h=5b93083913b9c81afbe69d0595b9cdd066a557a3;hb=93200ce4c6f543808e768c379a04bea577d55c72;hp=18767a300333320c576d18ee723d9cdc70cb93bf;hpb=56d9bcdded8f3eb7bd45262ce013ef1809d8edb1;p=mesa.git diff --git a/src/amd/vulkan/radv_device.c b/src/amd/vulkan/radv_device.c index 18767a30033..5b93083913b 100644 --- a/src/amd/vulkan/radv_device.c +++ b/src/amd/vulkan/radv_device.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -83,6 +84,25 @@ static void radv_destroy_semaphore_part(struct radv_device *device, struct radv_semaphore_part *part); +static VkResult +radv_create_pthread_cond(pthread_cond_t *cond); + +uint64_t radv_get_current_time(void) +{ + struct timespec tv; + clock_gettime(CLOCK_MONOTONIC, &tv); + return tv.tv_nsec + tv.tv_sec*1000000000ull; +} + +static uint64_t radv_get_absolute_timeout(uint64_t timeout) +{ + uint64_t current_time = radv_get_current_time(); + + timeout = MIN2(UINT64_MAX - current_time, timeout); + + return current_time + timeout; +} + static int radv_device_get_cache_uuid(enum radeon_family family, void *uuid) { @@ -503,6 +523,7 @@ static const struct debug_control radv_debug_options[] = { {"metashaders", RADV_DEBUG_DUMP_META_SHADERS}, {"nomemorycache", RADV_DEBUG_NO_MEMORY_CACHE}, {"llvm", RADV_DEBUG_LLVM}, + {"forcecompress", RADV_DEBUG_FORCE_COMPRESS}, {NULL, 0} }; @@ -571,7 +592,8 @@ radv_handle_per_app_options(struct radv_instance *instance, instance->debug_flags |= RADV_DEBUG_ZERO_VRAM; } else if (!strcmp(engine_name, "Quantic Dream Engine")) { /* Fix various artifacts in Detroit: Become Human */ - instance->debug_flags |= RADV_DEBUG_ZERO_VRAM; + instance->debug_flags |= RADV_DEBUG_ZERO_VRAM | + RADV_DEBUG_DISCARD_TO_DEMOTE; } } @@ -593,6 +615,7 @@ DRI_CONF_BEGIN DRI_CONF_RADV_REPORT_LLVM9_VERSION_STRING("false") DRI_CONF_RADV_ENABLE_MRT_OUTPUT_NAN_FIXUP("false") DRI_CONF_RADV_NO_DYNAMIC_BOUNDS("false") + DRI_CONF_RADV_OVERRIDE_UNIFORM_OFFSET_ALIGNMENT(0) DRI_CONF_SECTION_END DRI_CONF_SECTION_DEBUG @@ -606,6 +629,8 @@ static void radv_init_dri_options(struct radv_instance *instance) driParseConfigFiles(&instance->dri_options, &instance->available_dri_options, 0, "radv", NULL, + instance->applicationName, + instance->applicationVersion, instance->engineName, instance->engineVersion); } @@ -633,6 +658,11 @@ VkResult radv_CreateInstance( if (pCreateInfo->pApplicationInfo) { const VkApplicationInfo *app = pCreateInfo->pApplicationInfo; + instance->applicationName = + vk_strdup(&instance->alloc, app->pApplicationName, + VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); + instance->applicationVersion = app->applicationVersion; + instance->engineName = vk_strdup(&instance->alloc, app->pEngineName, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE); @@ -646,8 +676,26 @@ VkResult radv_CreateInstance( instance->debug_flags = parse_debug_string(getenv("RADV_DEBUG"), radv_debug_options); - instance->perftest_flags = parse_debug_string(getenv("RADV_PERFTEST"), - radv_perftest_options); + const char *radv_perftest_str = getenv("RADV_PERFTEST"); + instance->perftest_flags = parse_debug_string(radv_perftest_str, + radv_perftest_options); + + if (radv_perftest_str) { + /* Output warnings for famous RADV_PERFTEST options that no + * longer exist or are deprecated. + */ + if (strstr(radv_perftest_str, "aco")) { + fprintf(stderr, "*******************************************************************************\n"); + fprintf(stderr, "* WARNING: Unknown option RADV_PERFTEST='aco'. 
ACO is enabled by default now. *\n"); + fprintf(stderr, "*******************************************************************************\n"); + } + if (strstr(radv_perftest_str, "llvm")) { + fprintf(stderr, "*********************************************************************************\n"); + fprintf(stderr, "* WARNING: Unknown option 'RADV_PERFTEST=llvm'. Did you mean 'RADV_DEBUG=llvm'? *\n"); + fprintf(stderr, "*********************************************************************************\n"); + abort(); + } + } if (instance->debug_flags & RADV_DEBUG_STARTUP) radv_logi("Created an instance"); @@ -751,6 +799,7 @@ void radv_DestroyInstance( } vk_free(&instance->alloc, instance->engineName); + vk_free(&instance->alloc, instance->applicationName); VG(VALGRIND_DESTROY_MEMPOOL(instance)); @@ -1002,8 +1051,8 @@ radv_get_physical_device_features_1_2(struct radv_physical_device *pdevice, f->bufferDeviceAddress = true; f->bufferDeviceAddressCaptureReplay = false; f->bufferDeviceAddressMultiDevice = false; - f->vulkanMemoryModel = false; - f->vulkanMemoryModelDeviceScope = false; + f->vulkanMemoryModel = true; + f->vulkanMemoryModelDeviceScope = true; f->vulkanMemoryModelAvailabilityVisibilityChains = false; f->shaderOutputViewportIndex = true; f->shaderOutputLayer = true; @@ -1327,12 +1376,51 @@ void radv_GetPhysicalDeviceFeatures2( features-> pipelineCreationCacheControl = true; break; } + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR: { + VkPhysicalDeviceVulkanMemoryModelFeaturesKHR *features = + (VkPhysicalDeviceVulkanMemoryModelFeaturesKHR *)ext; + CORE_FEATURE(1, 2, vulkanMemoryModel); + CORE_FEATURE(1, 2, vulkanMemoryModelDeviceScope); + CORE_FEATURE(1, 2, vulkanMemoryModelAvailabilityVisibilityChains); + break; + } case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT: { VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *features = (VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *) ext; features->extendedDynamicState = true; break; } + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT: { + VkPhysicalDeviceImageRobustnessFeaturesEXT *features = + (VkPhysicalDeviceImageRobustnessFeaturesEXT *)ext; + features->robustImageAccess = true; + break; + } + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT: { + VkPhysicalDeviceShaderAtomicFloatFeaturesEXT *features = + (VkPhysicalDeviceShaderAtomicFloatFeaturesEXT *)ext; + features->shaderBufferFloat32Atomics = true; + features->shaderBufferFloat32AtomicAdd = false; + features->shaderBufferFloat64Atomics = true; + features->shaderBufferFloat64AtomicAdd = false; + features->shaderSharedFloat32Atomics = true; + features->shaderSharedFloat32AtomicAdd = pdevice->rad_info.chip_class >= GFX8 && + (!pdevice->use_llvm || LLVM_VERSION_MAJOR >= 10); + features->shaderSharedFloat64Atomics = true; + features->shaderSharedFloat64AtomicAdd = false; + features->shaderImageFloat32Atomics = true; + features->shaderImageFloat32AtomicAdd = false; + features->sparseImageFloat32Atomics = false; + features->sparseImageFloat32AtomicAdd = false; + break; + } + case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT: { + VkPhysicalDevice4444FormatsFeaturesEXT *features = + (VkPhysicalDevice4444FormatsFeaturesEXT *)ext; + features->formatA4R4G4B4 = true; + features->formatA4B4G4R4 = true; + break; + } default: break; } @@ -1358,6 +1446,21 @@ radv_max_descriptor_set_size() 64 /* storage image */); } +static uint32_t +radv_uniform_buffer_offset_alignment(const struct 
radv_physical_device *pdevice) +{ + uint32_t uniform_offset_alignment = driQueryOptioni(&pdevice->instance->dri_options, + "radv_override_uniform_offset_alignment"); + if (!util_is_power_of_two_or_zero(uniform_offset_alignment)) { + fprintf(stderr, "ERROR: invalid radv_override_uniform_offset_alignment setting %d:" + "not a power of two\n", uniform_offset_alignment); + uniform_offset_alignment = 0; + } + + /* Take at least the hardware limit. */ + return MAX2(uniform_offset_alignment, 4); +} + void radv_GetPhysicalDeviceProperties( VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties) @@ -1440,7 +1543,7 @@ void radv_GetPhysicalDeviceProperties( .viewportSubPixelBits = 8, .minMemoryMapAlignment = 4096, /* A page */ .minTexelBufferOffsetAlignment = 4, - .minUniformBufferOffsetAlignment = 4, + .minUniformBufferOffsetAlignment = radv_uniform_buffer_offset_alignment(pdevice), .minStorageBufferOffsetAlignment = 4, .minTexelOffset = -32, .maxTexelOffset = 31, @@ -2227,13 +2330,27 @@ radv_queue_init(struct radv_device *device, struct radv_queue *queue, list_inithead(&queue->pending_submissions); pthread_mutex_init(&queue->pending_mutex, NULL); + pthread_mutex_init(&queue->thread_mutex, NULL); + queue->thread_submission = NULL; + queue->thread_running = queue->thread_exit = false; + result = radv_create_pthread_cond(&queue->thread_cond); + if (result != VK_SUCCESS) + return vk_error(device->instance, result); + return VK_SUCCESS; } static void radv_queue_finish(struct radv_queue *queue) { + if (queue->thread_running) { + p_atomic_set(&queue->thread_exit, true); + pthread_cond_broadcast(&queue->thread_cond); + pthread_join(queue->submission_thread, NULL); + } + pthread_cond_destroy(&queue->thread_cond); pthread_mutex_destroy(&queue->pending_mutex); + pthread_mutex_destroy(&queue->thread_mutex); if (queue->hw_ctx) queue->device->ws->ctx_destroy(queue->hw_ctx); @@ -2473,6 +2590,25 @@ static void radv_device_finish_border_color(struct radv_device *device) } } +VkResult +_radv_device_set_lost(struct radv_device *device, + const char *file, int line, + const char *msg, ...) 
+{ + VkResult err; + va_list ap; + + p_atomic_inc(&device->lost); + + va_start(ap, msg); + err = __vk_errorv(device->physical_device->instance, device, + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, + VK_ERROR_DEVICE_LOST, file, line, msg, ap); + va_end(ap); + + return err; +} + VkResult radv_CreateDevice( VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, @@ -2697,6 +2833,9 @@ VkResult radv_CreateDevice( for (int family = 0; family < RADV_MAX_QUEUE_FAMILIES; ++family) { device->empty_cs[family] = device->ws->cs_create(device->ws, family); + if (!device->empty_cs[family]) + goto fail; + switch (family) { case RADV_QUEUE_GENERAL: radeon_emit(device->empty_cs[family], PKT3(PKT3_CONTEXT_CONTROL, 1, 0)); @@ -2708,7 +2847,10 @@ VkResult radv_CreateDevice( radeon_emit(device->empty_cs[family], 0); break; } - device->ws->cs_finalize(device->empty_cs[family]); + + result = device->ws->cs_finalize(device->empty_cs[family]); + if (result != VK_SUCCESS) + goto fail; } if (device->physical_device->rad_info.chip_class >= GFX7) @@ -3300,8 +3442,7 @@ radv_init_graphics_state(struct radeon_cmdbuf *cs, struct radv_queue *queue) static void radv_init_compute_state(struct radeon_cmdbuf *cs, struct radv_queue *queue) { - struct radv_physical_device *physical_device = queue->device->physical_device; - si_emit_compute(physical_device, cs); + si_emit_compute(queue->device, cs); } static VkResult @@ -3731,7 +3872,7 @@ static VkResult radv_alloc_sem_counts(struct radv_device *device, VkFence _fence, bool is_signal) { - int syncobj_idx = 0, sem_idx = 0; + int syncobj_idx = 0, non_reset_idx = 0, sem_idx = 0, timeline_idx = 0; if (num_sems == 0 && _fence == VK_NULL_HANDLE) return VK_SUCCESS; @@ -3740,6 +3881,7 @@ static VkResult radv_alloc_sem_counts(struct radv_device *device, switch(sems[i]->kind) { case RADV_SEMAPHORE_SYNCOBJ: counts->syncobj_count++; + counts->syncobj_reset_count++; break; case RADV_SEMAPHORE_WINSYS: counts->sem_count++; @@ -3749,6 +3891,9 @@ static VkResult radv_alloc_sem_counts(struct radv_device *device, case RADV_SEMAPHORE_TIMELINE: counts->syncobj_count++; break; + case RADV_SEMAPHORE_TIMELINE_SYNCOBJ: + counts->timeline_syncobj_count++; + break; } } @@ -3762,10 +3907,13 @@ static VkResult radv_alloc_sem_counts(struct radv_device *device, counts->syncobj_count++; } - if (counts->syncobj_count) { - counts->syncobj = (uint32_t *)malloc(sizeof(uint32_t) * counts->syncobj_count); - if (!counts->syncobj) + if (counts->syncobj_count || counts->timeline_syncobj_count) { + counts->points = (uint64_t *)malloc( + sizeof(*counts->syncobj) * counts->syncobj_count + + (sizeof(*counts->syncobj) + sizeof(*counts->points)) * counts->timeline_syncobj_count); + if (!counts->points) return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); + counts->syncobj = (uint32_t*)(counts->points + counts->timeline_syncobj_count); } if (counts->sem_count) { @@ -3776,6 +3924,8 @@ static VkResult radv_alloc_sem_counts(struct radv_device *device, } } + non_reset_idx = counts->syncobj_reset_count; + for (uint32_t i = 0; i < num_sems; i++) { switch(sems[i]->kind) { case RADV_SEMAPHORE_NONE: @@ -3799,7 +3949,7 @@ static VkResult radv_alloc_sem_counts(struct radv_device *device, pthread_mutex_unlock(&sems[i]->timeline.mutex); if (point) { - counts->syncobj[syncobj_idx++] = point->syncobj; + counts->syncobj[non_reset_idx++] = point->syncobj; } else { /* Explicitly remove the semaphore so we might not find * a point later post-submit. 
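+				 * Later passes over the semaphore array
+				 * null-check each entry, so a cleared slot
+				 * is simply skipped.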
*/ @@ -3807,6 +3957,11 @@ static VkResult radv_alloc_sem_counts(struct radv_device *device, } break; } + case RADV_SEMAPHORE_TIMELINE_SYNCOBJ: + counts->syncobj[counts->syncobj_count + timeline_idx] = sems[i]->syncobj; + counts->points[timeline_idx] = timeline_values[i]; + ++timeline_idx; + break; } } @@ -3817,11 +3972,11 @@ static VkResult radv_alloc_sem_counts(struct radv_device *device, fence->temporary.kind != RADV_FENCE_NONE ? &fence->temporary : &fence->permanent; if (part->kind == RADV_FENCE_SYNCOBJ) - counts->syncobj[syncobj_idx++] = part->syncobj; + counts->syncobj[non_reset_idx++] = part->syncobj; } - assert(syncobj_idx <= counts->syncobj_count); - counts->syncobj_count = syncobj_idx; + assert(MAX2(syncobj_idx, non_reset_idx) <= counts->syncobj_count); + counts->syncobj_count = MAX2(syncobj_idx, non_reset_idx); return VK_SUCCESS; } @@ -3829,9 +3984,9 @@ static VkResult radv_alloc_sem_counts(struct radv_device *device, static void radv_free_sem_info(struct radv_winsys_sem_info *sem_info) { - free(sem_info->wait.syncobj); + free(sem_info->wait.points); free(sem_info->wait.sem); - free(sem_info->signal.syncobj); + free(sem_info->signal.points); free(sem_info->signal.sem); } @@ -3901,15 +4056,19 @@ radv_finalize_timelines(struct radv_device *device, point->wait_count -= 2; radv_timeline_trigger_waiters_locked(&signal_sems[i]->timeline, processing_list); pthread_mutex_unlock(&signal_sems[i]->timeline.mutex); + } else if (signal_sems[i] && signal_sems[i]->kind == RADV_SEMAPHORE_TIMELINE_SYNCOBJ) { + signal_sems[i]->timeline_syncobj.max_point = + MAX2(signal_sems[i]->timeline_syncobj.max_point, signal_values[i]); } } } -static void +static VkResult radv_sparse_buffer_bind_memory(struct radv_device *device, const VkSparseBufferMemoryBindInfo *bind) { RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer); + VkResult result; for (uint32_t i = 0; i < bind->bindCount; ++i) { struct radv_device_memory *mem = NULL; @@ -3917,19 +4076,24 @@ radv_sparse_buffer_bind_memory(struct radv_device *device, if (bind->pBinds[i].memory != VK_NULL_HANDLE) mem = radv_device_memory_from_handle(bind->pBinds[i].memory); - device->ws->buffer_virtual_bind(buffer->bo, - bind->pBinds[i].resourceOffset, - bind->pBinds[i].size, - mem ? mem->bo : NULL, - bind->pBinds[i].memoryOffset); + result = device->ws->buffer_virtual_bind(buffer->bo, + bind->pBinds[i].resourceOffset, + bind->pBinds[i].size, + mem ? mem->bo : NULL, + bind->pBinds[i].memoryOffset); + if (result != VK_SUCCESS) + return result; } + + return VK_SUCCESS; } -static void +static VkResult radv_sparse_image_opaque_bind_memory(struct radv_device *device, const VkSparseImageOpaqueMemoryBindInfo *bind) { RADV_FROM_HANDLE(radv_image, image, bind->image); + VkResult result; for (uint32_t i = 0; i < bind->bindCount; ++i) { struct radv_device_memory *mem = NULL; @@ -3937,12 +4101,16 @@ radv_sparse_image_opaque_bind_memory(struct radv_device *device, if (bind->pBinds[i].memory != VK_NULL_HANDLE) mem = radv_device_memory_from_handle(bind->pBinds[i].memory); - device->ws->buffer_virtual_bind(image->bo, - bind->pBinds[i].resourceOffset, - bind->pBinds[i].size, - mem ? mem->bo : NULL, - bind->pBinds[i].memoryOffset); + result = device->ws->buffer_virtual_bind(image->bo, + bind->pBinds[i].resourceOffset, + bind->pBinds[i].size, + mem ? 
mem->bo : NULL, + bind->pBinds[i].memoryOffset); + if (result != VK_SUCCESS) + return result; } + + return VK_SUCCESS; } static VkResult @@ -4043,6 +4211,11 @@ struct radv_queue_submission { uint32_t signal_value_count; }; +static VkResult +radv_queue_trigger_submission(struct radv_deferred_queue_submission *submission, + uint32_t decrement, + struct list_head *processing_list); + static VkResult radv_create_deferred_submission(struct radv_queue *queue, const struct radv_queue_submission *submission, @@ -4138,7 +4311,7 @@ radv_create_deferred_submission(struct radv_queue *queue, return VK_SUCCESS; } -static void +static VkResult radv_queue_enqueue_submission(struct radv_deferred_queue_submission *submission, struct list_head *processing_list) { @@ -4169,9 +4342,7 @@ radv_queue_enqueue_submission(struct radv_deferred_queue_submission *submission, * submitted, but if the queue was empty, we decrement ourselves as there is no previous * submission. */ uint32_t decrement = submission->wait_semaphore_count - wait_cnt + (is_first ? 1 : 0); - if (__atomic_sub_fetch(&submission->submission_wait_count, decrement, __ATOMIC_ACQ_REL) == 0) { - list_addtail(&submission->processing_list, processing_list); - } + return radv_queue_trigger_submission(submission, decrement, processing_list); } static void @@ -4187,9 +4358,7 @@ radv_queue_submission_update_queue(struct radv_deferred_queue_submission *submis list_first_entry(&submission->queue->pending_submissions, struct radv_deferred_queue_submission, queue_pending_list); - if (p_atomic_dec_zero(&next_submission->submission_wait_count)) { - list_addtail(&next_submission->processing_list, processing_list); - } + radv_queue_trigger_submission(next_submission, 1, processing_list); } pthread_mutex_unlock(&submission->queue->pending_mutex); @@ -4252,13 +4421,17 @@ radv_queue_submit_deferred(struct radv_deferred_queue_submission *submission, goto fail; for (uint32_t i = 0; i < submission->buffer_bind_count; ++i) { - radv_sparse_buffer_bind_memory(queue->device, - submission->buffer_binds + i); + result = radv_sparse_buffer_bind_memory(queue->device, + submission->buffer_binds + i); + if (result != VK_SUCCESS) + goto fail; } for (uint32_t i = 0; i < submission->image_opaque_bind_count; ++i) { - radv_sparse_image_opaque_bind_memory(queue->device, - submission->image_opaque_binds + i); + result = radv_sparse_image_opaque_bind_memory(queue->device, + submission->image_opaque_binds + i); + if (result != VK_SUCCESS) + goto fail; } if (!submission->cmd_buffer_count) { @@ -4349,7 +4522,7 @@ fail: * VK_ERROR_DEVICE_LOST to ensure the clients do not attempt * to submit the same job again to this device. 
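+		 * radv_device_set_lost() also bumps device->lost, so later
+		 * submit/wait entry points return VK_ERROR_DEVICE_LOST early.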
*/ - result = VK_ERROR_DEVICE_LOST; + result = radv_device_set_lost(queue->device, "vkQueueSubmit() failed"); } radv_free_temp_syncobjs(queue->device, @@ -4374,6 +4547,131 @@ radv_process_submissions(struct list_head *processing_list) return VK_SUCCESS; } +static VkResult +wait_for_submission_timelines_available(struct radv_deferred_queue_submission *submission, + uint64_t timeout) +{ + struct radv_device *device = submission->queue->device; + uint32_t syncobj_count = 0; + uint32_t syncobj_idx = 0; + + for (uint32_t i = 0; i < submission->wait_semaphore_count; ++i) { + if (submission->wait_semaphores[i]->kind != RADV_SEMAPHORE_TIMELINE_SYNCOBJ) + continue; + + if (submission->wait_semaphores[i]->timeline_syncobj.max_point >= submission->wait_values[i]) + continue; + ++syncobj_count; + } + + if (!syncobj_count) + return VK_SUCCESS; + + uint64_t *points = malloc((sizeof(uint64_t) + sizeof(uint32_t)) * syncobj_count); + if (!points) + return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); + + uint32_t *syncobj = (uint32_t*)(points + syncobj_count); + + for (uint32_t i = 0; i < submission->wait_semaphore_count; ++i) { + if (submission->wait_semaphores[i]->kind != RADV_SEMAPHORE_TIMELINE_SYNCOBJ) + continue; + + if (submission->wait_semaphores[i]->timeline_syncobj.max_point >= submission->wait_values[i]) + continue; + + syncobj[syncobj_idx] = submission->wait_semaphores[i]->syncobj; + points[syncobj_idx] = submission->wait_values[i]; + ++syncobj_idx; + } + bool success = device->ws->wait_timeline_syncobj(device->ws, syncobj, points, syncobj_idx, true, true, timeout); + + free(points); + return success ? VK_SUCCESS : VK_TIMEOUT; +} + +static void* radv_queue_submission_thread_run(void *q) +{ + struct radv_queue *queue = q; + + pthread_mutex_lock(&queue->thread_mutex); + while (!p_atomic_read(&queue->thread_exit)) { + struct radv_deferred_queue_submission *submission = queue->thread_submission; + struct list_head processing_list; + VkResult result = VK_SUCCESS; + if (!submission) { + pthread_cond_wait(&queue->thread_cond, &queue->thread_mutex); + continue; + } + pthread_mutex_unlock(&queue->thread_mutex); + + /* Wait at most 5 seconds so we have a chance to notice shutdown when + * a semaphore never gets signaled. If it takes longer we just retry + * the wait next iteration. */ + result = wait_for_submission_timelines_available(submission, + radv_get_absolute_timeout(5000000000)); + if (result != VK_SUCCESS) { + pthread_mutex_lock(&queue->thread_mutex); + continue; + } + + /* The lock isn't held but nobody will add one until we finish + * the current submission. 
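+		 * radv_queue_trigger_submission() asserts the slot is NULL
+		 * before storing the next submission.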
*/ + p_atomic_set(&queue->thread_submission, NULL); + + list_inithead(&processing_list); + list_addtail(&submission->processing_list, &processing_list); + result = radv_process_submissions(&processing_list); + + pthread_mutex_lock(&queue->thread_mutex); + } + pthread_mutex_unlock(&queue->thread_mutex); + return NULL; +} + +static VkResult +radv_queue_trigger_submission(struct radv_deferred_queue_submission *submission, + uint32_t decrement, + struct list_head *processing_list) +{ + struct radv_queue *queue = submission->queue; + int ret; + if (p_atomic_add_return(&submission->submission_wait_count, -decrement)) + return VK_SUCCESS; + + if (wait_for_submission_timelines_available(submission, radv_get_absolute_timeout(0)) == VK_SUCCESS) { + list_addtail(&submission->processing_list, processing_list); + return VK_SUCCESS; + } + + pthread_mutex_lock(&queue->thread_mutex); + + /* A submission can only be ready for the thread if it doesn't have + * any predecessors in the same queue, so there can only be one such + * submission at a time. */ + assert(queue->thread_submission == NULL); + + /* Only start the thread on demand to save resources for the many games + * which only use binary semaphores. */ + if (!queue->thread_running) { + ret = pthread_create(&queue->submission_thread, NULL, + radv_queue_submission_thread_run, queue); + if (ret) { + pthread_mutex_unlock(&queue->thread_mutex); + return vk_errorf(queue->device->instance, + VK_ERROR_DEVICE_LOST, + "Failed to start submission thread"); + } + queue->thread_running = true; + } + + queue->thread_submission = submission; + pthread_mutex_unlock(&queue->thread_mutex); + + pthread_cond_signal(&queue->thread_cond); + return VK_SUCCESS; +} + static VkResult radv_queue_submit(struct radv_queue *queue, const struct radv_queue_submission *submission) { @@ -4386,7 +4684,12 @@ static VkResult radv_queue_submit(struct radv_queue *queue, struct list_head processing_list; list_inithead(&processing_list); - radv_queue_enqueue_submission(deferred, &processing_list); + result = radv_queue_enqueue_submission(deferred, &processing_list); + if (result != VK_SUCCESS) { + /* If anything is in the list we leak. 
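+		 * (the failure paths return before anything is queued, which
+		 * the assert below double-checks)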
*/ + assert(list_is_empty(&processing_list)); + return result; + } return radv_process_submissions(&processing_list); } @@ -4440,6 +4743,9 @@ VkResult radv_QueueSubmit( uint32_t fence_idx = 0; bool flushed_caches = false; + if (radv_device_is_lost(queue->device)) + return VK_ERROR_DEVICE_LOST; + if (fence != VK_NULL_HANDLE) { for (uint32_t i = 0; i < submitCount; ++i) if (radv_submit_has_effects(pSubmits + i)) @@ -4489,11 +4795,29 @@ VkResult radv_QueueSubmit( return VK_SUCCESS; } +static const char * +radv_get_queue_family_name(struct radv_queue *queue) +{ + switch (queue->queue_family_index) { + case RADV_QUEUE_GENERAL: + return "graphics"; + case RADV_QUEUE_COMPUTE: + return "compute"; + case RADV_QUEUE_TRANSFER: + return "transfer"; + default: + unreachable("Unknown queue family"); + } +} + VkResult radv_QueueWaitIdle( VkQueue _queue) { RADV_FROM_HANDLE(radv_queue, queue, _queue); + if (radv_device_is_lost(queue->device)) + return VK_ERROR_DEVICE_LOST; + pthread_mutex_lock(&queue->pending_mutex); while (!list_is_empty(&queue->pending_submissions)) { pthread_cond_wait(&queue->device->timeline_cond, &queue->pending_mutex); @@ -4502,8 +4826,12 @@ VkResult radv_QueueWaitIdle( if (!queue->device->ws->ctx_wait_idle(queue->hw_ctx, radv_queue_family_to_ring(queue->queue_family_index), - queue->queue_idx)) - return VK_ERROR_DEVICE_LOST; + queue->queue_idx)) { + return radv_device_set_lost(queue->device, + "Failed to wait for a '%s' queue " + "to be idle. GPU hang ?", + radv_get_queue_family_name(queue)); + } return VK_SUCCESS; } @@ -4801,6 +5129,26 @@ static VkResult radv_alloc_memory(struct radv_device *device, } else { close(import_info->fd); } + + if (mem->image && mem->image->plane_count == 1 && + !vk_format_is_depth_or_stencil(mem->image->vk_format)) { + struct radeon_bo_metadata metadata; + device->ws->buffer_get_metadata(mem->bo, &metadata); + + struct radv_image_create_info create_info = { + .no_metadata_planes = true, + .bo_metadata = &metadata + }; + + /* This gives a basic ability to import radeonsi images + * that don't have DCC. This is not guaranteed by any + * spec and can be removed after we support modifiers. 
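+			 * The layout is re-derived from the imported BO's
+			 * metadata via radv_image_create_layout() below.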
*/ + result = radv_image_create_layout(device, create_info, mem->image); + if (result != VK_SUCCESS) { + device->ws->buffer_destroy(mem->bo); + goto fail; + } + } } else if (host_ptr_info) { assert(host_ptr_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT); mem->bo = device->ws->buffer_from_ptr(device->ws, host_ptr_info->pHostPointer, @@ -5149,6 +5497,9 @@ static bool radv_sparse_bind_has_effects(const VkBindSparseInfo *info) VkResult result; uint32_t fence_idx = 0; + if (radv_device_is_lost(queue->device)) + return VK_ERROR_DEVICE_LOST; + if (fence != VK_NULL_HANDLE) { for (uint32_t i = 0; i < bindInfoCount; ++i) if (radv_sparse_bind_has_effects(pBindInfo + i)) @@ -5293,24 +5644,6 @@ void radv_DestroyFence( radv_destroy_fence(device, pAllocator, fence); } - -uint64_t radv_get_current_time(void) -{ - struct timespec tv; - clock_gettime(CLOCK_MONOTONIC, &tv); - return tv.tv_nsec + tv.tv_sec*1000000000ull; -} - -static uint64_t radv_get_absolute_timeout(uint64_t timeout) -{ - uint64_t current_time = radv_get_current_time(); - - timeout = MIN2(UINT64_MAX - current_time, timeout); - - return current_time + timeout; -} - - static bool radv_all_fences_plain_and_submitted(struct radv_device *device, uint32_t fenceCount, const VkFence *pFences) { @@ -5349,6 +5682,10 @@ VkResult radv_WaitForFences( uint64_t timeout) { RADV_FROM_HANDLE(radv_device, device, _device); + + if (radv_device_is_lost(device)) + return VK_ERROR_DEVICE_LOST; + timeout = radv_get_absolute_timeout(timeout); if (device->always_use_syncobj && @@ -5505,6 +5842,9 @@ VkResult radv_GetFenceStatus(VkDevice _device, VkFence _fence) fence->temporary.kind != RADV_FENCE_NONE ? &fence->temporary : &fence->permanent; + if (radv_device_is_lost(device)) + return VK_ERROR_DEVICE_LOST; + switch (part->kind) { case RADV_FENCE_NONE: break; @@ -5614,6 +5954,7 @@ radv_timeline_add_point_locked(struct radv_device *device, struct radv_timeline_point *ret = NULL; struct radv_timeline_point *prev = NULL; + int r; if (p <= timeline->highest_signaled) return NULL; @@ -5630,7 +5971,11 @@ radv_timeline_add_point_locked(struct radv_device *device, if (list_is_empty(&timeline->free_points)) { ret = malloc(sizeof(struct radv_timeline_point)); - device->ws->create_syncobj(device->ws, false, &ret->syncobj); + r = device->ws->create_syncobj(device->ws, false, &ret->syncobj); + if (r) { + free(ret); + return NULL; + } } else { ret = list_first_entry(&timeline->free_points, struct radv_timeline_point, list); list_del(&ret->list); @@ -5651,31 +5996,35 @@ radv_timeline_add_point_locked(struct radv_device *device, static VkResult -radv_timeline_wait_locked(struct radv_device *device, - struct radv_timeline *timeline, - uint64_t value, - uint64_t abs_timeout) +radv_timeline_wait(struct radv_device *device, + struct radv_timeline *timeline, + uint64_t value, + uint64_t abs_timeout) { + pthread_mutex_lock(&timeline->mutex); + while(timeline->highest_submitted < value) { struct timespec abstime; timespec_from_nsec(&abstime, abs_timeout); pthread_cond_timedwait(&device->timeline_cond, &timeline->mutex, &abstime); - if (radv_get_current_time() >= abs_timeout && timeline->highest_submitted < value) + if (radv_get_current_time() >= abs_timeout && timeline->highest_submitted < value) { + pthread_mutex_unlock(&timeline->mutex); return VK_TIMEOUT; + } } struct radv_timeline_point *point = radv_timeline_find_point_at_least_locked(device, timeline, value); + pthread_mutex_unlock(&timeline->mutex); if (!point) return VK_SUCCESS; - 
pthread_mutex_unlock(&timeline->mutex); - bool success = device->ws->wait_syncobj(device->ws, &point->syncobj, 1, true, abs_timeout); pthread_mutex_lock(&timeline->mutex); point->wait_count--; + pthread_mutex_unlock(&timeline->mutex); return success ? VK_SUCCESS : VK_TIMEOUT; } @@ -5688,9 +6037,7 @@ radv_timeline_trigger_waiters_locked(struct radv_timeline *timeline, if (waiter->value > timeline->highest_submitted) continue; - if (p_atomic_dec_zero(&waiter->submission->submission_wait_count)) { - list_addtail(&waiter->submission->processing_list, processing_list); - } + radv_queue_trigger_submission(waiter->submission, 1, processing_list); list_del(&waiter->list); } } @@ -5709,6 +6056,7 @@ void radv_destroy_semaphore_part(struct radv_device *device, radv_destroy_timeline(device, &part->timeline); break; case RADV_SEMAPHORE_SYNCOBJ: + case RADV_SEMAPHORE_TIMELINE_SYNCOBJ: device->ws->destroy_syncobj(device->ws, part->syncobj); break; } @@ -5766,7 +6114,17 @@ VkResult radv_CreateSemaphore( sem->temporary.kind = RADV_SEMAPHORE_NONE; sem->permanent.kind = RADV_SEMAPHORE_NONE; - if (type == VK_SEMAPHORE_TYPE_TIMELINE) { + if (type == VK_SEMAPHORE_TYPE_TIMELINE && + device->physical_device->rad_info.has_timeline_syncobj) { + int ret = device->ws->create_syncobj(device->ws, false, &sem->permanent.syncobj); + if (ret) { + radv_destroy_semaphore(device, pAllocator, sem); + return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); + } + device->ws->signal_syncobj(device->ws, sem->permanent.syncobj, initial_value); + sem->permanent.timeline_syncobj.max_point = initial_value; + sem->permanent.kind = RADV_SEMAPHORE_TIMELINE_SYNCOBJ; + } else if (type == VK_SEMAPHORE_TYPE_TIMELINE) { radv_create_timeline(&sem->permanent.timeline, initial_value); sem->permanent.kind = RADV_SEMAPHORE_TIMELINE; } else if (device->always_use_syncobj || handleTypes) { @@ -5812,6 +6170,9 @@ radv_GetSemaphoreCounterValue(VkDevice _device, RADV_FROM_HANDLE(radv_device, device, _device); RADV_FROM_HANDLE(radv_semaphore, semaphore, _semaphore); + if (radv_device_is_lost(device)) + return VK_ERROR_DEVICE_LOST; + struct radv_semaphore_part *part = semaphore->temporary.kind != RADV_SEMAPHORE_NONE ? 
&semaphore->temporary : &semaphore->permanent; @@ -5823,6 +6184,9 @@ radv_GetSemaphoreCounterValue(VkDevice _device, pthread_mutex_unlock(&part->timeline.mutex); return VK_SUCCESS; } + case RADV_SEMAPHORE_TIMELINE_SYNCOBJ: { + return device->ws->query_syncobj(device->ws, part->syncobj, pValue); + } case RADV_SEMAPHORE_NONE: case RADV_SEMAPHORE_SYNCOBJ: case RADV_SEMAPHORE_WINSYS: @@ -5841,9 +6205,7 @@ radv_wait_timelines(struct radv_device *device, for (;;) { for(uint32_t i = 0; i < pWaitInfo->semaphoreCount; ++i) { RADV_FROM_HANDLE(radv_semaphore, semaphore, pWaitInfo->pSemaphores[i]); - pthread_mutex_lock(&semaphore->permanent.timeline.mutex); - VkResult result = radv_timeline_wait_locked(device, &semaphore->permanent.timeline, pWaitInfo->pValues[i], 0); - pthread_mutex_unlock(&semaphore->permanent.timeline.mutex); + VkResult result = radv_timeline_wait(device, &semaphore->permanent.timeline, pWaitInfo->pValues[i], 0); if (result == VK_SUCCESS) return VK_SUCCESS; @@ -5855,9 +6217,7 @@ radv_wait_timelines(struct radv_device *device, for(uint32_t i = 0; i < pWaitInfo->semaphoreCount; ++i) { RADV_FROM_HANDLE(radv_semaphore, semaphore, pWaitInfo->pSemaphores[i]); - pthread_mutex_lock(&semaphore->permanent.timeline.mutex); - VkResult result = radv_timeline_wait_locked(device, &semaphore->permanent.timeline, pWaitInfo->pValues[i], abs_timeout); - pthread_mutex_unlock(&semaphore->permanent.timeline.mutex); + VkResult result = radv_timeline_wait(device, &semaphore->permanent.timeline, pWaitInfo->pValues[i], abs_timeout); if (result != VK_SUCCESS) return result; @@ -5870,8 +6230,33 @@ radv_WaitSemaphores(VkDevice _device, uint64_t timeout) { RADV_FROM_HANDLE(radv_device, device, _device); + + if (radv_device_is_lost(device)) + return VK_ERROR_DEVICE_LOST; + uint64_t abs_timeout = radv_get_absolute_timeout(timeout); - return radv_wait_timelines(device, pWaitInfo, abs_timeout); + + if (radv_semaphore_from_handle(pWaitInfo->pSemaphores[0])->permanent.kind == RADV_SEMAPHORE_TIMELINE) + return radv_wait_timelines(device, pWaitInfo, abs_timeout); + + if (pWaitInfo->semaphoreCount > UINT32_MAX / sizeof(uint32_t)) + return vk_errorf(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY, "semaphoreCount integer overflow"); + + bool wait_all = !(pWaitInfo->flags & VK_SEMAPHORE_WAIT_ANY_BIT_KHR); + uint32_t *handles = malloc(sizeof(*handles) * pWaitInfo->semaphoreCount); + if (!handles) + return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY); + + for (uint32_t i = 0; i < pWaitInfo->semaphoreCount; ++i) { + RADV_FROM_HANDLE(radv_semaphore, semaphore, pWaitInfo->pSemaphores[i]); + handles[i] = semaphore->permanent.syncobj; + } + + bool success = device->ws->wait_timeline_syncobj(device->ws, handles, pWaitInfo->pValues, + pWaitInfo->semaphoreCount, wait_all, false, + abs_timeout); + free(handles); + return success ? VK_SUCCESS : VK_TIMEOUT; } VkResult @@ -5896,7 +6281,21 @@ radv_SignalSemaphore(VkDevice _device, radv_timeline_trigger_waiters_locked(&part->timeline, &processing_list); pthread_mutex_unlock(&part->timeline.mutex); - return radv_process_submissions(&processing_list); + VkResult result = radv_process_submissions(&processing_list); + + /* This needs to happen after radv_process_submissions, so + * that any submitted submissions that are now unblocked get + * processed before we wake the application. This way we + * ensure that any binary semaphores that are now unblocked + * are usable by the application. 
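+		 * device->timeline_cond is the condition variable that
+		 * radv_timeline_wait() blocks on.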
*/ + pthread_cond_broadcast(&device->timeline_cond); + + return result; + } + case RADV_SEMAPHORE_TIMELINE_SYNCOBJ: { + part->timeline_syncobj.max_point = MAX2(part->timeline_syncobj.max_point, pSignalInfo->value); + device->ws->signal_syncobj(device->ws, part->syncobj, pSignalInfo->value); + break; } case RADV_SEMAPHORE_NONE: case RADV_SEMAPHORE_SYNCOBJ: @@ -5971,8 +6370,12 @@ VkResult radv_GetEventStatus( VkDevice _device, VkEvent _event) { + RADV_FROM_HANDLE(radv_device, device, _device); RADV_FROM_HANDLE(radv_event, event, _event); + if (radv_device_is_lost(device)) + return VK_ERROR_DEVICE_LOST; + if (*event->map == 1) return VK_EVENT_SET; return VK_EVENT_RESET; @@ -7163,7 +7566,7 @@ static VkResult radv_import_sync_fd(struct radv_device *device, } } else { if (fd == -1) - device->ws->signal_syncobj(device->ws, syncobj_handle); + device->ws->signal_syncobj(device->ws, syncobj_handle, 0); } if (fd != -1) { @@ -7185,20 +7588,24 @@ VkResult radv_ImportSemaphoreFdKHR(VkDevice _device, RADV_FROM_HANDLE(radv_semaphore, sem, pImportSemaphoreFdInfo->semaphore); VkResult result; struct radv_semaphore_part *dst = NULL; + bool timeline = sem->permanent.kind == RADV_SEMAPHORE_TIMELINE_SYNCOBJ; if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) { + assert(!timeline); dst = &sem->temporary; } else { dst = &sem->permanent; } - uint32_t syncobj = dst->kind == RADV_SEMAPHORE_SYNCOBJ ? dst->syncobj : 0; + uint32_t syncobj = (dst->kind == RADV_SEMAPHORE_SYNCOBJ || + dst->kind == RADV_SEMAPHORE_TIMELINE_SYNCOBJ) ? dst->syncobj : 0; switch(pImportSemaphoreFdInfo->handleType) { case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT: result = radv_import_opaque_fd(device, pImportSemaphoreFdInfo->fd, &syncobj); break; case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT: + assert(!timeline); result = radv_import_sync_fd(device, pImportSemaphoreFdInfo->fd, &syncobj); break; default: @@ -7208,6 +7615,10 @@ VkResult radv_ImportSemaphoreFdKHR(VkDevice _device, if (result == VK_SUCCESS) { dst->syncobj = syncobj; dst->kind = RADV_SEMAPHORE_SYNCOBJ; + if (timeline) { + dst->kind = RADV_SEMAPHORE_TIMELINE_SYNCOBJ; + dst->timeline_syncobj.max_point = 0; + } } return result; @@ -7223,10 +7634,12 @@ VkResult radv_GetSemaphoreFdKHR(VkDevice _device, uint32_t syncobj_handle; if (sem->temporary.kind != RADV_SEMAPHORE_NONE) { - assert(sem->temporary.kind == RADV_SEMAPHORE_SYNCOBJ); + assert(sem->temporary.kind == RADV_SEMAPHORE_SYNCOBJ || + sem->temporary.kind == RADV_SEMAPHORE_TIMELINE_SYNCOBJ); syncobj_handle = sem->temporary.syncobj; } else { - assert(sem->permanent.kind == RADV_SEMAPHORE_SYNCOBJ); + assert(sem->permanent.kind == RADV_SEMAPHORE_SYNCOBJ || + sem->permanent.kind == RADV_SEMAPHORE_TIMELINE_SYNCOBJ); syncobj_handle = sem->permanent.syncobj; } @@ -7262,7 +7675,13 @@ void radv_GetPhysicalDeviceExternalSemaphoreProperties( RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice); VkSemaphoreTypeKHR type = radv_get_semaphore_type(pExternalSemaphoreInfo->pNext, NULL); - if (type == VK_SEMAPHORE_TYPE_TIMELINE) { + if (type == VK_SEMAPHORE_TYPE_TIMELINE && pdevice->rad_info.has_timeline_syncobj && + pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) { + pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT; + pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT; + pExternalSemaphoreProperties->externalSemaphoreFeatures = 
VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT | + VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT; + } else if (type == VK_SEMAPHORE_TYPE_TIMELINE) { pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0; pExternalSemaphoreProperties->compatibleHandleTypes = 0; pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
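
For reference, a minimal application-side sketch of the timeline-semaphore API that the new RADV_SEMAPHORE_TIMELINE_SYNCOBJ paths above service (radv_CreateSemaphore, radv_SignalSemaphore, radv_WaitSemaphores). This is illustrative only, assuming a device that advertises Vulkan 1.2 timeline semaphores; every entry point used is core Vulkan, and example_timeline_roundtrip is a made-up name:

#include <vulkan/vulkan.h>

/* Create a timeline semaphore, signal value 1 from the host, then wait
 * for it. On RADV with DRM timeline syncobj support, the signal reaches
 * ws->signal_syncobj() and the wait reaches ws->wait_timeline_syncobj(). */
static VkResult example_timeline_roundtrip(VkDevice dev)
{
	VkSemaphoreTypeCreateInfo type_info = {
		.sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
		.semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE,
		.initialValue = 0,
	};
	VkSemaphoreCreateInfo create_info = {
		.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
		.pNext = &type_info,
	};
	VkSemaphore sem;
	VkResult result = vkCreateSemaphore(dev, &create_info, NULL, &sem);
	if (result != VK_SUCCESS)
		return result;

	VkSemaphoreSignalInfo signal_info = {
		.sType = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO,
		.semaphore = sem,
		.value = 1,
	};
	result = vkSignalSemaphore(dev, &signal_info);

	if (result == VK_SUCCESS) {
		const uint64_t value = 1;
		VkSemaphoreWaitInfo wait_info = {
			.sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,
			.semaphoreCount = 1,
			.pSemaphores = &sem,
			.pValues = &value,
		};
		result = vkWaitSemaphores(dev, &wait_info, UINT64_MAX);
	}

	vkDestroySemaphore(dev, sem, NULL);
	return result;
}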