*/
#include "dirent.h"
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/audit.h>
-#include <linux/bpf.h>
-#include <linux/filter.h>
-#include <linux/seccomp.h>
-#include <linux/unistd.h>
+
+#include <stdatomic.h>
#include <stdbool.h>
-#include <stddef.h>
-#include <stdio.h>
#include <string.h>
-#include <sys/prctl.h>
-#include <sys/wait.h>
#include <unistd.h>
#include <fcntl.h>
void radv_destroy_semaphore_part(struct radv_device *device,
struct radv_semaphore_part *part);
+static VkResult
+radv_create_pthread_cond(pthread_cond_t *cond);
+
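+/* Current CLOCK_MONOTONIC time in nanoseconds; the time base for all
+ * timeouts below. */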
+uint64_t radv_get_current_time(void)
+{
+ struct timespec tv;
+ clock_gettime(CLOCK_MONOTONIC, &tv);
+ return tv.tv_nsec + tv.tv_sec*1000000000ull;
+}
+
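+/* Turn a relative timeout into an absolute one, clamped so the addition
+ * cannot overflow UINT64_MAX. */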
+static uint64_t radv_get_absolute_timeout(uint64_t timeout)
+{
+ uint64_t current_time = radv_get_current_time();
+
+ timeout = MIN2(UINT64_MAX - current_time, timeout);
+
+ return current_time + timeout;
+}
+
static int
radv_device_get_cache_uuid(enum radeon_family family, void *uuid)
{
disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
device->disk_cache = disk_cache_create(device->name, buf, shader_env_flags);
- if (device->rad_info.chip_class < GFX8 || !device->use_llvm)
+ if (device->rad_info.chip_class < GFX8)
fprintf(stderr, "WARNING: radv is not a conformant vulkan implementation, testing use only.\n");
radv_get_driver_uuid(&device->driver_uuid);
{"metashaders", RADV_DEBUG_DUMP_META_SHADERS},
{"nomemorycache", RADV_DEBUG_NO_MEMORY_CACHE},
{"llvm", RADV_DEBUG_LLVM},
+ {"forcecompress", RADV_DEBUG_FORCE_COMPRESS},
{NULL, 0}
};
instance->debug_flags |= RADV_DEBUG_ZERO_VRAM;
} else if (!strcmp(engine_name, "Quantic Dream Engine")) {
/* Fix various artifacts in Detroit: Become Human */
- instance->debug_flags |= RADV_DEBUG_ZERO_VRAM;
+ instance->debug_flags |= RADV_DEBUG_ZERO_VRAM |
+ RADV_DEBUG_DISCARD_TO_DEMOTE;
}
}
DRI_CONF_RADV_REPORT_LLVM9_VERSION_STRING("false")
DRI_CONF_RADV_ENABLE_MRT_OUTPUT_NAN_FIXUP("false")
DRI_CONF_RADV_NO_DYNAMIC_BOUNDS("false")
+ DRI_CONF_RADV_OVERRIDE_UNIFORM_OFFSET_ALIGNMENT(0)
DRI_CONF_SECTION_END
DRI_CONF_SECTION_DEBUG
driParseConfigFiles(&instance->dri_options,
&instance->available_dri_options,
0, "radv", NULL,
+ instance->applicationName,
+ instance->applicationVersion,
instance->engineName,
instance->engineVersion);
}
if (pCreateInfo->pApplicationInfo) {
const VkApplicationInfo *app = pCreateInfo->pApplicationInfo;
+ instance->applicationName =
+ vk_strdup(&instance->alloc, app->pApplicationName,
+ VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+ instance->applicationVersion = app->applicationVersion;
+
instance->engineName =
vk_strdup(&instance->alloc, app->pEngineName,
VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
instance->debug_flags = parse_debug_string(getenv("RADV_DEBUG"),
radv_debug_options);
- instance->perftest_flags = parse_debug_string(getenv("RADV_PERFTEST"),
- radv_perftest_options);
+ const char *radv_perftest_str = getenv("RADV_PERFTEST");
+ instance->perftest_flags = parse_debug_string(radv_perftest_str,
+ radv_perftest_options);
+
+ if (radv_perftest_str) {
+ /* Output warnings for well-known RADV_PERFTEST options that no
+ * longer exist or are deprecated.
+ */
+ if (strstr(radv_perftest_str, "aco")) {
+ fprintf(stderr, "*******************************************************************************\n");
+ fprintf(stderr, "* WARNING: Unknown option RADV_PERFTEST='aco'. ACO is enabled by default now. *\n");
+ fprintf(stderr, "*******************************************************************************\n");
+ }
+ if (strstr(radv_perftest_str, "llvm")) {
+ fprintf(stderr, "*********************************************************************************\n");
+ fprintf(stderr, "* WARNING: Unknown option 'RADV_PERFTEST=llvm'. Did you mean 'RADV_DEBUG=llvm'? *\n");
+ fprintf(stderr, "*********************************************************************************\n");
+ abort();
+ }
+ }
if (instance->debug_flags & RADV_DEBUG_STARTUP)
radv_logi("Created an instance");
}
vk_free(&instance->alloc, instance->engineName);
+ vk_free(&instance->alloc, instance->applicationName);
VG(VALGRIND_DESTROY_MEMPOOL(instance));
f->bufferDeviceAddress = true;
f->bufferDeviceAddressCaptureReplay = false;
f->bufferDeviceAddressMultiDevice = false;
- f->vulkanMemoryModel = false;
- f->vulkanMemoryModelDeviceScope = false;
+ f->vulkanMemoryModel = true;
+ f->vulkanMemoryModelDeviceScope = true;
f->vulkanMemoryModelAvailabilityVisibilityChains = false;
f->shaderOutputViewportIndex = true;
f->shaderOutputLayer = true;
features->pipelineCreationCacheControl = true;
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR: {
+ VkPhysicalDeviceVulkanMemoryModelFeaturesKHR *features =
+ (VkPhysicalDeviceVulkanMemoryModelFeaturesKHR *)ext;
+ CORE_FEATURE(1, 2, vulkanMemoryModel);
+ CORE_FEATURE(1, 2, vulkanMemoryModelDeviceScope);
+ CORE_FEATURE(1, 2, vulkanMemoryModelAvailabilityVisibilityChains);
+ break;
+ }
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT: {
VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *features =
(VkPhysicalDeviceExtendedDynamicStateFeaturesEXT *) ext;
features->robustImageAccess = true;
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT: {
+ VkPhysicalDeviceShaderAtomicFloatFeaturesEXT *features =
+ (VkPhysicalDeviceShaderAtomicFloatFeaturesEXT *)ext;
+ features->shaderBufferFloat32Atomics = true;
+ features->shaderBufferFloat32AtomicAdd = false;
+ features->shaderBufferFloat64Atomics = true;
+ features->shaderBufferFloat64AtomicAdd = false;
+ features->shaderSharedFloat32Atomics = true;
+ features->shaderSharedFloat32AtomicAdd = pdevice->rad_info.chip_class >= GFX8 &&
+ (!pdevice->use_llvm || LLVM_VERSION_MAJOR >= 10);
+ features->shaderSharedFloat64Atomics = true;
+ features->shaderSharedFloat64AtomicAdd = false;
+ features->shaderImageFloat32Atomics = true;
+ features->shaderImageFloat32AtomicAdd = false;
+ features->sparseImageFloat32Atomics = false;
+ features->sparseImageFloat32AtomicAdd = false;
+ break;
+ }
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT: {
+ VkPhysicalDevice4444FormatsFeaturesEXT *features =
+ (VkPhysicalDevice4444FormatsFeaturesEXT *)ext;
+ features->formatA4R4G4B4 = true;
+ features->formatA4B4G4R4 = true;
+ break;
+ }
default:
break;
}
64 /* storage image */);
}
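+
+/* minUniformBufferOffsetAlignment can be raised with the
+ * radv_override_uniform_offset_alignment dri option. An unset option reads
+ * back as 0, which passes the power-of-two-or-zero check and then falls
+ * through to the 4-byte hardware minimum below. */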
+static uint32_t
+radv_uniform_buffer_offset_alignment(const struct radv_physical_device *pdevice)
+{
+ uint32_t uniform_offset_alignment = driQueryOptioni(&pdevice->instance->dri_options,
+ "radv_override_uniform_offset_alignment");
+ if (!util_is_power_of_two_or_zero(uniform_offset_alignment)) {
+ fprintf(stderr, "ERROR: invalid radv_override_uniform_offset_alignment setting %d:"
+ "not a power of two\n", uniform_offset_alignment);
+ uniform_offset_alignment = 0;
+ }
+
+ /* Take at least the hardware limit. */
+ return MAX2(uniform_offset_alignment, 4);
+}
+
void radv_GetPhysicalDeviceProperties(
VkPhysicalDevice physicalDevice,
VkPhysicalDeviceProperties* pProperties)
.viewportSubPixelBits = 8,
.minMemoryMapAlignment = 4096, /* A page */
.minTexelBufferOffsetAlignment = 4,
- .minUniformBufferOffsetAlignment = 4,
+ .minUniformBufferOffsetAlignment = radv_uniform_buffer_offset_alignment(pdevice),
.minStorageBufferOffsetAlignment = 4,
.minTexelOffset = -32,
.maxTexelOffset = 31,
p->conformanceVersion = (VkConformanceVersion) {
.major = 1,
.minor = 2,
- .subminor = 0,
+ .subminor = 3,
.patch = 0,
};
list_inithead(&queue->pending_submissions);
pthread_mutex_init(&queue->pending_mutex, NULL);
+ pthread_mutex_init(&queue->thread_mutex, NULL);
+ queue->thread_submission = NULL;
+ queue->thread_running = queue->thread_exit = false;
+ result = radv_create_pthread_cond(&queue->thread_cond);
+ if (result != VK_SUCCESS)
+ return vk_error(device->instance, result);
+
return VK_SUCCESS;
}
static void
radv_queue_finish(struct radv_queue *queue)
{
+ if (queue->thread_running) {
+ p_atomic_set(&queue->thread_exit, true);
+ pthread_cond_broadcast(&queue->thread_cond);
+ pthread_join(queue->submission_thread, NULL);
+ }
+ pthread_cond_destroy(&queue->thread_cond);
pthread_mutex_destroy(&queue->pending_mutex);
+ pthread_mutex_destroy(&queue->thread_mutex);
if (queue->hw_ctx)
queue->device->ws->ctx_destroy(queue->hw_ctx);
static void
radv_bo_list_init(struct radv_bo_list *bo_list)
{
- pthread_mutex_init(&bo_list->mutex, NULL);
+ pthread_rwlock_init(&bo_list->rwlock, NULL);
bo_list->list.count = bo_list->capacity = 0;
bo_list->list.bos = NULL;
}
radv_bo_list_finish(struct radv_bo_list *bo_list)
{
free(bo_list->list.bos);
- pthread_mutex_destroy(&bo_list->mutex);
+ pthread_rwlock_destroy(&bo_list->rwlock);
}
VkResult radv_bo_list_add(struct radv_device *device,
if (unlikely(!device->use_global_bo_list))
return VK_SUCCESS;
- pthread_mutex_lock(&bo_list->mutex);
+ pthread_rwlock_wrlock(&bo_list->rwlock);
if (bo_list->list.count == bo_list->capacity) {
unsigned capacity = MAX2(4, bo_list->capacity * 2);
void *data = realloc(bo_list->list.bos, capacity * sizeof(struct radeon_winsys_bo*));
if (!data) {
- pthread_mutex_unlock(&bo_list->mutex);
+ pthread_rwlock_unlock(&bo_list->rwlock);
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
}
bo_list->list.bos[bo_list->list.count++] = bo;
- pthread_mutex_unlock(&bo_list->mutex);
+ pthread_rwlock_unlock(&bo_list->rwlock);
return VK_SUCCESS;
}
if (unlikely(!device->use_global_bo_list))
return;
- pthread_mutex_lock(&bo_list->mutex);
+ pthread_rwlock_wrlock(&bo_list->rwlock);
/* Loop the list backwards so we find the most recently added
* memory first. */
for(unsigned i = bo_list->list.count; i-- > 0;) {
break;
}
}
- pthread_mutex_unlock(&bo_list->mutex);
+ pthread_rwlock_unlock(&bo_list->rwlock);
}
static void
return result;
}
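+
+/* SQTT (thread trace) is requested either through RADV_THREAD_TRACE, which
+ * gives the frame to start capturing at, or through RADV_THREAD_TRACE_TRIGGER,
+ * which names a trigger file. */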
+static bool radv_thread_trace_enabled(void)
+{
+ return radv_get_int_debug_option("RADV_THREAD_TRACE", -1) >= 0 ||
+ getenv("RADV_THREAD_TRACE_TRIGGER");
+}
+
static void
radv_device_init_dispatch(struct radv_device *device)
{
const struct radv_instance *instance = device->physical_device->instance;
const struct radv_device_dispatch_table *dispatch_table_layer = NULL;
bool unchecked = instance->debug_flags & RADV_DEBUG_ALL_ENTRYPOINTS;
- int radv_thread_trace = radv_get_int_debug_option("RADV_THREAD_TRACE", -1);
- if (radv_thread_trace >= 0) {
+ if (radv_thread_trace_enabled()) {
/* Use device entrypoints from the SQTT layer if enabled. */
dispatch_table_layer = &sqtt_device_dispatch_table;
}
}
}
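+
+/* Remember that the device is lost and report VK_ERROR_DEVICE_LOST with the
+ * caller's context; radv_device_is_lost() lets subsequent entrypoints fail
+ * early instead of resubmitting work to a hung GPU. */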
+VkResult
+_radv_device_set_lost(struct radv_device *device,
+ const char *file, int line,
+ const char *msg, ...)
+{
+ VkResult err;
+ va_list ap;
+
+ p_atomic_inc(&device->lost);
+
+ va_start(ap, msg);
+ err = __vk_errorv(device->physical_device->instance, device,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
+ VK_ERROR_DEVICE_LOST, file, line, msg, ap);
+ va_end(ap);
+
+ return err;
+}
+
VkResult radv_CreateDevice(
VkPhysicalDevice physicalDevice,
const VkDeviceCreateInfo* pCreateInfo,
fprintf(stderr, "*****************************************************************************\n");
fprintf(stderr, "Trace file will be dumped to %s\n", filename);
+
+ /* Wait for idle after every draw/dispatch to identify the
+ * first bad call.
+ */
+ device->instance->debug_flags |= RADV_DEBUG_SYNC_SHADERS;
+
radv_dump_enabled_options(device, stderr);
}
- int radv_thread_trace = radv_get_int_debug_option("RADV_THREAD_TRACE", -1);
- if (radv_thread_trace >= 0) {
+ if (radv_thread_trace_enabled()) {
fprintf(stderr, "*************************************************\n");
fprintf(stderr, "* WARNING: Thread trace support is experimental *\n");
fprintf(stderr, "*************************************************\n");
/* Default buffer size set to 1MB per SE. */
device->thread_trace_buffer_size =
radv_get_int_debug_option("RADV_THREAD_TRACE_BUFFER_SIZE", 1024 * 1024);
- device->thread_trace_start_frame = radv_thread_trace;
+ device->thread_trace_start_frame = radv_get_int_debug_option("RADV_THREAD_TRACE", -1);
+
+ const char *trigger_file = getenv("RADV_THREAD_TRACE_TRIGGER");
+ if (trigger_file)
+ device->thread_trace_trigger_file = strdup(trigger_file);
if (!radv_thread_trace_init(device))
goto fail;
}
+ if (getenv("RADV_TRAP_HANDLER")) {
+ /* TODO: Add support for more hardware. */
+ assert(device->physical_device->rad_info.chip_class == GFX8);
+
+ fprintf(stderr, "**********************************************************************\n");
+ fprintf(stderr, "* WARNING: RADV_TRAP_HANDLER is experimental and only for debugging! *\n");
+ fprintf(stderr, "**********************************************************************\n");
+
+ /* To get the disassembly of the faulty shaders, we have to
+ * keep some shader info around.
+ */
+ keep_shader_info = true;
+
+ if (!radv_trap_handler_init(device))
+ goto fail;
+ }
+
device->keep_shader_info = keep_shader_info;
result = radv_device_init_meta(device);
if (result != VK_SUCCESS)
for (int family = 0; family < RADV_MAX_QUEUE_FAMILIES; ++family) {
device->empty_cs[family] = device->ws->cs_create(device->ws, family);
+ if (!device->empty_cs[family])
+ goto fail;
+
switch (family) {
case RADV_QUEUE_GENERAL:
radeon_emit(device->empty_cs[family], PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
radeon_emit(device->empty_cs[family], 0);
break;
}
- device->ws->cs_finalize(device->empty_cs[family]);
+
+ result = device->ws->cs_finalize(device->empty_cs[family]);
+ if (result != VK_SUCCESS)
+ goto fail;
}
if (device->physical_device->rad_info.chip_class >= GFX7)
radv_bo_list_finish(&device->bo_list);
radv_thread_trace_finish(device);
+ free(device->thread_trace_trigger_file);
+
+ radv_trap_handler_finish(device);
if (device->trace_bo)
device->ws->buffer_destroy(device->trace_bo);
VkPipelineCache pc = radv_pipeline_cache_to_handle(device->mem_cache);
radv_DestroyPipelineCache(radv_device_to_handle(device), pc, NULL);
+ radv_trap_handler_finish(device);
+
radv_destroy_shader_slabs(device);
pthread_cond_destroy(&device->timeline_cond);
radv_bo_list_finish(&device->bo_list);
+ free(device->thread_trace_trigger_file);
radv_thread_trace_finish(device);
vk_free(&device->vk.alloc, device);
if (device->physical_device->rad_info.chip_class >= GFX8)
--max_offchip_buffers;
hs_offchip_param =
- S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
- S_03093C_OFFCHIP_GRANULARITY(offchip_granularity);
+ S_03093C_OFFCHIP_BUFFERING_GFX7(max_offchip_buffers) |
+ S_03093C_OFFCHIP_GRANULARITY_GFX7(offchip_granularity);
} else {
hs_offchip_param =
S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers);
}
}
+static void
+radv_emit_trap_handler(struct radv_queue *queue,
+ struct radeon_cmdbuf *cs,
+ struct radeon_winsys_bo *tma_bo)
+{
+ struct radv_device *device = queue->device;
+ struct radeon_winsys_bo *tba_bo;
+ uint64_t tba_va, tma_va;
+
+ if (!device->trap_handler_shader || !tma_bo)
+ return;
+
+ tba_bo = device->trap_handler_shader->bo;
+
+ tba_va = radv_buffer_get_va(tba_bo) + device->trap_handler_shader->bo_offset;
+ tma_va = radv_buffer_get_va(tma_bo);
+
+ radv_cs_add_buffer(queue->device->ws, cs, tba_bo);
+ radv_cs_add_buffer(queue->device->ws, cs, tma_bo);
+
+ if (queue->queue_family_index == RADV_QUEUE_GENERAL) {
+ uint32_t regs[] = {R_00B000_SPI_SHADER_TBA_LO_PS,
+ R_00B100_SPI_SHADER_TBA_LO_VS,
+ R_00B200_SPI_SHADER_TBA_LO_GS,
+ R_00B300_SPI_SHADER_TBA_LO_ES,
+ R_00B400_SPI_SHADER_TBA_LO_HS,
+ R_00B500_SPI_SHADER_TBA_LO_LS};
+
+ for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
+ radeon_set_sh_reg_seq(cs, regs[i], 4);
+ radeon_emit(cs, tba_va >> 8);
+ radeon_emit(cs, tba_va >> 40);
+ radeon_emit(cs, tma_va >> 8);
+ radeon_emit(cs, tma_va >> 40);
+ }
+ } else {
+ radeon_set_sh_reg_seq(cs, R_00B838_COMPUTE_TBA_LO, 4);
+ radeon_emit(cs, tba_va >> 8);
+ radeon_emit(cs, tba_va >> 40);
+ radeon_emit(cs, tma_va >> 8);
+ radeon_emit(cs, tma_va >> 40);
+ }
+}
+
static void
radv_init_graphics_state(struct radeon_cmdbuf *cs, struct radv_queue *queue)
{
static void
radv_init_compute_state(struct radeon_cmdbuf *cs, struct radv_queue *queue)
{
- struct radv_physical_device *physical_device = queue->device->physical_device;
- si_emit_compute(physical_device, cs);
+ si_emit_compute(queue->device, cs);
}
static VkResult
compute_scratch_waves, compute_scratch_bo);
radv_emit_graphics_scratch(queue, cs, scratch_size_per_wave,
scratch_waves, scratch_bo);
+ radv_emit_trap_handler(queue, cs, queue->device->tma_bo);
if (gds_bo)
radv_cs_add_buffer(queue->device->ws, cs, gds_bo);
VkFence _fence,
bool is_signal)
{
- int syncobj_idx = 0, sem_idx = 0;
+ int syncobj_idx = 0, non_reset_idx = 0, sem_idx = 0, timeline_idx = 0;
if (num_sems == 0 && _fence == VK_NULL_HANDLE)
return VK_SUCCESS;
switch(sems[i]->kind) {
case RADV_SEMAPHORE_SYNCOBJ:
counts->syncobj_count++;
+ counts->syncobj_reset_count++;
break;
case RADV_SEMAPHORE_WINSYS:
counts->sem_count++;
case RADV_SEMAPHORE_TIMELINE:
counts->syncobj_count++;
break;
+ case RADV_SEMAPHORE_TIMELINE_SYNCOBJ:
+ counts->timeline_syncobj_count++;
+ break;
}
}
counts->syncobj_count++;
}
- if (counts->syncobj_count) {
- counts->syncobj = (uint32_t *)malloc(sizeof(uint32_t) * counts->syncobj_count);
- if (!counts->syncobj)
+ if (counts->syncobj_count || counts->timeline_syncobj_count) {
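+ /* One allocation for both arrays: the uint64_t timeline points come
+ * first to keep their alignment, followed by the uint32_t handles of
+ * all syncobjs (binary ones plus one per timeline point). */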
+ counts->points = (uint64_t *)malloc(
+ sizeof(*counts->syncobj) * counts->syncobj_count +
+ (sizeof(*counts->syncobj) + sizeof(*counts->points)) * counts->timeline_syncobj_count);
+ if (!counts->points)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+ counts->syncobj = (uint32_t*)(counts->points + counts->timeline_syncobj_count);
}
if (counts->sem_count) {
}
}
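+
+ /* Binary syncobjs that get reset after the wait occupy the front of the
+ * array; entries that must not be reset (timeline points, fence
+ * syncobjs) are appended starting at syncobj_reset_count. */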
+ non_reset_idx = counts->syncobj_reset_count;
+
for (uint32_t i = 0; i < num_sems; i++) {
switch(sems[i]->kind) {
case RADV_SEMAPHORE_NONE:
pthread_mutex_unlock(&sems[i]->timeline.mutex);
if (point) {
- counts->syncobj[syncobj_idx++] = point->syncobj;
+ counts->syncobj[non_reset_idx++] = point->syncobj;
} else {
/* Explicitly remove the semaphore so we might not find
* a point later post-submit. */
}
break;
}
+ case RADV_SEMAPHORE_TIMELINE_SYNCOBJ:
+ counts->syncobj[counts->syncobj_count + timeline_idx] = sems[i]->syncobj;
+ counts->points[timeline_idx] = timeline_values[i];
+ ++timeline_idx;
+ break;
}
}
fence->temporary.kind != RADV_FENCE_NONE ?
&fence->temporary : &fence->permanent;
if (part->kind == RADV_FENCE_SYNCOBJ)
- counts->syncobj[syncobj_idx++] = part->syncobj;
+ counts->syncobj[non_reset_idx++] = part->syncobj;
}
- assert(syncobj_idx <= counts->syncobj_count);
- counts->syncobj_count = syncobj_idx;
+ assert(MAX2(syncobj_idx, non_reset_idx) <= counts->syncobj_count);
+ counts->syncobj_count = MAX2(syncobj_idx, non_reset_idx);
return VK_SUCCESS;
}
static void
radv_free_sem_info(struct radv_winsys_sem_info *sem_info)
{
- free(sem_info->wait.syncobj);
+ free(sem_info->wait.points);
free(sem_info->wait.sem);
- free(sem_info->signal.syncobj);
+ free(sem_info->signal.points);
free(sem_info->signal.sem);
}
point->wait_count -= 2;
radv_timeline_trigger_waiters_locked(&signal_sems[i]->timeline, processing_list);
pthread_mutex_unlock(&signal_sems[i]->timeline.mutex);
+ } else if (signal_sems[i] && signal_sems[i]->kind == RADV_SEMAPHORE_TIMELINE_SYNCOBJ) {
+ signal_sems[i]->timeline_syncobj.max_point =
+ MAX2(signal_sems[i]->timeline_syncobj.max_point, signal_values[i]);
}
}
}
-static void
+static VkResult
radv_sparse_buffer_bind_memory(struct radv_device *device,
const VkSparseBufferMemoryBindInfo *bind)
{
RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
+ VkResult result;
for (uint32_t i = 0; i < bind->bindCount; ++i) {
struct radv_device_memory *mem = NULL;
if (bind->pBinds[i].memory != VK_NULL_HANDLE)
mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
- device->ws->buffer_virtual_bind(buffer->bo,
- bind->pBinds[i].resourceOffset,
- bind->pBinds[i].size,
- mem ? mem->bo : NULL,
- bind->pBinds[i].memoryOffset);
+ result = device->ws->buffer_virtual_bind(buffer->bo,
+ bind->pBinds[i].resourceOffset,
+ bind->pBinds[i].size,
+ mem ? mem->bo : NULL,
+ bind->pBinds[i].memoryOffset);
+ if (result != VK_SUCCESS)
+ return result;
}
+
+ return VK_SUCCESS;
}
-static void
+static VkResult
radv_sparse_image_opaque_bind_memory(struct radv_device *device,
const VkSparseImageOpaqueMemoryBindInfo *bind)
{
RADV_FROM_HANDLE(radv_image, image, bind->image);
+ VkResult result;
for (uint32_t i = 0; i < bind->bindCount; ++i) {
struct radv_device_memory *mem = NULL;
if (bind->pBinds[i].memory != VK_NULL_HANDLE)
mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
- device->ws->buffer_virtual_bind(image->bo,
- bind->pBinds[i].resourceOffset,
- bind->pBinds[i].size,
- mem ? mem->bo : NULL,
- bind->pBinds[i].memoryOffset);
+ result = device->ws->buffer_virtual_bind(image->bo,
+ bind->pBinds[i].resourceOffset,
+ bind->pBinds[i].size,
+ mem ? mem->bo : NULL,
+ bind->pBinds[i].memoryOffset);
+ if (result != VK_SUCCESS)
+ return result;
}
+
+ return VK_SUCCESS;
}
static VkResult
uint32_t signal_value_count;
};
+static VkResult
+radv_queue_trigger_submission(struct radv_deferred_queue_submission *submission,
+ uint32_t decrement,
+ struct list_head *processing_list);
+
static VkResult
radv_create_deferred_submission(struct radv_queue *queue,
const struct radv_queue_submission *submission,
return VK_SUCCESS;
}
-static void
+static VkResult
radv_queue_enqueue_submission(struct radv_deferred_queue_submission *submission,
struct list_head *processing_list)
{
* submitted, but if the queue was empty, we decrement ourselves as there is no previous
* submission. */
uint32_t decrement = submission->wait_semaphore_count - wait_cnt + (is_first ? 1 : 0);
- if (__atomic_sub_fetch(&submission->submission_wait_count, decrement, __ATOMIC_ACQ_REL) == 0) {
- list_addtail(&submission->processing_list, processing_list);
- }
+
+ /* If decrement is zero, we no longer hold a refcounted reference to the
+ * submission, so it is not safe to access it. */
+ if (!decrement)
+ return VK_SUCCESS;
+
+ return radv_queue_trigger_submission(submission, decrement, processing_list);
}
static void
list_first_entry(&submission->queue->pending_submissions,
struct radv_deferred_queue_submission,
queue_pending_list);
- if (p_atomic_dec_zero(&next_submission->submission_wait_count)) {
- list_addtail(&next_submission->processing_list, processing_list);
- }
+ radv_queue_trigger_submission(next_submission, 1, processing_list);
}
pthread_mutex_unlock(&submission->queue->pending_mutex);
goto fail;
for (uint32_t i = 0; i < submission->buffer_bind_count; ++i) {
- radv_sparse_buffer_bind_memory(queue->device,
- submission->buffer_binds + i);
+ result = radv_sparse_buffer_bind_memory(queue->device,
+ submission->buffer_binds + i);
+ if (result != VK_SUCCESS)
+ goto fail;
}
for (uint32_t i = 0; i < submission->image_opaque_bind_count; ++i) {
- radv_sparse_image_opaque_bind_memory(queue->device,
- submission->image_opaque_binds + i);
+ result = radv_sparse_image_opaque_bind_memory(queue->device,
+ submission->image_opaque_binds + i);
+ if (result != VK_SUCCESS)
+ goto fail;
}
if (!submission->cmd_buffer_count) {
sem_info.cs_emit_signal = j + advance == submission->cmd_buffer_count;
if (unlikely(queue->device->use_global_bo_list)) {
- pthread_mutex_lock(&queue->device->bo_list.mutex);
+ pthread_rwlock_rdlock(&queue->device->bo_list.rwlock);
bo_list = &queue->device->bo_list.list;
}
can_patch, base_fence);
if (unlikely(queue->device->use_global_bo_list))
- pthread_mutex_unlock(&queue->device->bo_list.mutex);
+ pthread_rwlock_unlock(&queue->device->bo_list.rwlock);
if (result != VK_SUCCESS)
goto fail;
if (queue->device->trace_bo) {
radv_check_gpu_hangs(queue, cs_array[j]);
}
+
+ if (queue->device->tma_bo) {
+ radv_check_trap_handler(queue);
+ }
}
free(cs_array);
* VK_ERROR_DEVICE_LOST to ensure the clients do not attempt
* to submit the same job again to this device.
*/
- result = VK_ERROR_DEVICE_LOST;
+ result = radv_device_set_lost(queue->device, "vkQueueSubmit() failed");
}
radv_free_temp_syncobjs(queue->device,
return VK_SUCCESS;
}
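+
+/* Wait, up to an absolute timeout, for every timeline-syncobj point this
+ * submission depends on; points already covered by a submitted signal
+ * (tracked in max_point) are skipped. */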
+static VkResult
+wait_for_submission_timelines_available(struct radv_deferred_queue_submission *submission,
+ uint64_t timeout)
+{
+ struct radv_device *device = submission->queue->device;
+ uint32_t syncobj_count = 0;
+ uint32_t syncobj_idx = 0;
+
+ for (uint32_t i = 0; i < submission->wait_semaphore_count; ++i) {
+ if (submission->wait_semaphores[i]->kind != RADV_SEMAPHORE_TIMELINE_SYNCOBJ)
+ continue;
+
+ if (submission->wait_semaphores[i]->timeline_syncobj.max_point >= submission->wait_values[i])
+ continue;
+ ++syncobj_count;
+ }
+
+ if (!syncobj_count)
+ return VK_SUCCESS;
+
+ uint64_t *points = malloc((sizeof(uint64_t) + sizeof(uint32_t)) * syncobj_count);
+ if (!points)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ uint32_t *syncobj = (uint32_t*)(points + syncobj_count);
+
+ for (uint32_t i = 0; i < submission->wait_semaphore_count; ++i) {
+ if (submission->wait_semaphores[i]->kind != RADV_SEMAPHORE_TIMELINE_SYNCOBJ)
+ continue;
+
+ if (submission->wait_semaphores[i]->timeline_syncobj.max_point >= submission->wait_values[i])
+ continue;
+
+ syncobj[syncobj_idx] = submission->wait_semaphores[i]->syncobj;
+ points[syncobj_idx] = submission->wait_values[i];
+ ++syncobj_idx;
+ }
+ bool success = device->ws->wait_timeline_syncobj(device->ws, syncobj, points, syncobj_idx, true, true, timeout);
+
+ free(points);
+ return success ? VK_SUCCESS : VK_TIMEOUT;
+}
+
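+/* Per-queue worker, started lazily by radv_queue_trigger_submission(): waits
+ * for the timeline dependencies of the handed-over submission to become
+ * available and then processes it. */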
+static void* radv_queue_submission_thread_run(void *q)
+{
+ struct radv_queue *queue = q;
+
+ pthread_mutex_lock(&queue->thread_mutex);
+ while (!p_atomic_read(&queue->thread_exit)) {
+ struct radv_deferred_queue_submission *submission = queue->thread_submission;
+ struct list_head processing_list;
+ VkResult result = VK_SUCCESS;
+ if (!submission) {
+ pthread_cond_wait(&queue->thread_cond, &queue->thread_mutex);
+ continue;
+ }
+ pthread_mutex_unlock(&queue->thread_mutex);
+
+ /* Wait at most 5 seconds so we have a chance to notice shutdown when
+ * a semaphore never gets signaled. If it takes longer we just retry
+ * the wait next iteration. */
+ result = wait_for_submission_timelines_available(submission,
+ radv_get_absolute_timeout(5000000000));
+ if (result != VK_SUCCESS) {
+ pthread_mutex_lock(&queue->thread_mutex);
+ continue;
+ }
+
+ /* The lock isn't held but nobody will add one until we finish
+ * the current submission. */
+ p_atomic_set(&queue->thread_submission, NULL);
+
+ list_inithead(&processing_list);
+ list_addtail(&submission->processing_list, &processing_list);
+ result = radv_process_submissions(&processing_list);
+
+ pthread_mutex_lock(&queue->thread_mutex);
+ }
+ pthread_mutex_unlock(&queue->thread_mutex);
+ return NULL;
+}
+
+static VkResult
+radv_queue_trigger_submission(struct radv_deferred_queue_submission *submission,
+ uint32_t decrement,
+ struct list_head *processing_list)
+{
+ struct radv_queue *queue = submission->queue;
+ int ret;
+ if (p_atomic_add_return(&submission->submission_wait_count, -decrement))
+ return VK_SUCCESS;
+
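+ /* Fast path: every timeline dependency is already available, so the
+ * submission can go on the caller's processing list directly instead of
+ * being handed to the submission thread. */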
+ if (wait_for_submission_timelines_available(submission, radv_get_absolute_timeout(0)) == VK_SUCCESS) {
+ list_addtail(&submission->processing_list, processing_list);
+ return VK_SUCCESS;
+ }
+
+ pthread_mutex_lock(&queue->thread_mutex);
+
+ /* A submission can only be ready for the thread if it doesn't have
+ * any predecessors in the same queue, so there can only be one such
+ * submission at a time. */
+ assert(queue->thread_submission == NULL);
+
+ /* Only start the thread on demand to save resources for the many games
+ * which only use binary semaphores. */
+ if (!queue->thread_running) {
+ ret = pthread_create(&queue->submission_thread, NULL,
+ radv_queue_submission_thread_run, queue);
+ if (ret) {
+ pthread_mutex_unlock(&queue->thread_mutex);
+ return vk_errorf(queue->device->instance,
+ VK_ERROR_DEVICE_LOST,
+ "Failed to start submission thread");
+ }
+ queue->thread_running = true;
+ }
+
+ queue->thread_submission = submission;
+ pthread_mutex_unlock(&queue->thread_mutex);
+
+ pthread_cond_signal(&queue->thread_cond);
+ return VK_SUCCESS;
+}
+
static VkResult radv_queue_submit(struct radv_queue *queue,
const struct radv_queue_submission *submission)
{
struct list_head processing_list;
list_inithead(&processing_list);
- radv_queue_enqueue_submission(deferred, &processing_list);
+ result = radv_queue_enqueue_submission(deferred, &processing_list);
+ if (result != VK_SUCCESS) {
+ /* If anything is in the list we leak. */
+ assert(list_is_empty(&processing_list));
+ return result;
+ }
return radv_process_submissions(&processing_list);
}
uint32_t fence_idx = 0;
bool flushed_caches = false;
+ if (radv_device_is_lost(queue->device))
+ return VK_ERROR_DEVICE_LOST;
+
if (fence != VK_NULL_HANDLE) {
for (uint32_t i = 0; i < submitCount; ++i)
if (radv_submit_has_effects(pSubmits + i))
return VK_SUCCESS;
}
+static const char *
+radv_get_queue_family_name(struct radv_queue *queue)
+{
+ switch (queue->queue_family_index) {
+ case RADV_QUEUE_GENERAL:
+ return "graphics";
+ case RADV_QUEUE_COMPUTE:
+ return "compute";
+ case RADV_QUEUE_TRANSFER:
+ return "transfer";
+ default:
+ unreachable("Unknown queue family");
+ }
+}
+
VkResult radv_QueueWaitIdle(
VkQueue _queue)
{
RADV_FROM_HANDLE(radv_queue, queue, _queue);
+ if (radv_device_is_lost(queue->device))
+ return VK_ERROR_DEVICE_LOST;
+
pthread_mutex_lock(&queue->pending_mutex);
while (!list_is_empty(&queue->pending_submissions)) {
pthread_cond_wait(&queue->device->timeline_cond, &queue->pending_mutex);
if (!queue->device->ws->ctx_wait_idle(queue->hw_ctx,
radv_queue_family_to_ring(queue->queue_family_index),
- queue->queue_idx))
- return VK_ERROR_DEVICE_LOST;
+ queue->queue_idx)) {
+ return radv_device_set_lost(queue->device,
+ "Failed to wait for a '%s' queue "
+ "to be idle. GPU hang ?",
+ radv_get_queue_family_name(queue));
+ }
return VK_SUCCESS;
}
} else {
close(import_info->fd);
}
+
+ if (mem->image && mem->image->plane_count == 1 &&
+ !vk_format_is_depth_or_stencil(mem->image->vk_format)) {
+ struct radeon_bo_metadata metadata;
+ device->ws->buffer_get_metadata(mem->bo, &metadata);
+
+ struct radv_image_create_info create_info = {
+ .no_metadata_planes = true,
+ .bo_metadata = &metadata
+ };
+
+ /* This gives a basic ability to import radeonsi images
+ * that don't have DCC. This is not guaranteed by any
+ * spec and can be removed after we support modifiers. */
+ result = radv_image_create_layout(device, create_info, mem->image);
+ if (result != VK_SUCCESS) {
+ device->ws->buffer_destroy(mem->bo);
+ goto fail;
+ }
+ }
} else if (host_ptr_info) {
assert(host_ptr_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT);
mem->bo = device->ws->buffer_from_ptr(device->ws, host_ptr_info->pHostPointer,
VkResult result;
uint32_t fence_idx = 0;
+ if (radv_device_is_lost(queue->device))
+ return VK_ERROR_DEVICE_LOST;
+
if (fence != VK_NULL_HANDLE) {
for (uint32_t i = 0; i < bindInfoCount; ++i)
if (radv_sparse_bind_has_effects(pBindInfo + i))
radv_destroy_fence(device, pAllocator, fence);
}
-
-uint64_t radv_get_current_time(void)
-{
- struct timespec tv;
- clock_gettime(CLOCK_MONOTONIC, &tv);
- return tv.tv_nsec + tv.tv_sec*1000000000ull;
-}
-
-static uint64_t radv_get_absolute_timeout(uint64_t timeout)
-{
- uint64_t current_time = radv_get_current_time();
-
- timeout = MIN2(UINT64_MAX - current_time, timeout);
-
- return current_time + timeout;
-}
-
-
static bool radv_all_fences_plain_and_submitted(struct radv_device *device,
uint32_t fenceCount, const VkFence *pFences)
{
uint64_t timeout)
{
RADV_FROM_HANDLE(radv_device, device, _device);
+
+ if (radv_device_is_lost(device))
+ return VK_ERROR_DEVICE_LOST;
+
timeout = radv_get_absolute_timeout(timeout);
if (device->always_use_syncobj &&
fence->temporary.kind != RADV_FENCE_NONE ?
&fence->temporary : &fence->permanent;
+ if (radv_device_is_lost(device))
+ return VK_ERROR_DEVICE_LOST;
+
switch (part->kind) {
case RADV_FENCE_NONE:
break;
struct radv_timeline_point *ret = NULL;
struct radv_timeline_point *prev = NULL;
+ int r;
if (p <= timeline->highest_signaled)
return NULL;
if (list_is_empty(&timeline->free_points)) {
ret = malloc(sizeof(struct radv_timeline_point));
- device->ws->create_syncobj(device->ws, false, &ret->syncobj);
+ if (!ret)
+ return NULL;
+
+ r = device->ws->create_syncobj(device->ws, false, &ret->syncobj);
+ if (r) {
+ free(ret);
+ return NULL;
+ }
} else {
ret = list_first_entry(&timeline->free_points, struct radv_timeline_point, list);
list_del(&ret->list);
static VkResult
-radv_timeline_wait_locked(struct radv_device *device,
- struct radv_timeline *timeline,
- uint64_t value,
- uint64_t abs_timeout)
+radv_timeline_wait(struct radv_device *device,
+ struct radv_timeline *timeline,
+ uint64_t value,
+ uint64_t abs_timeout)
{
+ pthread_mutex_lock(&timeline->mutex);
+
while(timeline->highest_submitted < value) {
struct timespec abstime;
timespec_from_nsec(&abstime, abs_timeout);
pthread_cond_timedwait(&device->timeline_cond, &timeline->mutex, &abstime);
- if (radv_get_current_time() >= abs_timeout && timeline->highest_submitted < value)
+ if (radv_get_current_time() >= abs_timeout && timeline->highest_submitted < value) {
+ pthread_mutex_unlock(&timeline->mutex);
return VK_TIMEOUT;
+ }
}
struct radv_timeline_point *point = radv_timeline_find_point_at_least_locked(device, timeline, value);
+ pthread_mutex_unlock(&timeline->mutex);
if (!point)
return VK_SUCCESS;
- pthread_mutex_unlock(&timeline->mutex);
-
bool success = device->ws->wait_syncobj(device->ws, &point->syncobj, 1, true, abs_timeout);
pthread_mutex_lock(&timeline->mutex);
point->wait_count--;
+ pthread_mutex_unlock(&timeline->mutex);
return success ? VK_SUCCESS : VK_TIMEOUT;
}
if (waiter->value > timeline->highest_submitted)
continue;
- if (p_atomic_dec_zero(&waiter->submission->submission_wait_count)) {
- list_addtail(&waiter->submission->processing_list, processing_list);
- }
+ radv_queue_trigger_submission(waiter->submission, 1, processing_list);
list_del(&waiter->list);
}
}
radv_destroy_timeline(device, &part->timeline);
break;
case RADV_SEMAPHORE_SYNCOBJ:
+ case RADV_SEMAPHORE_TIMELINE_SYNCOBJ:
device->ws->destroy_syncobj(device->ws, part->syncobj);
break;
}
sem->temporary.kind = RADV_SEMAPHORE_NONE;
sem->permanent.kind = RADV_SEMAPHORE_NONE;
- if (type == VK_SEMAPHORE_TYPE_TIMELINE) {
+ if (type == VK_SEMAPHORE_TYPE_TIMELINE &&
+ device->physical_device->rad_info.has_timeline_syncobj) {
+ int ret = device->ws->create_syncobj(device->ws, false, &sem->permanent.syncobj);
+ if (ret) {
+ radv_destroy_semaphore(device, pAllocator, sem);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+ device->ws->signal_syncobj(device->ws, sem->permanent.syncobj, initial_value);
+ sem->permanent.timeline_syncobj.max_point = initial_value;
+ sem->permanent.kind = RADV_SEMAPHORE_TIMELINE_SYNCOBJ;
+ } else if (type == VK_SEMAPHORE_TYPE_TIMELINE) {
radv_create_timeline(&sem->permanent.timeline, initial_value);
sem->permanent.kind = RADV_SEMAPHORE_TIMELINE;
} else if (device->always_use_syncobj || handleTypes) {
RADV_FROM_HANDLE(radv_device, device, _device);
RADV_FROM_HANDLE(radv_semaphore, semaphore, _semaphore);
+ if (radv_device_is_lost(device))
+ return VK_ERROR_DEVICE_LOST;
+
struct radv_semaphore_part *part =
semaphore->temporary.kind != RADV_SEMAPHORE_NONE ? &semaphore->temporary : &semaphore->permanent;
pthread_mutex_unlock(&part->timeline.mutex);
return VK_SUCCESS;
}
+ case RADV_SEMAPHORE_TIMELINE_SYNCOBJ: {
+ return device->ws->query_syncobj(device->ws, part->syncobj, pValue);
+ }
case RADV_SEMAPHORE_NONE:
case RADV_SEMAPHORE_SYNCOBJ:
case RADV_SEMAPHORE_WINSYS:
for (;;) {
for(uint32_t i = 0; i < pWaitInfo->semaphoreCount; ++i) {
RADV_FROM_HANDLE(radv_semaphore, semaphore, pWaitInfo->pSemaphores[i]);
- pthread_mutex_lock(&semaphore->permanent.timeline.mutex);
- VkResult result = radv_timeline_wait_locked(device, &semaphore->permanent.timeline, pWaitInfo->pValues[i], 0);
- pthread_mutex_unlock(&semaphore->permanent.timeline.mutex);
+ VkResult result = radv_timeline_wait(device, &semaphore->permanent.timeline, pWaitInfo->pValues[i], 0);
if (result == VK_SUCCESS)
return VK_SUCCESS;
for(uint32_t i = 0; i < pWaitInfo->semaphoreCount; ++i) {
RADV_FROM_HANDLE(radv_semaphore, semaphore, pWaitInfo->pSemaphores[i]);
- pthread_mutex_lock(&semaphore->permanent.timeline.mutex);
- VkResult result = radv_timeline_wait_locked(device, &semaphore->permanent.timeline, pWaitInfo->pValues[i], abs_timeout);
- pthread_mutex_unlock(&semaphore->permanent.timeline.mutex);
+ VkResult result = radv_timeline_wait(device, &semaphore->permanent.timeline, pWaitInfo->pValues[i], abs_timeout);
if (result != VK_SUCCESS)
return result;
uint64_t timeout)
{
RADV_FROM_HANDLE(radv_device, device, _device);
+
+ if (radv_device_is_lost(device))
+ return VK_ERROR_DEVICE_LOST;
+
uint64_t abs_timeout = radv_get_absolute_timeout(timeout);
- return radv_wait_timelines(device, pWaitInfo, abs_timeout);
+
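+ /* The timeline implementation (CPU-emulated timelines vs. kernel timeline
+ * syncobjs) is uniform across a device, so the kind of the first semaphore
+ * decides the path for all of them. */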
+ if (radv_semaphore_from_handle(pWaitInfo->pSemaphores[0])->permanent.kind == RADV_SEMAPHORE_TIMELINE)
+ return radv_wait_timelines(device, pWaitInfo, abs_timeout);
+
+ if (pWaitInfo->semaphoreCount > UINT32_MAX / sizeof(uint32_t))
+ return vk_errorf(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY, "semaphoreCount integer overflow");
+
+ bool wait_all = !(pWaitInfo->flags & VK_SEMAPHORE_WAIT_ANY_BIT_KHR);
+ uint32_t *handles = malloc(sizeof(*handles) * pWaitInfo->semaphoreCount);
+ if (!handles)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ for (uint32_t i = 0; i < pWaitInfo->semaphoreCount; ++i) {
+ RADV_FROM_HANDLE(radv_semaphore, semaphore, pWaitInfo->pSemaphores[i]);
+ handles[i] = semaphore->permanent.syncobj;
+ }
+
+ bool success = device->ws->wait_timeline_syncobj(device->ws, handles, pWaitInfo->pValues,
+ pWaitInfo->semaphoreCount, wait_all, false,
+ abs_timeout);
+ free(handles);
+ return success ? VK_SUCCESS : VK_TIMEOUT;
}
VkResult
radv_timeline_trigger_waiters_locked(&part->timeline, &processing_list);
pthread_mutex_unlock(&part->timeline.mutex);
- return radv_process_submissions(&processing_list);
+ VkResult result = radv_process_submissions(&processing_list);
+
+ /* This needs to happen after radv_process_submissions, so
+ * that any submitted submissions that are now unblocked get
+ * processed before we wake the application. This way we
+ * ensure that any binary semaphores that are now unblocked
+ * are usable by the application. */
+ pthread_cond_broadcast(&device->timeline_cond);
+
+ return result;
+ }
+ case RADV_SEMAPHORE_TIMELINE_SYNCOBJ: {
+ part->timeline_syncobj.max_point = MAX2(part->timeline_syncobj.max_point, pSignalInfo->value);
+ device->ws->signal_syncobj(device->ws, part->syncobj, pSignalInfo->value);
+ break;
}
case RADV_SEMAPHORE_NONE:
case RADV_SEMAPHORE_SYNCOBJ:
VkDevice _device,
VkEvent _event)
{
+ RADV_FROM_HANDLE(radv_device, device, _device);
RADV_FROM_HANDLE(radv_event, event, _event);
+ if (radv_device_is_lost(device))
+ return VK_ERROR_DEVICE_LOST;
+
if (*event->map == 1)
return VK_EVENT_SET;
return VK_EVENT_RESET;
sampler->state[2] |=
S_008F38_DISABLE_LSB_CEIL(device->physical_device->rad_info.chip_class <= GFX8) |
S_008F38_FILTER_PREC_FIX(1) |
- S_008F38_ANISO_OVERRIDE_GFX6(device->physical_device->rad_info.chip_class >= GFX8);
+ S_008F38_ANISO_OVERRIDE_GFX8(device->physical_device->rad_info.chip_class >= GFX8);
}
}
}
} else {
if (fd == -1)
- device->ws->signal_syncobj(device->ws, syncobj_handle);
+ device->ws->signal_syncobj(device->ws, syncobj_handle, 0);
}
if (fd != -1) {
RADV_FROM_HANDLE(radv_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
VkResult result;
struct radv_semaphore_part *dst = NULL;
+ bool timeline = sem->permanent.kind == RADV_SEMAPHORE_TIMELINE_SYNCOBJ;
if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
+ assert(!timeline);
dst = &sem->temporary;
} else {
dst = &sem->permanent;
}
- uint32_t syncobj = dst->kind == RADV_SEMAPHORE_SYNCOBJ ? dst->syncobj : 0;
+ uint32_t syncobj = (dst->kind == RADV_SEMAPHORE_SYNCOBJ ||
+ dst->kind == RADV_SEMAPHORE_TIMELINE_SYNCOBJ) ? dst->syncobj : 0;
switch(pImportSemaphoreFdInfo->handleType) {
case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
result = radv_import_opaque_fd(device, pImportSemaphoreFdInfo->fd, &syncobj);
break;
case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
+ assert(!timeline);
result = radv_import_sync_fd(device, pImportSemaphoreFdInfo->fd, &syncobj);
break;
default:
if (result == VK_SUCCESS) {
dst->syncobj = syncobj;
dst->kind = RADV_SEMAPHORE_SYNCOBJ;
+ if (timeline) {
+ dst->kind = RADV_SEMAPHORE_TIMELINE_SYNCOBJ;
+ dst->timeline_syncobj.max_point = 0;
+ }
}
return result;
uint32_t syncobj_handle;
if (sem->temporary.kind != RADV_SEMAPHORE_NONE) {
- assert(sem->temporary.kind == RADV_SEMAPHORE_SYNCOBJ);
+ assert(sem->temporary.kind == RADV_SEMAPHORE_SYNCOBJ ||
+ sem->temporary.kind == RADV_SEMAPHORE_TIMELINE_SYNCOBJ);
syncobj_handle = sem->temporary.syncobj;
} else {
- assert(sem->permanent.kind == RADV_SEMAPHORE_SYNCOBJ);
+ assert(sem->permanent.kind == RADV_SEMAPHORE_SYNCOBJ ||
+ sem->permanent.kind == RADV_SEMAPHORE_TIMELINE_SYNCOBJ);
syncobj_handle = sem->permanent.syncobj;
}
RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
VkSemaphoreTypeKHR type = radv_get_semaphore_type(pExternalSemaphoreInfo->pNext, NULL);
- if (type == VK_SEMAPHORE_TYPE_TIMELINE) {
+ if (type == VK_SEMAPHORE_TYPE_TIMELINE && pdevice->rad_info.has_timeline_syncobj &&
+ pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT) {
+ pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
+ pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT;
+ pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
+ VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
+ } else if (type == VK_SEMAPHORE_TYPE_TIMELINE) {
pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
pExternalSemaphoreProperties->compatibleHandleTypes = 0;
pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
static const VkTimeDomainEXT radv_time_domains[] = {
VK_TIME_DOMAIN_DEVICE_EXT,
VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT,
+#ifdef CLOCK_MONOTONIC_RAW
VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT,
+#endif
};
VkResult radv_GetPhysicalDeviceCalibrateableTimeDomainsEXT(
int ret;
ret = clock_gettime(clock_id, &current);
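+ /* The ifdef covers compile-time absence of CLOCK_MONOTONIC_RAW; the ret
+ * check below covers kernels that reject it at runtime. */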
+#ifdef CLOCK_MONOTONIC_RAW
if (ret < 0 && clock_id == CLOCK_MONOTONIC_RAW)
ret = clock_gettime(CLOCK_MONOTONIC, &current);
+#endif
if (ret < 0)
return 0;
uint64_t begin, end;
uint64_t max_clock_period = 0;
+#ifdef CLOCK_MONOTONIC_RAW
begin = radv_clock_gettime(CLOCK_MONOTONIC_RAW);
+#else
+ begin = radv_clock_gettime(CLOCK_MONOTONIC);
+#endif
for (d = 0; d < timestampCount; d++) {
switch (pTimestampInfos[d].timeDomain) {
max_clock_period = MAX2(max_clock_period, 1);
break;
+#ifdef CLOCK_MONOTONIC_RAW
case VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT:
pTimestamps[d] = begin;
break;
+#endif
default:
pTimestamps[d] = 0;
break;
}
}
+#ifdef CLOCK_MONOTONIC_RAW
end = radv_clock_gettime(CLOCK_MONOTONIC_RAW);
+#else
+ end = radv_clock_gettime(CLOCK_MONOTONIC);
+#endif
/*
* The maximum deviation is the sum of the interval over which we