VkResult result;
drmVersionPtr version;
int fd;
+ int master_fd = -1;
fd = open(path, O_RDWR | O_CLOEXEC);
- if (fd < 0)
- return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
+ if (fd < 0) {
+ if (instance->debug_flags & RADV_DEBUG_STARTUP)
+ radv_logi("Could not open device '%s'", path);
+
+ return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
+ }
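Throughout this series, vk_error()/vk_errorf() gain a leading instance parameter so that every returned error can be logged when the new RADV_DEBUG=errors flag is set (NULL is passed where no instance exists yet). A minimal sketch of what such an instance-aware helper could look like; the name __vk_errorf and the log format are assumptions, not part of this patch:

    /* Hypothetical sketch of an instance-aware error helper. A NULL instance
     * is allowed so failures before instance creation still return the error
     * code, just without logging. */
    static VkResult __vk_errorf(struct radv_instance *instance, VkResult error,
                                const char *file, int line)
    {
    	if (instance && (instance->debug_flags & RADV_DEBUG_ERRORS))
    		fprintf(stderr, "%s:%d: returning VkResult %d\n", file, line, error);
    	return error;
    }

    #define vk_error(instance, error) __vk_errorf((instance), (error), __FILE__, __LINE__)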
version = drmGetVersion(fd);
if (!version) {
close(fd);
- return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
+
+ if (instance->debug_flags & RADV_DEBUG_STARTUP)
+ radv_logi("Could not get the kernel driver version for device '%s'", path);
+
+ return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
"failed to get version %s: %m", path);
}
if (strcmp(version->name, "amdgpu")) {
drmFreeVersion(version);
+ if (master_fd != -1)
+ close(master_fd);
close(fd);
+
+ if (instance->debug_flags & RADV_DEBUG_STARTUP)
+ radv_logi("Device '%s' is not using the amdgpu kernel driver.", path);
+
return VK_ERROR_INCOMPATIBLE_DRIVER;
}
drmFreeVersion(version);
+ if (instance->debug_flags & RADV_DEBUG_STARTUP)
+ radv_logi("Found compatible device '%s'.", path);
+
device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
device->instance = instance;
assert(strlen(path) < ARRAY_SIZE(device->path));
device->ws = radv_amdgpu_winsys_create(fd, instance->debug_flags,
instance->perftest_flags);
if (!device->ws) {
- result = VK_ERROR_INCOMPATIBLE_DRIVER;
+ result = vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
goto fail;
}
+ if (instance->enabled_extensions.KHR_display) {
+ master_fd = open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
+ if (master_fd >= 0) {
+ uint32_t accel_working = 0;
+ struct drm_amdgpu_info request = {
+ .return_pointer = (uintptr_t)&accel_working,
+ .return_size = sizeof(accel_working),
+ .query = AMDGPU_INFO_ACCEL_WORKING
+ };
+
+ if (drmCommandWrite(master_fd, DRM_AMDGPU_INFO, &request,
+     sizeof(struct drm_amdgpu_info)) < 0 || !accel_working) {
+ close(master_fd);
+ master_fd = -1;
+ }
+ }
+ }
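The KHR_display path keeps the master fd only if the kernel reports working acceleration on the primary node. Factored into a hypothetical standalone helper (radv_has_accel() is illustrative, not part of the patch), the same probe reads:

    /* Illustrative refactor of the probe above into a standalone helper:
     * returns true if the amdgpu kernel driver reports working GPU
     * acceleration on this fd. */
    #include <xf86drm.h>
    #include <amdgpu_drm.h>

    static bool radv_has_accel(int fd)
    {
    	uint32_t accel_working = 0;
    	struct drm_amdgpu_info request = {
    		.return_pointer = (uintptr_t)&accel_working,
    		.return_size = sizeof(accel_working),
    		.query = AMDGPU_INFO_ACCEL_WORKING
    	};

    	if (drmCommandWrite(fd, DRM_AMDGPU_INFO, &request,
    			    sizeof(struct drm_amdgpu_info)) < 0)
    		return false;

    	return accel_working != 0;
    }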
+
+ device->master_fd = master_fd;
device->local_fd = fd;
device->ws->query_info(device->ws, &device->rad_info);
if (radv_device_get_cache_uuid(device->rad_info.family, device->cache_uuid)) {
device->ws->destroy(device->ws);
- result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
+ result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
"cannot generate UUID");
goto fail;
}
device->out_of_order_rast_allowed = device->has_out_of_order_rast &&
!(device->instance->debug_flags & RADV_DEBUG_NO_OUT_OF_ORDER);
- device->dcc_msaa_allowed = device->rad_info.chip_class == VI &&
- (device->instance->perftest_flags & RADV_PERFTEST_DCC_MSAA);
+ device->dcc_msaa_allowed =
+ (device->instance->perftest_flags & RADV_PERFTEST_DCC_MSAA);
radv_physical_device_init_mem_types(device);
radv_fill_device_extension_table(device, &device->supported_extensions);
result = radv_init_wsi(device);
if (result != VK_SUCCESS) {
device->ws->destroy(device->ws);
+ vk_error(instance, result);
goto fail;
}
fail:
close(fd);
+ if (master_fd != -1)
+ close(master_fd);
return result;
}
device->ws->destroy(device->ws);
disk_cache_destroy(device->disk_cache);
close(device->local_fd);
+ if (device->master_fd != -1)
+ close(device->master_fd);
}
static void *
{"nodynamicbounds", RADV_DEBUG_NO_DYNAMIC_BOUNDS},
{"nooutoforder", RADV_DEBUG_NO_OUT_OF_ORDER},
{"info", RADV_DEBUG_INFO},
+ {"errors", RADV_DEBUG_ERRORS},
+ {"startup", RADV_DEBUG_STARTUP},
+ {"checkir", RADV_DEBUG_CHECKIR},
+ {"nothreadllvm", RADV_DEBUG_NOTHREADLLVM},
{NULL, 0}
};
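These tables are consumed by Mesa's parse_debug_string() (util/debug.h), so the new entries become reachable as e.g. RADV_DEBUG=startup,errors. A simplified sketch of such a table-driven parser, assuming the debug_control layout these tables use; the real parser tokenizes on commas rather than the naive substring match shown here:

    #include <stdint.h>
    #include <string.h>

    struct debug_control {
    	const char *string;
    	uint64_t flag;
    };

    /* Simplified sketch: OR together the flag of every table entry whose
     * name appears in the environment string. */
    static uint64_t parse_flags(const char *env, const struct debug_control *ctl)
    {
    	uint64_t flags = 0;

    	if (!env)
    		return 0;

    	for (; ctl->string; ctl++) {
    		if (strstr(env, ctl->string))
    			flags |= ctl->flag;
    	}
    	return flags;
    }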
instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (!instance)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
instance->apiVersion = client_version;
instance->physicalDeviceCount = -1;
+ instance->debug_flags = parse_debug_string(getenv("RADV_DEBUG"),
+ radv_debug_options);
+
+ instance->perftest_flags = parse_debug_string(getenv("RADV_PERFTEST"),
+ radv_perftest_options);
+
+ if (instance->debug_flags & RADV_DEBUG_STARTUP)
+ radv_logi("Created an instance");
+
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
int index = radv_get_instance_extension_index(ext_name);
if (index < 0 || !radv_supported_instance_extensions.extensions[index]) {
vk_free2(&default_alloc, pAllocator, instance);
- return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
+ return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
}
instance->enabled_extensions.extensions[index] = true;
result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
if (result != VK_SUCCESS) {
vk_free2(&default_alloc, pAllocator, instance);
- return vk_error(result);
+ return vk_error(instance, result);
}
_mesa_locale_init();
VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
- instance->debug_flags = parse_debug_string(getenv("RADV_DEBUG"),
- radv_debug_options);
-
- instance->perftest_flags = parse_debug_string(getenv("RADV_PERFTEST"),
- radv_perftest_options);
-
radv_handle_per_app_options(instance, pCreateInfo->pApplicationInfo);
*pInstance = radv_instance_to_handle(instance);
instance->physicalDeviceCount = 0;
max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
+
+ if (instance->debug_flags & RADV_DEBUG_STARTUP)
+ radv_logi("Found %d drm nodes", max_devices);
+
if (max_devices < 1)
- return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
+ return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
for (unsigned i = 0; i < (unsigned)max_devices; i++) {
if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
VkPhysicalDevice physicalDevice,
VkPhysicalDeviceFeatures2KHR *pFeatures)
{
+ RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
vk_foreach_struct(ext, pFeatures->pNext) {
switch (ext->sType) {
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
VkPhysicalDevice16BitStorageFeatures *features =
(VkPhysicalDevice16BitStorageFeatures*)ext;
- features->storageBuffer16BitAccess = false;
- features->uniformAndStorageBuffer16BitAccess = false;
- features->storagePushConstant16 = false;
- features->storageInputOutput16 = false;
+ bool enabled = HAVE_LLVM >= 0x0700 && pdevice->rad_info.chip_class >= VI;
+ features->storageBuffer16BitAccess = enabled;
+ features->uniformAndStorageBuffer16BitAccess = enabled;
+ features->storagePushConstant16 = enabled;
+ features->storageInputOutput16 = enabled;
break;
}
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
features->runtimeDescriptorArray = true;
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
+ VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
+ (VkPhysicalDeviceConditionalRenderingFeaturesEXT*)ext;
+ features->conditionalRendering = true;
+ features->inheritedConditionalRendering = false;
+ break;
+ }
default:
break;
}
VK_SUBGROUP_FEATURE_BASIC_BIT |
VK_SUBGROUP_FEATURE_BALLOT_BIT |
VK_SUBGROUP_FEATURE_QUAD_BIT |
- VK_SUBGROUP_FEATURE_SHUFFLE_BIT |
- VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT |
VK_SUBGROUP_FEATURE_VOTE_BIT;
+ if (pdevice->rad_info.chip_class >= VI) {
+ properties->supportedOperations |=
+ VK_SUBGROUP_FEATURE_SHUFFLE_BIT |
+ VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT;
+ }
properties->quadOperationsInAllStages = true;
break;
}
queue->hw_ctx = device->ws->ctx_create(device->ws, queue->priority);
if (!queue->hw_ctx)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
return VK_SUCCESS;
}
unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
for (uint32_t i = 0; i < num_features; i++) {
if (enabled_feature[i] && !supported_feature[i])
- return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
+ return vk_error(physical_device->instance, VK_ERROR_FEATURE_NOT_PRESENT);
}
}
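The loop above relies on VkPhysicalDeviceFeatures being a plain sequence of VkBool32 members, so both the supported and the requested feature sets can be walked as flat arrays. A sketch of the assumed setup for the two pointers (supported comes from the physical device, enabled from the application's pCreateInfo):

    /* Sketch: VkPhysicalDeviceFeatures contains nothing but VkBool32
     * members, so in practice it can be scanned as an array. */
    VkPhysicalDeviceFeatures supported;
    radv_GetPhysicalDeviceFeatures(radv_physical_device_to_handle(physical_device),
                                   &supported);

    const VkBool32 *supported_feature = (const VkBool32 *)&supported;
    const VkBool32 *enabled_feature = (const VkBool32 *)pCreateInfo->pEnabledFeatures;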
sizeof(*device), 8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!device)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
device->instance = physical_device->instance;
int index = radv_get_device_extension_index(ext_name);
if (index < 0 || !physical_device->supported_extensions.extensions[index]) {
vk_free(&device->alloc, device);
- return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
+ return vk_error(physical_device->instance, VK_ERROR_EXTENSION_NOT_PRESENT);
}
device->enabled_extensions.extensions[index] = true;
}
device->pbb_allowed = device->physical_device->rad_info.chip_class >= GFX9 &&
- (device->instance->perftest_flags & RADV_PERFTEST_BINNING);
+ ((device->instance->perftest_flags & RADV_PERFTEST_BINNING) ||
+ device->physical_device->rad_info.family == CHIP_RAVEN);
- /* Disabled and not implemented for now. */
- device->dfsm_allowed = device->pbb_allowed && false;
+ /* Enable DFSM only where binning is allowed, i.e. on Raven for now. */
+ device->dfsm_allowed = device->pbb_allowed &&
+ device->physical_device->rad_info.family == CHIP_RAVEN;
#ifdef ANDROID
device->always_use_syncobj = device->physical_device->rad_info.has_syncobj_wait_for_submit;
if (!radv_init_trace(device))
goto fail;
+ fprintf(stderr, "*****************************************************************************\n");
+ fprintf(stderr, "* WARNING: RADV_TRACE_FILE is costly and should only be used for debugging! *\n");
+ fprintf(stderr, "*****************************************************************************\n");
+
fprintf(stderr, "Trace file will be dumped to %s\n", filename);
radv_dump_enabled_options(device, stderr);
}
}
/* None supported at this time */
- return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
+ return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
}
VkResult radv_EnumerateDeviceLayerProperties(
}
/* None supported at this time */
- return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
+ return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
}
void radv_GetDeviceQueue2(
}
static void
-radv_emit_gs_ring_sizes(struct radv_queue *queue, struct radeon_winsys_cs *cs,
+radv_emit_gs_ring_sizes(struct radv_queue *queue, struct radeon_cmdbuf *cs,
struct radeon_winsys_bo *esgs_ring_bo,
uint32_t esgs_ring_size,
struct radeon_winsys_bo *gsvs_ring_bo,
return;
if (esgs_ring_bo)
- radv_cs_add_buffer(queue->device->ws, cs, esgs_ring_bo, 8);
+ radv_cs_add_buffer(queue->device->ws, cs, esgs_ring_bo);
if (gsvs_ring_bo)
- radv_cs_add_buffer(queue->device->ws, cs, gsvs_ring_bo, 8);
+ radv_cs_add_buffer(queue->device->ws, cs, gsvs_ring_bo);
if (queue->device->physical_device->rad_info.chip_class >= CIK) {
radeon_set_uconfig_reg_seq(cs, R_030900_VGT_ESGS_RING_SIZE, 2);
}
static void
-radv_emit_tess_factor_ring(struct radv_queue *queue, struct radeon_winsys_cs *cs,
+radv_emit_tess_factor_ring(struct radv_queue *queue, struct radeon_cmdbuf *cs,
unsigned hs_offchip_param, unsigned tf_ring_size,
struct radeon_winsys_bo *tess_rings_bo)
{
tf_va = radv_buffer_get_va(tess_rings_bo);
- radv_cs_add_buffer(queue->device->ws, cs, tess_rings_bo, 8);
+ radv_cs_add_buffer(queue->device->ws, cs, tess_rings_bo);
if (queue->device->physical_device->rad_info.chip_class >= CIK) {
radeon_set_uconfig_reg(cs, R_030938_VGT_TF_RING_SIZE,
}
static void
-radv_emit_compute_scratch(struct radv_queue *queue, struct radeon_winsys_cs *cs,
+radv_emit_compute_scratch(struct radv_queue *queue, struct radeon_cmdbuf *cs,
struct radeon_winsys_bo *compute_scratch_bo)
{
uint64_t scratch_va;
scratch_va = radv_buffer_get_va(compute_scratch_bo);
- radv_cs_add_buffer(queue->device->ws, cs, compute_scratch_bo, 8);
+ radv_cs_add_buffer(queue->device->ws, cs, compute_scratch_bo);
radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
radeon_emit(cs, scratch_va);
static void
radv_emit_global_shader_pointers(struct radv_queue *queue,
- struct radeon_winsys_cs *cs,
+ struct radeon_cmdbuf *cs,
struct radeon_winsys_bo *descriptor_bo)
{
uint64_t va;
va = radv_buffer_get_va(descriptor_bo);
- radv_cs_add_buffer(queue->device->ws, cs, descriptor_bo, 8);
+ radv_cs_add_buffer(queue->device->ws, cs, descriptor_bo);
if (queue->device->physical_device->rad_info.chip_class >= GFX9) {
uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS};
for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
- radv_emit_shader_pointer(cs, regs[i], va);
+ radv_emit_shader_pointer(queue->device, cs, regs[i],
+ va, true);
}
} else {
uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
R_00B530_SPI_SHADER_USER_DATA_LS_0};
for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
- radv_emit_shader_pointer(cs, regs[i], va);
+ radv_emit_shader_pointer(queue->device, cs, regs[i],
+ va, true);
}
}
}
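Both branches now route through a device-aware radv_emit_shader_pointer() with a trailing flag (true at these call sites, keeping the global descriptor pointers 64-bit). A hedged sketch of the signature these calls imply; the packet layout and the 32-bit-pointer gate are assumptions, not taken from this patch:

    /* Assumed shape of the updated emitter: one dword for a 32-bit shader
     * pointer, two dwords when the pointer must stay 64-bit (global == true
     * at the call sites above). */
    static void
    radv_emit_shader_pointer(struct radv_device *device, struct radeon_cmdbuf *cs,
                             uint32_t sh_offset, uint64_t va, bool global)
    {
    	bool use_32bit = HAVE_32BIT_POINTERS && !global; /* assumed gate */

    	radeon_emit(cs, PKT3(PKT3_SET_SH_REG, use_32bit ? 1 : 2, 0));
    	radeon_emit(cs, (sh_offset - SI_SH_REG_OFFSET) >> 2);
    	radeon_emit(cs, va);
    	if (!use_32bit)
    		radeon_emit(cs, va >> 32);
    }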
uint32_t gsvs_ring_size,
bool needs_tess_rings,
bool needs_sample_positions,
- struct radeon_winsys_cs **initial_full_flush_preamble_cs,
- struct radeon_winsys_cs **initial_preamble_cs,
- struct radeon_winsys_cs **continue_preamble_cs)
+ struct radeon_cmdbuf **initial_full_flush_preamble_cs,
+ struct radeon_cmdbuf **initial_preamble_cs,
+ struct radeon_cmdbuf **continue_preamble_cs)
{
struct radeon_winsys_bo *scratch_bo = NULL;
struct radeon_winsys_bo *descriptor_bo = NULL;
struct radeon_winsys_bo *esgs_ring_bo = NULL;
struct radeon_winsys_bo *gsvs_ring_bo = NULL;
struct radeon_winsys_bo *tess_rings_bo = NULL;
- struct radeon_winsys_cs *dest_cs[3] = {0};
+ struct radeon_cmdbuf *dest_cs[3] = {0};
bool add_tess_rings = false, add_sample_positions = false;
unsigned tess_factor_ring_size = 0, tess_offchip_ring_size = 0;
unsigned max_offchip_buffers;
descriptor_bo = queue->descriptor_bo;
for(int i = 0; i < 3; ++i) {
- struct radeon_winsys_cs *cs = NULL;
+ struct radeon_cmdbuf *cs = NULL;
cs = queue->device->ws->cs_create(queue->device->ws,
queue->queue_family_index ? RING_COMPUTE : RING_GFX);
if (!cs)
dest_cs[i] = cs;
if (scratch_bo)
- radv_cs_add_buffer(queue->device->ws, cs, scratch_bo, 8);
+ radv_cs_add_buffer(queue->device->ws, cs, scratch_bo);
if (descriptor_bo != queue->descriptor_bo) {
uint32_t *map = (uint32_t*)queue->device->ws->buffer_map(descriptor_bo);
RADV_CMD_FLAG_INV_ICACHE |
RADV_CMD_FLAG_INV_SMEM_L1 |
RADV_CMD_FLAG_INV_VMEM_L1 |
- RADV_CMD_FLAG_INV_GLOBAL_L2);
+ RADV_CMD_FLAG_INV_GLOBAL_L2 |
+ RADV_CMD_FLAG_START_PIPELINE_STATS, 0);
} else if (i == 1) {
si_cs_emit_cache_flush(cs,
queue->device->physical_device->rad_info.chip_class,
RADV_CMD_FLAG_INV_ICACHE |
RADV_CMD_FLAG_INV_SMEM_L1 |
RADV_CMD_FLAG_INV_VMEM_L1 |
- RADV_CMD_FLAG_INV_GLOBAL_L2);
+ RADV_CMD_FLAG_INV_GLOBAL_L2 |
+ RADV_CMD_FLAG_START_PIPELINE_STATS, 0);
}
if (!queue->device->ws->cs_finalize(cs))
queue->device->ws->buffer_destroy(gsvs_ring_bo);
if (tess_rings_bo && tess_rings_bo != queue->tess_rings_bo)
queue->device->ws->buffer_destroy(tess_rings_bo);
- return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return vk_error(queue->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
-static VkResult radv_alloc_sem_counts(struct radv_winsys_sem_counts *counts,
+static VkResult radv_alloc_sem_counts(struct radv_instance *instance,
+ struct radv_winsys_sem_counts *counts,
int num_sems,
const VkSemaphore *sems,
VkFence _fence,
if (counts->syncobj_count) {
counts->syncobj = (uint32_t *)malloc(sizeof(uint32_t) * counts->syncobj_count);
if (!counts->syncobj)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
if (counts->sem_count) {
counts->sem = (struct radeon_winsys_sem **)malloc(sizeof(struct radeon_winsys_sem *) * counts->sem_count);
if (!counts->sem) {
free(counts->syncobj);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
}
return VK_SUCCESS;
}
-void radv_free_sem_info(struct radv_winsys_sem_info *sem_info)
+static void
+radv_free_sem_info(struct radv_winsys_sem_info *sem_info)
{
free(sem_info->wait.syncobj);
free(sem_info->wait.sem);
}
}
-VkResult radv_alloc_sem_info(struct radv_winsys_sem_info *sem_info,
- int num_wait_sems,
- const VkSemaphore *wait_sems,
- int num_signal_sems,
- const VkSemaphore *signal_sems,
- VkFence fence)
+static VkResult
+radv_alloc_sem_info(struct radv_instance *instance,
+ struct radv_winsys_sem_info *sem_info,
+ int num_wait_sems,
+ const VkSemaphore *wait_sems,
+ int num_signal_sems,
+ const VkSemaphore *signal_sems,
+ VkFence fence)
{
VkResult ret;
memset(sem_info, 0, sizeof(*sem_info));
- ret = radv_alloc_sem_counts(&sem_info->wait, num_wait_sems, wait_sems, VK_NULL_HANDLE, true);
+ ret = radv_alloc_sem_counts(instance, &sem_info->wait, num_wait_sems, wait_sems, VK_NULL_HANDLE, true);
if (ret)
return ret;
- ret = radv_alloc_sem_counts(&sem_info->signal, num_signal_sems, signal_sems, fence, false);
+ ret = radv_alloc_sem_counts(instance, &sem_info->signal, num_signal_sems, signal_sems, fence, false);
if (ret)
radv_free_sem_info(sem_info);
VkResult result;
struct radv_winsys_sem_info sem_info;
- result = radv_alloc_sem_info(&sem_info, 0, NULL, 0, NULL,
+ result = radv_alloc_sem_info(queue->device->instance, &sem_info, 0, NULL, 0, NULL,
radv_fence_to_handle(fence));
if (result != VK_SUCCESS)
return result;
false, fence->fence);
radv_free_sem_info(&sem_info);
- /* TODO: find a better error */
if (ret)
- return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return vk_error(queue->device->instance, VK_ERROR_DEVICE_LOST);
return VK_SUCCESS;
}
uint32_t scratch_size = 0;
uint32_t compute_scratch_size = 0;
uint32_t esgs_ring_size = 0, gsvs_ring_size = 0;
- struct radeon_winsys_cs *initial_preamble_cs = NULL, *initial_flush_preamble_cs = NULL, *continue_preamble_cs = NULL;
+ struct radeon_cmdbuf *initial_preamble_cs = NULL, *initial_flush_preamble_cs = NULL, *continue_preamble_cs = NULL;
VkResult result;
bool fence_emitted = false;
bool tess_rings_needed = false;
return result;
for (uint32_t i = 0; i < submitCount; i++) {
- struct radeon_winsys_cs **cs_array;
+ struct radeon_cmdbuf **cs_array;
bool do_flush = !i || pSubmits[i].pWaitDstStageMask;
bool can_patch = true;
uint32_t advance;
struct radv_winsys_sem_info sem_info;
- result = radv_alloc_sem_info(&sem_info,
+ result = radv_alloc_sem_info(queue->device->instance,
+ &sem_info,
pSubmits[i].waitSemaphoreCount,
pSubmits[i].pWaitSemaphores,
pSubmits[i].signalSemaphoreCount,
continue;
}
- cs_array = malloc(sizeof(struct radeon_winsys_cs *) *
+ cs_array = malloc(sizeof(struct radeon_cmdbuf *) *
(pSubmits[i].commandBufferCount));
for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
}
for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j += advance) {
- struct radeon_winsys_cs *initial_preamble = (do_flush && !j) ? initial_flush_preamble_cs : initial_preamble_cs;
+ struct radeon_cmdbuf *initial_preamble = (do_flush && !j) ? initial_flush_preamble_cs : initial_preamble_cs;
const struct radv_winsys_bo_list *bo_list = NULL;
advance = MIN2(max_cs_submission,
if (fence) {
if (!fence_emitted) {
- radv_signal_fence(queue, fence);
+ result = radv_signal_fence(queue, fence);
+ if (result != VK_SUCCESS)
+ return result;
}
fence->submitted = true;
}
mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (mem == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
if (wsi_info && wsi_info->implicit_sync)
flags |= RADEON_FLAG_IMPLICIT_SYNC;
return VK_SUCCESS;
}
- return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
+ return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
}
void radv_UnmapMemory(
RADV_FROM_HANDLE(radv_queue, queue, _queue);
struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
bool fence_emitted = false;
+ VkResult result;
+ int ret;
for (uint32_t i = 0; i < bindInfoCount; ++i) {
struct radv_winsys_sem_info sem_info;
}
VkResult result;
- result = radv_alloc_sem_info(&sem_info,
+ result = radv_alloc_sem_info(queue->device->instance,
+ &sem_info,
pBindInfo[i].waitSemaphoreCount,
pBindInfo[i].pWaitSemaphores,
pBindInfo[i].signalSemaphoreCount,
return result;
if (pBindInfo[i].waitSemaphoreCount || pBindInfo[i].signalSemaphoreCount) {
- queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
- &queue->device->empty_cs[queue->queue_family_index],
- 1, NULL, NULL,
- &sem_info, NULL,
- false, base_fence);
+ ret = queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
+ &queue->device->empty_cs[queue->queue_family_index],
+ 1, NULL, NULL,
+ &sem_info, NULL,
+ false, base_fence);
+ if (ret) {
+ radv_loge("failed to submit CS %d\n", i);
+ abort();
+ }
+
fence_emitted = true;
if (fence)
fence->submitted = true;
if (fence) {
if (!fence_emitted) {
- radv_signal_fence(queue, fence);
+ result = radv_signal_fence(queue, fence);
+ if (result != VK_SUCCESS)
+ return result;
}
fence->submitted = true;
}
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!fence)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+ fence->fence_wsi = NULL;
fence->submitted = false;
fence->signalled = !!(pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT);
fence->temp_syncobj = 0;
int ret = device->ws->create_syncobj(device->ws, &fence->syncobj);
if (ret) {
vk_free2(&device->alloc, pAllocator, fence);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
device->ws->signal_syncobj(device->ws, fence->syncobj);
fence->fence = device->ws->create_fence();
if (!fence->fence) {
vk_free2(&device->alloc, pAllocator, fence);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
fence->syncobj = 0;
}
device->ws->destroy_syncobj(device->ws, fence->syncobj);
if (fence->fence)
device->ws->destroy_fence(fence->fence);
+ if (fence->fence_wsi)
+ fence->fence_wsi->destroy(fence->fence_wsi);
vk_free2(&device->alloc, pAllocator, fence);
}
{
for (uint32_t i = 0; i < fenceCount; ++i) {
RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
- if (fence->syncobj || fence->temp_syncobj || (!fence->signalled && !fence->submitted))
+ if (fence->fence == NULL || fence->syncobj ||
+ fence->temp_syncobj ||
+ (!fence->signalled && !fence->submitted))
+ return false;
+ }
+ return true;
+}
+
+static bool radv_all_fences_syncobj(uint32_t fenceCount, const VkFence *pFences)
+{
+ for (uint32_t i = 0; i < fenceCount; ++i) {
+ RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
+ if (fence->syncobj == 0 && fence->temp_syncobj == 0)
return false;
}
return true;
RADV_FROM_HANDLE(radv_device, device, _device);
timeout = radv_get_absolute_timeout(timeout);
- if (device->always_use_syncobj) {
+ if (device->always_use_syncobj &&
+ radv_all_fences_syncobj(fenceCount, pFences))
+ {
uint32_t *handles = malloc(sizeof(uint32_t) * fenceCount);
if (!handles)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
for (uint32_t i = 0; i < fenceCount; ++i) {
RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
uint32_t wait_count = 0;
struct radeon_winsys_fence **fences = malloc(sizeof(struct radeon_winsys_fence *) * fenceCount);
if (!fences)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
for (uint32_t i = 0; i < fenceCount; ++i) {
RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
if (fence->signalled)
continue;
- if (!fence->submitted) {
- while(radv_get_current_time() <= timeout && !fence->submitted)
- /* Do nothing */;
+ if (fence->fence) {
+ if (!fence->submitted) {
+ while(radv_get_current_time() <= timeout &&
+ !fence->submitted)
+ /* Do nothing */;
- if (!fence->submitted)
- return VK_TIMEOUT;
+ if (!fence->submitted)
+ return VK_TIMEOUT;
- /* Recheck as it may have been set by submitting operations. */
- if (fence->signalled)
- continue;
+ /* Recheck as it may have been set by
+ * submitting operations. */
+
+ if (fence->signalled)
+ continue;
+ }
+
+ expired = device->ws->fence_wait(device->ws,
+ fence->fence,
+ true, timeout);
+ if (!expired)
+ return VK_TIMEOUT;
}
- expired = device->ws->fence_wait(device->ws, fence->fence, true, timeout);
- if (!expired)
- return VK_TIMEOUT;
+ if (fence->fence_wsi) {
+ VkResult result = fence->fence_wsi->wait(fence->fence_wsi, timeout);
+ if (result != VK_SUCCESS)
+ return result;
+ }
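fence_wsi is an optional second backing fence driven entirely through function pointers, as the wait here and the destroy in radv_DestroyFence above show. The interface these call sites assume could be sketched as follows; only wait and destroy are grounded in this patch, and the concrete definition lives in the shared WSI code:

    /* Interface implied by the call sites in this patch. */
    struct wsi_fence {
    	/* Waits until the given absolute timeout (cf. radv_get_absolute_timeout()
    	 * above); returns VK_SUCCESS, VK_TIMEOUT, or an error. */
    	VkResult (*wait)(struct wsi_fence *fence, uint64_t timeout);
    	/* Frees the fence and any resources it owns. */
    	void (*destroy)(struct wsi_fence *fence);
    };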
fence->signalled = true;
}
return VK_SUCCESS;
if (!fence->submitted)
return VK_NOT_READY;
- if (!device->ws->fence_wait(device->ws, fence->fence, false, 0))
- return VK_NOT_READY;
+ if (fence->fence) {
+ if (!device->ws->fence_wait(device->ws, fence->fence, false, 0))
+ return VK_NOT_READY;
+ }
+ if (fence->fence_wsi) {
+ VkResult result = fence->fence_wsi->wait(fence->fence_wsi, 0);
+ if (result != VK_SUCCESS) {
+ if (result == VK_TIMEOUT)
+ return VK_NOT_READY;
+ return result;
+ }
+ }
return VK_SUCCESS;
}
sizeof(*sem), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!sem)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
sem->temp_syncobj = 0;
/* create a syncobject if we are going to export this semaphore */
int ret = device->ws->create_syncobj(device->ws, &sem->syncobj);
if (ret) {
vk_free2(&device->alloc, pAllocator, sem);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
sem->sem = NULL;
} else {
sem->sem = device->ws->create_sem(device->ws);
if (!sem->sem) {
vk_free2(&device->alloc, pAllocator, sem);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
sem->syncobj = 0;
}
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!event)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
event->bo = device->ws->buffer_create(device->ws, 8, 8,
RADEON_DOMAIN_GTT,
RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING);
if (!event->bo) {
vk_free2(&device->alloc, pAllocator, event);
- return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
event->map = (uint64_t*)device->ws->buffer_map(event->bo);
buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (buffer == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
buffer->size = pCreateInfo->size;
buffer->usage = pCreateInfo->usage;
4096, 0, RADEON_FLAG_VIRTUAL);
if (!buffer->bo) {
vk_free2(&device->alloc, pAllocator, buffer);
- return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
}
}
ds->db_z_info = S_028038_FORMAT(format) |
S_028038_NUM_SAMPLES(util_logbase2(iview->image->info.samples)) |
S_028038_SW_MODE(iview->image->surface.u.gfx9.surf.swizzle_mode) |
- S_028038_MAXMIP(iview->image->info.levels - 1);
+ S_028038_MAXMIP(iview->image->info.levels - 1) |
+ S_028038_ZRANGE_PRECISION(1);
ds->db_stencil_info = S_02803C_FORMAT(stencil_format) |
S_02803C_SW_MODE(iview->image->surface.u.gfx9.stencil.swizzle_mode);
framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (framebuffer == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
framebuffer->attachment_count = pCreateInfo->attachmentCount;
framebuffer->width = pCreateInfo->width;
sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!sampler)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
radv_init_sampler(device, sampler, pCreateInfo);
*pSampler = radv_sampler_to_handle(sampler);
bool ret = radv_get_memory_fd(device, memory, pFD);
if (ret == false)
- return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
return VK_SUCCESS;
}
int fd,
VkMemoryFdPropertiesKHR *pMemoryFdProperties)
{
+ RADV_FROM_HANDLE(radv_device, device, _device);
+
switch (handleType) {
case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
pMemoryFdProperties->memoryTypeBits = (1 << RADV_MEM_TYPE_COUNT) - 1;
*
* So opaque handle types fall into the default "unsupported" case.
*/
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
}
}
uint32_t syncobj_handle = 0;
int ret = device->ws->import_syncobj(device->ws, fd, &syncobj_handle);
if (ret != 0)
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
if (*syncobj)
device->ws->destroy_syncobj(device->ws, *syncobj);
if (!syncobj_handle) {
int ret = device->ws->create_syncobj(device->ws, &syncobj_handle);
if (ret) {
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
}
}
} else {
int ret = device->ws->import_syncobj_from_sync_file(device->ws, syncobj_handle, fd);
if (ret != 0)
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
}
*syncobj = syncobj_handle;
}
if (ret)
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
return VK_SUCCESS;
}
}
if (ret)
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
+ return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
return VK_SUCCESS;
}