}
}
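+/* Mark the device as lost and report VK_ERROR_DEVICE_LOST through the
+ * instance debug-report machinery. The file/line parameters suggest this
+ * is reached through a radv_device_set_lost() macro; once called, the
+ * entrypoints below check radv_device_is_lost() and fail early.
+ */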
+VkResult
+_radv_device_set_lost(struct radv_device *device,
+ const char *file, int line,
+ const char *msg, ...)
+{
+ VkResult err;
+ va_list ap;
+
+ p_atomic_inc(&device->lost);
+
+ va_start(ap, msg);
+ err = __vk_errorv(device->physical_device->instance, device,
+ VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
+ VK_ERROR_DEVICE_LOST, file, line, msg, ap);
+ va_end(ap);
+
+ return err;
+}
+
VkResult radv_CreateDevice(
VkPhysicalDevice physicalDevice,
const VkDeviceCreateInfo* pCreateInfo,
goto fail;
}
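+	/* RADV_TRAP_HANDLER installs a shader trap handler for debugging
+	 * faulty shaders; it is experimental and currently restricted to
+	 * GFX8 (see the assert below).
+	 */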
+ if (getenv("RADV_TRAP_HANDLER")) {
+ /* TODO: Add support for more hardware. */
+ assert(device->physical_device->rad_info.chip_class == GFX8);
+
+ /* To get the disassembly of the faulty shaders, we have to
+ * keep some shader info around.
+ */
+ keep_shader_info = true;
+
+ if (!radv_trap_handler_init(device))
+ goto fail;
+ }
+
device->keep_shader_info = keep_shader_info;
result = radv_device_init_meta(device);
if (result != VK_SUCCESS)
radv_thread_trace_finish(device);
+ radv_trap_handler_finish(device);
+
if (device->trace_bo)
device->ws->buffer_destroy(device->trace_bo);
VkPipelineCache pc = radv_pipeline_cache_to_handle(device->mem_cache);
radv_DestroyPipelineCache(radv_device_to_handle(device), pc, NULL);
+ radv_trap_handler_finish(device);
+
radv_destroy_shader_slabs(device);
pthread_cond_destroy(&device->timeline_cond);
}
}
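+/* Program the trap handler registers: TBA (trap base address) points at
+ * the trap handler shader and TMA (trap memory address) at the buffer the
+ * handler uses to save wave state.
+ */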
+static void
+radv_emit_trap_handler(struct radv_queue *queue,
+ struct radeon_cmdbuf *cs,
+ struct radeon_winsys_bo *tma_bo)
+{
+ struct radv_device *device = queue->device;
+ struct radeon_winsys_bo *tba_bo;
+ uint64_t tba_va, tma_va;
+
+ if (!device->trap_handler_shader || !tma_bo)
+ return;
+
+ tba_bo = device->trap_handler_shader->bo;
+
+ tba_va = radv_buffer_get_va(tba_bo) + device->trap_handler_shader->bo_offset;
+ tma_va = radv_buffer_get_va(tma_bo);
+
+	radv_cs_add_buffer(device->ws, cs, tba_bo);
+	radv_cs_add_buffer(device->ws, cs, tma_bo);
+
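+	/* The general queue exposes one TBA/TMA register pair per hardware
+	 * shader stage, while the compute queue has a single pair.
+	 */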
+ if (queue->queue_family_index == RADV_QUEUE_GENERAL) {
+ uint32_t regs[] = {R_00B000_SPI_SHADER_TBA_LO_PS,
+ R_00B100_SPI_SHADER_TBA_LO_VS,
+ R_00B200_SPI_SHADER_TBA_LO_GS,
+ R_00B300_SPI_SHADER_TBA_LO_ES,
+ R_00B400_SPI_SHADER_TBA_LO_HS,
+ R_00B500_SPI_SHADER_TBA_LO_LS};
+
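+		/* The registers hold the 256-byte-aligned address, split
+		 * into a low (bits 39:8) and a high (bits 47:40) half.
+		 */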
+		for (unsigned i = 0; i < ARRAY_SIZE(regs); ++i) {
+ radeon_set_sh_reg_seq(cs, regs[i], 4);
+ radeon_emit(cs, tba_va >> 8);
+ radeon_emit(cs, tba_va >> 40);
+ radeon_emit(cs, tma_va >> 8);
+ radeon_emit(cs, tma_va >> 40);
+ }
+ } else {
+ radeon_set_sh_reg_seq(cs, R_00B838_COMPUTE_TBA_LO, 4);
+ radeon_emit(cs, tba_va >> 8);
+ radeon_emit(cs, tba_va >> 40);
+ radeon_emit(cs, tma_va >> 8);
+ radeon_emit(cs, tma_va >> 40);
+ }
+}
+
static void
radv_init_graphics_state(struct radeon_cmdbuf *cs, struct radv_queue *queue)
{
compute_scratch_waves, compute_scratch_bo);
radv_emit_graphics_scratch(queue, cs, scratch_size_per_wave,
scratch_waves, scratch_bo);
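+	/* Set up TBA/TMA so that faulting waves jump into the trap handler. */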
+ radv_emit_trap_handler(queue, cs, queue->device->tma_bo);
if (gds_bo)
radv_cs_add_buffer(queue->device->ws, cs, gds_bo);
if (queue->device->trace_bo) {
radv_check_gpu_hangs(queue, cs_array[j]);
}
+
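+		/* If the trap handler is enabled, check whether any wave hit
+		 * a trap during this submission.
+		 */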
+ if (queue->device->tma_bo) {
+ radv_check_trap_handler(queue);
+ }
}
free(cs_array);
* VK_ERROR_DEVICE_LOST to ensure the clients do not attempt
* to submit the same job again to this device.
*/
- result = VK_ERROR_DEVICE_LOST;
+ result = radv_device_set_lost(queue->device, "vkQueueSubmit() failed");
}
radv_free_temp_syncobjs(queue->device,
uint32_t fence_idx = 0;
bool flushed_caches = false;
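+	/* Per the Vulkan spec, once the device is lost every subsequent
+	 * submission must fail with VK_ERROR_DEVICE_LOST.
+	 */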
+ if (radv_device_is_lost(queue->device))
+ return VK_ERROR_DEVICE_LOST;
+
if (fence != VK_NULL_HANDLE) {
for (uint32_t i = 0; i < submitCount; ++i)
if (radv_submit_has_effects(pSubmits + i))
{
RADV_FROM_HANDLE(radv_queue, queue, _queue);
+ if (radv_device_is_lost(queue->device))
+ return VK_ERROR_DEVICE_LOST;
+
pthread_mutex_lock(&queue->pending_mutex);
while (!list_is_empty(&queue->pending_submissions)) {
pthread_cond_wait(&queue->device->timeline_cond, &queue->pending_mutex);
if (!queue->device->ws->ctx_wait_idle(queue->hw_ctx,
radv_queue_family_to_ring(queue->queue_family_index),
queue->queue_idx)) {
- return vk_errorf(queue->device->instance, VK_ERROR_DEVICE_LOST,
- "Failed to wait for a '%s' queue to be idle. "
- "GPU hang ?", radv_get_queue_family_name(queue));
+ return radv_device_set_lost(queue->device,
+ "Failed to wait for a '%s' queue "
+ "to be idle. GPU hang ?",
+ radv_get_queue_family_name(queue));
}
return VK_SUCCESS;
VkResult result;
uint32_t fence_idx = 0;
+ if (radv_device_is_lost(queue->device))
+ return VK_ERROR_DEVICE_LOST;
+
if (fence != VK_NULL_HANDLE) {
for (uint32_t i = 0; i < bindInfoCount; ++i)
if (radv_sparse_bind_has_effects(pBindInfo + i))
uint64_t timeout)
{
RADV_FROM_HANDLE(radv_device, device, _device);
+
+ if (radv_device_is_lost(device))
+ return VK_ERROR_DEVICE_LOST;
+
timeout = radv_get_absolute_timeout(timeout);
if (device->always_use_syncobj &&
fence->temporary.kind != RADV_FENCE_NONE ?
&fence->temporary : &fence->permanent;
+ if (radv_device_is_lost(device))
+ return VK_ERROR_DEVICE_LOST;
+
switch (part->kind) {
case RADV_FENCE_NONE:
break;
RADV_FROM_HANDLE(radv_device, device, _device);
RADV_FROM_HANDLE(radv_semaphore, semaphore, _semaphore);
+ if (radv_device_is_lost(device))
+ return VK_ERROR_DEVICE_LOST;
+
struct radv_semaphore_part *part =
semaphore->temporary.kind != RADV_SEMAPHORE_NONE ? &semaphore->temporary : &semaphore->permanent;
uint64_t timeout)
{
RADV_FROM_HANDLE(radv_device, device, _device);
+
+ if (radv_device_is_lost(device))
+ return VK_ERROR_DEVICE_LOST;
+
uint64_t abs_timeout = radv_get_absolute_timeout(timeout);
if (radv_semaphore_from_handle(pWaitInfo->pSemaphores[0])->permanent.kind == RADV_SEMAPHORE_TIMELINE)
VkDevice _device,
VkEvent _event)
{
+ RADV_FROM_HANDLE(radv_device, device, _device);
RADV_FROM_HANDLE(radv_event, event, _event);
+ if (radv_device_is_lost(device))
+ return VK_ERROR_DEVICE_LOST;
+
if (*event->map == 1)
return VK_EVENT_SET;
return VK_EVENT_RESET;