ac: declare an enum for the OOB select field on GFX10
[mesa.git] / src / amd / vulkan / radv_device.c
index 5fdb1903c09435a782217f029b410092c2b550fc..d56125d328e49f490d26a44007fe84010d2f8671 100644 (file)
  * IN THE SOFTWARE.
  */
 
+#include "dirent.h"
+#include <errno.h>
+#include <linux/audit.h>
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/seccomp.h>
+#include <linux/unistd.h>
 #include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
 #include <string.h>
+#include <sys/prctl.h>
+#include <sys/select.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
 #include <unistd.h>
 #include <fcntl.h>
 #include <llvm/Config/llvm-config.h>
+
 #include "radv_debug.h"
 #include "radv_private.h"
 #include "radv_shader.h"
 #include "radv_cs.h"
 #include "util/disk_cache.h"
-#include "util/strtod.h"
 #include "vk_util.h"
 #include <xf86drm.h>
 #include <amdgpu.h>
 #include "util/build_id.h"
 #include "util/debug.h"
 #include "util/mesa-sha1.h"
+#include "util/timespec.h"
+#include "util/u_atomic.h"
 #include "compiler/glsl_types.h"
 #include "util/xmlpool.h"
 
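+/* Forward declarations for semaphore/timeline helpers that are defined
+ * further down in this file but used before their definitions.
+ */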
+static struct radv_timeline_point *
+radv_timeline_find_point_at_least_locked(struct radv_device *device,
+                                         struct radv_timeline *timeline,
+                                         uint64_t p);
+
+static struct radv_timeline_point *
+radv_timeline_add_point_locked(struct radv_device *device,
+                               struct radv_timeline *timeline,
+                               uint64_t p);
+
+static void
+radv_timeline_trigger_waiters_locked(struct radv_timeline *timeline,
+                                     struct list_head *processing_list);
+
+static void
+radv_destroy_semaphore_part(struct radv_device *device,
+                            struct radv_semaphore_part *part);
+
 static int
 radv_device_get_cache_uuid(enum radeon_family family, void *uuid)
 {
@@ -97,6 +129,42 @@ radv_get_vram_size(struct radv_physical_device *device)
        return device->rad_info.vram_size - radv_get_visible_vram_size(device);
 }
 
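+/* Each base memory type below also gets an L2-uncached variant (see
+ * radv_physical_device_init_mem_types); these helpers match a base type
+ * together with its uncached twin.
+ */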
+static bool
+radv_is_mem_type_vram(enum radv_mem_type type)
+{
+       return type == RADV_MEM_TYPE_VRAM ||
+              type == RADV_MEM_TYPE_VRAM_UNCACHED;
+}
+
+static bool
+radv_is_mem_type_vram_visible(enum radv_mem_type type)
+{
+       return type == RADV_MEM_TYPE_VRAM_CPU_ACCESS ||
+              type == RADV_MEM_TYPE_VRAM_CPU_ACCESS_UNCACHED;
+}
+
+static bool
+radv_is_mem_type_gtt_wc(enum radv_mem_type type)
+{
+       return type == RADV_MEM_TYPE_GTT_WRITE_COMBINE ||
+              type == RADV_MEM_TYPE_GTT_WRITE_COMBINE_VRAM_UNCACHED;
+}
+
+static bool
+radv_is_mem_type_gtt_cached(enum radv_mem_type type)
+{
+       return type == RADV_MEM_TYPE_GTT_CACHED ||
+              type == RADV_MEM_TYPE_GTT_CACHED_VRAM_UNCACHED;
+}
+
+static bool
+radv_is_mem_type_uncached(enum radv_mem_type type)
+{
+       return type == RADV_MEM_TYPE_VRAM_UNCACHED ||
+              type == RADV_MEM_TYPE_VRAM_CPU_ACCESS_UNCACHED ||
+              type == RADV_MEM_TYPE_GTT_WRITE_COMBINE_VRAM_UNCACHED ||
+              type == RADV_MEM_TYPE_GTT_CACHED_VRAM_UNCACHED;
+}
+
 static void
 radv_physical_device_init_mem_types(struct radv_physical_device *device)
 {
@@ -177,6 +245,46 @@ radv_physical_device_init_mem_types(struct radv_physical_device *device)
                };
        }
        device->memory_properties.memoryTypeCount = type_count;
+
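+       /* On GPUs that can bypass the L2 cache, expose an extra flavor of
+        * each eligible memory type with the AMD device-coherent/uncached
+        * property flags, for VK_AMD_device_coherent_memory.
+        */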
+       if (device->rad_info.has_l2_uncached) {
+               for (int i = 0; i < device->memory_properties.memoryTypeCount; i++) {
+                       VkMemoryType mem_type = device->memory_properties.memoryTypes[i];
+
+                       if ((mem_type.propertyFlags & (VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
+                                                      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)) ||
+                           mem_type.propertyFlags == VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {
+                               enum radv_mem_type mem_type_id;
+
+                               switch (device->mem_type_indices[i]) {
+                               case RADV_MEM_TYPE_VRAM:
+                                       mem_type_id = RADV_MEM_TYPE_VRAM_UNCACHED;
+                                       break;
+                               case RADV_MEM_TYPE_VRAM_CPU_ACCESS:
+                                       mem_type_id = RADV_MEM_TYPE_VRAM_CPU_ACCESS_UNCACHED;
+                                       break;
+                               case RADV_MEM_TYPE_GTT_WRITE_COMBINE:
+                                       mem_type_id = RADV_MEM_TYPE_GTT_WRITE_COMBINE_VRAM_UNCACHED;
+                                       break;
+                               case RADV_MEM_TYPE_GTT_CACHED:
+                                       mem_type_id = RADV_MEM_TYPE_GTT_CACHED_VRAM_UNCACHED;
+                                       break;
+                               default:
+                                       unreachable("invalid memory type");
+                               }
+
+                               VkMemoryPropertyFlags property_flags = mem_type.propertyFlags |
+                                       VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD |
+                                       VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD;
+
+                               device->mem_type_indices[type_count] = mem_type_id;
+                               device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
+                                       .propertyFlags = property_flags,
+                                       .heapIndex = mem_type.heapIndex,
+                               };
+                       }
+               }
+               device->memory_properties.memoryTypeCount = type_count;
+       }
 }
 
 static void
@@ -290,8 +398,7 @@ radv_physical_device_init(struct radv_physical_device *device,
        radv_handle_env_var_force_family(device);
 
        device->use_aco = instance->perftest_flags & RADV_PERFTEST_ACO;
-       if ((device->rad_info.chip_class < GFX8 ||
-            device->rad_info.chip_class > GFX9) && device->use_aco) {
+       if (device->rad_info.chip_class < GFX7 && device->use_aco) {
                fprintf(stderr, "WARNING: disabling ACO on unsupported GPUs.\n");
                device->use_aco = false;
        }
@@ -310,8 +417,7 @@ radv_physical_device_init(struct radv_physical_device *device,
        /* These flags affect shader compilation. */
        uint64_t shader_env_flags =
                (device->instance->perftest_flags & RADV_PERFTEST_SISCHED ? 0x1 : 0) |
-               (device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH ? 0x2 : 0) |
-               (device->use_aco ? 0x4 : 0);
+               (device->use_aco ? 0x2 : 0);
 
        /* The gpu id is already embedded in the uuid so we just pass "radv"
         * when creating the cache.
@@ -333,8 +439,8 @@ radv_physical_device_init(struct radv_physical_device *device,
        device->dcc_msaa_allowed =
                (device->instance->perftest_flags & RADV_PERFTEST_DCC_MSAA);
 
-       device->use_shader_ballot = device->rad_info.chip_class >= GFX8 &&
-                                   (device->use_aco || device->instance->perftest_flags & RADV_PERFTEST_SHADER_BALLOT);
+       device->use_shader_ballot = (device->use_aco && device->rad_info.chip_class >= GFX8) ||
+                                   (device->instance->perftest_flags & RADV_PERFTEST_SHADER_BALLOT);
 
        device->use_ngg = device->rad_info.chip_class >= GFX10 &&
                          device->rad_info.family != CHIP_NAVI14 &&
@@ -437,7 +543,6 @@ static const struct debug_control radv_debug_options[] = {
        {"shaderstats", RADV_DEBUG_DUMP_SHADER_STATS},
        {"nohiz", RADV_DEBUG_NO_HIZ},
        {"nocompute", RADV_DEBUG_NO_COMPUTE_QUEUE},
-       {"unsafemath", RADV_DEBUG_UNSAFE_MATH},
        {"allbos", RADV_DEBUG_ALL_BOS},
        {"noibs", RADV_DEBUG_NO_IBS},
        {"spirv", RADV_DEBUG_DUMP_SPIRV},
@@ -459,6 +564,7 @@ static const struct debug_control radv_debug_options[] = {
        {"noshaderballot", RADV_DEBUG_NO_SHADER_BALLOT},
        {"allentrypoints", RADV_DEBUG_ALL_ENTRYPOINTS},
        {"metashaders", RADV_DEBUG_DUMP_META_SHADERS},
+       {"nomemorycache", RADV_DEBUG_NO_MEMORY_CACHE},
        {NULL, 0}
 };
 
@@ -520,10 +626,11 @@ radv_handle_per_app_options(struct radv_instance *instance,
                if (LLVM_VERSION_MAJOR < 9)
                        instance->debug_flags |= RADV_DEBUG_NO_LOAD_STORE_OPT;
        } else if (!strcmp(name, "Wolfenstein: Youngblood")) {
-               if (!(instance->debug_flags & RADV_DEBUG_NO_SHADER_BALLOT)) {
+               if (!(instance->debug_flags & RADV_DEBUG_NO_SHADER_BALLOT) &&
+                   !(instance->perftest_flags & RADV_PERFTEST_ACO)) {
                        /* Force enable VK_AMD_shader_ballot because it looks
                         * safe and it gives a nice boost (+20% on Vega 56 at
-                        * this time).
+                        * this time). It also prevents corruption on LLVM.
                         */
                        instance->perftest_flags |= RADV_PERFTEST_SHADER_BALLOT;
                }
@@ -606,9 +713,19 @@ VkResult radv_CreateInstance(
        instance->apiVersion = client_version;
        instance->physicalDeviceCount = -1;
 
+       /* Get secure compile thread count. NOTE: We cap this at 32 */
+#define MAX_SC_PROCS 32
+       char *num_sc_threads = getenv("RADV_SECURE_COMPILE_THREADS");
+       if (num_sc_threads)
+               instance->num_sc_threads = MIN2(strtoul(num_sc_threads, NULL, 10), MAX_SC_PROCS);
+
        instance->debug_flags = parse_debug_string(getenv("RADV_DEBUG"),
                                                   radv_debug_options);
 
+       /* Disable memory cache when secure compile is set */
+       if (radv_device_use_secure_compile(instance))
+               instance->debug_flags |= RADV_DEBUG_NO_MEMORY_CACHE;
+
        instance->perftest_flags = parse_debug_string(getenv("RADV_PERFTEST"),
                                                   radv_perftest_options);
 
@@ -640,7 +757,6 @@ VkResult radv_CreateInstance(
                                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
        instance->engineVersion = engine_version;
 
-       _mesa_locale_init();
        glsl_type_singleton_init_or_ref();
 
        VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
@@ -671,7 +787,6 @@ void radv_DestroyInstance(
        VG(VALGRIND_DESTROY_MEMPOOL(instance));
 
        glsl_type_singleton_decref();
-       _mesa_locale_fini();
 
        driDestroyOptionCache(&instance->dri_options);
        driDestroyOptionInfo(&instance->available_dri_options);
@@ -914,8 +1029,8 @@ void radv_GetPhysicalDeviceFeatures2(
                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: {
                        VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *features =
                                (VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)ext;
-                       features->vertexAttributeInstanceRateDivisor = VK_TRUE;
-                       features->vertexAttributeInstanceRateZeroDivisor = VK_TRUE;
+                       features->vertexAttributeInstanceRateDivisor = true;
+                       features->vertexAttributeInstanceRateZeroDivisor = true;
                        break;
                }
                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
@@ -934,7 +1049,7 @@ void radv_GetPhysicalDeviceFeatures2(
                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT: {
                        VkPhysicalDeviceMemoryPriorityFeaturesEXT *features =
                                (VkPhysicalDeviceMemoryPriorityFeaturesEXT *)ext;
-                       features->memoryPriority = VK_TRUE;
+                       features->memoryPriority = true;
                        break;
                }
                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT: {
@@ -945,6 +1060,14 @@ void radv_GetPhysicalDeviceFeatures2(
                        features->bufferDeviceAddressMultiDevice = false;
                        break;
                }
+               case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_KHR: {
+                       VkPhysicalDeviceBufferDeviceAddressFeaturesKHR *features =
+                               (VkPhysicalDeviceBufferDeviceAddressFeaturesKHR *)ext;
+                       features->bufferDeviceAddress = true;
+                       features->bufferDeviceAddressCaptureReplay = false;
+                       features->bufferDeviceAddressMultiDevice = false;
+                       break;
+               }
                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT: {
                        VkPhysicalDeviceDepthClipEnableFeaturesEXT *features =
                                (VkPhysicalDeviceDepthClipEnableFeaturesEXT *)ext;
@@ -1044,6 +1167,37 @@ void radv_GetPhysicalDeviceFeatures2(
                        features->texelBufferAlignment = true;
                        break;
                }
+               case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR: {
+                       VkPhysicalDeviceTimelineSemaphoreFeaturesKHR *features =
+                               (VkPhysicalDeviceTimelineSemaphoreFeaturesKHR *) ext;
+                       features->timelineSemaphore = true;
+                       break;
+               }
+               case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT: {
+                       VkPhysicalDeviceSubgroupSizeControlFeaturesEXT *features =
+                               (VkPhysicalDeviceSubgroupSizeControlFeaturesEXT *)ext;
+                       features->subgroupSizeControl = true;
+                       features->computeFullSubgroups = true;
+                       break;
+               }
+               case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD: {
+                       VkPhysicalDeviceCoherentMemoryFeaturesAMD *features =
+                               (VkPhysicalDeviceCoherentMemoryFeaturesAMD *)ext;
+                       features->deviceCoherentMemory = pdevice->rad_info.has_l2_uncached;
+                       break;
+               }
+               case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR: {
+                       VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR *features =
+                               (VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR *)ext;
+                       features->shaderSubgroupExtendedTypes = true;
+                       break;
+               }
+               case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR: {
+                       VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR *features =
+                               (VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR *)ext;
+                       features->separateDepthStencilLayouts = true;
+                       break;
+               }
                default:
                        break;
                }
@@ -1051,25 +1205,32 @@ void radv_GetPhysicalDeviceFeatures2(
        return radv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
 }
 
-void radv_GetPhysicalDeviceProperties(
-       VkPhysicalDevice                            physicalDevice,
-       VkPhysicalDeviceProperties*                 pProperties)
+static size_t
+radv_max_descriptor_set_size(void)
 {
-       RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
-       VkSampleCountFlags sample_counts = 0xf;
-
        /* make sure that the entire descriptor set is addressable with a signed
         * 32-bit int. So the sum of all limits scaled by descriptor size has to
         * be at most 2 GiB. the combined image & samples object count as one of
         * both. This limit is for the pipeline layout, not for the set layout, but
         * there is no set limit, so we just set a pipeline limit. I don't think
         * any app is going to hit this soon. */
-       size_t max_descriptor_set_size = ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
+       return ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS
+                            - MAX_INLINE_UNIFORM_BLOCK_SIZE * MAX_INLINE_UNIFORM_BLOCK_COUNT) /
                  (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
                   32 /* storage buffer, 32 due to potential space wasted on alignment */ +
                   32 /* sampler, largest when combined with image */ +
                   64 /* sampled image */ +
                   64 /* storage image */);
+}
+
+void radv_GetPhysicalDeviceProperties(
+       VkPhysicalDevice                            physicalDevice,
+       VkPhysicalDeviceProperties*                 pProperties)
+{
+       RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
+       VkSampleCountFlags sample_counts = 0xf;
+
+       size_t max_descriptor_set_size = radv_max_descriptor_set_size();
 
        VkPhysicalDeviceLimits limits = {
                .maxImageDimension1D                      = (1 << 14),
@@ -1125,11 +1286,11 @@ void radv_GetPhysicalDeviceProperties(
                .maxFragmentCombinedOutputResources       = 8,
                .maxComputeSharedMemorySize               = 32768,
                .maxComputeWorkGroupCount                 = { 65535, 65535, 65535 },
-               .maxComputeWorkGroupInvocations           = 2048,
+               .maxComputeWorkGroupInvocations           = 1024,
                .maxComputeWorkGroupSize = {
-                       2048,
-                       2048,
-                       2048
+                       1024,
+                       1024,
+                       1024
                },
                .subPixelPrecisionBits                    = 8,
                .subTexelPrecisionBits                    = 8,
@@ -1247,17 +1408,18 @@ void radv_GetPhysicalDeviceProperties2(
                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
                        VkPhysicalDeviceSubgroupProperties *properties =
                            (VkPhysicalDeviceSubgroupProperties*)ext;
-                       properties->subgroupSize = 64;
+                       properties->subgroupSize = RADV_SUBGROUP_SIZE;
                        properties->supportedStages = VK_SHADER_STAGE_ALL;
                        properties->supportedOperations =
                                                        VK_SUBGROUP_FEATURE_BASIC_BIT |
-                                                       VK_SUBGROUP_FEATURE_BALLOT_BIT |
-                                                       VK_SUBGROUP_FEATURE_QUAD_BIT |
-                                                       VK_SUBGROUP_FEATURE_VOTE_BIT;
-                       if (pdevice->rad_info.chip_class >= GFX8) {
-                               properties->supportedOperations |=
+                                                       VK_SUBGROUP_FEATURE_VOTE_BIT |
                                                        VK_SUBGROUP_FEATURE_ARITHMETIC_BIT |
+                                                       VK_SUBGROUP_FEATURE_BALLOT_BIT |
                                                        VK_SUBGROUP_FEATURE_CLUSTERED_BIT |
+                                                       VK_SUBGROUP_FEATURE_QUAD_BIT;
+                       if (pdevice->rad_info.chip_class == GFX8 ||
+                           pdevice->rad_info.chip_class == GFX9) {
+                               properties->supportedOperations |=
                                                        VK_SUBGROUP_FEATURE_SHUFFLE_BIT |
                                                        VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT;
                        }
@@ -1267,11 +1429,8 @@ void radv_GetPhysicalDeviceProperties2(
                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
                        VkPhysicalDeviceMaintenance3Properties *properties =
                            (VkPhysicalDeviceMaintenance3Properties*)ext;
-                       /* Make sure everything is addressable by a signed 32-bit int, and
-                        * our largest descriptors are 96 bytes. */
-                       properties->maxPerSetDescriptors = (1ull << 31) / 96;
-                       /* Our buffer size fields allow only this much */
-                       properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
+                       properties->maxPerSetDescriptors = RADV_MAX_PER_SET_DESCRIPTORS;
+                       properties->maxMemoryAllocationSize = RADV_MAX_MEMORY_ALLOCATION_SIZE;
                        break;
                }
                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT: {
@@ -1348,13 +1507,7 @@ void radv_GetPhysicalDeviceProperties2(
                        properties->robustBufferAccessUpdateAfterBind = false;
                        properties->quadDivergentImplicitLod = false;
 
-                       size_t max_descriptor_set_size = ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS -
-                               MAX_INLINE_UNIFORM_BLOCK_SIZE * MAX_INLINE_UNIFORM_BLOCK_COUNT) /
-                                 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
-                                  32 /* storage buffer, 32 due to potential space wasted on alignment */ +
-                                  32 /* sampler, largest when combined with image */ +
-                                  64 /* sampled image */ +
-                                  64 /* storage image */);
+                       size_t max_descriptor_set_size = radv_max_descriptor_set_size();
                        properties->maxPerStageDescriptorUpdateAfterBindSamplers = max_descriptor_set_size;
                        properties->maxPerStageDescriptorUpdateAfterBindUniformBuffers = max_descriptor_set_size;
                        properties->maxPerStageDescriptorUpdateAfterBindStorageBuffers = max_descriptor_set_size;
@@ -1384,12 +1537,12 @@ void radv_GetPhysicalDeviceProperties2(
                        properties->primitiveOverestimationSize = 0;
                        properties->maxExtraPrimitiveOverestimationSize = 0;
                        properties->extraPrimitiveOverestimationSizeGranularity = 0;
-                       properties->primitiveUnderestimation = VK_FALSE;
-                       properties->conservativePointAndLineRasterization = VK_FALSE;
-                       properties->degenerateTrianglesRasterized = VK_FALSE;
-                       properties->degenerateLinesRasterized = VK_FALSE;
-                       properties->fullyCoveredFragmentShaderInputVariable = VK_FALSE;
-                       properties->conservativeRasterizationPostDepthCoverage = VK_FALSE;
+                       properties->primitiveUnderestimation = false;
+                       properties->conservativePointAndLineRasterization = false;
+                       properties->degenerateTrianglesRasterized = false;
+                       properties->degenerateLinesRasterized = false;
+                       properties->fullyCoveredFragmentShaderInputVariable = false;
+                       properties->conservativeRasterizationPostDepthCoverage = false;
                        break;
                }
                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT: {
@@ -1455,7 +1608,7 @@ void radv_GetPhysicalDeviceProperties2(
                        properties->sampleLocationCoordinateRange[0] = 0.0f;
                        properties->sampleLocationCoordinateRange[1] = 0.9375f;
                        properties->sampleLocationSubPixelBits = 4;
-                       properties->variableSampleLocations = VK_FALSE;
+                       properties->variableSampleLocations = false;
                        break;
                }
                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR: {
@@ -1475,8 +1628,8 @@ void radv_GetPhysicalDeviceProperties2(
                                VK_RESOLVE_MODE_MIN_BIT_KHR |
                                VK_RESOLVE_MODE_MAX_BIT_KHR;
 
-                       properties->independentResolveNone = VK_TRUE;
-                       properties->independentResolve = VK_TRUE;
+                       properties->independentResolveNone = true;
+                       properties->independentResolve = true;
                        break;
                }
                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT: {
@@ -1506,6 +1659,8 @@ void radv_GetPhysicalDeviceProperties2(
                         * support for changing the register. The same logic
                         * applies for the rounding modes because they are
                         * configured with the same config register.
+                        * TODO: we can enable a lot of these for ACO when it
+                        * supports all stages
                         */
                        properties->shaderDenormFlushToZeroFloat32 = true;
                        properties->shaderDenormPreserveFloat32 = false;
@@ -1514,16 +1669,37 @@ void radv_GetPhysicalDeviceProperties2(
                        properties->shaderSignedZeroInfNanPreserveFloat32 = true;
 
                        properties->shaderDenormFlushToZeroFloat16 = false;
-                       properties->shaderDenormPreserveFloat16 = true;
-                       properties->shaderRoundingModeRTEFloat16 = true;
+                       properties->shaderDenormPreserveFloat16 = pdevice->rad_info.chip_class >= GFX8;
+                       properties->shaderRoundingModeRTEFloat16 = pdevice->rad_info.chip_class >= GFX8;
                        properties->shaderRoundingModeRTZFloat16 = false;
-                       properties->shaderSignedZeroInfNanPreserveFloat16 = true;
+                       properties->shaderSignedZeroInfNanPreserveFloat16 = pdevice->rad_info.chip_class >= GFX8;
 
                        properties->shaderDenormFlushToZeroFloat64 = false;
-                       properties->shaderDenormPreserveFloat64 = true;
-                       properties->shaderRoundingModeRTEFloat64 = true;
+                       properties->shaderDenormPreserveFloat64 = pdevice->rad_info.chip_class >= GFX8;
+                       properties->shaderRoundingModeRTEFloat64 = pdevice->rad_info.chip_class >= GFX8;
                        properties->shaderRoundingModeRTZFloat64 = false;
-                       properties->shaderSignedZeroInfNanPreserveFloat64 = true;
+                       properties->shaderSignedZeroInfNanPreserveFloat64 = pdevice->rad_info.chip_class >= GFX8;
+                       break;
+               }
+               case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES_KHR: {
+                       VkPhysicalDeviceTimelineSemaphorePropertiesKHR *props =
+                               (VkPhysicalDeviceTimelineSemaphorePropertiesKHR *) ext;
+                       props->maxTimelineSemaphoreValueDifference = UINT64_MAX;
+                       break;
+               }
+               case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT: {
+                       VkPhysicalDeviceSubgroupSizeControlPropertiesEXT *props =
+                               (VkPhysicalDeviceSubgroupSizeControlPropertiesEXT *)ext;
+                       props->minSubgroupSize = 64;
+                       props->maxSubgroupSize = 64;
+                       props->maxComputeWorkgroupSubgroups = UINT32_MAX;
+                       props->requiredSubgroupSizeStages = 0;
+
+                       if (pdevice->rad_info.chip_class >= GFX10) {
+                               /* Only GFX10+ supports wave32. */
+                               props->minSubgroupSize = 32;
+                               props->requiredSubgroupSizeStages = VK_SHADER_STAGE_COMPUTE_BIT;
+                       }
                        break;
                }
                default:
@@ -1539,7 +1715,7 @@ static void radv_get_physical_device_queue_family_properties(
 {
        int num_queue_families = 1;
        int idx;
-       if (pdevice->rad_info.num_compute_rings > 0 &&
+       if (pdevice->rad_info.num_rings[RING_COMPUTE] > 0 &&
            !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE))
                num_queue_families++;
 
@@ -1565,14 +1741,14 @@ static void radv_get_physical_device_queue_family_properties(
                idx++;
        }
 
-       if (pdevice->rad_info.num_compute_rings > 0 &&
+       if (pdevice->rad_info.num_rings[RING_COMPUTE] > 0 &&
            !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE)) {
                if (*pCount > idx) {
                        *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
                                .queueFlags = VK_QUEUE_COMPUTE_BIT |
                                              VK_QUEUE_TRANSFER_BIT |
                                              VK_QUEUE_SPARSE_BINDING_BIT,
-                               .queueCount = pdevice->rad_info.num_compute_rings,
+                               .queueCount = pdevice->rad_info.num_rings[RING_COMPUTE],
                                .timestampValidBits = 64,
                                .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
                        };
@@ -1652,8 +1828,7 @@ radv_get_memory_budget_properties(VkPhysicalDevice physicalDevice,
        for (int i = 0; i < device->memory_properties.memoryTypeCount; i++) {
                uint32_t heap_index = device->memory_properties.memoryTypes[i].heapIndex;
 
-               switch (device->mem_type_indices[i]) {
-               case RADV_MEM_TYPE_VRAM:
+               if (radv_is_mem_type_vram(device->mem_type_indices[i])) {
                        heap_usage = device->ws->query_value(device->ws,
                                                             RADEON_ALLOCATED_VRAM);
 
@@ -1663,8 +1838,7 @@ radv_get_memory_budget_properties(VkPhysicalDevice physicalDevice,
 
                        memoryBudget->heapBudget[heap_index] = heap_budget;
                        memoryBudget->heapUsage[heap_index] = heap_usage;
-                       break;
-               case RADV_MEM_TYPE_VRAM_CPU_ACCESS:
+               } else if (radv_is_mem_type_vram_visible(device->mem_type_indices[i])) {
                        heap_usage = device->ws->query_value(device->ws,
                                                             RADEON_ALLOCATED_VRAM_VIS);
 
@@ -1674,8 +1848,7 @@ radv_get_memory_budget_properties(VkPhysicalDevice physicalDevice,
 
                        memoryBudget->heapBudget[heap_index] = heap_budget;
                        memoryBudget->heapUsage[heap_index] = heap_usage;
-                       break;
-               case RADV_MEM_TYPE_GTT_WRITE_COMBINE:
+               } else if (radv_is_mem_type_gtt_wc(device->mem_type_indices[i])) {
                        heap_usage = device->ws->query_value(device->ws,
                                                             RADEON_ALLOCATED_GTT);
 
@@ -1685,9 +1858,6 @@ radv_get_memory_budget_properties(VkPhysicalDevice physicalDevice,
 
                        memoryBudget->heapBudget[heap_index] = heap_budget;
                        memoryBudget->heapUsage[heap_index] = heap_usage;
-                       break;
-               default:
-                       break;
                }
        }
 
@@ -1729,7 +1899,7 @@ VkResult radv_GetMemoryHostPointerPropertiesEXT(
                const struct radv_physical_device *physical_device = device->physical_device;
                uint32_t memoryTypeBits = 0;
                for (int i = 0; i < physical_device->memory_properties.memoryTypeCount; i++) {
-                       if (physical_device->mem_type_indices[i] == RADV_MEM_TYPE_GTT_CACHED) {
+                       if (radv_is_mem_type_gtt_cached(physical_device->mem_type_indices[i])) {
                                memoryTypeBits = (1 << i);
                                break;
                        }
@@ -1781,12 +1951,17 @@ radv_queue_init(struct radv_device *device, struct radv_queue *queue,
        if (!queue->hw_ctx)
                return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 
+       list_inithead(&queue->pending_submissions);
+       pthread_mutex_init(&queue->pending_mutex, NULL);
+
        return VK_SUCCESS;
 }
 
 static void
 radv_queue_finish(struct radv_queue *queue)
 {
+       pthread_mutex_destroy(&queue->pending_mutex);
+
        if (queue->hw_ctx)
                queue->device->ws->ctx_destroy(queue->hw_ctx);
 
@@ -1919,6 +2094,553 @@ radv_get_int_debug_option(const char *name, int default_value)
        return result;
 }
 
+static int install_seccomp_filter(void)
+{
+       struct sock_filter filter[] = {
+               /* Check arch is 64bit x86 */
+               BPF_STMT(BPF_LD + BPF_W + BPF_ABS, (offsetof(struct seccomp_data, arch))),
+               BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, AUDIT_ARCH_X86_64, 0, 12),
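+               /* The last two BPF_JUMP operands are the number of following
+                * instructions to skip when the test is true/false: a failed
+                * arch check skips ahead to the SECCOMP_RET_ERRNO statement,
+                * and each syscall match skips to the final SECCOMP_RET_ALLOW.
+                */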
+
+               /* Allow select, used by radv_sc_read() to wait for input with
+                * an optional timeout */
+               #if defined __NR__newselect
+               BPF_STMT(BPF_LD + BPF_W + BPF_ABS, (offsetof(struct seccomp_data, nr))),
+               BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, __NR__newselect, 11, 0),
+               #elif defined __NR_select
+               BPF_STMT(BPF_LD + BPF_W + BPF_ABS, (offsetof(struct seccomp_data, nr))),
+               BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, __NR_select, 11, 0),
+               #else
+               BPF_STMT(BPF_LD + BPF_W + BPF_ABS, (offsetof(struct seccomp_data, nr))),
+               BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, __NR_pselect6, 11, 0),
+               #endif
+
+               /* Allow system exit calls for the forked process */
+               BPF_STMT(BPF_LD + BPF_W + BPF_ABS, (offsetof(struct seccomp_data, nr))),
+               BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, __NR_exit_group, 9, 0),
+
+               /* Allow system read calls */
+               BPF_STMT(BPF_LD + BPF_W + BPF_ABS, (offsetof(struct seccomp_data, nr))),
+               BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, __NR_read, 7, 0),
+
+               /* Allow system write calls */
+               BPF_STMT(BPF_LD + BPF_W + BPF_ABS, (offsetof(struct seccomp_data, nr))),
+               BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, __NR_write, 5, 0),
+
+               /* Allow system brk calls (we need this for malloc) */
+               BPF_STMT(BPF_LD + BPF_W + BPF_ABS, (offsetof(struct seccomp_data, nr))),
+               BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, __NR_brk, 3, 0),
+
+               /* Futex is required for mutex locks */
+               BPF_STMT(BPF_LD + BPF_W + BPF_ABS, (offsetof(struct seccomp_data, nr))),
+               BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, __NR_futex, 1, 0),
+
+               /* Return error if we hit a system call not on the whitelist */
+               BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ERRNO | (EPERM & SECCOMP_RET_DATA)),
+
+               /* Allow whitelisted system calls */
+               BPF_STMT(BPF_RET + BPF_K, SECCOMP_RET_ALLOW),
+       };
+
+       struct sock_fprog prog = {
+               .len = (unsigned short)(sizeof(filter) / sizeof(filter[0])),
+               .filter = filter,
+       };
+
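+       /* PR_SET_NO_NEW_PRIVS allows an unprivileged process to install a
+        * seccomp filter without CAP_SYS_ADMIN.
+        */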
+       if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
+               return -1;
+
+       if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog))
+               return -1;
+
+       return 0;
+}
+
+/* Helper function with timeout support for reading from the pipe between
+ * processes used for secure compile.
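+ * Loops until "size" bytes have been read, handling short reads, and returns
+ * false on a read error or (when "timeout" is set) a 5 second select timeout,
+ * e.g. radv_sc_read(fd, &sc_type, sizeof(sc_type), true).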
+ */
+bool radv_sc_read(int fd, void *buf, size_t size, bool timeout)
+{
+       fd_set fds;
+       struct timeval tv;
+
+       while (true) {
+               /* select() can modify both the fd set and tv, so we must
+                * reset them on each iteration of the loop.
+                */
+               FD_ZERO(&fds);
+               FD_SET(fd, &fds);
+
+               tv.tv_sec = 5;
+               tv.tv_usec = 0;
+
+               int rval = select(fd + 1, &fds, NULL, NULL, timeout ? &tv : NULL);
+
+               if (rval == -1) {
+                       /* select error */
+                       return false;
+               } else if (rval) {
+                       ssize_t bytes_read = read(fd, buf, size);
+                       if (bytes_read < 0)
+                               return false;
+
+                       buf = (char *)buf + bytes_read;
+                       size -= bytes_read;
+                       if (size == 0)
+                               return true;
+               } else {
+                       /* select timeout */
+                       return false;
+               }
+       }
+}
+
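+/* Walk /proc/self/fd and close every descriptor inherited from the parent
+ * except those listed in keep_fds, so the sandboxed process starts with a
+ * minimal set of open files.
+ */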
+static bool radv_close_all_fds(const int *keep_fds, int keep_fd_count)
+{
+       DIR *d;
+       struct dirent *dir;
+       d = opendir("/proc/self/fd");
+       if (!d)
+               return false;
+       int dir_fd = dirfd(d);
+
+       while ((dir = readdir(d)) != NULL) {
+               if (dir->d_name[0] == '.')
+                       continue;
+
+               int fd = atoi(dir->d_name);
+               if (fd == dir_fd)
+                       continue;
+
+               bool keep = false;
+               for (int i = 0; !keep && i < keep_fd_count; ++i)
+                       if (keep_fds[i] == fd)
+                               keep = true;
+
+               if (keep)
+                       continue;
+
+               close(fd);
+       }
+       closedir(d);
+       return true;
+}
+
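+/* Open (and, when make_fifo is set, create) the pair of named pipes in /tmp,
+ * keyed by the per-device uid and the process index, that connect the parent
+ * with a re-forked secure compile process.
+ */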
+static bool secure_compile_open_fifo_fds(struct radv_secure_compile_state *sc,
+                                        int *fd_server, int *fd_client,
+                                        unsigned process, bool make_fifo)
+{
+       bool result = false;
+       char *fifo_server_path = NULL;
+       char *fifo_client_path = NULL;
+
+       if (asprintf(&fifo_server_path, "/tmp/radv_server_%s_%u", sc->uid, process) == -1)
+               goto open_fifo_exit;
+
+       if (asprintf(&fifo_client_path, "/tmp/radv_client_%s_%u", sc->uid, process) == -1)
+               goto open_fifo_exit;
+
+       if (make_fifo) {
+               int file1 = mkfifo(fifo_server_path, 0666);
+               if (file1 < 0)
+                       goto open_fifo_exit;
+
+               int file2 = mkfifo(fifo_client_path, 0666);
+               if (file2 < 0)
+                       goto open_fifo_exit;
+       }
+
+       *fd_server = open(fifo_server_path, O_RDWR);
+       if (*fd_server < 0)
+               goto open_fifo_exit;
+
+       *fd_client = open(fifo_client_path, O_RDWR);
+       if (*fd_client < 0) {
+               close(*fd_server);
+               goto open_fifo_exit;
+       }
+
+       result = true;
+
+open_fifo_exit:
+       free(fifo_server_path);
+       free(fifo_client_path);
+
+       return result;
+}
+
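+/* Main loop of a forked secure compile process: open the FIFOs, close all
+ * other fds, install the seccomp filter, then repeatedly read pipeline state
+ * from the input pipe, compile it and signal completion on the output pipe.
+ */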
+static void run_secure_compile_device(struct radv_device *device, unsigned process,
+                                     int fd_idle_device_output)
+{
+       int fd_secure_input;
+       int fd_secure_output;
+       bool fifo_result = secure_compile_open_fifo_fds(device->sc_state,
+                                                       &fd_secure_input,
+                                                       &fd_secure_output,
+                                                       process, false);
+
+       enum radv_secure_compile_type sc_type;
+
+       const int needed_fds[] = {
+               fd_secure_input,
+               fd_secure_output,
+               fd_idle_device_output,
+       };
+
+       if (!fifo_result || !radv_close_all_fds(needed_fds, ARRAY_SIZE(needed_fds)) ||
+           install_seccomp_filter() == -1) {
+               sc_type = RADV_SC_TYPE_INIT_FAILURE;
+       } else {
+               sc_type = RADV_SC_TYPE_INIT_SUCCESS;
+               device->sc_state->secure_compile_processes[process].fd_secure_input = fd_secure_input;
+               device->sc_state->secure_compile_processes[process].fd_secure_output = fd_secure_output;
+       }
+
+       write(fd_idle_device_output, &sc_type, sizeof(sc_type));
+
+       if (sc_type == RADV_SC_TYPE_INIT_FAILURE)
+               goto secure_compile_exit;
+
+       while (true) {
+               radv_sc_read(fd_secure_input, &sc_type, sizeof(sc_type), false);
+
+               if (sc_type == RADV_SC_TYPE_COMPILE_PIPELINE) {
+                       struct radv_pipeline *pipeline;
+                       bool sc_read = true;
+
+                       pipeline = vk_zalloc2(&device->alloc, NULL, sizeof(*pipeline), 8,
+                                             VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+                       if (!pipeline)
+                               goto secure_compile_exit;
+
+                       pipeline->device = device;
+
+                       /* Read pipeline layout */
+                       struct radv_pipeline_layout layout;
+                       sc_read = radv_sc_read(fd_secure_input, &layout, sizeof(struct radv_pipeline_layout), true);
+                       sc_read &= radv_sc_read(fd_secure_input, &layout.num_sets, sizeof(uint32_t), true);
+                       if (!sc_read)
+                               goto secure_compile_exit;
+
+                       for (uint32_t set = 0; set < layout.num_sets; set++) {
+                               uint32_t layout_size;
+                               sc_read &= radv_sc_read(fd_secure_input, &layout_size, sizeof(uint32_t), true);
+                               if (!sc_read)
+                                       goto secure_compile_exit;
+
+                               layout.set[set].layout = malloc(layout_size);
+                               layout.set[set].layout->layout_size = layout_size;
+                               sc_read &= radv_sc_read(fd_secure_input, layout.set[set].layout,
+                                                       layout.set[set].layout->layout_size, true);
+                       }
+
+                       pipeline->layout = &layout;
+
+                       /* Read pipeline key */
+                       struct radv_pipeline_key key;
+                       sc_read &= radv_sc_read(fd_secure_input, &key, sizeof(struct radv_pipeline_key), true);
+
+                       /* Read pipeline create flags */
+                       VkPipelineCreateFlags flags;
+                       sc_read &= radv_sc_read(fd_secure_input, &flags, sizeof(VkPipelineCreateFlags), true);
+
+                       /* Read stage and shader information */
+                       uint32_t num_stages;
+                       const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
+                       sc_read &= radv_sc_read(fd_secure_input, &num_stages, sizeof(uint32_t), true);
+                       if (!sc_read)
+                               goto secure_compile_exit;
+
+                       for (uint32_t i = 0; i < num_stages; i++) {
+                               /* Read stage */
+                               gl_shader_stage stage;
+                               sc_read &= radv_sc_read(fd_secure_input, &stage, sizeof(gl_shader_stage), true);
+
+                               VkPipelineShaderStageCreateInfo *pStage = calloc(1, sizeof(VkPipelineShaderStageCreateInfo));
+
+                               /* Read entry point name */
+                               size_t name_size;
+                               sc_read &= radv_sc_read(fd_secure_input, &name_size, sizeof(size_t), true);
+                               if (!sc_read)
+                                       goto secure_compile_exit;
+
+                               char *ep_name = malloc(name_size);
+                               sc_read &= radv_sc_read(fd_secure_input, ep_name, name_size, true);
+                               pStage->pName = ep_name;
+
+                               /* Read shader module */
+                               size_t module_size;
+                               sc_read &= radv_sc_read(fd_secure_input, &module_size, sizeof(size_t), true);
+                               if (!sc_read)
+                                       goto secure_compile_exit;
+
+                               struct radv_shader_module *module = malloc(module_size);
+                               sc_read &= radv_sc_read(fd_secure_input, module, module_size, true);
+                               pStage->module = radv_shader_module_to_handle(module);
+
+                               /* Read specialization info */
+                               bool has_spec_info;
+                               sc_read &= radv_sc_read(fd_secure_input, &has_spec_info, sizeof(bool), true);
+                               if (!sc_read)
+                                       goto secure_compile_exit;
+
+                               if (has_spec_info) {
+                                       VkSpecializationInfo *specInfo = malloc(sizeof(VkSpecializationInfo));
+                                       pStage->pSpecializationInfo = specInfo;
+
+                                       sc_read &= radv_sc_read(fd_secure_input, &specInfo->dataSize, sizeof(size_t), true);
+                                       if (!sc_read)
+                                               goto secure_compile_exit;
+
+                                       void *si_data = malloc(specInfo->dataSize);
+                                       sc_read &= radv_sc_read(fd_secure_input, si_data, specInfo->dataSize, true);
+                                       specInfo->pData = si_data;
+
+                                       sc_read &= radv_sc_read(fd_secure_input, &specInfo->mapEntryCount, sizeof(uint32_t), true);
+                                       if (!sc_read)
+                                               goto secure_compile_exit;
+
+                                       VkSpecializationMapEntry *mapEntries = malloc(sizeof(VkSpecializationMapEntry) * specInfo->mapEntryCount);
+                                       for (uint32_t j = 0; j < specInfo->mapEntryCount; j++) {
+                                               sc_read &= radv_sc_read(fd_secure_input, &mapEntries[j], sizeof(VkSpecializationMapEntry), true);
+                                               if (!sc_read)
+                                                       goto secure_compile_exit;
+                                       }
+
+                                       specInfo->pMapEntries = mapEntries;
+                               }
+
+                               pStages[stage] = pStage;
+                       }
+
+                       /* Compile the shaders */
+                       VkPipelineCreationFeedbackEXT *stage_feedbacks[MESA_SHADER_STAGES] = { 0 };
+                       radv_create_shaders(pipeline, device, NULL, &key, pStages, flags, NULL, stage_feedbacks);
+
+                       /* free memory allocated above */
+                       for (uint32_t set = 0; set < layout.num_sets; set++)
+                               free(layout.set[set].layout);
+
+                       for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++) {
+                               if (!pStages[i])
+                                       continue;
+
+                               free((void *) pStages[i]->pName);
+                               free(radv_shader_module_from_handle(pStages[i]->module));
+                               if (pStages[i]->pSpecializationInfo) {
+                                       free((void *) pStages[i]->pSpecializationInfo->pData);
+                                       free((void *) pStages[i]->pSpecializationInfo->pMapEntries);
+                                       free((void *) pStages[i]->pSpecializationInfo);
+                               }
+                               free((void *) pStages[i]);
+                       }
+
+                       vk_free(&device->alloc, pipeline);
+
+                       sc_type = RADV_SC_TYPE_COMPILE_PIPELINE_FINISHED;
+                       write(fd_secure_output, &sc_type, sizeof(sc_type));
+
+               } else if (sc_type == RADV_SC_TYPE_DESTROY_DEVICE) {
+                       goto secure_compile_exit;
+               }
+       }
+
+secure_compile_exit:
+       close(fd_secure_input);
+       close(fd_secure_output);
+       close(fd_idle_device_output);
+       _exit(0);
+}
+
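+/* Fork a fresh compile process off the idle device, relay the child's
+ * sandbox init result (fd cleanup + seccomp filter) through the output pipe
+ * and reap the child once it exits.
+ */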
+static enum radv_secure_compile_type fork_secure_compile_device(struct radv_device *device, unsigned process)
+{
+       int fd_secure_input[2];
+       int fd_secure_output[2];
+
+       /* create pipe descriptors (used to communicate between processes) */
+       if (pipe(fd_secure_input) == -1 || pipe(fd_secure_output) == -1)
+               return RADV_SC_TYPE_INIT_FAILURE;
+
+       int sc_pid;
+       if ((sc_pid = fork()) == 0) {
+               device->sc_state->secure_compile_thread_counter = process;
+               run_secure_compile_device(device, process, fd_secure_output[1]);
+       } else {
+               if (sc_pid == -1)
+                       return RADV_SC_TYPE_INIT_FAILURE;
+
+               /* Read the init result returned from the secure process */
+               enum radv_secure_compile_type sc_type;
+               bool sc_read = radv_sc_read(fd_secure_output[0], &sc_type, sizeof(sc_type), true);
+
+               if (!sc_read || sc_type == RADV_SC_TYPE_INIT_FAILURE) {
+                       close(fd_secure_input[0]);
+                       close(fd_secure_input[1]);
+                       close(fd_secure_output[1]);
+                       close(fd_secure_output[0]);
+                       int status;
+                       waitpid(sc_pid, &status, 0);
+
+                       return RADV_SC_TYPE_INIT_FAILURE;
+               } else {
+                       assert(sc_type == RADV_SC_TYPE_INIT_SUCCESS);
+                       write(device->sc_state->secure_compile_processes[process].fd_secure_output, &sc_type, sizeof(sc_type));
+
+                       close(fd_secure_input[0]);
+                       close(fd_secure_input[1]);
+                       close(fd_secure_output[1]);
+                       close(fd_secure_output[0]);
+
+                       int status;
+                       waitpid(sc_pid, &status, 0);
+               }
+       }
+
+       return RADV_SC_TYPE_INIT_SUCCESS;
+}
+
+/* Run a bare-bones fork of a device that was forked right after its creation.
+ * This device will have low overhead when it is forked again before each
+ * pipeline compilation. This device sits idle and its only job is to fork
+ * itself.
+ */
+static void run_secure_compile_idle_device(struct radv_device *device, unsigned process,
+                                           int fd_secure_input, int fd_secure_output)
+{
+       enum radv_secure_compile_type sc_type = RADV_SC_TYPE_INIT_SUCCESS;
+       device->sc_state->secure_compile_processes[process].fd_secure_input = fd_secure_input;
+       device->sc_state->secure_compile_processes[process].fd_secure_output = fd_secure_output;
+
+       write(fd_secure_output, &sc_type, sizeof(sc_type));
+
+       while (true) {
+               radv_sc_read(fd_secure_input, &sc_type, sizeof(sc_type), false);
+
+               if (sc_type == RADV_SC_TYPE_FORK_DEVICE) {
+                       sc_type = fork_secure_compile_device(device, process);
+
+                       if (sc_type == RADV_SC_TYPE_INIT_FAILURE)
+                               goto secure_compile_exit;
+
+               } else if (sc_type == RADV_SC_TYPE_DESTROY_DEVICE) {
+                       goto secure_compile_exit;
+               }
+       }
+
+secure_compile_exit:
+       close(fd_secure_input);
+       close(fd_secure_output);
+       _exit(0);
+}
+
+static void destroy_secure_compile_device(struct radv_device *device, unsigned process)
+{
+       int fd_secure_input = device->sc_state->secure_compile_processes[process].fd_secure_input;
+
+       enum radv_secure_compile_type sc_type = RADV_SC_TYPE_DESTROY_DEVICE;
+       write(fd_secure_input, &sc_type, sizeof(sc_type));
+
+       close(device->sc_state->secure_compile_processes[process].fd_secure_input);
+       close(device->sc_state->secure_compile_processes[process].fd_secure_output);
+
+       int status;
+       waitpid(device->sc_state->secure_compile_processes[process].sc_pid, &status, 0);
+}
+
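+/* Set up the secure compile state: allocate per-process bookkeeping, create
+ * an anonymous pipe pair per configured thread and fork one idle device for
+ * each of them.
+ */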
+static VkResult fork_secure_compile_idle_device(struct radv_device *device)
+{
+       device->sc_state = vk_zalloc(&device->alloc,
+                                    sizeof(struct radv_secure_compile_state),
+                                    8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+       if (!device->sc_state)
+               return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+       mtx_init(&device->sc_state->secure_compile_mutex, mtx_plain);
+
+       pid_t upid = getpid();
+       time_t seconds = time(NULL);
+
+       char *uid;
+       if (asprintf(&uid, "%ld_%ld", (long) upid, (long) seconds) == -1)
+               return VK_ERROR_INITIALIZATION_FAILED;
+
+       device->sc_state->uid = uid;
+
+       uint8_t sc_threads = device->instance->num_sc_threads;
+       int fd_secure_input[MAX_SC_PROCS][2];
+       int fd_secure_output[MAX_SC_PROCS][2];
+
+       /* create pipe descriptors (used to communicate between processes) */
+       for (unsigned i = 0; i < sc_threads; i++) {
+               if (pipe(fd_secure_input[i]) == -1 ||
+                   pipe(fd_secure_output[i]) == -1) {
+                       return VK_ERROR_INITIALIZATION_FAILED;
+               }
+       }
+
+       device->sc_state->secure_compile_processes = vk_zalloc(&device->alloc,
+                                                               sizeof(struct radv_secure_compile_process) * sc_threads, 8,
+                                                               VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
+
+       for (unsigned process = 0; process < sc_threads; process++) {
+               if ((device->sc_state->secure_compile_processes[process].sc_pid = fork()) == 0) {
+                       device->sc_state->secure_compile_thread_counter = process;
+                       run_secure_compile_idle_device(device, process, fd_secure_input[process][0], fd_secure_output[process][1]);
+               } else {
+                       if (device->sc_state->secure_compile_processes[process].sc_pid == -1)
+                               return VK_ERROR_INITIALIZATION_FAILED;
+
+                       /* Read the init result returned from the secure process */
+                       enum radv_secure_compile_type sc_type;
+                       bool sc_read = radv_sc_read(fd_secure_output[process][0], &sc_type, sizeof(sc_type), true);
+
+                       bool fifo_result = false;
+                       if (sc_read && sc_type == RADV_SC_TYPE_INIT_SUCCESS) {
+                               fifo_result = secure_compile_open_fifo_fds(device->sc_state,
+                                                                          &device->sc_state->secure_compile_processes[process].fd_server,
+                                                                          &device->sc_state->secure_compile_processes[process].fd_client,
+                                                                          process, true);
+
+                               device->sc_state->secure_compile_processes[process].fd_secure_input = fd_secure_input[process][1];
+                               device->sc_state->secure_compile_processes[process].fd_secure_output = fd_secure_output[process][0];
+                       }
+
+                       if (!sc_read || sc_type == RADV_SC_TYPE_INIT_FAILURE || !fifo_result) {
+                               close(fd_secure_input[process][0]);
+                               close(fd_secure_input[process][1]);
+                               close(fd_secure_output[process][1]);
+                               close(fd_secure_output[process][0]);
+                               int status;
+                               waitpid(device->sc_state->secure_compile_processes[process].sc_pid, &status, 0);
+
+                               /* Destroy any forks that were created successfully */
+                               for (unsigned i = 0; i < process; i++) {
+                                       destroy_secure_compile_device(device, i);
+                               }
+
+                               return VK_ERROR_INITIALIZATION_FAILED;
+                       }
+               }
+       }
+       return VK_SUCCESS;
+}
+
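+/* The timeline semaphore code waits on this condition variable with a
+ * timeout; create it on CLOCK_MONOTONIC so timed waits are immune to wall
+ * clock changes.
+ */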
+static VkResult
+radv_create_pthread_cond(pthread_cond_t *cond)
+{
+       pthread_condattr_t condattr;
+       if (pthread_condattr_init(&condattr)) {
+               return VK_ERROR_INITIALIZATION_FAILED;
+       }
+
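+       /* Timed waits on a condition variable use CLOCK_REALTIME by default;
+        * switch to CLOCK_MONOTONIC so timeouts are unaffected by wall-clock
+        * adjustments. */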
+       if (pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC)) {
+               pthread_condattr_destroy(&condattr);
+               return VK_ERROR_INITIALIZATION_FAILED;
+       }
+       if (pthread_cond_init(cond, &condattr)) {
+               pthread_condattr_destroy(&condattr);
+               return VK_ERROR_INITIALIZATION_FAILED;
+       }
+       pthread_condattr_destroy(&condattr);
+       return VK_SUCCESS;
+}
+
 VkResult radv_CreateDevice(
        VkPhysicalDevice                            physicalDevice,
        const VkDeviceCreateInfo*                   pCreateInfo,
@@ -1979,7 +2701,8 @@ VkResult radv_CreateDevice(
        device->use_global_bo_list =
                (device->instance->perftest_flags & RADV_PERFTEST_BO_LIST) ||
                device->enabled_extensions.EXT_descriptor_indexing ||
-               device->enabled_extensions.EXT_buffer_device_address;
+               device->enabled_extensions.EXT_buffer_device_address ||
+               device->enabled_extensions.KHR_buffer_device_address;
 
        device->robust_buffer_access = pCreateInfo->pEnabledFeatures &&
                                       pCreateInfo->pEnabledFeatures->robustBufferAccess;
@@ -2024,9 +2747,7 @@ VkResult radv_CreateDevice(
        device->dfsm_allowed = device->pbb_allowed &&
                               (device->instance->perftest_flags & RADV_PERFTEST_DFSM);
 
-#ifdef ANDROID
        device->always_use_syncobj = device->physical_device->rad_info.has_syncobj_wait_for_submit;
-#endif
 
        /* The maximum number of scratch waves. Scratch space isn't divided
         * evenly between CUs. The number is only a function of the number of CUs.
@@ -2044,8 +2765,7 @@ VkResult radv_CreateDevice(
        device->scratch_waves = MAX2(32 * physical_device->rad_info.num_good_compute_units,
                                     max_threads_per_block / 64);
 
-       device->dispatch_initiator = S_00B800_COMPUTE_SHADER_EN(1) |
-                                    S_00B800_CS_W32_EN(device->physical_device->cs_wave_size == 32);
+       device->dispatch_initiator = S_00B800_COMPUTE_SHADER_EN(1);
 
        if (device->physical_device->rad_info.chip_class >= GFX7) {
                /* If the KMD allows it (there is a KMD hw register for it),
@@ -2075,8 +2795,12 @@ VkResult radv_CreateDevice(
                radv_dump_enabled_options(device, stderr);
        }
 
-       device->keep_shader_info = keep_shader_info;
+       /* Temporarily disable secure compile while we create meta shaders, etc */
+       uint8_t sc_threads = device->instance->num_sc_threads;
+       if (sc_threads)
+               device->instance->num_sc_threads = 0;
 
+       device->keep_shader_info = keep_shader_info;
        result = radv_device_init_meta(device);
        if (result != VK_SUCCESS)
                goto fail;
@@ -2116,6 +2840,10 @@ VkResult radv_CreateDevice(
 
        device->mem_cache = radv_pipeline_cache_from_handle(pc);
 
+       result = radv_create_pthread_cond(&device->timeline_cond);
+       if (result != VK_SUCCESS)
+               goto fail_mem_cache;
+
        device->force_aniso =
                MIN2(16, radv_get_int_debug_option("RADV_TEX_ANISO", -1));
        if (device->force_aniso >= 0) {
@@ -2123,9 +2851,20 @@ VkResult radv_CreateDevice(
                        1 << util_logbase2(device->force_aniso));
        }
 
+       /* Fork device for secure compile as required */
+       device->instance->num_sc_threads = sc_threads;
+       if (radv_device_use_secure_compile(device->instance)) {
+               result = fork_secure_compile_idle_device(device);
+               if (result != VK_SUCCESS)
+                       goto fail_meta;
+       }
+
        *pDevice = radv_device_to_handle(device);
        return VK_SUCCESS;
 
+fail_mem_cache:
+       radv_DestroyPipelineCache(radv_device_to_handle(device), pc, NULL);
 fail_meta:
        radv_device_finish_meta(device);
 fail:
@@ -2178,7 +2917,19 @@ void radv_DestroyDevice(
 
        radv_destroy_shader_slabs(device);
 
+       pthread_cond_destroy(&device->timeline_cond);
        radv_bo_list_finish(&device->bo_list);
+       if (radv_device_use_secure_compile(device->instance)) {
+               for (unsigned i = 0; i < device->instance->num_sc_threads; i++) {
+                       destroy_secure_compile_device(device, i);
+               }
+       }
+
+       if (device->sc_state) {
+               free(device->sc_state->uid);
+               vk_free(&device->alloc, device->sc_state->secure_compile_processes);
+       }
+       vk_free(&device->alloc, device->sc_state);
        vk_free(&device->alloc, device);
 }
 
@@ -2282,7 +3033,7 @@ fill_geom_tess_rings(struct radv_queue *queue,
 
                if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
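+                       /* OOB_SELECT picks the out-of-bounds checking mode
+                        * for buffer accesses on GFX10; the named values
+                        * replace the previous magic numbers (2 and 3). */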
                        desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
-                                  S_008F0C_OOB_SELECT(2) |
+                                  S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) |
                                   S_008F0C_RESOURCE_LEVEL(1);
                } else {
                        desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
@@ -2303,7 +3054,7 @@ fill_geom_tess_rings(struct radv_queue *queue,
 
                if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
                        desc[7] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
-                                  S_008F0C_OOB_SELECT(2) |
+                                  S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) |
                                   S_008F0C_RESOURCE_LEVEL(1);
                } else {
                        desc[7] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
@@ -2329,7 +3080,7 @@ fill_geom_tess_rings(struct radv_queue *queue,
 
                if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
                        desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
-                                  S_008F0C_OOB_SELECT(2) |
+                                  S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) |
                                   S_008F0C_RESOURCE_LEVEL(1);
                } else {
                        desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
@@ -2352,7 +3103,7 @@ fill_geom_tess_rings(struct radv_queue *queue,
 
                if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
                        desc[7] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
-                                  S_008F0C_OOB_SELECT(2) |
+                                  S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) |
                                   S_008F0C_RESOURCE_LEVEL(1);
                } else {
                        desc[7] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
@@ -2378,7 +3129,7 @@ fill_geom_tess_rings(struct radv_queue *queue,
 
                if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
                        desc[3] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
-                                  S_008F0C_OOB_SELECT(3) |
+                                  S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
                                   S_008F0C_RESOURCE_LEVEL(1);
                } else {
                        desc[3] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
@@ -2395,7 +3146,7 @@ fill_geom_tess_rings(struct radv_queue *queue,
 
                if (queue->device->physical_device->rad_info.chip_class >= GFX10) {
                        desc[7] |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
-                                  S_008F0C_OOB_SELECT(3) |
+                                  S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
                                   S_008F0C_RESOURCE_LEVEL(1);
                } else {
                        desc[7] |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
@@ -2557,22 +3308,45 @@ radv_emit_tess_factor_ring(struct radv_queue *queue, struct radeon_cmdbuf *cs,
 }
 
 static void
-radv_emit_compute_scratch(struct radv_queue *queue, struct radeon_cmdbuf *cs,
-                         struct radeon_winsys_bo *compute_scratch_bo)
+radv_emit_graphics_scratch(struct radv_queue *queue, struct radeon_cmdbuf *cs,
+                           uint32_t size_per_wave, uint32_t waves,
+                           struct radeon_winsys_bo *scratch_bo)
 {
-       uint64_t scratch_va;
+       if (queue->queue_family_index != RADV_QUEUE_GENERAL)
+               return;
 
-       if (!compute_scratch_bo)
+       if (!scratch_bo)
                return;
 
-       scratch_va = radv_buffer_get_va(compute_scratch_bo);
+       radv_cs_add_buffer(queue->device->ws, cs, scratch_bo);
 
-       radv_cs_add_buffer(queue->device->ws, cs, compute_scratch_bo);
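+       /* The WAVESIZE field is in 1024-byte (256-dword) units, hence the
+        * round-up of the per-wave scratch size. */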
+       radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
+                              S_0286E8_WAVES(waves) |
+                              S_0286E8_WAVESIZE(round_up_u32(size_per_wave, 1024)));
+}
+
+static void
+radv_emit_compute_scratch(struct radv_queue *queue, struct radeon_cmdbuf *cs,
+                          uint32_t size_per_wave, uint32_t waves,
+                          struct radeon_winsys_bo *compute_scratch_bo)
+{
+       uint64_t scratch_va;
+
+       if (!compute_scratch_bo)
+               return;
+
+       scratch_va = radv_buffer_get_va(compute_scratch_bo);
+
+       radv_cs_add_buffer(queue->device->ws, cs, compute_scratch_bo);
 
        radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
        radeon_emit(cs, scratch_va);
        radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
                        S_008F04_SWIZZLE_ENABLE(1));
+
+       radeon_set_sh_reg(cs, R_00B860_COMPUTE_TMPRING_SIZE,
+                        S_00B860_WAVES(waves) |
+                        S_00B860_WAVESIZE(round_up_u32(size_per_wave, 1024)));
 }
 
 static void
@@ -2653,8 +3427,10 @@ radv_init_compute_state(struct radeon_cmdbuf *cs, struct radv_queue *queue)
 
 static VkResult
 radv_get_preamble_cs(struct radv_queue *queue,
-                     uint32_t scratch_size,
-                     uint32_t compute_scratch_size,
+                    uint32_t scratch_size_per_wave,
+                    uint32_t scratch_waves,
+                    uint32_t compute_scratch_size_per_wave,
+                    uint32_t compute_scratch_waves,
                     uint32_t esgs_ring_size,
                     uint32_t gsvs_ring_size,
                     bool needs_tess_rings,
@@ -2698,8 +3474,22 @@ radv_get_preamble_cs(struct radv_queue *queue,
        tess_offchip_ring_size = max_offchip_buffers *
                queue->device->tess_offchip_block_dw_size * 4;
 
-       if (scratch_size <= queue->scratch_size &&
-           compute_scratch_size <= queue->compute_scratch_size &&
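+       /* Clamp the wave counts so that size_per_wave * waves cannot
+        * overflow the 32-bit total computed below. */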
+       scratch_size_per_wave = MAX2(scratch_size_per_wave, queue->scratch_size_per_wave);
+       if (scratch_size_per_wave)
+               scratch_waves = MIN2(scratch_waves, UINT32_MAX / scratch_size_per_wave);
+       else
+               scratch_waves = 0;
+
+       compute_scratch_size_per_wave = MAX2(compute_scratch_size_per_wave, queue->compute_scratch_size_per_wave);
+       if (compute_scratch_size_per_wave)
+               compute_scratch_waves = MIN2(compute_scratch_waves, UINT32_MAX / compute_scratch_size_per_wave);
+       else
+               compute_scratch_waves = 0;
+
+       if (scratch_size_per_wave <= queue->scratch_size_per_wave &&
+           scratch_waves <= queue->scratch_waves &&
+           compute_scratch_size_per_wave <= queue->compute_scratch_size_per_wave &&
+           compute_scratch_waves <= queue->compute_scratch_waves &&
            esgs_ring_size <= queue->esgs_ring_size &&
            gsvs_ring_size <= queue->gsvs_ring_size &&
            !add_tess_rings && !add_gds && !add_sample_positions &&
@@ -2707,13 +3497,16 @@ radv_get_preamble_cs(struct radv_queue *queue,
                *initial_full_flush_preamble_cs = queue->initial_full_flush_preamble_cs;
                *initial_preamble_cs = queue->initial_preamble_cs;
                *continue_preamble_cs = queue->continue_preamble_cs;
-               if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size &&
-                   !needs_tess_rings && !needs_gds && !needs_sample_positions)
+               if (!scratch_size_per_wave && !compute_scratch_size_per_wave &&
+                   !esgs_ring_size && !gsvs_ring_size && !needs_tess_rings &&
+                   !needs_gds && !needs_sample_positions)
                        *continue_preamble_cs = NULL;
                return VK_SUCCESS;
        }
 
-       if (scratch_size > queue->scratch_size) {
+       uint32_t scratch_size = scratch_size_per_wave * scratch_waves;
+       uint32_t queue_scratch_size = queue->scratch_size_per_wave * queue->scratch_waves;
+       if (scratch_size > queue_scratch_size) {
                scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
                                                              scratch_size,
                                                              4096,
@@ -2725,7 +3518,9 @@ radv_get_preamble_cs(struct radv_queue *queue,
        } else
                scratch_bo = queue->scratch_bo;
 
-       if (compute_scratch_size > queue->compute_scratch_size) {
+       uint32_t compute_scratch_size = compute_scratch_size_per_wave * compute_scratch_waves;
+       uint32_t compute_queue_scratch_size = queue->compute_scratch_size_per_wave * queue->compute_scratch_waves;
+       if (compute_scratch_size > compute_queue_scratch_size) {
                compute_scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
                                                                      compute_scratch_size,
                                                                      4096,
@@ -2893,7 +3688,10 @@ radv_get_preamble_cs(struct radv_queue *queue,
                radv_emit_tess_factor_ring(queue, cs, hs_offchip_param,
                                           tess_factor_ring_size, tess_rings_bo);
                radv_emit_global_shader_pointers(queue, cs, descriptor_bo);
-               radv_emit_compute_scratch(queue, cs, compute_scratch_bo);
+               radv_emit_compute_scratch(queue, cs, compute_scratch_size_per_wave,
+                                         compute_scratch_waves, compute_scratch_bo);
+               radv_emit_graphics_scratch(queue, cs, scratch_size_per_wave,
+                                          scratch_waves, scratch_bo);
 
                if (gds_bo)
                        radv_cs_add_buffer(queue->device->ws, cs, gds_bo);
@@ -2946,15 +3744,17 @@ radv_get_preamble_cs(struct radv_queue *queue,
                if (queue->scratch_bo)
                        queue->device->ws->buffer_destroy(queue->scratch_bo);
                queue->scratch_bo = scratch_bo;
-               queue->scratch_size = scratch_size;
        }
+       queue->scratch_size_per_wave = scratch_size_per_wave;
+       queue->scratch_waves = scratch_waves;
 
        if (compute_scratch_bo != queue->compute_scratch_bo) {
                if (queue->compute_scratch_bo)
                        queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
                queue->compute_scratch_bo = compute_scratch_bo;
-               queue->compute_scratch_size = compute_scratch_size;
        }
+       queue->compute_scratch_size_per_wave = compute_scratch_size_per_wave;
+       queue->compute_scratch_waves = compute_scratch_waves;
 
        if (esgs_ring_bo != queue->esgs_ring_bo) {
                if (queue->esgs_ring_bo)
@@ -3023,12 +3823,13 @@ fail:
        return vk_error(queue->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
 }
 
-static VkResult radv_alloc_sem_counts(struct radv_instance *instance,
+static VkResult radv_alloc_sem_counts(struct radv_device *device,
                                      struct radv_winsys_sem_counts *counts,
                                      int num_sems,
-                                     const VkSemaphore *sems,
+                                     struct radv_semaphore_part **sems,
+                                     const uint64_t *timeline_values,
                                      VkFence _fence,
-                                     bool reset_temp)
+                                     bool is_signal)
 {
        int syncobj_idx = 0, sem_idx = 0;
 
@@ -3036,12 +3837,19 @@ static VkResult radv_alloc_sem_counts(struct radv_instance *instance,
                return VK_SUCCESS;
 
        for (uint32_t i = 0; i < num_sems; i++) {
-               RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
-
-               if (sem->temp_syncobj || sem->syncobj)
+               switch (sems[i]->kind) {
+               case RADV_SEMAPHORE_SYNCOBJ:
                        counts->syncobj_count++;
-               else
+                       break;
+               case RADV_SEMAPHORE_WINSYS:
                        counts->sem_count++;
+                       break;
+               case RADV_SEMAPHORE_NONE:
+                       break;
+               case RADV_SEMAPHORE_TIMELINE:
+                       counts->syncobj_count++;
+                       break;
+               }
        }
 
        if (_fence != VK_NULL_HANDLE) {
@@ -3053,28 +3861,48 @@ static VkResult radv_alloc_sem_counts(struct radv_instance *instance,
        if (counts->syncobj_count) {
                counts->syncobj = (uint32_t *)malloc(sizeof(uint32_t) * counts->syncobj_count);
                if (!counts->syncobj)
-                       return vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+                       return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
        }
 
        if (counts->sem_count) {
                counts->sem = (struct radeon_winsys_sem **)malloc(sizeof(struct radeon_winsys_sem *) * counts->sem_count);
                if (!counts->sem) {
                        free(counts->syncobj);
-                       return vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+                       return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
                }
        }
 
        for (uint32_t i = 0; i < num_sems; i++) {
-               RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
+               switch (sems[i]->kind) {
+               case RADV_SEMAPHORE_NONE:
+                       unreachable("Empty semaphore");
+                       break;
+               case RADV_SEMAPHORE_SYNCOBJ:
+                       counts->syncobj[syncobj_idx++] = sems[i]->syncobj;
+                       break;
+               case RADV_SEMAPHORE_WINSYS:
+                       counts->sem[sem_idx++] = sems[i]->ws_sem;
+                       break;
+               case RADV_SEMAPHORE_TIMELINE: {
+                       pthread_mutex_lock(&sems[i]->timeline.mutex);
+                       struct radv_timeline_point *point = NULL;
+                       if (is_signal) {
+                               point = radv_timeline_add_point_locked(device, &sems[i]->timeline, timeline_values[i]);
+                       } else {
+                               point = radv_timeline_find_point_at_least_locked(device, &sems[i]->timeline, timeline_values[i]);
+                       }
+
+                       pthread_mutex_unlock(&sems[i]->timeline.mutex);
 
-               if (sem->temp_syncobj) {
-                       counts->syncobj[syncobj_idx++] = sem->temp_syncobj;
+                       if (point) {
+                               counts->syncobj[syncobj_idx++] = point->syncobj;
+                       } else {
+                               /* Explicitly remove the semaphore so we do not
+                                * accidentally find a point for it later,
+                                * post-submit. */
+                               sems[i] = NULL;
+                       }
+                       break;
                }
-               else if (sem->syncobj)
-                       counts->syncobj[syncobj_idx++] = sem->syncobj;
-               else {
-                       assert(sem->sem);
-                       counts->sem[sem_idx++] = sem->sem;
                }
        }
 
@@ -3086,6 +3914,9 @@ static VkResult radv_alloc_sem_counts(struct radv_instance *instance,
                        counts->syncobj[syncobj_idx++] = fence->syncobj;
        }
 
+       assert(syncobj_idx <= counts->syncobj_count);
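+       /* Timeline semaphores that had no point to wait on were dropped
+        * above, so the final syncobj count can be smaller than what was
+        * allocated. */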
+       counts->syncobj_count = syncobj_idx;
+
        return VK_SUCCESS;
 }
 
@@ -3101,34 +3932,31 @@ radv_free_sem_info(struct radv_winsys_sem_info *sem_info)
 
 static void radv_free_temp_syncobjs(struct radv_device *device,
                                    int num_sems,
-                                   const VkSemaphore *sems)
+                                   struct radv_semaphore_part *sems)
 {
        for (uint32_t i = 0; i < num_sems; i++) {
-               RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
-
-               if (sem->temp_syncobj) {
-                       device->ws->destroy_syncobj(device->ws, sem->temp_syncobj);
-                       sem->temp_syncobj = 0;
-               }
+               radv_destroy_semaphore_part(device, sems + i);
        }
 }
 
 static VkResult
-radv_alloc_sem_info(struct radv_instance *instance,
+radv_alloc_sem_info(struct radv_device *device,
                    struct radv_winsys_sem_info *sem_info,
                    int num_wait_sems,
-                   const VkSemaphore *wait_sems,
+                   struct radv_semaphore_part **wait_sems,
+                   const uint64_t *wait_values,
                    int num_signal_sems,
-                   const VkSemaphore *signal_sems,
+                   struct radv_semaphore_part **signal_sems,
+                   const uint64_t *signal_values,
                    VkFence fence)
 {
        VkResult ret;
        memset(sem_info, 0, sizeof(*sem_info));
 
-       ret = radv_alloc_sem_counts(instance, &sem_info->wait, num_wait_sems, wait_sems, VK_NULL_HANDLE, true);
+       ret = radv_alloc_sem_counts(device, &sem_info->wait, num_wait_sems, wait_sems, wait_values, VK_NULL_HANDLE, false);
        if (ret)
                return ret;
-       ret = radv_alloc_sem_counts(instance, &sem_info->signal, num_signal_sems, signal_sems, fence, false);
+       ret = radv_alloc_sem_counts(device, &sem_info->signal, num_signal_sems, signal_sems, signal_values, fence, true);
        if (ret)
                radv_free_sem_info(sem_info);
 
@@ -3138,119 +3966,395 @@ radv_alloc_sem_info(struct radv_instance *instance,
        return ret;
 }
 
-/* Signals fence as soon as all the work currently put on queue is done. */
-static VkResult radv_signal_fence(struct radv_queue *queue,
-                              struct radv_fence *fence)
+static void
+radv_finalize_timelines(struct radv_device *device,
+                        uint32_t num_wait_sems,
+                        struct radv_semaphore_part **wait_sems,
+                        const uint64_t *wait_values,
+                        uint32_t num_signal_sems,
+                        struct radv_semaphore_part **signal_sems,
+                        const uint64_t *signal_values,
+                        struct list_head *processing_list)
+{
+       for (uint32_t i = 0; i < num_wait_sems; ++i) {
+               if (wait_sems[i] && wait_sems[i]->kind == RADV_SEMAPHORE_TIMELINE) {
+                       pthread_mutex_lock(&wait_sems[i]->timeline.mutex);
+                       struct radv_timeline_point *point =
+                               radv_timeline_find_point_at_least_locked(device, &wait_sems[i]->timeline, wait_values[i]);
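+                       /* Drop the references this submission took on the
+                        * point when it was acquired via radv_alloc_sem_counts
+                        * at submit time. */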
+                       point->wait_count -= 2;
+                       pthread_mutex_unlock(&wait_sems[i]->timeline.mutex);
+               }
+       }
+       for (uint32_t i = 0; i < num_signal_sems; ++i) {
+               if (signal_sems[i] && signal_sems[i]->kind == RADV_SEMAPHORE_TIMELINE) {
+                       pthread_mutex_lock(&signal_sems[i]->timeline.mutex);
+                       struct radv_timeline_point *point =
+                               radv_timeline_find_point_at_least_locked(device, &signal_sems[i]->timeline, signal_values[i]);
+                       signal_sems[i]->timeline.highest_submitted =
+                               MAX2(signal_sems[i]->timeline.highest_submitted, point->value);
+                       point->wait_count -= 2;
+                       radv_timeline_trigger_waiters_locked(&signal_sems[i]->timeline, processing_list);
+                       pthread_mutex_unlock(&signal_sems[i]->timeline.mutex);
+               }
+       }
+}
+
+static void
+radv_sparse_buffer_bind_memory(struct radv_device *device,
+                               const VkSparseBufferMemoryBindInfo *bind)
 {
-       int ret;
-       VkResult result;
-       struct radv_winsys_sem_info sem_info;
+       RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
 
-       result = radv_alloc_sem_info(queue->device->instance, &sem_info, 0, NULL, 0, NULL,
-                                    radv_fence_to_handle(fence));
-       if (result != VK_SUCCESS)
-               return result;
+       for (uint32_t i = 0; i < bind->bindCount; ++i) {
+               struct radv_device_memory *mem = NULL;
 
-       ret = queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
-                                          &queue->device->empty_cs[queue->queue_family_index],
-                                          1, NULL, NULL, &sem_info, NULL,
-                                          false, fence->fence);
-       radv_free_sem_info(&sem_info);
+               if (bind->pBinds[i].memory != VK_NULL_HANDLE)
+                       mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
 
-       if (ret)
-               return vk_error(queue->device->instance, VK_ERROR_DEVICE_LOST);
+               device->ws->buffer_virtual_bind(buffer->bo,
+                                               bind->pBinds[i].resourceOffset,
+                                               bind->pBinds[i].size,
+                                               mem ? mem->bo : NULL,
+                                               bind->pBinds[i].memoryOffset);
+       }
+}
+
+static void
+radv_sparse_image_opaque_bind_memory(struct radv_device *device,
+                                     const VkSparseImageOpaqueMemoryBindInfo *bind)
+{
+       RADV_FROM_HANDLE(radv_image, image, bind->image);
+
+       for (uint32_t i = 0; i < bind->bindCount; ++i) {
+               struct radv_device_memory *mem = NULL;
+
+               if (bind->pBinds[i].memory != VK_NULL_HANDLE)
+                       mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
+
+               device->ws->buffer_virtual_bind(image->bo,
+                                               bind->pBinds[i].resourceOffset,
+                                               bind->pBinds[i].size,
+                                               mem ? mem->bo : NULL,
+                                               bind->pBinds[i].memoryOffset);
+       }
+}
+
+static VkResult
+radv_get_preambles(struct radv_queue *queue,
+                   const VkCommandBuffer *cmd_buffers,
+                   uint32_t cmd_buffer_count,
+                   struct radeon_cmdbuf **initial_full_flush_preamble_cs,
+                   struct radeon_cmdbuf **initial_preamble_cs,
+                   struct radeon_cmdbuf **continue_preamble_cs)
+{
+       uint32_t scratch_size_per_wave = 0, waves_wanted = 0;
+       uint32_t compute_scratch_size_per_wave = 0, compute_waves_wanted = 0;
+       uint32_t esgs_ring_size = 0, gsvs_ring_size = 0;
+       bool tess_rings_needed = false;
+       bool gds_needed = false;
+       bool sample_positions_needed = false;
+
+       for (uint32_t j = 0; j < cmd_buffer_count; j++) {
+               RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
+                                cmd_buffers[j]);
+
+               scratch_size_per_wave = MAX2(scratch_size_per_wave, cmd_buffer->scratch_size_per_wave_needed);
+               waves_wanted = MAX2(waves_wanted, cmd_buffer->scratch_waves_wanted);
+               compute_scratch_size_per_wave = MAX2(compute_scratch_size_per_wave,
+                                                    cmd_buffer->compute_scratch_size_per_wave_needed);
+               compute_waves_wanted = MAX2(compute_waves_wanted,
+                                           cmd_buffer->compute_scratch_waves_wanted);
+               esgs_ring_size = MAX2(esgs_ring_size, cmd_buffer->esgs_ring_size_needed);
+               gsvs_ring_size = MAX2(gsvs_ring_size, cmd_buffer->gsvs_ring_size_needed);
+               tess_rings_needed |= cmd_buffer->tess_rings_needed;
+               gds_needed |= cmd_buffer->gds_needed;
+               sample_positions_needed |= cmd_buffer->sample_positions_needed;
+       }
+
+       return radv_get_preamble_cs(queue, scratch_size_per_wave, waves_wanted,
+                                   compute_scratch_size_per_wave, compute_waves_wanted,
+                                   esgs_ring_size, gsvs_ring_size, tess_rings_needed,
+                                   gds_needed, sample_positions_needed,
+                                   initial_full_flush_preamble_cs,
+                                   initial_preamble_cs, continue_preamble_cs);
+}
+
+struct radv_deferred_queue_submission {
+       struct radv_queue *queue;
+       VkCommandBuffer *cmd_buffers;
+       uint32_t cmd_buffer_count;
+
+       /* Sparse bindings that happen on a queue. */
+       VkSparseBufferMemoryBindInfo *buffer_binds;
+       uint32_t buffer_bind_count;
+       VkSparseImageOpaqueMemoryBindInfo *image_opaque_binds;
+       uint32_t image_opaque_bind_count;
+
+       bool flush_caches;
+       VkPipelineStageFlags wait_dst_stage_mask;
+       struct radv_semaphore_part **wait_semaphores;
+       uint32_t wait_semaphore_count;
+       struct radv_semaphore_part **signal_semaphores;
+       uint32_t signal_semaphore_count;
+       VkFence fence;
+
+       uint64_t *wait_values;
+       uint64_t *signal_values;
+
+       struct radv_semaphore_part *temporary_semaphore_parts;
+       uint32_t temporary_semaphore_part_count;
+
+       struct list_head queue_pending_list;
+       uint32_t submission_wait_count;
+       struct radv_timeline_waiter *wait_nodes;
+
+       struct list_head processing_list;
+};
+
+struct radv_queue_submission {
+       const VkCommandBuffer *cmd_buffers;
+       uint32_t cmd_buffer_count;
+
+       /* Sparse bindings that happen on a queue. */
+       const VkSparseBufferMemoryBindInfo *buffer_binds;
+       uint32_t buffer_bind_count;
+       const VkSparseImageOpaqueMemoryBindInfo *image_opaque_binds;
+       uint32_t image_opaque_bind_count;
+
+       bool flush_caches;
+       VkPipelineStageFlags wait_dst_stage_mask;
+       const VkSemaphore *wait_semaphores;
+       uint32_t wait_semaphore_count;
+       const VkSemaphore *signal_semaphores;
+       uint32_t signal_semaphore_count;
+       VkFence fence;
+
+       const uint64_t *wait_values;
+       uint32_t wait_value_count;
+       const uint64_t *signal_values;
+       uint32_t signal_value_count;
+};
+
+static VkResult
+radv_create_deferred_submission(struct radv_queue *queue,
+                                const struct radv_queue_submission *submission,
+                                struct radv_deferred_queue_submission **out)
+{
+       struct radv_deferred_queue_submission *deferred = NULL;
+       size_t size = sizeof(struct radv_deferred_queue_submission);
+
+       uint32_t temporary_count = 0;
+       for (uint32_t i = 0; i < submission->wait_semaphore_count; ++i) {
+               RADV_FROM_HANDLE(radv_semaphore, semaphore, submission->wait_semaphores[i]);
+               if (semaphore->temporary.kind != RADV_SEMAPHORE_NONE)
+                       ++temporary_count;
+       }
+
+       size += submission->cmd_buffer_count * sizeof(VkCommandBuffer);
+       size += submission->buffer_bind_count * sizeof(VkSparseBufferMemoryBindInfo);
+       size += submission->image_opaque_bind_count * sizeof(VkSparseImageOpaqueMemoryBindInfo);
+       size += submission->wait_semaphore_count * sizeof(struct radv_semaphore_part *);
+       size += temporary_count * sizeof(struct radv_semaphore_part);
+       size += submission->signal_semaphore_count * sizeof(struct radv_semaphore_part *);
+       size += submission->wait_value_count * sizeof(uint64_t);
+       size += submission->signal_value_count * sizeof(uint64_t);
+       size += submission->wait_semaphore_count * sizeof(struct radv_timeline_waiter);
+
+       deferred = calloc(1, size);
+       if (!deferred)
+               return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+       deferred->queue = queue;
+
+       deferred->cmd_buffers = (void*)(deferred + 1);
+       deferred->cmd_buffer_count = submission->cmd_buffer_count;
+       memcpy(deferred->cmd_buffers, submission->cmd_buffers,
+              submission->cmd_buffer_count * sizeof(*deferred->cmd_buffers));
+
+       deferred->buffer_binds = (void*)(deferred->cmd_buffers + submission->cmd_buffer_count);
+       deferred->buffer_bind_count = submission->buffer_bind_count;
+       memcpy(deferred->buffer_binds, submission->buffer_binds,
+              submission->buffer_bind_count * sizeof(*deferred->buffer_binds));
+
+       deferred->image_opaque_binds = (void*)(deferred->buffer_binds + submission->buffer_bind_count);
+       deferred->image_opaque_bind_count = submission->image_opaque_bind_count;
+       memcpy(deferred->image_opaque_binds, submission->image_opaque_binds,
+              submission->image_opaque_bind_count * sizeof(*deferred->image_opaque_binds));
+
+       deferred->flush_caches = submission->flush_caches;
+       deferred->wait_dst_stage_mask = submission->wait_dst_stage_mask;
+
+       deferred->wait_semaphores = (void*)(deferred->image_opaque_binds + deferred->image_opaque_bind_count);
+       deferred->wait_semaphore_count = submission->wait_semaphore_count;
+
+       deferred->signal_semaphores = (void*)(deferred->wait_semaphores + deferred->wait_semaphore_count);
+       deferred->signal_semaphore_count = submission->signal_semaphore_count;
+
+       deferred->fence = submission->fence;
+
+       deferred->temporary_semaphore_parts = (void*)(deferred->signal_semaphores + deferred->signal_semaphore_count);
+       deferred->temporary_semaphore_part_count = temporary_count;
+
+       uint32_t temporary_idx = 0;
+       for (uint32_t i = 0; i < submission->wait_semaphore_count; ++i) {
+               RADV_FROM_HANDLE(radv_semaphore, semaphore, submission->wait_semaphores[i]);
+               if (semaphore->temporary.kind != RADV_SEMAPHORE_NONE) {
+                       deferred->wait_semaphores[i] = &deferred->temporary_semaphore_parts[temporary_idx];
+                       deferred->temporary_semaphore_parts[temporary_idx] = semaphore->temporary;
+                       semaphore->temporary.kind = RADV_SEMAPHORE_NONE;
+                       ++temporary_idx;
+               } else
+                       deferred->wait_semaphores[i] = &semaphore->permanent;
+       }
+
+       for (uint32_t i = 0; i < submission->signal_semaphore_count; ++i) {
+               RADV_FROM_HANDLE(radv_semaphore, semaphore, submission->signal_semaphores[i]);
+               if (semaphore->temporary.kind != RADV_SEMAPHORE_NONE) {
+                       deferred->signal_semaphores[i] = &semaphore->temporary;
+               } else {
+                       deferred->signal_semaphores[i] = &semaphore->permanent;
+               }
+       }
+
+       deferred->wait_values = (void*)(deferred->temporary_semaphore_parts + temporary_count);
+       memcpy(deferred->wait_values, submission->wait_values, submission->wait_value_count * sizeof(uint64_t));
+       deferred->signal_values = deferred->wait_values + submission->wait_value_count;
+       memcpy(deferred->signal_values, submission->signal_values, submission->signal_value_count * sizeof(uint64_t));
 
+       deferred->wait_nodes = (void*)(deferred->signal_values + submission->signal_value_count);
+       /* This is the worst case. radv_queue_enqueue_submission will adjust
+        * it further, but starting high ensures the submission is not
+        * accidentally triggered early while wait timelines are being added. */
+       deferred->submission_wait_count = 1 + submission->wait_semaphore_count;
+
+       *out = deferred;
        return VK_SUCCESS;
 }
 
-VkResult radv_QueueSubmit(
-       VkQueue                                     _queue,
-       uint32_t                                    submitCount,
-       const VkSubmitInfo*                         pSubmits,
-       VkFence                                     _fence)
+static void
+radv_queue_enqueue_submission(struct radv_deferred_queue_submission *submission,
+                              struct list_head *processing_list)
+{
+       uint32_t wait_cnt = 0;
+       struct radv_timeline_waiter *waiter = submission->wait_nodes;
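+       /* Register a waiter on every timeline that has not yet reached the
+        * requested value; each waiter keeps one reference on
+        * submission_wait_count until the timeline catches up. */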
+       for (uint32_t i = 0; i < submission->wait_semaphore_count; ++i) {
+               if (submission->wait_semaphores[i]->kind == RADV_SEMAPHORE_TIMELINE) {
+                       pthread_mutex_lock(&submission->wait_semaphores[i]->timeline.mutex);
+                       if (submission->wait_semaphores[i]->timeline.highest_submitted < submission->wait_values[i]) {
+                               ++wait_cnt;
+                               waiter->value = submission->wait_values[i];
+                               waiter->submission = submission;
+                               list_addtail(&waiter->list, &submission->wait_semaphores[i]->timeline.waiters);
+                               ++waiter;
+                       }
+                       pthread_mutex_unlock(&submission->wait_semaphores[i]->timeline.mutex);
+               }
+       }
+
+       pthread_mutex_lock(&submission->queue->pending_mutex);
+
+       bool is_first = list_is_empty(&submission->queue->pending_submissions);
+       list_addtail(&submission->queue_pending_list, &submission->queue->pending_submissions);
+
+       pthread_mutex_unlock(&submission->queue->pending_mutex);
+
+       /* If there is already a submission in the queue, it will decrement
+        * this counter by 1 when it completes; if the queue was empty, there
+        * is no previous submission, so we drop that reference ourselves. */
+       uint32_t decrement = submission->wait_semaphore_count - wait_cnt + (is_first ? 1 : 0);
+       if (__atomic_sub_fetch(&submission->submission_wait_count, decrement, __ATOMIC_ACQ_REL) == 0) {
+               list_addtail(&submission->processing_list, processing_list);
+       }
+}
+
+static void
+radv_queue_submission_update_queue(struct radv_deferred_queue_submission *submission,
+                                   struct list_head *processing_list)
 {
-       RADV_FROM_HANDLE(radv_queue, queue, _queue);
-       RADV_FROM_HANDLE(radv_fence, fence, _fence);
-       struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
+       pthread_mutex_lock(&submission->queue->pending_mutex);
+       list_del(&submission->queue_pending_list);
+
+       /* trigger the next submission in the queue. */
+       if (!list_is_empty(&submission->queue->pending_submissions)) {
+               struct radv_deferred_queue_submission *next_submission =
+                       list_first_entry(&submission->queue->pending_submissions,
+                                        struct radv_deferred_queue_submission,
+                                        queue_pending_list);
+               if (p_atomic_dec_zero(&next_submission->submission_wait_count)) {
+                       list_addtail(&next_submission->processing_list, processing_list);
+               }
+       }
+       pthread_mutex_unlock(&submission->queue->pending_mutex);
+
+       pthread_cond_broadcast(&submission->queue->device->timeline_cond);
+}
+
+static VkResult
+radv_queue_submit_deferred(struct radv_deferred_queue_submission *submission,
+                           struct list_head *processing_list)
+{
+       RADV_FROM_HANDLE(radv_fence, fence, submission->fence);
+       struct radv_queue *queue = submission->queue;
        struct radeon_winsys_ctx *ctx = queue->hw_ctx;
-       int ret;
        uint32_t max_cs_submission = queue->device->trace_bo ? 1 : RADV_MAX_IBS_PER_SUBMIT;
-       uint32_t scratch_size = 0;
-       uint32_t compute_scratch_size = 0;
-       uint32_t esgs_ring_size = 0, gsvs_ring_size = 0;
-       struct radeon_cmdbuf *initial_preamble_cs = NULL, *initial_flush_preamble_cs = NULL, *continue_preamble_cs = NULL;
+       struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
+       bool do_flush = submission->flush_caches || submission->wait_dst_stage_mask;
+       bool can_patch = true;
+       uint32_t advance;
+       struct radv_winsys_sem_info sem_info;
        VkResult result;
-       bool fence_emitted = false;
-       bool tess_rings_needed = false;
-       bool gds_needed = false;
-       bool sample_positions_needed = false;
+       int ret;
+       struct radeon_cmdbuf *initial_preamble_cs = NULL;
+       struct radeon_cmdbuf *initial_flush_preamble_cs = NULL;
+       struct radeon_cmdbuf *continue_preamble_cs = NULL;
+
+       result = radv_get_preambles(queue, submission->cmd_buffers,
+                                   submission->cmd_buffer_count,
+                                   &initial_preamble_cs,
+                                   &initial_flush_preamble_cs,
+                                   &continue_preamble_cs);
+       if (result != VK_SUCCESS)
+               goto fail;
 
-       /* Do this first so failing to allocate scratch buffers can't result in
-        * partially executed submissions. */
-       for (uint32_t i = 0; i < submitCount; i++) {
-               for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
-                       RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
-                                        pSubmits[i].pCommandBuffers[j]);
-
-                       scratch_size = MAX2(scratch_size, cmd_buffer->scratch_size_needed);
-                       compute_scratch_size = MAX2(compute_scratch_size,
-                                                   cmd_buffer->compute_scratch_size_needed);
-                       esgs_ring_size = MAX2(esgs_ring_size, cmd_buffer->esgs_ring_size_needed);
-                       gsvs_ring_size = MAX2(gsvs_ring_size, cmd_buffer->gsvs_ring_size_needed);
-                       tess_rings_needed |= cmd_buffer->tess_rings_needed;
-                       gds_needed |= cmd_buffer->gds_needed;
-                       sample_positions_needed |= cmd_buffer->sample_positions_needed;
-               }
-       }
-
-       result = radv_get_preamble_cs(queue, scratch_size, compute_scratch_size,
-                                     esgs_ring_size, gsvs_ring_size, tess_rings_needed,
-                                     gds_needed, sample_positions_needed,
-                                     &initial_flush_preamble_cs,
-                                     &initial_preamble_cs, &continue_preamble_cs);
+       result = radv_alloc_sem_info(queue->device,
+                                    &sem_info,
+                                    submission->wait_semaphore_count,
+                                    submission->wait_semaphores,
+                                    submission->wait_values,
+                                    submission->signal_semaphore_count,
+                                    submission->signal_semaphores,
+                                    submission->signal_values,
+                                    submission->fence);
        if (result != VK_SUCCESS)
-               return result;
+               goto fail;
 
-       for (uint32_t i = 0; i < submitCount; i++) {
-               struct radeon_cmdbuf **cs_array;
-               bool do_flush = !i || pSubmits[i].pWaitDstStageMask;
-               bool can_patch = true;
-               uint32_t advance;
-               struct radv_winsys_sem_info sem_info;
-
-               result = radv_alloc_sem_info(queue->device->instance,
-                                            &sem_info,
-                                            pSubmits[i].waitSemaphoreCount,
-                                            pSubmits[i].pWaitSemaphores,
-                                            pSubmits[i].signalSemaphoreCount,
-                                            pSubmits[i].pSignalSemaphores,
-                                            _fence);
-               if (result != VK_SUCCESS)
-                       return result;
+       for (uint32_t i = 0; i < submission->buffer_bind_count; ++i) {
+               radv_sparse_buffer_bind_memory(queue->device,
+                                              submission->buffer_binds + i);
+       }
 
-               if (!pSubmits[i].commandBufferCount) {
-                       if (pSubmits[i].waitSemaphoreCount || pSubmits[i].signalSemaphoreCount) {
-                               ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
-                                                                  &queue->device->empty_cs[queue->queue_family_index],
-                                                                  1, NULL, NULL,
-                                                                  &sem_info, NULL,
-                                                                  false, base_fence);
-                               if (ret) {
-                                       radv_loge("failed to submit CS %d\n", i);
-                                       abort();
-                               }
-                               fence_emitted = true;
-                       }
-                       radv_free_sem_info(&sem_info);
-                       continue;
+       for (uint32_t i = 0; i < submission->image_opaque_bind_count; ++i) {
+               radv_sparse_image_opaque_bind_memory(queue->device,
+                                                    submission->image_opaque_binds + i);
+       }
+
+       if (!submission->cmd_buffer_count) {
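+               /* Even without command buffers we submit the empty CS so the
+                * kernel still processes the waits, signals and the fence. */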
+               ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
+                                                  &queue->device->empty_cs[queue->queue_family_index],
+                                                  1, NULL, NULL,
+                                                  &sem_info, NULL,
+                                                  false, base_fence);
+               if (ret) {
+                       radv_loge("failed to submit CS\n");
+                       abort();
                }
 
-               cs_array = malloc(sizeof(struct radeon_cmdbuf *) *
-                                               (pSubmits[i].commandBufferCount));
+               goto success;
+       } else {
+               struct radeon_cmdbuf **cs_array = malloc(sizeof(struct radeon_cmdbuf *) *
+                                                        (submission->cmd_buffer_count));
 
-               for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
-                       RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
-                                        pSubmits[i].pCommandBuffers[j]);
+               for (uint32_t j = 0; j < submission->cmd_buffer_count; j++) {
+                       RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, submission->cmd_buffers[j]);
                        assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
 
                        cs_array[j] = cmd_buffer->cs;
@@ -3260,18 +4364,18 @@ VkResult radv_QueueSubmit(
                        cmd_buffer->status = RADV_CMD_BUFFER_STATUS_PENDING;
                }
 
-               for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j += advance) {
+               for (uint32_t j = 0; j < submission->cmd_buffer_count; j += advance) {
                        struct radeon_cmdbuf *initial_preamble = (do_flush && !j) ? initial_flush_preamble_cs : initial_preamble_cs;
                        const struct radv_winsys_bo_list *bo_list = NULL;
 
                        advance = MIN2(max_cs_submission,
-                                      pSubmits[i].commandBufferCount - j);
+                                      submission->cmd_buffer_count - j);
 
                        if (queue->device->trace_bo)
                                *queue->device->trace_id_ptr = 0;
 
                        sem_info.cs_emit_wait = j == 0;
-                       sem_info.cs_emit_signal = j + advance == pSubmits[i].commandBufferCount;
+                       sem_info.cs_emit_signal = j + advance == submission->cmd_buffer_count;
 
                        if (unlikely(queue->device->use_global_bo_list)) {
                                pthread_mutex_lock(&queue->device->bo_list.mutex);
@@ -3279,36 +4383,155 @@ VkResult radv_QueueSubmit(
                        }
 
                        ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, cs_array + j,
-                                                       advance, initial_preamble, continue_preamble_cs,
-                                                       &sem_info, bo_list,
-                                                       can_patch, base_fence);
+                                                          advance, initial_preamble, continue_preamble_cs,
+                                                          &sem_info, bo_list,
+                                                          can_patch, base_fence);
 
                        if (unlikely(queue->device->use_global_bo_list))
                                pthread_mutex_unlock(&queue->device->bo_list.mutex);
 
                        if (ret) {
-                               radv_loge("failed to submit CS %d\n", i);
+                               radv_loge("failed to submit CS\n");
                                abort();
                        }
-                       fence_emitted = true;
                        if (queue->device->trace_bo) {
                                radv_check_gpu_hangs(queue, cs_array[j]);
                        }
                }
 
-               radv_free_temp_syncobjs(queue->device,
-                                       pSubmits[i].waitSemaphoreCount,
-                                       pSubmits[i].pWaitSemaphores);
-               radv_free_sem_info(&sem_info);
                free(cs_array);
        }
 
-       if (fence) {
-               if (!fence_emitted) {
-                       result = radv_signal_fence(queue, fence);
-                       if (result != VK_SUCCESS)
-                               return result;
-               }
+success:
+       radv_free_temp_syncobjs(queue->device,
+                               submission->temporary_semaphore_part_count,
+                               submission->temporary_semaphore_parts);
+       radv_finalize_timelines(queue->device,
+                               submission->wait_semaphore_count,
+                               submission->wait_semaphores,
+                               submission->wait_values,
+                               submission->signal_semaphore_count,
+                               submission->signal_semaphores,
+                               submission->signal_values,
+                               processing_list);
+       /* This has to happen after timeline finalization so that the
+        * condition variable is only signaled once both the timelines and
+        * the queue have been updated. */
+       radv_queue_submission_update_queue(submission, processing_list);
+       radv_free_sem_info(&sem_info);
+       free(submission);
+       return VK_SUCCESS;
+
+fail:
+       radv_free_temp_syncobjs(queue->device,
+                               submission->temporary_semaphore_part_count,
+                               submission->temporary_semaphore_parts);
+       free(submission);
+       return VK_ERROR_DEVICE_LOST;
+}
+
+static VkResult
+radv_process_submissions(struct list_head *processing_list)
+{
+       while (!list_is_empty(processing_list)) {
+               struct radv_deferred_queue_submission *submission =
+                       list_first_entry(processing_list, struct radv_deferred_queue_submission, processing_list);
+               list_del(&submission->processing_list);
+
+               VkResult result = radv_queue_submit_deferred(submission, processing_list);
+               if (result != VK_SUCCESS)
+                       return result;
+       }
+       return VK_SUCCESS;
+}
+
+static VkResult radv_queue_submit(struct radv_queue *queue,
+                                  const struct radv_queue_submission *submission)
+{
+       struct radv_deferred_queue_submission *deferred = NULL;
+
+       VkResult result = radv_create_deferred_submission(queue, submission, &deferred);
+       if (result != VK_SUCCESS)
+               return result;
+
+       struct list_head processing_list;
+       list_inithead(&processing_list);
+
+       radv_queue_enqueue_submission(deferred, &processing_list);
+       return radv_process_submissions(&processing_list);
+}
+
+/* Signals fence as soon as all the work currently put on queue is done. */
+static VkResult radv_signal_fence(struct radv_queue *queue,
+                              VkFence fence)
+{
+       return radv_queue_submit(queue, &(struct radv_queue_submission) {
+                       .fence = fence
+               });
+}
+
+static bool radv_submit_has_effects(const VkSubmitInfo *info)
+{
+       return info->commandBufferCount ||
+              info->waitSemaphoreCount ||
+              info->signalSemaphoreCount;
+}
+
+VkResult radv_QueueSubmit(
+       VkQueue                                     _queue,
+       uint32_t                                    submitCount,
+       const VkSubmitInfo*                         pSubmits,
+       VkFence                                     fence)
+{
+       RADV_FROM_HANDLE(radv_queue, queue, _queue);
+       VkResult result;
+       uint32_t fence_idx = 0;
+       bool flushed_caches = false;
+
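+       /* Attach the fence to the last submit that has any effect, so it
+        * only signals once all batches in this call have executed. */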
+       if (fence != VK_NULL_HANDLE) {
+               for (uint32_t i = 0; i < submitCount; ++i)
+                       if (radv_submit_has_effects(pSubmits + i))
+                               fence_idx = i;
+       } else
+               fence_idx = UINT32_MAX;
+
+       for (uint32_t i = 0; i < submitCount; i++) {
+               if (!radv_submit_has_effects(pSubmits + i) && fence_idx != i)
+                       continue;
+
+               VkPipelineStageFlags wait_dst_stage_mask = 0;
+               for (unsigned j = 0; j < pSubmits[i].waitSemaphoreCount; ++j) {
+                       wait_dst_stage_mask |= pSubmits[i].pWaitDstStageMask[j];
+               }
+
+               const VkTimelineSemaphoreSubmitInfoKHR *timeline_info =
+                       vk_find_struct_const(pSubmits[i].pNext, TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR);
+
+               result = radv_queue_submit(queue, &(struct radv_queue_submission) {
+                               .cmd_buffers = pSubmits[i].pCommandBuffers,
+                               .cmd_buffer_count = pSubmits[i].commandBufferCount,
+                               .wait_dst_stage_mask = wait_dst_stage_mask,
+                               .flush_caches = !flushed_caches,
+                               .wait_semaphores = pSubmits[i].pWaitSemaphores,
+                               .wait_semaphore_count = pSubmits[i].waitSemaphoreCount,
+                               .signal_semaphores = pSubmits[i].pSignalSemaphores,
+                               .signal_semaphore_count = pSubmits[i].signalSemaphoreCount,
+                               .fence = i == fence_idx ? fence : VK_NULL_HANDLE,
+                               .wait_values = timeline_info ? timeline_info->pWaitSemaphoreValues : NULL,
+                               .wait_value_count = timeline_info && timeline_info->pWaitSemaphoreValues ? timeline_info->waitSemaphoreValueCount : 0,
+                               .signal_values = timeline_info ? timeline_info->pSignalSemaphoreValues : NULL,
+                               .signal_value_count = timeline_info && timeline_info->pSignalSemaphoreValues ? timeline_info->signalSemaphoreValueCount : 0,
+                       });
+               if (result != VK_SUCCESS)
+                       return result;
+
+               flushed_caches = true;
+       }
+
+       if (fence != VK_NULL_HANDLE && !submitCount) {
+               result = radv_signal_fence(queue, fence);
+               if (result != VK_SUCCESS)
+                       return result;
        }
 
        return VK_SUCCESS;
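
For context, a minimal app-side sketch (not part of this patch) of the submission shape radv_QueueSubmit parses above: a VkTimelineSemaphoreSubmitInfoKHR chained into VkSubmitInfo::pNext carries the wait/signal values, and the fence passed to vkQueueSubmit is attached to the last submission that does real work. All names below (submit_with_timeline, cmd_buf, timeline_sem) are illustrative.

#include <vulkan/vulkan.h>

/* Submit one command buffer that waits for the timeline to reach 1 and
 * signals value 2 when it finishes. */
static VkResult submit_with_timeline(VkQueue queue, VkCommandBuffer cmd_buf,
                                     VkSemaphore timeline_sem, VkFence fence)
{
	const uint64_t wait_value = 1, signal_value = 2;
	const VkPipelineStageFlags wait_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
	const VkTimelineSemaphoreSubmitInfoKHR timeline_info = {
		.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR,
		.waitSemaphoreValueCount = 1,
		.pWaitSemaphoreValues = &wait_value,
		.signalSemaphoreValueCount = 1,
		.pSignalSemaphoreValues = &signal_value,
	};
	const VkSubmitInfo submit = {
		.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
		.pNext = &timeline_info,
		.waitSemaphoreCount = 1,
		.pWaitSemaphores = &timeline_sem,
		.pWaitDstStageMask = &wait_stage,
		.commandBufferCount = 1,
		.pCommandBuffers = &cmd_buf,
		.signalSemaphoreCount = 1,
		.pSignalSemaphores = &timeline_sem,
	};
	return vkQueueSubmit(queue, 1, &submit, fence);
}
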
@@ -3319,6 +4542,12 @@ VkResult radv_QueueWaitIdle(
 {
        RADV_FROM_HANDLE(radv_queue, queue, _queue);
 
+       /* Wait until any deferred submissions still pending on this queue have
+        * been handed to the winsys before idling the HW context. */
+       pthread_mutex_lock(&queue->pending_mutex);
+       while (!list_is_empty(&queue->pending_submissions)) {
+               pthread_cond_wait(&queue->device->timeline_cond, &queue->pending_mutex);
+       }
+       pthread_mutex_unlock(&queue->pending_mutex);
+
        queue->device->ws->ctx_wait_idle(queue->hw_ctx,
                                         radv_queue_family_to_ring(queue->queue_family_index),
                                         queue->queue_idx);
@@ -3571,7 +4800,7 @@ static VkResult radv_alloc_memory(struct radv_device *device,
                }
        } else if (host_ptr_info) {
                assert(host_ptr_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT);
-               assert(mem_type_index == RADV_MEM_TYPE_GTT_CACHED);
+               assert(radv_is_mem_type_gtt_cached(mem_type_index));
                mem->bo = device->ws->buffer_from_ptr(device->ws, host_ptr_info->pHostPointer,
                                                      pAllocateInfo->allocationSize,
                                                      priority);
@@ -3583,18 +4812,18 @@ static VkResult radv_alloc_memory(struct radv_device *device,
                }
        } else {
                uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
-               if (mem_type_index == RADV_MEM_TYPE_GTT_WRITE_COMBINE ||
-                   mem_type_index == RADV_MEM_TYPE_GTT_CACHED)
+               if (radv_is_mem_type_gtt_wc(mem_type_index) ||
+                   radv_is_mem_type_gtt_cached(mem_type_index))
                        domain = RADEON_DOMAIN_GTT;
                else
                        domain = RADEON_DOMAIN_VRAM;
 
-               if (mem_type_index == RADV_MEM_TYPE_VRAM)
+               if (radv_is_mem_type_vram(mem_type_index))
                        flags |= RADEON_FLAG_NO_CPU_ACCESS;
                else
                        flags |= RADEON_FLAG_CPU_ACCESS;
 
-               if (mem_type_index == RADV_MEM_TYPE_GTT_WRITE_COMBINE)
+               if (radv_is_mem_type_gtt_wc(mem_type_index))
                        flags |= RADEON_FLAG_GTT_WC;
 
                if (!dedicate_info && !import_info && (!export_info || !export_info->handleTypes)) {
@@ -3604,6 +4833,11 @@ static VkResult radv_alloc_memory(struct radv_device *device,
                        }
                }
 
+               if (radv_is_mem_type_uncached(mem_type_index)) {
+                       assert(device->physical_device->rad_info.has_l2_uncached);
+                       flags |= RADEON_FLAG_VA_UNCACHED;
+               }
+
                mem->bo = device->ws->buffer_create(device->ws, alloc_size, device->physical_device->rad_info.max_alignment,
                                                    domain, flags, priority);
 
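
The RADEON_FLAG_VA_UNCACHED path above backs the uncached memory types gated on has_l2_uncached. Below is a hedged app-side sketch of how such a type would be selected, assuming these are the types advertised through VK_AMD_device_coherent_memory; find_mem_type is an illustrative helper, not a RADV function.

#include <stdint.h>
#include <vulkan/vulkan.h>

/* Illustrative only: pick a memory type whose property flags include all
 * requested bits and whose index is allowed by type_bits. */
static int32_t find_mem_type(VkPhysicalDevice pdev, uint32_t type_bits,
                             VkMemoryPropertyFlags wanted)
{
	VkPhysicalDeviceMemoryProperties props;
	vkGetPhysicalDeviceMemoryProperties(pdev, &props);

	for (uint32_t i = 0; i < props.memoryTypeCount; i++) {
		if ((type_bits & (1u << i)) &&
		    (props.memoryTypes[i].propertyFlags & wanted) == wanted)
			return (int32_t)i;
	}
	return -1;
}

/* e.g. device-coherent, uncached memory as exposed by
 * VK_AMD_device_coherent_memory:
 *
 * find_mem_type(pdev, reqs.memoryTypeBits,
 *               VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD |
 *               VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD);
 */
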
@@ -3885,107 +5119,63 @@ VkResult radv_BindImageMemory(
        return radv_BindImageMemory2(device, 1, &info);
 }
 
-
-static void
-radv_sparse_buffer_bind_memory(struct radv_device *device,
-                               const VkSparseBufferMemoryBindInfo *bind)
-{
-       RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
-
-       for (uint32_t i = 0; i < bind->bindCount; ++i) {
-               struct radv_device_memory *mem = NULL;
-
-               if (bind->pBinds[i].memory != VK_NULL_HANDLE)
-                       mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
-
-               device->ws->buffer_virtual_bind(buffer->bo,
-                                               bind->pBinds[i].resourceOffset,
-                                               bind->pBinds[i].size,
-                                               mem ? mem->bo : NULL,
-                                               bind->pBinds[i].memoryOffset);
-       }
-}
-
-static void
-radv_sparse_image_opaque_bind_memory(struct radv_device *device,
-                                     const VkSparseImageOpaqueMemoryBindInfo *bind)
+static bool radv_sparse_bind_has_effects(const VkBindSparseInfo *info)
 {
-       RADV_FROM_HANDLE(radv_image, image, bind->image);
-
-       for (uint32_t i = 0; i < bind->bindCount; ++i) {
-               struct radv_device_memory *mem = NULL;
-
-               if (bind->pBinds[i].memory != VK_NULL_HANDLE)
-                       mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
-
-               device->ws->buffer_virtual_bind(image->bo,
-                                               bind->pBinds[i].resourceOffset,
-                                               bind->pBinds[i].size,
-                                               mem ? mem->bo : NULL,
-                                               bind->pBinds[i].memoryOffset);
-       }
+       return info->bufferBindCount ||
+              info->imageOpaqueBindCount ||
+              info->imageBindCount ||
+              info->waitSemaphoreCount ||
+              info->signalSemaphoreCount;
 }
 
  VkResult radv_QueueBindSparse(
        VkQueue                                     _queue,
        uint32_t                                    bindInfoCount,
        const VkBindSparseInfo*                     pBindInfo,
-       VkFence                                     _fence)
+       VkFence                                     fence)
 {
-       RADV_FROM_HANDLE(radv_fence, fence, _fence);
        RADV_FROM_HANDLE(radv_queue, queue, _queue);
-       struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
-       bool fence_emitted = false;
        VkResult result;
-       int ret;
+       uint32_t fence_idx = 0;
+
+       if (fence != VK_NULL_HANDLE) {
+               for (uint32_t i = 0; i < bindInfoCount; ++i)
+                       if (radv_sparse_bind_has_effects(pBindInfo + i))
+                               fence_idx = i;
+       } else
+               fence_idx = UINT32_MAX;
 
        for (uint32_t i = 0; i < bindInfoCount; ++i) {
-               struct radv_winsys_sem_info sem_info;
-               for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; ++j) {
-                       radv_sparse_buffer_bind_memory(queue->device,
-                                                      pBindInfo[i].pBufferBinds + j);
-               }
+               if (i != fence_idx && !radv_sparse_bind_has_effects(pBindInfo + i))
+                       continue;
 
-               for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; ++j) {
-                       radv_sparse_image_opaque_bind_memory(queue->device,
-                                                            pBindInfo[i].pImageOpaqueBinds + j);
-               }
+               const VkTimelineSemaphoreSubmitInfoKHR *timeline_info =
+                       vk_find_struct_const(pBindInfo[i].pNext, TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR);
+
+               result = radv_queue_submit(queue, &(struct radv_queue_submission) {
+                               .buffer_binds = pBindInfo[i].pBufferBinds,
+                               .buffer_bind_count = pBindInfo[i].bufferBindCount,
+                               .image_opaque_binds = pBindInfo[i].pImageOpaqueBinds,
+                               .image_opaque_bind_count = pBindInfo[i].imageOpaqueBindCount,
+                               .wait_semaphores = pBindInfo[i].pWaitSemaphores,
+                               .wait_semaphore_count = pBindInfo[i].waitSemaphoreCount,
+                               .signal_semaphores = pBindInfo[i].pSignalSemaphores,
+                               .signal_semaphore_count = pBindInfo[i].signalSemaphoreCount,
+                               .fence = i == fence_idx ? fence : VK_NULL_HANDLE,
+                               .wait_values = timeline_info ? timeline_info->pWaitSemaphoreValues : NULL,
+                               .wait_value_count = timeline_info && timeline_info->pWaitSemaphoreValues ? timeline_info->waitSemaphoreValueCount : 0,
+                               .signal_values = timeline_info ? timeline_info->pSignalSemaphoreValues : NULL,
+                               .signal_value_count = timeline_info && timeline_info->pSignalSemaphoreValues ? timeline_info->signalSemaphoreValueCount : 0,
+                       });
 
-               VkResult result;
-               result = radv_alloc_sem_info(queue->device->instance,
-                                            &sem_info,
-                                            pBindInfo[i].waitSemaphoreCount,
-                                            pBindInfo[i].pWaitSemaphores,
-                                            pBindInfo[i].signalSemaphoreCount,
-                                            pBindInfo[i].pSignalSemaphores,
-                                            _fence);
                if (result != VK_SUCCESS)
                        return result;
-
-               if (pBindInfo[i].waitSemaphoreCount || pBindInfo[i].signalSemaphoreCount) {
-                       ret = queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
-                                                         &queue->device->empty_cs[queue->queue_family_index],
-                                                         1, NULL, NULL,
-                                                         &sem_info, NULL,
-                                                         false, base_fence);
-                       if (ret) {
-                               radv_loge("failed to submit CS %d\n", i);
-                               abort();
-                       }
-
-                       fence_emitted = true;
-               }
-
-               radv_free_sem_info(&sem_info);
-
        }
 
-       if (fence) {
-               if (!fence_emitted) {
-                       result = radv_signal_fence(queue, fence);
-                       if (result != VK_SUCCESS)
-                               return result;
-               }
+       if (fence != VK_NULL_HANDLE && !bindInfoCount) {
+               result = radv_signal_fence(queue, fence);
+               if (result != VK_SUCCESS)
+                       return result;
        }
 
        return VK_SUCCESS;
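
A minimal app-side sketch (not part of this patch) of the kind of request radv_QueueBindSparse now turns into a radv_queue_submission: one opaque memory range bound into a sparse buffer, with the fence signaled once the bind completes. bind_sparse_range and its parameters are illustrative.

#include <vulkan/vulkan.h>

/* Bind `size` bytes of `memory` at offset 0 of a sparse buffer, with no
 * semaphores involved. */
static VkResult bind_sparse_range(VkQueue queue, VkBuffer sparse_buf,
                                  VkDeviceMemory memory, VkDeviceSize size,
                                  VkFence fence)
{
	const VkSparseMemoryBind bind = {
		.resourceOffset = 0,
		.size = size,
		.memory = memory,
		.memoryOffset = 0,
	};
	const VkSparseBufferMemoryBindInfo buffer_bind = {
		.buffer = sparse_buf,
		.bindCount = 1,
		.pBinds = &bind,
	};
	const VkBindSparseInfo info = {
		.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,
		.bufferBindCount = 1,
		.pBufferBinds = &buffer_bind,
	};
	return vkQueueBindSparse(queue, 1, &info, fence);
}
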
@@ -4264,6 +5454,197 @@ VkResult radv_GetFenceStatus(VkDevice _device, VkFence _fence)
 
 // Queue semaphore functions
 
+static void
+radv_create_timeline(struct radv_timeline *timeline, uint64_t value)
+{
+       timeline->highest_signaled = value;
+       timeline->highest_submitted = value;
+       list_inithead(&timeline->points);
+       list_inithead(&timeline->free_points);
+       list_inithead(&timeline->waiters);
+       pthread_mutex_init(&timeline->mutex, NULL);
+}
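
The timeline bookkeeping these helpers manipulate is declared in radv_private.h, which this diff does not show; the following is a reconstruction from usage in this file, so field order and exact types are assumptions.

#include <pthread.h>
#include <stdint.h>
#include "util/list.h"   /* struct list_head */

/* Assumed shape, inferred from the code below: */
struct radv_timeline_point {
	struct list_head list;  /* links into points or free_points */
	uint64_t value;         /* timeline value this point represents */
	uint32_t syncobj;       /* kernel syncobj signaled at that value */
	int wait_count;         /* waiters still referencing this point */
};

struct radv_timeline {
	pthread_mutex_t mutex;        /* guards all fields below */
	uint64_t highest_signaled;    /* highest value observed as signaled */
	uint64_t highest_submitted;   /* highest value with submitted work */
	struct list_head points;      /* pending points, sorted by value */
	struct list_head free_points; /* retired points kept for reuse */
	struct list_head waiters;     /* deferred submissions waiting on values */
};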
+
+static void
+radv_destroy_timeline(struct radv_device *device,
+                      struct radv_timeline *timeline)
+{
+       list_for_each_entry_safe(struct radv_timeline_point, point,
+                                &timeline->free_points, list) {
+               list_del(&point->list);
+               device->ws->destroy_syncobj(device->ws, point->syncobj);
+               free(point);
+       }
+       list_for_each_entry_safe(struct radv_timeline_point, point,
+                                &timeline->points, list) {
+               list_del(&point->list);
+               device->ws->destroy_syncobj(device->ws, point->syncobj);
+               free(point);
+       }
+       pthread_mutex_destroy(&timeline->mutex);
+}
+
+/* Must be called with timeline->mutex held. Walks the (value-sorted) list of
+ * pending points, retires every point that has signaled and has no waiters
+ * left, and advances highest_signaled accordingly. Retired points (and their
+ * syncobjs) are kept on free_points for reuse.
+ */
+static void
+radv_timeline_gc_locked(struct radv_device *device,
+                        struct radv_timeline *timeline)
+{
+       list_for_each_entry_safe(struct radv_timeline_point, point,
+                                &timeline->points, list) {
+               if (point->wait_count || point->value > timeline->highest_submitted)
+                       return;
+
+               if (device->ws->wait_syncobj(device->ws, &point->syncobj, 1, true, 0)) {
+                       timeline->highest_signaled = point->value;
+                       list_del(&point->list);
+                       list_add(&point->list, &timeline->free_points);
+               }
+       }
+}
+
+/* Must be called with timeline->mutex held. Returns the first pending point
+ * with value >= p, or NULL if p has already signaled. A non-NULL result has
+ * its wait_count incremented; the caller must decrement it once it is done
+ * waiting on the point's syncobj.
+ */
+static struct radv_timeline_point *
+radv_timeline_find_point_at_least_locked(struct radv_device *device,
+                                         struct radv_timeline *timeline,
+                                         uint64_t p)
+{
+       radv_timeline_gc_locked(device, timeline);
+
+       if (p <= timeline->highest_signaled)
+               return NULL;
+
+       list_for_each_entry(struct radv_timeline_point, point,
+                           &timeline->points, list) {
+               if (point->value >= p) {
+                       ++point->wait_count;
+                       return point;
+               }
+       }
+       return NULL;
+}
+
+/* Must be called with timeline->mutex held. Creates (or recycles from
+ * free_points) a point for value p and inserts it so the list stays sorted.
+ * Returns NULL when p has already signaled or a point for p already exists.
+ * The returned point starts with wait_count == 1.
+ */
+static struct radv_timeline_point *
+radv_timeline_add_point_locked(struct radv_device *device,
+                               struct radv_timeline *timeline,
+                               uint64_t p)
+{
+       radv_timeline_gc_locked(device, timeline);
+
+       struct radv_timeline_point *ret = NULL;
+       struct radv_timeline_point *prev = NULL;
+
+       if (p <= timeline->highest_signaled)
+               return NULL;
+
+       list_for_each_entry(struct radv_timeline_point, point,
+                           &timeline->points, list) {
+               if (point->value == p) {
+                       return NULL;
+               }
+
+               if (point->value < p)
+                       prev = point;
+       }
+
+       if (list_is_empty(&timeline->free_points)) {
+               ret = malloc(sizeof(struct radv_timeline_point));
+               device->ws->create_syncobj(device->ws, &ret->syncobj);
+       } else {
+               ret = list_first_entry(&timeline->free_points, struct radv_timeline_point, list);
+               list_del(&ret->list);
+
+               device->ws->reset_syncobj(device->ws, ret->syncobj);
+       }
+
+       ret->value = p;
+       ret->wait_count = 1;
+
+       if (prev) {
+               list_add(&ret->list, &prev->list);
+       } else {
+               list_addtail(&ret->list, &timeline->points);
+       }
+       return ret;
+}
+
+/* Must be called with timeline->mutex held. Blocks until the timeline
+ * reaches `value` or abs_timeout (nanoseconds, absolute) expires. The mutex
+ * is dropped while waiting on the point's syncobj so submissions can keep
+ * making progress.
+ */
+static VkResult
+radv_timeline_wait_locked(struct radv_device *device,
+                          struct radv_timeline *timeline,
+                          uint64_t value,
+                          uint64_t abs_timeout)
+{
+       while (timeline->highest_submitted < value) {
+               struct timespec abstime;
+               timespec_from_nsec(&abstime, abs_timeout);
+
+               pthread_cond_timedwait(&device->timeline_cond, &timeline->mutex, &abstime);
+
+               if (radv_get_current_time() >= abs_timeout && timeline->highest_submitted < value)
+                       return VK_TIMEOUT;
+       }
+
+       struct radv_timeline_point *point = radv_timeline_find_point_at_least_locked(device, timeline, value);
+       if (!point)
+               return VK_SUCCESS;
+
+       pthread_mutex_unlock(&timeline->mutex);
+
+       bool success = device->ws->wait_syncobj(device->ws, &point->syncobj, 1, true, abs_timeout);
+
+       pthread_mutex_lock(&timeline->mutex);
+       point->wait_count--;
+       return success ? VK_SUCCESS : VK_TIMEOUT;
+}
+
+/* Must be called with timeline->mutex held. Requeues every deferred
+ * submission whose last outstanding wait has now been satisfied onto
+ * processing_list, so the caller can submit it after dropping the lock.
+ */
+static void
+radv_timeline_trigger_waiters_locked(struct radv_timeline *timeline,
+                                     struct list_head *processing_list)
+{
+       list_for_each_entry_safe(struct radv_timeline_waiter, waiter,
+                                &timeline->waiters, list) {
+               if (waiter->value > timeline->highest_submitted)
+                       continue;
+
+               if (p_atomic_dec_zero(&waiter->submission->submission_wait_count)) {
+                       list_addtail(&waiter->submission->processing_list, processing_list);
+               }
+               list_del(&waiter->list);
+       }
+}
+
+static
+void radv_destroy_semaphore_part(struct radv_device *device,
+                                 struct radv_semaphore_part *part)
+{
+       switch(part->kind) {
+       case RADV_SEMAPHORE_NONE:
+               break;
+       case RADV_SEMAPHORE_WINSYS:
+               device->ws->destroy_sem(part->ws_sem);
+               break;
+       case RADV_SEMAPHORE_TIMELINE:
+               radv_destroy_timeline(device, &part->timeline);
+               break;
+       case RADV_SEMAPHORE_SYNCOBJ:
+               device->ws->destroy_syncobj(device->ws, part->syncobj);
+               break;
+       }
+       part->kind = RADV_SEMAPHORE_NONE;
+}
+
+static VkSemaphoreTypeKHR
+radv_get_semaphore_type(const void *pNext, uint64_t *initial_value)
+{
+       const VkSemaphoreTypeCreateInfoKHR *type_info =
+               vk_find_struct_const(pNext, SEMAPHORE_TYPE_CREATE_INFO_KHR);
+
+       if (!type_info)
+               return VK_SEMAPHORE_TYPE_BINARY_KHR;
+
+       if (initial_value)
+               *initial_value = type_info->initialValue;
+       return type_info->semaphoreType;
+}
+
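App-side, the timeline path below is selected by chaining VkSemaphoreTypeCreateInfoKHR into vkCreateSemaphore; a minimal sketch (not part of this patch):

#include <vulkan/vulkan.h>

/* Create a timeline semaphore with initial value 0, which routes through
 * the RADV_SEMAPHORE_TIMELINE path in radv_CreateSemaphore. */
static VkResult create_timeline_semaphore(VkDevice dev, VkSemaphore *out)
{
	const VkSemaphoreTypeCreateInfoKHR type_info = {
		.sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR,
		.semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE_KHR,
		.initialValue = 0,
	};
	const VkSemaphoreCreateInfo create_info = {
		.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
		.pNext = &type_info,
	};
	return vkCreateSemaphore(dev, &create_info, NULL, out);
}
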
 VkResult radv_CreateSemaphore(
        VkDevice                                    _device,
        const VkSemaphoreCreateInfo*                pCreateInfo,
@@ -4275,6 +5656,8 @@ VkResult radv_CreateSemaphore(
                vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
        VkExternalSemaphoreHandleTypeFlags handleTypes =
                export ? export->handleTypes : 0;
+       uint64_t initial_value = 0;
+       VkSemaphoreTypeKHR type = radv_get_semaphore_type(pCreateInfo->pNext, &initial_value);
 
        struct radv_semaphore *sem = vk_alloc2(&device->alloc, pAllocator,
                                               sizeof(*sem), 8,
@@ -4282,23 +5665,27 @@ VkResult radv_CreateSemaphore(
        if (!sem)
                return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 
-       sem->temp_syncobj = 0;
-       /* create a syncobject if we are going to export this semaphore */
-       if (device->always_use_syncobj || handleTypes) {
+       sem->temporary.kind = RADV_SEMAPHORE_NONE;
+       sem->permanent.kind = RADV_SEMAPHORE_NONE;
+
+       if (type == VK_SEMAPHORE_TYPE_TIMELINE_KHR) {
+               radv_create_timeline(&sem->permanent.timeline, initial_value);
+               sem->permanent.kind = RADV_SEMAPHORE_TIMELINE;
+       } else if (device->always_use_syncobj || handleTypes) {
                assert (device->physical_device->rad_info.has_syncobj);
-               int ret = device->ws->create_syncobj(device->ws, &sem->syncobj);
+               int ret = device->ws->create_syncobj(device->ws, &sem->permanent.syncobj);
                if (ret) {
                        vk_free2(&device->alloc, pAllocator, sem);
                        return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
                }
-               sem->sem = NULL;
+               sem->permanent.kind = RADV_SEMAPHORE_SYNCOBJ;
        } else {
-               sem->sem = device->ws->create_sem(device->ws);
-               if (!sem->sem) {
+               sem->permanent.ws_sem = device->ws->create_sem(device->ws);
+               if (!sem->permanent.ws_sem) {
                        vk_free2(&device->alloc, pAllocator, sem);
                        return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
                }
-               sem->syncobj = 0;
+               sem->permanent.kind = RADV_SEMAPHORE_WINSYS;
        }
 
        *pSemaphore = radv_semaphore_to_handle(sem);
@@ -4315,13 +5702,115 @@ void radv_DestroySemaphore(
        if (!_semaphore)
                return;
 
-       if (sem->syncobj)
-               device->ws->destroy_syncobj(device->ws, sem->syncobj);
-       else
-               device->ws->destroy_sem(sem->sem);
+       radv_destroy_semaphore_part(device, &sem->temporary);
+       radv_destroy_semaphore_part(device, &sem->permanent);
        vk_free2(&device->alloc, pAllocator, sem);
 }
 
+VkResult
+radv_GetSemaphoreCounterValueKHR(VkDevice _device,
+                                 VkSemaphore _semaphore,
+                                 uint64_t* pValue)
+{
+       RADV_FROM_HANDLE(radv_device, device, _device);
+       RADV_FROM_HANDLE(radv_semaphore, semaphore, _semaphore);
+
+       struct radv_semaphore_part *part =
+               semaphore->temporary.kind != RADV_SEMAPHORE_NONE ? &semaphore->temporary : &semaphore->permanent;
+
+       switch (part->kind) {
+       case RADV_SEMAPHORE_TIMELINE: {
+               pthread_mutex_lock(&part->timeline.mutex);
+               radv_timeline_gc_locked(device, &part->timeline);
+               *pValue = part->timeline.highest_signaled;
+               pthread_mutex_unlock(&part->timeline.mutex);
+               return VK_SUCCESS;
+       }
+       case RADV_SEMAPHORE_NONE:
+       case RADV_SEMAPHORE_SYNCOBJ:
+       case RADV_SEMAPHORE_WINSYS:
+               unreachable("Invalid semaphore type");
+       }
+       unreachable("Unhandled semaphore type");
+}
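
A hedged usage sketch for the entry point above; in a real application vkGetSemaphoreCounterValueKHR must be fetched with vkGetDeviceProcAddr, which is elided here.

#include <stdbool.h>
#include <vulkan/vulkan.h>

/* Illustrative only: non-blocking check that the timeline has reached
 * `target`. */
static bool timeline_reached(VkDevice dev, VkSemaphore timeline_sem,
                             uint64_t target)
{
	uint64_t value = 0;
	if (vkGetSemaphoreCounterValueKHR(dev, timeline_sem, &value) != VK_SUCCESS)
		return false;
	return value >= target;
}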
+
+static VkResult
+radv_wait_timelines(struct radv_device *device,
+                    const VkSemaphoreWaitInfoKHR* pWaitInfo,
+                    uint64_t abs_timeout)
+{
+       if ((pWaitInfo->flags & VK_SEMAPHORE_WAIT_ANY_BIT_KHR) && pWaitInfo->semaphoreCount > 1) {
+               /* There is no single kernel primitive that waits for "any of
+                * these timelines", so poll each one with a zero timeout until
+                * one of them reaches its target value. */
+               for (;;) {
+                       for (uint32_t i = 0; i < pWaitInfo->semaphoreCount; ++i) {
+                               RADV_FROM_HANDLE(radv_semaphore, semaphore, pWaitInfo->pSemaphores[i]);
+                               pthread_mutex_lock(&semaphore->permanent.timeline.mutex);
+                               VkResult result = radv_timeline_wait_locked(device, &semaphore->permanent.timeline, pWaitInfo->pValues[i], 0);
+                               pthread_mutex_unlock(&semaphore->permanent.timeline.mutex);
+
+                               if (result == VK_SUCCESS)
+                                       return VK_SUCCESS;
+                       }
+                       if (radv_get_current_time() > abs_timeout)
+                               return VK_TIMEOUT;
+               }
+       }
+
+       for (uint32_t i = 0; i < pWaitInfo->semaphoreCount; ++i) {
+               RADV_FROM_HANDLE(radv_semaphore, semaphore, pWaitInfo->pSemaphores[i]);
+               pthread_mutex_lock(&semaphore->permanent.timeline.mutex);
+               VkResult result = radv_timeline_wait_locked(device, &semaphore->permanent.timeline, pWaitInfo->pValues[i], abs_timeout);
+               pthread_mutex_unlock(&semaphore->permanent.timeline.mutex);
+
+               if (result != VK_SUCCESS)
+                       return result;
+       }
+       return VK_SUCCESS;
+}
+
+VkResult
+radv_WaitSemaphoresKHR(VkDevice _device,
+                       const VkSemaphoreWaitInfoKHR* pWaitInfo,
+                       uint64_t timeout)
+{
+       RADV_FROM_HANDLE(radv_device, device, _device);
+       uint64_t abs_timeout = radv_get_absolute_timeout(timeout);
+       return radv_wait_timelines(device, pWaitInfo, abs_timeout);
+}
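
And the matching app-side wait, a minimal sketch assuming the KHR entry point has been loaded; waiting on several semaphores or using VK_SEMAPHORE_WAIT_ANY_BIT_KHR only changes the arrays and flags.

#include <vulkan/vulkan.h>

/* Block until the timeline reaches `target` or timeout_ns elapses. */
static VkResult wait_for_value(VkDevice dev, VkSemaphore timeline_sem,
                               uint64_t target, uint64_t timeout_ns)
{
	const VkSemaphoreWaitInfoKHR wait_info = {
		.sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO_KHR,
		.semaphoreCount = 1,
		.pSemaphores = &timeline_sem,
		.pValues = &target,
	};
	return vkWaitSemaphoresKHR(dev, &wait_info, timeout_ns);
}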
+
+VkResult
+radv_SignalSemaphoreKHR(VkDevice _device,
+                        const VkSemaphoreSignalInfoKHR* pSignalInfo)
+{
+       RADV_FROM_HANDLE(radv_device, device, _device);
+       RADV_FROM_HANDLE(radv_semaphore, semaphore, pSignalInfo->semaphore);
+
+       struct radv_semaphore_part *part =
+               semaphore->temporary.kind != RADV_SEMAPHORE_NONE ? &semaphore->temporary : &semaphore->permanent;
+
+       switch(part->kind) {
+       case RADV_SEMAPHORE_TIMELINE: {
+               pthread_mutex_lock(&part->timeline.mutex);
+               radv_timeline_gc_locked(device, &part->timeline);
+               part->timeline.highest_submitted = MAX2(part->timeline.highest_submitted, pSignalInfo->value);
+               part->timeline.highest_signaled = MAX2(part->timeline.highest_signaled, pSignalInfo->value);
+
+               struct list_head processing_list;
+               list_inithead(&processing_list);
+               radv_timeline_trigger_waiters_locked(&part->timeline, &processing_list);
+               pthread_mutex_unlock(&part->timeline.mutex);
+
+               return radv_process_submissions(&processing_list);
+       }
+       case RADV_SEMAPHORE_NONE:
+       case RADV_SEMAPHORE_SYNCOBJ:
+       case RADV_SEMAPHORE_WINSYS:
+               unreachable("Invalid semaphore type");
+       }
+       return VK_SUCCESS;
+}
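
Host-side signaling, sketched below, is what drives the RADV_SEMAPHORE_TIMELINE case above: bumping highest_submitted/highest_signaled and flushing any deferred submissions through radv_process_submissions. The helper name is illustrative and the KHR entry point is assumed to be loaded.

#include <vulkan/vulkan.h>

/* Signal the timeline to `value` from the host, unblocking any queued
 * submissions that were waiting on it. */
static VkResult signal_from_host(VkDevice dev, VkSemaphore timeline_sem,
                                 uint64_t value)
{
	const VkSemaphoreSignalInfoKHR signal_info = {
		.sType = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO_KHR,
		.semaphore = timeline_sem,
		.value = value,
	};
	return vkSignalSemaphoreKHR(dev, &signal_info);
}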
+
 VkResult radv_CreateEvent(
        VkDevice                                    _device,
        const VkEventCreateInfo*                    pCreateInfo,
@@ -4455,15 +5944,27 @@ void radv_DestroyBuffer(
        vk_free2(&device->alloc, pAllocator, buffer);
 }
 
-VkDeviceAddress radv_GetBufferDeviceAddressEXT(
+VkDeviceAddress radv_GetBufferDeviceAddressKHR(
        VkDevice                                    device,
-       const VkBufferDeviceAddressInfoEXT*         pInfo)
+       const VkBufferDeviceAddressInfoKHR*         pInfo)
 {
        RADV_FROM_HANDLE(radv_buffer, buffer, pInfo->buffer);
        return radv_buffer_get_va(buffer->bo) + buffer->offset;
 }
 
 
+uint64_t radv_GetBufferOpaqueCaptureAddressKHR(VkDevice device,
+                                              const VkBufferDeviceAddressInfoKHR* pInfo)
+{
+       /* Capture/replay of buffer device addresses is not supported, so no
+        * opaque capture address is needed. */
+       return 0;
+}
+
+uint64_t radv_GetDeviceMemoryOpaqueCaptureAddressKHR(VkDevice device,
+                                                    const VkDeviceMemoryOpaqueCaptureAddressInfoKHR* pInfo)
+{
+       return 0;
+}
+
 static inline unsigned
 si_tile_mode_index(const struct radv_image_plane *plane, unsigned level, bool stencil)
 {
@@ -5416,22 +6917,34 @@ VkResult radv_ImportSemaphoreFdKHR(VkDevice _device,
 {
        RADV_FROM_HANDLE(radv_device, device, _device);
        RADV_FROM_HANDLE(radv_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
-       uint32_t *syncobj_dst = NULL;
+       VkResult result;
+       struct radv_semaphore_part *dst = NULL;
 
        if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
-               syncobj_dst = &sem->temp_syncobj;
+               dst = &sem->temporary;
        } else {
-               syncobj_dst = &sem->syncobj;
+               dst = &sem->permanent;
        }
 
+       uint32_t syncobj = dst->kind == RADV_SEMAPHORE_SYNCOBJ ? dst->syncobj : 0;
+
        switch(pImportSemaphoreFdInfo->handleType) {
                case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
-                       return radv_import_opaque_fd(device, pImportSemaphoreFdInfo->fd, syncobj_dst);
+                       result = radv_import_opaque_fd(device, pImportSemaphoreFdInfo->fd, &syncobj);
+                       break;
                case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
-                       return radv_import_sync_fd(device, pImportSemaphoreFdInfo->fd, syncobj_dst);
+                       result = radv_import_sync_fd(device, pImportSemaphoreFdInfo->fd, &syncobj);
+                       break;
                default:
                        unreachable("Unhandled semaphore handle type");
        }
+
+       if (result == VK_SUCCESS) {
+               dst->syncobj = syncobj;
+               dst->kind = RADV_SEMAPHORE_SYNCOBJ;
+       }
+
+       return result;
 }
 
 VkResult radv_GetSemaphoreFdKHR(VkDevice _device,
@@ -5443,10 +6956,13 @@ VkResult radv_GetSemaphoreFdKHR(VkDevice _device,
        int ret;
        uint32_t syncobj_handle;
 
-       if (sem->temp_syncobj)
-               syncobj_handle = sem->temp_syncobj;
-       else
-               syncobj_handle = sem->syncobj;
+       if (sem->temporary.kind != RADV_SEMAPHORE_NONE) {
+               assert(sem->temporary.kind == RADV_SEMAPHORE_SYNCOBJ);
+               syncobj_handle = sem->temporary.syncobj;
+       } else {
+               assert(sem->permanent.kind == RADV_SEMAPHORE_SYNCOBJ);
+               syncobj_handle = sem->permanent.syncobj;
+       }
 
        switch(pGetFdInfo->handleType) {
        case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
@@ -5455,9 +6971,8 @@ VkResult radv_GetSemaphoreFdKHR(VkDevice _device,
        case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
                ret = device->ws->export_syncobj_to_sync_file(device->ws, syncobj_handle, pFd);
                if (!ret) {
-                       if (sem->temp_syncobj) {
-                               close (sem->temp_syncobj);
-                               sem->temp_syncobj = 0;
+                       if (sem->temporary.kind != RADV_SEMAPHORE_NONE) {
+                               radv_destroy_semaphore_part(device, &sem->temporary);
                        } else {
                                device->ws->reset_syncobj(device->ws, syncobj_handle);
                        }
@@ -5478,11 +6993,17 @@ void radv_GetPhysicalDeviceExternalSemaphoreProperties(
        VkExternalSemaphoreProperties               *pExternalSemaphoreProperties)
 {
        RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
+       VkSemaphoreTypeKHR type = radv_get_semaphore_type(pExternalSemaphoreInfo->pNext, NULL);
+
+       if (type == VK_SEMAPHORE_TYPE_TIMELINE_KHR) {
+               pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
+               pExternalSemaphoreProperties->compatibleHandleTypes = 0;
+               pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
 
        /* Require has_syncobj_wait_for_submit for the syncobj signal ioctl introduced at virtually the same time */
-       if (pdevice->rad_info.has_syncobj_wait_for_submit &&
-           (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT || 
-            pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT)) {
+       } else if (pdevice->rad_info.has_syncobj_wait_for_submit &&
+                  (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT || 
+                   pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT)) {
                pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
                pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
                pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |