.specVersion = 5,
},
#endif
+ {
+ .extensionName = VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
+ .specVersion = 1,
+ },
};
static const VkExtensionProperties common_device_extensions[] = {
+ {
+ .extensionName = VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME,
+ .specVersion = 1,
+ },
+ {
+ .extensionName = VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME,
+ .specVersion = 1,
+ },
{
.extensionName = VK_KHR_MAINTENANCE1_EXTENSION_NAME,
.specVersion = 1,
},
+ {
+ .extensionName = VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME,
+ .specVersion = 1,
+ },
{
.extensionName = VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME,
.specVersion = 1,
},
{
.extensionName = VK_AMD_DRAW_INDIRECT_COUNT_EXTENSION_NAME,
.specVersion = 1,
},
- {
- .extensionName = VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
- .specVersion = 1,
- },
{
.extensionName = VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME,
.specVersion = 1,
{
RADV_FROM_HANDLE(radv_instance, instance, _instance);
+ if (!instance)
+ return;
+
for (int i = 0; i < instance->physicalDeviceCount; ++i) {
radv_physical_device_finish(instance->physicalDevices + i);
}
vk_free(&instance->alloc, instance);
}
+static VkResult
+radv_enumerate_devices(struct radv_instance *instance)
+{
+ /* TODO: Check for more devices ? */
+ drmDevicePtr devices[8];
+ VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
+ int max_devices;
+
+ instance->physicalDeviceCount = 0;
+
+ max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
+ if (max_devices < 1)
+ return VK_ERROR_INCOMPATIBLE_DRIVER;
+
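+ /* Walk the DRM device list and set up a physical device for every AMD
+ * (PCI vendor id 0x1002) GPU that exposes a render node. */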
+ for (unsigned i = 0; i < (unsigned)max_devices; i++) {
+ if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
+ devices[i]->bustype == DRM_BUS_PCI &&
+ devices[i]->deviceinfo.pci->vendor_id == 0x1002) {
+
+ result = radv_physical_device_init(instance->physicalDevices +
+ instance->physicalDeviceCount,
+ instance,
+ devices[i]->nodes[DRM_NODE_RENDER]);
+ if (result == VK_SUCCESS)
+ ++instance->physicalDeviceCount;
+ else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
+ break;
+ }
+ }
+ drmFreeDevices(devices, max_devices);
+
+ return result;
+}
+
VkResult radv_EnumeratePhysicalDevices(
VkInstance _instance,
uint32_t* pPhysicalDeviceCount,
VkResult result;
if (instance->physicalDeviceCount < 0) {
- char path[20];
- instance->physicalDeviceCount = 0;
- for (unsigned i = 0; i < RADV_MAX_DRM_DEVICES; i++) {
- snprintf(path, sizeof(path), "/dev/dri/renderD%d", 128 + i);
- result = radv_physical_device_init(instance->physicalDevices +
- instance->physicalDeviceCount,
- instance, path);
- if (result == VK_SUCCESS)
- ++instance->physicalDeviceCount;
- else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
- return result;
- }
+ result = radv_enumerate_devices(instance);
+ if (result != VK_SUCCESS &&
+ result != VK_ERROR_INCOMPATIBLE_DRIVER)
+ return result;
}
if (!pPhysicalDevices) {
.imageCubeArray = true,
.independentBlend = true,
.geometryShader = true,
- .tessellationShader = false,
+ .tessellationShader = true,
.sampleRateShading = false,
.dualSrcBlend = true,
.logicOp = true,
.textureCompressionASTC_LDR = false,
.textureCompressionBC = true,
.occlusionQueryPrecise = true,
- .pipelineStatisticsQuery = false,
+ .pipelineStatisticsQuery = true,
.vertexPipelineStoresAndAtomics = true,
.fragmentStoresAndAtomics = true,
.shaderTessellationAndGeometryPointSize = true,
.shaderFloat64 = true,
.shaderInt64 = false,
.shaderInt16 = false,
.alphaToOne = true,
- .variableMultisampleRate = false,
- .inheritedQueries = false,
+ .sparseBinding = true,
+ .variableMultisampleRate = true,
+ .inheritedQueries = true,
};
}
return radv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
}
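+/* Derive a packed driver version from the Mesa VERSION string. For
+ * development builds ("-devel"), the reported version is decremented by
+ * one so a devel build never claims a release it does not yet contain:
+ * e.g. "17.1.0-devel" reports 17.0.99. */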
+static uint32_t radv_get_driver_version(void)
+{
+ const char *minor_string = strchr(VERSION, '.');
+ const char *patch_string = minor_string ? strchr(minor_string + 1, '.') : NULL;
+ int major = atoi(VERSION);
+ int minor = minor_string ? atoi(minor_string + 1) : 0;
+ int patch = patch_string ? atoi(patch_string + 1) : 0;
+ if (strstr(VERSION, "devel")) {
+ if (patch == 0) {
+ patch = 99;
+ if (minor == 0) {
+ minor = 99;
+ --major;
+ } else
+ --minor;
+ } else
+ --patch;
+ }
+ uint32_t version = VK_MAKE_VERSION(major, minor, patch);
+ return version;
+}
+
void radv_GetPhysicalDeviceProperties(
VkPhysicalDevice physicalDevice,
VkPhysicalDeviceProperties* pProperties)
{
RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
VkSampleCountFlags sample_counts = 0xf;
+
+ /* Make sure that the entire descriptor set is addressable with a signed
+ * 32-bit int. So the sum of all limits scaled by descriptor size has to
+ * be at most 2 GiB. A combined image & sampler descriptor counts as one
+ * of both. This limit is for the pipeline layout, not for the set layout,
+ * but there is no set limit, so we just set a pipeline limit. I don't
+ * think any app is going to hit this soon. */
+ size_t max_descriptor_set_size = ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
+ (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
+ 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
+ 32 /* sampler, largest when combined with image */ +
+ 64 /* sampled image */ +
+ 64 /* storage image */);
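+ /* Assuming MAX_DYNAMIC_BUFFERS is 16 (16 bytes per dynamic buffer
+ * descriptor), this evaluates to (2^31 - 256) / 224, roughly
+ * 9.5 million descriptors. */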
+
VkPhysicalDeviceLimits limits = {
.maxImageDimension1D = (1 << 14),
.maxImageDimension2D = (1 << 14),
.maxMemoryAllocationCount = UINT32_MAX,
.maxSamplerAllocationCount = 64 * 1024,
.bufferImageGranularity = 64, /* A cache line */
- .sparseAddressSpaceSize = 0,
+ .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
.maxBoundDescriptorSets = MAX_SETS,
- .maxPerStageDescriptorSamplers = 64,
- .maxPerStageDescriptorUniformBuffers = 64,
- .maxPerStageDescriptorStorageBuffers = 64,
- .maxPerStageDescriptorSampledImages = 64,
- .maxPerStageDescriptorStorageImages = 64,
- .maxPerStageDescriptorInputAttachments = 64,
- .maxPerStageResources = 128,
- .maxDescriptorSetSamplers = 256,
- .maxDescriptorSetUniformBuffers = 256,
- .maxDescriptorSetUniformBuffersDynamic = 256,
- .maxDescriptorSetStorageBuffers = 256,
- .maxDescriptorSetStorageBuffersDynamic = 256,
- .maxDescriptorSetSampledImages = 256,
- .maxDescriptorSetStorageImages = 256,
- .maxDescriptorSetInputAttachments = 256,
+ .maxPerStageDescriptorSamplers = max_descriptor_set_size,
+ .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
+ .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
+ .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
+ .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
+ .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
+ .maxPerStageResources = max_descriptor_set_size,
+ .maxDescriptorSetSamplers = max_descriptor_set_size,
+ .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
+ .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
+ .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
+ .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_BUFFERS / 2,
+ .maxDescriptorSetSampledImages = max_descriptor_set_size,
+ .maxDescriptorSetStorageImages = max_descriptor_set_size,
+ .maxDescriptorSetInputAttachments = max_descriptor_set_size,
.maxVertexInputAttributes = 32,
.maxVertexInputBindings = 32,
.maxVertexInputAttributeOffset = 2047,
.maxVertexInputBindingStride = 2048,
.maxVertexOutputComponents = 128,
- .maxTessellationGenerationLevel = 0,
- .maxTessellationPatchSize = 0,
- .maxTessellationControlPerVertexInputComponents = 0,
- .maxTessellationControlPerVertexOutputComponents = 0,
- .maxTessellationControlPerPatchOutputComponents = 0,
- .maxTessellationControlTotalOutputComponents = 0,
- .maxTessellationEvaluationInputComponents = 0,
- .maxTessellationEvaluationOutputComponents = 0,
- .maxGeometryShaderInvocations = 32,
+ .maxTessellationGenerationLevel = 64,
+ .maxTessellationPatchSize = 32,
+ .maxTessellationControlPerVertexInputComponents = 128,
+ .maxTessellationControlPerVertexOutputComponents = 128,
+ .maxTessellationControlPerPatchOutputComponents = 120,
+ .maxTessellationControlTotalOutputComponents = 4096,
+ .maxTessellationEvaluationInputComponents = 128,
+ .maxTessellationEvaluationOutputComponents = 128,
+ .maxGeometryShaderInvocations = 127,
.maxGeometryInputComponents = 64,
.maxGeometryOutputComponents = 128,
.maxGeometryOutputVertices = 256,
.sampledImageStencilSampleCounts = sample_counts,
.storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
.maxSampleMaskWords = 1,
- .timestampComputeAndGraphics = false,
- .timestampPeriod = 100000.0 / pdevice->rad_info.clock_crystal_freq,
+ .timestampComputeAndGraphics = true,
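+ /* rad_info.clock_crystal_freq is in kHz, so this is the timestamp
+ * period in nanoseconds per tick. */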
+ .timestampPeriod = 1000000.0 / pdevice->rad_info.clock_crystal_freq,
.maxClipDistances = 8,
.maxCullDistances = 8,
.maxCombinedClipAndCullDistances = 8,
};
*pProperties = (VkPhysicalDeviceProperties) {
- .apiVersion = VK_MAKE_VERSION(1, 0, 5),
- .driverVersion = 1,
+ .apiVersion = VK_MAKE_VERSION(1, 0, 42),
+ .driverVersion = radv_get_driver_version(),
.vendorID = 0x1002,
.deviceID = pdevice->rad_info.pci_id,
.deviceType = VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU,
.limits = limits,
- .sparseProperties = {0}, /* Broadwell doesn't do sparse. */
+ .sparseProperties = {0},
};
strcpy(pProperties->deviceName, pdevice->name);
VkPhysicalDevice physicalDevice,
VkPhysicalDeviceProperties2KHR *pProperties)
{
- return radv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
+ radv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
+
+ vk_foreach_struct(ext, pProperties->pNext) {
+ switch (ext->sType) {
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
+ VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
+ (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
+ properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
+ break;
+ }
+ default:
+ break;
+ }
+ }
}
static void radv_get_physical_device_queue_family_properties(
if (*pCount >= 1) {
*pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
.queueFlags = VK_QUEUE_GRAPHICS_BIT |
- VK_QUEUE_COMPUTE_BIT |
- VK_QUEUE_TRANSFER_BIT,
+ VK_QUEUE_COMPUTE_BIT |
+ VK_QUEUE_TRANSFER_BIT |
+ VK_QUEUE_SPARSE_BINDING_BIT,
.queueCount = 1,
.timestampValidBits = 64,
.minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
!(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE)) {
if (*pCount > idx) {
*pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
- .queueFlags = VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
+ .queueFlags = VK_QUEUE_COMPUTE_BIT |
+ VK_QUEUE_TRANSFER_BIT |
+ VK_QUEUE_SPARSE_BINDING_BIT,
.queueCount = pdevice->rad_info.compute_rings,
.timestampValidBits = 64,
.minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
if (queue->gsvs_ring_bo)
queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
+ if (queue->tess_factor_ring_bo)
+ queue->device->ws->buffer_destroy(queue->tess_factor_ring_bo);
+ if (queue->tess_offchip_ring_bo)
+ queue->device->ws->buffer_destroy(queue->tess_offchip_ring_bo);
if (queue->compute_scratch_bo)
queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
}
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
device->gs_table_depth = 32;
return;
default:
radv_device_init_gs_info(device);
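+ /* Hawaii uses a smaller off-chip tess block (4K dwords instead of 8K);
+ * distributed tessellation needs VI or later with at least two SEs. */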
+ device->tess_offchip_block_dw_size =
+ device->physical_device->rad_info.family == CHIP_HAWAII ? 4096 : 8192;
+ device->has_distributed_tess =
+ device->physical_device->rad_info.chip_class >= VI &&
+ device->physical_device->rad_info.max_se >= 2;
+
result = radv_device_init_meta(device);
if (result != VK_SUCCESS)
goto fail;
break;
}
device->ws->cs_finalize(device->flush_cs[family]);
+
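+ /* A second flush CS that additionally waits for in-flight shaders and
+ * invalidates caches; used for submissions that wait on semaphores. */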
+ device->flush_shader_cs[family] = device->ws->cs_create(device->ws, family);
+ switch (family) {
+ case RADV_QUEUE_GENERAL:
+ case RADV_QUEUE_COMPUTE:
+ si_cs_emit_cache_flush(device->flush_shader_cs[family],
+ device->physical_device->rad_info.chip_class,
+ family == RADV_QUEUE_COMPUTE && device->physical_device->rad_info.chip_class >= CIK,
+ (family == RADV_QUEUE_COMPUTE ? RADV_CMD_FLAG_CS_PARTIAL_FLUSH : (RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH)) |
+ RADV_CMD_FLAG_INV_ICACHE |
+ RADV_CMD_FLAG_INV_SMEM_L1 |
+ RADV_CMD_FLAG_INV_VMEM_L1 |
+ RADV_CMD_FLAG_INV_GLOBAL_L2);
+ break;
+ }
+ device->ws->cs_finalize(device->flush_shader_cs[family]);
}
if (getenv("RADV_TRACE_FILE")) {
if (device->physical_device->rad_info.chip_class >= CIK)
cik_create_gfx_config(device);
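+
+ /* Create a device-internal pipeline cache, used whenever the
+ * application does not supply one. */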
+ VkPipelineCacheCreateInfo ci;
+ ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
+ ci.pNext = NULL;
+ ci.flags = 0;
+ ci.pInitialData = NULL;
+ ci.initialDataSize = 0;
+ VkPipelineCache pc;
+ result = radv_CreatePipelineCache(radv_device_to_handle(device),
+ &ci, NULL, &pc);
+ if (result != VK_SUCCESS)
+ goto fail;
+
+ device->mem_cache = radv_pipeline_cache_from_handle(pc);
+
*pDevice = radv_device_to_handle(device);
return VK_SUCCESS;
{
RADV_FROM_HANDLE(radv_device, device, _device);
+ if (!device)
+ return;
+
if (device->trace_bo)
device->ws->buffer_destroy(device->trace_bo);
device->ws->cs_destroy(device->empty_cs[i]);
if (device->flush_cs[i])
device->ws->cs_destroy(device->flush_cs[i]);
+ if (device->flush_shader_cs[i])
+ device->ws->cs_destroy(device->flush_shader_cs[i]);
}
radv_device_finish_meta(device);
+ VkPipelineCache pc = radv_pipeline_cache_to_handle(device->mem_cache);
+ radv_DestroyPipelineCache(radv_device_to_handle(device), pc, NULL);
+
vk_free(&device->alloc, device);
}
}
static void
-fill_geom_rings(struct radv_queue *queue,
- uint32_t *map,
- uint32_t esgs_ring_size,
- struct radeon_winsys_bo *esgs_ring_bo,
- uint32_t gsvs_ring_size,
- struct radeon_winsys_bo *gsvs_ring_bo)
+fill_geom_tess_rings(struct radv_queue *queue,
+ uint32_t *map,
+ bool add_sample_positions,
+ uint32_t esgs_ring_size,
+ struct radeon_winsys_bo *esgs_ring_bo,
+ uint32_t gsvs_ring_size,
+ struct radeon_winsys_bo *gsvs_ring_bo,
+ uint32_t tess_factor_ring_size,
+ struct radeon_winsys_bo *tess_factor_ring_bo,
+ uint32_t tess_offchip_ring_size,
+ struct radeon_winsys_bo *tess_offchip_ring_bo)
{
uint64_t esgs_va = 0, gsvs_va = 0;
+ uint64_t tess_factor_va = 0, tess_offchip_va = 0;
uint32_t *desc = &map[4];
if (esgs_ring_bo)
esgs_va = queue->device->ws->buffer_get_va(esgs_ring_bo);
if (gsvs_ring_bo)
gsvs_va = queue->device->ws->buffer_get_va(gsvs_ring_bo);
+ if (tess_factor_ring_bo)
+ tess_factor_va = queue->device->ws->buffer_get_va(tess_factor_ring_bo);
+ if (tess_offchip_ring_bo)
+ tess_offchip_va = queue->device->ws->buffer_get_va(tess_offchip_ring_bo);
/* stride 0, num records - size, add tid, swizzle, elsize4,
index stride 64 */
S_008F0C_ELEMENT_SIZE(1) |
S_008F0C_INDEX_STRIDE(1) |
S_008F0C_ADD_TID_ENABLE(true);
+ desc += 4;
+
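+ /* Tess factor ring: a plain linear buffer descriptor, 32-bit float
+ * elements, no swizzling. */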
+ desc[0] = tess_factor_va;
+ desc[1] = S_008F04_BASE_ADDRESS_HI(tess_factor_va >> 32) |
+ S_008F04_STRIDE(0) |
+ S_008F04_SWIZZLE_ENABLE(false);
+ desc[2] = tess_factor_ring_size;
+ desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
+ S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
+ S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
+ S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
+ S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
+ S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
+ S_008F0C_ELEMENT_SIZE(0) |
+ S_008F0C_INDEX_STRIDE(0) |
+ S_008F0C_ADD_TID_ENABLE(false);
+ desc += 4;
+
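+ /* Off-chip tess ring: same linear descriptor layout as the tess
+ * factor ring. */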
+ desc[0] = tess_offchip_va;
+ desc[1] = S_008F04_BASE_ADDRESS_HI(tess_offchip_va >> 32) |
+ S_008F04_STRIDE(0) |
+ S_008F04_SWIZZLE_ENABLE(false);
+ desc[2] = tess_offchip_ring_size;
+ desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
+ S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
+ S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
+ S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
+ S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
+ S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
+ S_008F0C_ELEMENT_SIZE(0) |
+ S_008F0C_INDEX_STRIDE(0) |
+ S_008F0C_ADD_TID_ENABLE(false);
+ desc += 4;
+
+ /* Add the sample positions after all rings, but only when the
+ * descriptor buffer was actually sized for them. */
+ if (add_sample_positions) {
+ memcpy(desc, queue->device->sample_locations_1x, 8);
+ desc += 2;
+ memcpy(desc, queue->device->sample_locations_2x, 16);
+ desc += 4;
+ memcpy(desc, queue->device->sample_locations_4x, 32);
+ desc += 8;
+ memcpy(desc, queue->device->sample_locations_8x, 64);
+ desc += 16;
+ memcpy(desc, queue->device->sample_locations_16x, 128);
+ }
+}
+
+static unsigned
+radv_get_hs_offchip_param(struct radv_device *device, uint32_t *max_offchip_buffers_p)
+{
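+ /* Compute the HS_OFFCHIP_PARAM register value: the number of available
+ * off-chip tess buffers and, on CIK+, their granularity in dwords. */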
+ bool double_offchip_buffers = device->physical_device->rad_info.chip_class >= CIK &&
+ device->physical_device->rad_info.family != CHIP_CARRIZO &&
+ device->physical_device->rad_info.family != CHIP_STONEY;
+ unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
+ unsigned max_offchip_buffers = max_offchip_buffers_per_se *
+ device->physical_device->rad_info.max_se;
+ unsigned offchip_granularity;
+ unsigned hs_offchip_param;
+ switch (device->tess_offchip_block_dw_size) {
+ default:
+ assert(0);
+ /* fall through */
+ case 8192:
+ offchip_granularity = V_03093C_X_8K_DWORDS;
+ break;
+ case 4096:
+ offchip_granularity = V_03093C_X_4K_DWORDS;
+ break;
+ }
+
+ switch (device->physical_device->rad_info.chip_class) {
+ case SI:
+ max_offchip_buffers = MIN2(max_offchip_buffers, 126);
+ break;
+ case CIK:
+ max_offchip_buffers = MIN2(max_offchip_buffers, 508);
+ break;
+ case VI:
+ default:
+ max_offchip_buffers = MIN2(max_offchip_buffers, 512);
+ break;
+ }
+
+ *max_offchip_buffers_p = max_offchip_buffers;
+ if (device->physical_device->rad_info.chip_class >= CIK) {
+ if (device->physical_device->rad_info.chip_class >= VI)
+ --max_offchip_buffers;
+ hs_offchip_param =
+ S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
+ S_03093C_OFFCHIP_GRANULARITY(offchip_granularity);
+ } else {
+ hs_offchip_param =
+ S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers);
+ }
+ return hs_offchip_param;
}
static VkResult
uint32_t compute_scratch_size,
uint32_t esgs_ring_size,
uint32_t gsvs_ring_size,
+ bool needs_tess_rings,
+ bool needs_sample_positions,
struct radeon_winsys_cs **initial_preamble_cs,
struct radeon_winsys_cs **continue_preamble_cs)
{
struct radeon_winsys_bo *compute_scratch_bo = NULL;
struct radeon_winsys_bo *esgs_ring_bo = NULL;
struct radeon_winsys_bo *gsvs_ring_bo = NULL;
+ struct radeon_winsys_bo *tess_factor_ring_bo = NULL;
+ struct radeon_winsys_bo *tess_offchip_ring_bo = NULL;
struct radeon_winsys_cs *dest_cs[2] = {0};
+ bool add_tess_rings = false, add_sample_positions = false;
+ unsigned tess_factor_ring_size = 0, tess_offchip_ring_size = 0;
+ unsigned max_offchip_buffers;
+ unsigned hs_offchip_param = 0;
+ if (!queue->has_tess_rings) {
+ if (needs_tess_rings)
+ add_tess_rings = true;
+ }
+ if (!queue->has_sample_positions) {
+ if (needs_sample_positions)
+ add_sample_positions = true;
+ }
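+
+ /* The tess factor ring is sized per shader engine; the off-chip ring
+ * holds max_offchip_buffers blocks of tess_offchip_block_dw_size dwords
+ * (hence the *4 to get bytes). */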
+ tess_factor_ring_size = 32768 * queue->device->physical_device->rad_info.max_se;
+ hs_offchip_param = radv_get_hs_offchip_param(queue->device,
+ &max_offchip_buffers);
+ tess_offchip_ring_size = max_offchip_buffers *
+ queue->device->tess_offchip_block_dw_size * 4;
if (scratch_size <= queue->scratch_size &&
compute_scratch_size <= queue->compute_scratch_size &&
esgs_ring_size <= queue->esgs_ring_size &&
gsvs_ring_size <= queue->gsvs_ring_size &&
+ !add_tess_rings && !add_sample_positions &&
queue->initial_preamble_cs) {
*initial_preamble_cs = queue->initial_preamble_cs;
*continue_preamble_cs = queue->continue_preamble_cs;
gsvs_ring_size = queue->gsvs_ring_size;
}
+ if (add_tess_rings) {
+ tess_factor_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
+ tess_factor_ring_size,
+ 256,
+ RADEON_DOMAIN_VRAM,
+ RADEON_FLAG_NO_CPU_ACCESS);
+ if (!tess_factor_ring_bo)
+ goto fail;
+ tess_offchip_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
+ tess_offchip_ring_size,
+ 256,
+ RADEON_DOMAIN_VRAM,
+ RADEON_FLAG_NO_CPU_ACCESS);
+ if (!tess_offchip_ring_bo)
+ goto fail;
+ } else {
+ tess_factor_ring_bo = queue->tess_factor_ring_bo;
+ tess_offchip_ring_bo = queue->tess_offchip_ring_bo;
+ }
+
if (scratch_bo != queue->scratch_bo ||
esgs_ring_bo != queue->esgs_ring_bo ||
- gsvs_ring_bo != queue->gsvs_ring_bo) {
+ gsvs_ring_bo != queue->gsvs_ring_bo ||
+ tess_factor_ring_bo != queue->tess_factor_ring_bo ||
+ tess_offchip_ring_bo != queue->tess_offchip_ring_bo || add_sample_positions) {
uint32_t size = 0;
- if (gsvs_ring_bo || esgs_ring_bo)
- size = 80; /* 2 dword + 2 padding + 4 dword * 4 */
+ if (gsvs_ring_bo || esgs_ring_bo ||
+ tess_factor_ring_bo || tess_offchip_ring_bo || add_sample_positions) {
+ size = 112; /* 2 dword + 2 padding + 4 dword * 6 */
+ if (add_sample_positions)
+ size += 256; /* 32+16+8+4+2+1 samples * 4 * 2 = 248 bytes, padded to 256. */
+ }
else if (scratch_bo)
size = 8; /* 2 dword */
if (gsvs_ring_bo)
queue->device->ws->cs_add_buffer(cs, gsvs_ring_bo, 8);
+ if (tess_factor_ring_bo)
+ queue->device->ws->cs_add_buffer(cs, tess_factor_ring_bo, 8);
+
+ if (tess_offchip_ring_bo)
+ queue->device->ws->cs_add_buffer(cs, tess_offchip_ring_bo, 8);
+
if (descriptor_bo)
queue->device->ws->cs_add_buffer(cs, descriptor_bo, 8);
map[1] = rsrc1;
}
- if (esgs_ring_bo || gsvs_ring_bo)
- fill_geom_rings(queue, map, esgs_ring_size, esgs_ring_bo, gsvs_ring_size, gsvs_ring_bo);
+ if (esgs_ring_bo || gsvs_ring_bo || tess_factor_ring_bo || tess_offchip_ring_bo ||
+ add_sample_positions)
+ fill_geom_tess_rings(queue, map, add_sample_positions,
+ esgs_ring_size, esgs_ring_bo,
+ gsvs_ring_size, gsvs_ring_bo,
+ tess_factor_ring_size, tess_factor_ring_bo,
+ tess_offchip_ring_size, tess_offchip_ring_bo);
queue->device->ws->buffer_unmap(descriptor_bo);
}
- if (esgs_ring_bo || gsvs_ring_bo) {
+ if (esgs_ring_bo || gsvs_ring_bo || tess_factor_ring_bo || tess_offchip_ring_bo) {
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
+ }
+ if (esgs_ring_bo || gsvs_ring_bo) {
if (queue->device->physical_device->rad_info.chip_class >= CIK) {
radeon_set_uconfig_reg_seq(cs, R_030900_VGT_ESGS_RING_SIZE, 2);
radeon_emit(cs, esgs_ring_size >> 8);
}
}
+ if (tess_factor_ring_bo) {
+ uint64_t tf_va = queue->device->ws->buffer_get_va(tess_factor_ring_bo);
+ if (queue->device->physical_device->rad_info.chip_class >= CIK) {
+ radeon_set_uconfig_reg(cs, R_030938_VGT_TF_RING_SIZE,
+ S_030938_SIZE(tess_factor_ring_size / 4));
+ radeon_set_uconfig_reg(cs, R_030940_VGT_TF_MEMORY_BASE,
+ tf_va >> 8);
+ radeon_set_uconfig_reg(cs, R_03093C_VGT_HS_OFFCHIP_PARAM, hs_offchip_param);
+ } else {
+ radeon_set_config_reg(cs, R_008988_VGT_TF_RING_SIZE,
+ S_008988_SIZE(tess_factor_ring_size / 4));
+ radeon_set_config_reg(cs, R_0089B8_VGT_TF_MEMORY_BASE,
+ tf_va >> 8);
+ radeon_set_config_reg(cs, R_0089B0_VGT_HS_OFFCHIP_PARAM,
+ hs_offchip_param);
+ }
+ }
+
if (descriptor_bo) {
uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
R_00B130_SPI_SHADER_USER_DATA_VS_0,
queue->gsvs_ring_size = gsvs_ring_size;
}
+ if (tess_factor_ring_bo != queue->tess_factor_ring_bo) {
+ queue->tess_factor_ring_bo = tess_factor_ring_bo;
+ }
+
+ if (tess_offchip_ring_bo != queue->tess_offchip_ring_bo) {
+ queue->tess_offchip_ring_bo = tess_offchip_ring_bo;
+ queue->has_tess_rings = true;
+ }
+
if (descriptor_bo != queue->descriptor_bo) {
if (queue->descriptor_bo)
queue->device->ws->buffer_destroy(queue->descriptor_bo);
queue->descriptor_bo = descriptor_bo;
}
+ if (add_sample_positions)
+ queue->has_sample_positions = true;
+
*initial_preamble_cs = queue->initial_preamble_cs;
*continue_preamble_cs = queue->continue_preamble_cs;
if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
queue->device->ws->buffer_destroy(esgs_ring_bo);
if (gsvs_ring_bo && gsvs_ring_bo != queue->gsvs_ring_bo)
queue->device->ws->buffer_destroy(gsvs_ring_bo);
+ if (tess_factor_ring_bo && tess_factor_ring_bo != queue->tess_factor_ring_bo)
+ queue->device->ws->buffer_destroy(tess_factor_ring_bo);
+ if (tess_offchip_ring_bo && tess_offchip_ring_bo != queue->tess_offchip_ring_bo)
+ queue->device->ws->buffer_destroy(tess_offchip_ring_bo);
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
struct radeon_winsys_cs *initial_preamble_cs = NULL, *continue_preamble_cs = NULL;
VkResult result;
bool fence_emitted = false;
+ bool tess_rings_needed = false;
+ bool sample_positions_needed = false;
/* Do this first so failing to allocate scratch buffers can't result in
* partially executed submissions. */
cmd_buffer->compute_scratch_size_needed);
esgs_ring_size = MAX2(esgs_ring_size, cmd_buffer->esgs_ring_size_needed);
gsvs_ring_size = MAX2(gsvs_ring_size, cmd_buffer->gsvs_ring_size_needed);
+ tess_rings_needed |= cmd_buffer->tess_rings_needed;
+ sample_positions_needed |= cmd_buffer->sample_positions_needed;
}
}
result = radv_get_preamble_cs(queue, scratch_size, compute_scratch_size,
- esgs_ring_size, gsvs_ring_size,
+ esgs_ring_size, gsvs_ring_size, tess_rings_needed,
+ sample_positions_needed,
&initial_preamble_cs, &continue_preamble_cs);
if (result != VK_SUCCESS)
return result;
for (uint32_t i = 0; i < submitCount; i++) {
struct radeon_winsys_cs **cs_array;
- bool has_flush = !submitCount;
- bool can_patch = !has_flush;
+ bool do_flush = !i || pSubmits[i].pWaitDstStageMask;
+ bool can_patch = !do_flush;
uint32_t advance;
if (!pSubmits[i].commandBufferCount) {
}
cs_array = malloc(sizeof(struct radeon_winsys_cs *) *
- (pSubmits[i].commandBufferCount + has_flush));
+ (pSubmits[i].commandBufferCount + do_flush));
- if(has_flush)
- cs_array[0] = queue->device->flush_cs[queue->queue_family_index];
+ if(do_flush)
+ cs_array[0] = pSubmits[i].waitSemaphoreCount ?
+ queue->device->flush_shader_cs[queue->queue_family_index] :
+ queue->device->flush_cs[queue->queue_family_index];
for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
pSubmits[i].pCommandBuffers[j]);
assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
- cs_array[j + has_flush] = cmd_buffer->cs;
+ cs_array[j + do_flush] = cmd_buffer->cs;
if ((cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT))
can_patch = false;
}
- for (uint32_t j = 0; j < pSubmits[i].commandBufferCount + has_flush; j += advance) {
+ for (uint32_t j = 0; j < pSubmits[i].commandBufferCount + do_flush; j += advance) {
advance = MIN2(max_cs_submission,
- pSubmits[i].commandBufferCount + has_flush - j);
+ pSubmits[i].commandBufferCount + do_flush - j);
bool b = j == 0;
- bool e = j + advance == pSubmits[i].commandBufferCount + has_flush;
+ bool e = j + advance == pSubmits[i].commandBufferCount + do_flush;
if (queue->device->trace_bo)
*queue->device->trace_id_ptr = 0;
pMemoryRequirements->memoryTypeBits = (1u << RADV_MEM_TYPE_COUNT) - 1;
- pMemoryRequirements->size = buffer->size;
- pMemoryRequirements->alignment = 16;
+ if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
+ pMemoryRequirements->alignment = 4096;
+ else
+ pMemoryRequirements->alignment = 16;
+
+ pMemoryRequirements->size = align64(buffer->size, pMemoryRequirements->alignment);
}
void radv_GetImageMemoryRequirements(
return VK_SUCCESS;
}
-VkResult radv_QueueBindSparse(
- VkQueue queue,
+
+static void
+radv_sparse_buffer_bind_memory(struct radv_device *device,
+ const VkSparseBufferMemoryBindInfo *bind)
+{
+ RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
+
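+ /* Bind each requested range of the buffer's virtual address space to
+ * the given memory, or unbind it when memory is VK_NULL_HANDLE. */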
+ for (uint32_t i = 0; i < bind->bindCount; ++i) {
+ struct radv_device_memory *mem = NULL;
+
+ if (bind->pBinds[i].memory != VK_NULL_HANDLE)
+ mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
+
+ device->ws->buffer_virtual_bind(buffer->bo,
+ bind->pBinds[i].resourceOffset,
+ bind->pBinds[i].size,
+ mem ? mem->bo : NULL,
+ bind->pBinds[i].memoryOffset);
+ }
+}
+
+static void
+radv_sparse_image_opaque_bind_memory(struct radv_device *device,
+ const VkSparseImageOpaqueMemoryBindInfo *bind)
+{
+ RADV_FROM_HANDLE(radv_image, image, bind->image);
+
+ for (uint32_t i = 0; i < bind->bindCount; ++i) {
+ struct radv_device_memory *mem = NULL;
+
+ if (bind->pBinds[i].memory != VK_NULL_HANDLE)
+ mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
+
+ device->ws->buffer_virtual_bind(image->bo,
+ bind->pBinds[i].resourceOffset,
+ bind->pBinds[i].size,
+ mem ? mem->bo : NULL,
+ bind->pBinds[i].memoryOffset);
+ }
+}
+
+VkResult radv_QueueBindSparse(
+ VkQueue _queue,
uint32_t bindInfoCount,
const VkBindSparseInfo* pBindInfo,
- VkFence fence)
+ VkFence _fence)
{
- stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
+ RADV_FROM_HANDLE(radv_fence, fence, _fence);
+ RADV_FROM_HANDLE(radv_queue, queue, _queue);
+ struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
+ bool fence_emitted = false;
+
+ for (uint32_t i = 0; i < bindInfoCount; ++i) {
+ for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; ++j) {
+ radv_sparse_buffer_bind_memory(queue->device,
+ pBindInfo[i].pBufferBinds + j);
+ }
+
+ for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; ++j) {
+ radv_sparse_image_opaque_bind_memory(queue->device,
+ pBindInfo[i].pImageOpaqueBinds + j);
+ }
+
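+ /* The binds themselves carry no GPU work, but semaphores still have to
+ * be waited on and signalled, so push an empty CS through the queue. */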
+ if (pBindInfo[i].waitSemaphoreCount || pBindInfo[i].signalSemaphoreCount) {
+ queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
+ &queue->device->empty_cs[queue->queue_family_index],
+ 1, NULL, NULL,
+ (struct radeon_winsys_sem **)pBindInfo[i].pWaitSemaphores,
+ pBindInfo[i].waitSemaphoreCount,
+ (struct radeon_winsys_sem **)pBindInfo[i].pSignalSemaphores,
+ pBindInfo[i].signalSemaphoreCount,
+ false, base_fence);
+ fence_emitted = true;
+ if (fence)
+ fence->submitted = true;
+ }
+ }
+
+ if (fence && !fence_emitted) {
+ fence->signalled = true;
+ }
+
+ return VK_SUCCESS;
}
VkResult radv_CreateFence(
if (!sem)
return VK_ERROR_OUT_OF_HOST_MEMORY;
- *pSemaphore = (VkSemaphore)sem;
+ *pSemaphore = radeon_winsys_sem_to_handle(sem);
return VK_SUCCESS;
}
const VkAllocationCallbacks* pAllocator)
{
RADV_FROM_HANDLE(radv_device, device, _device);
- struct radeon_winsys_sem *sem;
+ RADV_FROM_HANDLE(radeon_winsys_sem, sem, _semaphore);
if (!_semaphore)
return;
- sem = (struct radeon_winsys_sem *)_semaphore;
device->ws->destroy_sem(sem);
}
buffer->usage = pCreateInfo->usage;
buffer->bo = NULL;
buffer->offset = 0;
+ buffer->flags = pCreateInfo->flags;
+
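+ /* Sparse buffers get a purely virtual allocation up front; actual
+ * pages are bound later via vkQueueBindSparse. */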
+ if (pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
+ buffer->bo = device->ws->buffer_create(device->ws,
+ align64(buffer->size, 4096),
+ 4096, 0, RADEON_FLAG_VIRTUAL);
+ if (!buffer->bo) {
+ vk_free2(&device->alloc, pAllocator, buffer);
+ return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ }
+ }
*pBuffer = radv_buffer_to_handle(buffer);
if (!buffer)
return;
+ if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
+ device->ws->buffer_destroy(buffer->bo);
+
vk_free2(&device->alloc, pAllocator, buffer);
}
cb->cb_color_attrib = S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == VK_SWIZZLE_1) |
S_028C74_TILE_MODE_INDEX(tile_mode_index);
- if (iview->image->samples > 1) {
- unsigned log_samples = util_logbase2(iview->image->samples);
+ if (iview->image->info.samples > 1) {
+ unsigned log_samples = util_logbase2(iview->image->info.samples);
cb->cb_color_attrib |= S_028C74_NUM_SAMPLES(log_samples) |
S_028C74_NUM_FRAGMENTS(log_samples);
format != V_028C70_COLOR_24_8) |
S_028C70_NUMBER_TYPE(ntype) |
S_028C70_ENDIAN(endian);
- if (iview->image->samples > 1)
+ if (iview->image->info.samples > 1)
if (iview->image->fmask.size)
cb->cb_color_info |= S_028C70_COMPRESSION(1);
if (device->physical_device->rad_info.chip_class >= VI) {
unsigned max_uncompressed_block_size = 2;
- if (iview->image->samples > 1) {
+ if (iview->image->info.samples > 1) {
if (iview->image->surface.bpe == 1)
max_uncompressed_block_size = 0;
else if (iview->image->surface.bpe == 2)
unsigned format;
uint64_t va, s_offs, z_offs;
const struct radeon_surf_level *level_info = &iview->image->surface.level[level];
+ bool stencil_only = false;
memset(ds, 0, sizeof(*ds));
switch (iview->vk_format) {
case VK_FORMAT_D24_UNORM_S8_UINT:
S_028B78_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
ds->offset_scale = 1.0f;
break;
+ case VK_FORMAT_S8_UINT:
+ stencil_only = true;
+ level_info = &iview->image->surface.stencil_level[level];
+ break;
default:
break;
}
format = radv_translate_dbformat(iview->vk_format);
- if (format == V_028040_Z_INVALID) {
- fprintf(stderr, "Invalid DB format: %d, disabling DB.\n", iview->vk_format);
- }
va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
s_offs = z_offs = va;
ds->db_depth_info = S_02803C_ADDR5_SWIZZLE_MASK(1);
ds->db_z_info = S_028040_FORMAT(format) | S_028040_ZRANGE_PRECISION(1);
- if (iview->image->samples > 1)
- ds->db_z_info |= S_028040_NUM_SAMPLES(util_logbase2(iview->image->samples));
+ if (iview->image->info.samples > 1)
+ ds->db_z_info |= S_028040_NUM_SAMPLES(util_logbase2(iview->image->info.samples));
if (iview->image->surface.flags & RADEON_SURF_SBUFFER)
ds->db_stencil_info = S_028044_FORMAT(V_028044_STENCIL_8);
unsigned stencil_tile_mode = info->si_tile_mode_array[stencil_index];
unsigned macro_mode = info->cik_macrotile_mode_array[macro_index];
+ if (stencil_only)
+ tile_mode = stencil_tile_mode;
+
ds->db_depth_info |=
S_02803C_ARRAY_MODE(G_009910_ARRAY_MODE(tile_mode)) |
S_02803C_PIPE_CONFIG(G_009910_PIPE_CONFIG(tile_mode)) |
ds->db_stencil_info |= S_028044_TILE_MODE_INDEX(tile_mode_index);
}
- if (iview->image->htile.size && !level) {
- ds->db_z_info |= S_028040_TILE_SURFACE_ENABLE(1) |
- S_028040_ALLOW_EXPCLEAR(1);
-
- if (iview->image->surface.flags & RADEON_SURF_SBUFFER) {
- /* Workaround: For a not yet understood reason, the
- * combination of MSAA, fast stencil clear and stencil
- * decompress messes with subsequent stencil buffer
- * uses. Problem was reproduced on Verde, Bonaire,
- * Tonga, and Carrizo.
- *
- * Disabling EXPCLEAR works around the problem.
- *
- * Check piglit's arb_texture_multisample-stencil-clear
- * test if you want to try changing this.
- */
- if (iview->image->samples <= 1)
- ds->db_stencil_info |= S_028044_ALLOW_EXPCLEAR(1);
- } else
+ if (iview->image->surface.htile_size && !level) {
+ ds->db_z_info |= S_028040_TILE_SURFACE_ENABLE(1);
+
+ if (!(iview->image->surface.flags & RADEON_SURF_SBUFFER))
/* Use all of the htile_buffer for depth if there's no stencil. */
ds->db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
va = device->ws->buffer_get_va(iview->bo) + iview->image->offset +
- iview->image->htile.offset;
+ iview->image->htile_offset;
ds->db_htile_data_base = va >> 8;
ds->db_htile_surface = S_028ABC_FULL_CACHE(1);
} else {
S_008F38_XY_MAG_FILTER(radv_tex_filter(pCreateInfo->magFilter, max_aniso)) |
S_008F38_XY_MIN_FILTER(radv_tex_filter(pCreateInfo->minFilter, max_aniso)) |
S_008F38_MIP_FILTER(radv_tex_mipfilter(pCreateInfo->mipmapMode)) |
- S_008F38_MIP_POINT_PRECLAMP(1) |
+ S_008F38_MIP_POINT_PRECLAMP(0) |
S_008F38_DISABLE_LSB_CEIL(1) |
S_008F38_FILTER_PREC_FIX(1) |
S_008F38_ANISO_OVERRIDE(is_vi));