#include <unistd.h>
#include <xf86drm.h>
+#include "compiler/glsl_types.h"
#include "util/debug.h"
#include "util/disk_cache.h"
-#include "util/strtod.h"
#include "vk_format.h"
#include "vk_util.h"
-#include "drm/msm_drm.h"
+#include "drm-uapi/msm_drm.h"
static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
memset(uuid, 0, VK_UUID_SIZE);
}
+static VkResult
+tu_bo_init(struct tu_device *dev,
+ struct tu_bo *bo,
+ uint32_t gem_handle,
+ uint64_t size)
+{
+ uint64_t iova = tu_gem_info_iova(dev, gem_handle);
+ if (!iova)
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+ *bo = (struct tu_bo) {
+ .gem_handle = gem_handle,
+ .size = size,
+ .iova = iova,
+ };
+
+ return VK_SUCCESS;
+}
+
VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
*/
uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
if (!gem_handle)
- goto fail_new;
+ return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
- /* Calling DRM_MSM_GEM_INFO forces the kernel to allocate backing pages. We
- * want immediate backing pages because vkAllocateMemory and friends must
- * not lazily fail.
- *
- * TODO(chadv): Must we really call DRM_MSM_GEM_INFO to acquire backing
- * pages? I infer so from reading comments in msm_bo.c:bo_allocate(), but
- * maybe I misunderstand.
- */
+ VkResult result = tu_bo_init(dev, bo, gem_handle, size);
+ if (result != VK_SUCCESS) {
+ tu_gem_close(dev, gem_handle);
+ return vk_error(dev->instance, result);
+ }
- /* TODO: Do we need 'offset' if we have 'iova'? */
- uint64_t offset = tu_gem_info_offset(dev, gem_handle);
- if (!offset)
- goto fail_info;
+ return VK_SUCCESS;
+}
- uint64_t iova = tu_gem_info_iova(dev, gem_handle);
- if (!iova)
- goto fail_info;
+VkResult
+tu_bo_init_dmabuf(struct tu_device *dev,
+ struct tu_bo *bo,
+ uint64_t size,
+ int fd)
+{
+ uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
+ if (!gem_handle)
+ return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
- *bo = (struct tu_bo) {
- .gem_handle = gem_handle,
- .size = size,
- .offset = offset,
- .iova = iova,
- };
+ VkResult result = tu_bo_init(dev, bo, gem_handle, size);
+ if (result != VK_SUCCESS) {
+ tu_gem_close(dev, gem_handle);
+ return vk_error(dev->instance, result);
+ }
return VK_SUCCESS;
+}
-fail_info:
- tu_gem_close(dev, bo->gem_handle);
-fail_new:
- return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+int
+tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
+{
+ return tu_gem_export_dmabuf(dev, bo->gem_handle);
}
VkResult
if (bo->map)
return VK_SUCCESS;
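+ /* The mmap offset is the fake offset the msm kernel driver assigns to
+ * the GEM object; it is only needed for CPU mappings, so query it
+ * lazily here rather than at allocation time.
+ */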
+ uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
+ if (!offset)
+ return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+
/* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
- dev->physical_device->local_fd, bo->offset);
+ dev->physical_device->local_fd, offset);
if (map == MAP_FAILED)
return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
if (strcmp(version->name, "msm")) {
drmFreeVersion(version);
- if (master_fd != -1)
- close(master_fd);
close(fd);
return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
"device %s does not use the msm kernel driver", path);
goto fail;
}
+ if (tu_drm_get_gmem_base(device, &device->gmem_base)) {
+ if (instance->debug_flags & TU_DEBUG_STARTUP)
+ tu_logi("Could not query the GMEM size");
+ result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
+ "could not get GMEM size");
+ goto fail;
+ }
+
memset(device->name, 0, sizeof(device->name));
sprintf(device->name, "FD%d", device->gpu_id);
switch (device->gpu_id) {
- case 530:
+ case 618:
+ device->magic.RB_UNKNOWN_8E04_blit = 0x00100000;
+ device->ccu_offset_gmem = 0x7c000; /* 0x7e000 in some cases? */
+ device->ccu_offset_bypass = 0x10000;
+ device->magic.PC_UNKNOWN_9805 = 0x0;
+ device->magic.SP_UNKNOWN_A0F8 = 0x0;
+ break;
case 630:
+ case 640:
+ device->magic.RB_UNKNOWN_8E04_blit = 0x01000000;
+ device->ccu_offset_gmem = 0xf8000;
+ device->ccu_offset_bypass = 0x20000;
+ device->magic.PC_UNKNOWN_9805 = 0x1;
+ device->magic.SP_UNKNOWN_A0F8 = 0x1;
break;
default:
result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
goto fail;
}
+ result = tu_wsi_init(device);
+ if (result != VK_SUCCESS) {
+ vk_error(instance, result);
+ goto fail;
+ }
+
return VK_SUCCESS;
fail:
static void
tu_physical_device_finish(struct tu_physical_device *device)
{
+ tu_wsi_finish(device);
+
disk_cache_destroy(device->disk_cache);
close(device->local_fd);
if (device->master_fd != -1)
close(device->master_fd);
}
-static void *
+static VKAPI_ATTR void *
default_alloc_func(void *pUserData,
size_t size,
size_t align,
return malloc(size);
}
-static void *
+static VKAPI_ATTR void *
default_realloc_func(void *pUserData,
void *pOriginal,
size_t size,
return realloc(pOriginal, size);
}
-static void
+static VKAPI_ATTR void
default_free_func(void *pUserData, void *pMemory)
{
free(pMemory);
};
static const struct debug_control tu_debug_options[] = {
- { "startup", TU_DEBUG_STARTUP }, { NULL, 0 }
+ { "startup", TU_DEBUG_STARTUP },
+ { "nir", TU_DEBUG_NIR },
+ { "ir3", TU_DEBUG_IR3 },
+ { "nobin", TU_DEBUG_NOBIN },
+ { "sysmem", TU_DEBUG_SYSMEM },
+ { "forcebin", TU_DEBUG_FORCEBIN },
+ { NULL, 0 }
};
const char *
return vk_error(instance, result);
}
- _mesa_locale_init();
+ glsl_type_singleton_init_or_ref();
VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
VG(VALGRIND_DESTROY_MEMPOOL(instance));
- _mesa_locale_fini();
+ glsl_type_singleton_decref();
vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
*pFeatures = (VkPhysicalDeviceFeatures) {
.robustBufferAccess = false,
- .fullDrawIndexUint32 = false,
- .imageCubeArray = false,
- .independentBlend = false,
- .geometryShader = false,
+ .fullDrawIndexUint32 = true,
+ .imageCubeArray = true,
+ .independentBlend = true,
+ .geometryShader = true,
.tessellationShader = false,
- .sampleRateShading = false,
- .dualSrcBlend = false,
- .logicOp = false,
+ .sampleRateShading = true,
+ .dualSrcBlend = true,
+ .logicOp = true,
.multiDrawIndirect = false,
.drawIndirectFirstInstance = false,
- .depthClamp = false,
+ .depthClamp = true,
.depthBiasClamp = false,
.fillModeNonSolid = false,
.depthBounds = false,
.largePoints = false,
.alphaToOne = false,
.multiViewport = false,
- .samplerAnisotropy = false,
- .textureCompressionETC2 = false,
- .textureCompressionASTC_LDR = false,
- .textureCompressionBC = false,
- .occlusionQueryPrecise = false,
+ .samplerAnisotropy = true,
+ .textureCompressionETC2 = true,
+ .textureCompressionASTC_LDR = true,
+ .textureCompressionBC = true,
+ .occlusionQueryPrecise = true,
.pipelineStatisticsQuery = false,
.vertexPipelineStoresAndAtomics = false,
.fragmentStoresAndAtomics = false,
void
tu_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
- VkPhysicalDeviceFeatures2KHR *pFeatures)
+ VkPhysicalDeviceFeatures2 *pFeatures)
{
vk_foreach_struct(ext, pFeatures->pNext)
{
switch (ext->sType) {
- case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
- VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *) ext;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES: {
+ VkPhysicalDeviceVariablePointersFeatures *features = (void *) ext;
features->variablePointersStorageBuffer = false;
features->variablePointers = false;
break;
}
- case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
- VkPhysicalDeviceMultiviewFeaturesKHR *features =
- (VkPhysicalDeviceMultiviewFeaturesKHR *) ext;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES: {
+ VkPhysicalDeviceMultiviewFeatures *features =
+ (VkPhysicalDeviceMultiviewFeatures *) ext;
features->multiview = false;
features->multiviewGeometryShader = false;
features->multiviewTessellationShader = false;
break;
}
- case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
- VkPhysicalDeviceShaderDrawParameterFeatures *features =
- (VkPhysicalDeviceShaderDrawParameterFeatures *) ext;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES: {
+ VkPhysicalDeviceShaderDrawParametersFeatures *features =
+ (VkPhysicalDeviceShaderDrawParametersFeatures *) ext;
features->shaderDrawParameters = false;
break;
}
features->inheritedConditionalRendering = false;
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
+ VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
+ (VkPhysicalDeviceTransformFeedbackFeaturesEXT *) ext;
+ features->transformFeedback = true;
+ features->geometryStreams = false;
+ break;
+ }
default:
break;
}
VkPhysicalDeviceProperties *pProperties)
{
TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
- VkSampleCountFlags sample_counts = 0xf;
-
- /* make sure that the entire descriptor set is addressable with a signed
- * 32-bit int. So the sum of all limits scaled by descriptor size has to
- * be at most 2 GiB. the combined image & samples object count as one of
- * both. This limit is for the pipeline layout, not for the set layout, but
- * there is no set limit, so we just set a pipeline limit. I don't think
- * any app is going to hit this soon. */
- size_t max_descriptor_set_size =
- ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
- (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
- 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
- 32 /* sampler, largest when combined with image */ +
- 64 /* sampled image */ + 64 /* storage image */);
+ VkSampleCountFlags sample_counts =
+ VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
+
+ /* I have no idea what the maximum size is, but the hardware supports very
+ * large numbers of descriptors (at least 2^16). This limit is based on
+ * CP_LOAD_STATE6, which has a 28-bit field for the DWORD offset, so that
+ * we don't have to think about what to do if that overflows, but really
+ * nothing is likely to get close to this.
+ */
+ const size_t max_descriptor_set_size = (1 << 28) / A6XX_TEX_CONST_DWORDS;
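+ /* For reference: with A6XX_TEX_CONST_DWORDS == 16, this comes out to
+ * 2^28 / 16 = 2^24, i.e. ~16.7M descriptors per set, far more than any
+ * application should ever need.
+ */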
VkPhysicalDeviceLimits limits = {
.maxImageDimension1D = (1 << 14),
.maxImageDimensionCube = (1 << 14),
.maxImageArrayLayers = (1 << 11),
.maxTexelBufferElements = 128 * 1024 * 1024,
- .maxUniformBufferRange = UINT32_MAX,
- .maxStorageBufferRange = UINT32_MAX,
+ .maxUniformBufferRange = MAX_UNIFORM_BUFFER_RANGE,
+ .maxStorageBufferRange = MAX_STORAGE_BUFFER_RANGE,
.maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
.maxMemoryAllocationCount = UINT32_MAX,
.maxSamplerAllocationCount = 64 * 1024,
.maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
.maxPerStageDescriptorSampledImages = max_descriptor_set_size,
.maxPerStageDescriptorStorageImages = max_descriptor_set_size,
- .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
+ .maxPerStageDescriptorInputAttachments = MAX_RTS,
.maxPerStageResources = max_descriptor_set_size,
.maxDescriptorSetSamplers = max_descriptor_set_size,
.maxDescriptorSetUniformBuffers = max_descriptor_set_size,
.maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
.maxDescriptorSetSampledImages = max_descriptor_set_size,
.maxDescriptorSetStorageImages = max_descriptor_set_size,
- .maxDescriptorSetInputAttachments = max_descriptor_set_size,
+ .maxDescriptorSetInputAttachments = MAX_RTS,
.maxVertexInputAttributes = 32,
.maxVertexInputBindings = 32,
- .maxVertexInputAttributeOffset = 2047,
+ .maxVertexInputAttributeOffset = 4095,
.maxVertexInputBindingStride = 2048,
.maxVertexOutputComponents = 128,
.maxTessellationGenerationLevel = 64,
.maxTessellationControlTotalOutputComponents = 4096,
.maxTessellationEvaluationInputComponents = 128,
.maxTessellationEvaluationOutputComponents = 128,
- .maxGeometryShaderInvocations = 127,
+ .maxGeometryShaderInvocations = 32,
.maxGeometryInputComponents = 64,
.maxGeometryOutputComponents = 128,
.maxGeometryOutputVertices = 256,
.maxGeometryTotalOutputComponents = 1024,
- .maxFragmentInputComponents = 128,
+ .maxFragmentInputComponents = 124,
.maxFragmentOutputAttachments = 8,
.maxFragmentDualSrcAttachments = 1,
.maxFragmentCombinedOutputResources = 8,
.maxComputeWorkGroupCount = { 65535, 65535, 65535 },
.maxComputeWorkGroupInvocations = 2048,
.maxComputeWorkGroupSize = { 2048, 2048, 2048 },
- .subPixelPrecisionBits = 4 /* FIXME */,
+ .subPixelPrecisionBits = 8,
.subTexelPrecisionBits = 4 /* FIXME */,
.mipmapPrecisionBits = 4 /* FIXME */,
.maxDrawIndexedIndexValue = UINT32_MAX,
.viewportBoundsRange = { INT16_MIN, INT16_MAX },
.viewportSubPixelBits = 8,
.minMemoryMapAlignment = 4096, /* A page */
- .minTexelBufferOffsetAlignment = 1,
- .minUniformBufferOffsetAlignment = 4,
- .minStorageBufferOffsetAlignment = 4,
+ .minTexelBufferOffsetAlignment = 64,
+ .minUniformBufferOffsetAlignment = 64,
+ .minStorageBufferOffsetAlignment = 64,
.minTexelOffset = -32,
.maxTexelOffset = 31,
.minTexelGatherOffset = -32,
.storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
.maxSampleMaskWords = 1,
.timestampComputeAndGraphics = true,
- .timestampPeriod = 1,
+ .timestampPeriod = 1000000000.0 / 19200000.0, /* CP_ALWAYS_ON_COUNTER runs at a fixed 19.2 MHz */
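+ /* i.e. one timestamp tick every 1e9 / 19.2e6 = ~52.083 ns */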
.maxClipDistances = 8,
.maxCullDistances = 8,
.maxCombinedClipAndCullDistances = 8,
void
tu_GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice,
- VkPhysicalDeviceProperties2KHR *pProperties)
+ VkPhysicalDeviceProperties2 *pProperties)
{
TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
tu_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
break;
}
- case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
- VkPhysicalDeviceIDPropertiesKHR *properties =
- (VkPhysicalDeviceIDPropertiesKHR *) ext;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES: {
+ VkPhysicalDeviceIDProperties *properties =
+ (VkPhysicalDeviceIDProperties *) ext;
memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
properties->deviceLUIDValid = false;
break;
}
- case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
- VkPhysicalDeviceMultiviewPropertiesKHR *properties =
- (VkPhysicalDeviceMultiviewPropertiesKHR *) ext;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES: {
+ VkPhysicalDeviceMultiviewProperties *properties =
+ (VkPhysicalDeviceMultiviewProperties *) ext;
properties->maxMultiviewViewCount = MAX_VIEWS;
properties->maxMultiviewInstanceIndex = INT_MAX;
break;
}
- case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
- VkPhysicalDevicePointClippingPropertiesKHR *properties =
- (VkPhysicalDevicePointClippingPropertiesKHR *) ext;
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES: {
+ VkPhysicalDevicePointClippingProperties *properties =
+ (VkPhysicalDevicePointClippingProperties *) ext;
properties->pointClippingBehavior =
- VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
+ VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
break;
}
case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
+ VkPhysicalDeviceTransformFeedbackPropertiesEXT *properties =
+ (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;
+
+ properties->maxTransformFeedbackStreams = IR3_MAX_SO_STREAMS;
+ properties->maxTransformFeedbackBuffers = IR3_MAX_SO_BUFFERS;
+ properties->maxTransformFeedbackBufferSize = UINT32_MAX;
+ properties->maxTransformFeedbackStreamDataSize = 512;
+ properties->maxTransformFeedbackBufferDataSize = 512;
+ properties->maxTransformFeedbackBufferDataStride = 512;
+ properties->transformFeedbackQueries = true;
+ properties->transformFeedbackStreamsLinesTriangles = false;
+ properties->transformFeedbackRasterizationStreamSelect = false;
+ properties->transformFeedbackDraw = true;
+ break;
+ }
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT: {
+ VkPhysicalDeviceSampleLocationsPropertiesEXT *properties =
+ (VkPhysicalDeviceSampleLocationsPropertiesEXT *)ext;
+ properties->sampleLocationSampleCounts = 0;
+ if (pdevice->supported_extensions.EXT_sample_locations) {
+ properties->sampleLocationSampleCounts =
+ VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT;
+ }
+ properties->maxSampleLocationGridSize = (VkExtent2D) { 1, 1 };
+ properties->sampleLocationCoordinateRange[0] = 0.0f;
+ properties->sampleLocationCoordinateRange[1] = 0.9375f;
+ properties->sampleLocationSubPixelBits = 4;
+ properties->variableSampleLocations = true;
+ break;
+ }
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES: {
+ VkPhysicalDeviceSamplerFilterMinmaxProperties *properties =
+ (VkPhysicalDeviceSamplerFilterMinmaxProperties *)ext;
+ properties->filterMinmaxImageComponentMapping = true;
+ properties->filterMinmaxSingleComponentFormats = true;
+ break;
+ }
+
default:
break;
}
.queueFlags =
VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT,
.queueCount = 1,
- .timestampValidBits = 64,
- .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
+ .timestampValidBits = 48,
+ .minImageTransferGranularity = { 1, 1, 1 },
};
void
tu_GetPhysicalDeviceQueueFamilyProperties2(
VkPhysicalDevice physicalDevice,
uint32_t *pQueueFamilyPropertyCount,
- VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
+ VkQueueFamilyProperties2 *pQueueFamilyProperties)
{
VK_OUTARRAY_MAKE(out, pQueueFamilyProperties, pQueueFamilyPropertyCount);
void
tu_GetPhysicalDeviceMemoryProperties2(
VkPhysicalDevice physicalDevice,
- VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
+ VkPhysicalDeviceMemoryProperties2 *pMemoryProperties)
{
return tu_GetPhysicalDeviceMemoryProperties(
physicalDevice, &pMemoryProperties->memoryProperties);
if (ret)
return VK_ERROR_INITIALIZATION_FAILED;
- queue->submit_fence_fd = -1;
+ tu_fence_init(&queue->submit_fence, false);
return VK_SUCCESS;
}
static void
tu_queue_finish(struct tu_queue *queue)
{
- if (queue->submit_fence_fd >= 0) {
- close(queue->submit_fence_fd);
- }
+ tu_fence_finish(&queue->submit_fence);
tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
}
return -1;
}
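+/* Border color lookup table, in the layout the a6xx sampler hardware
+ * expects: one 128-byte entry per VkBorderColor value, mirroring
+ * freedreno's fd6 border color layout. Fields a given texture format
+ * does not consume appear to be ignored by the hardware.
+ */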
+static const struct PACKED bcolor_entry {
+ uint32_t fp32[4];
+ uint16_t ui16[4];
+ int16_t si16[4];
+ uint16_t fp16[4];
+ uint16_t rgb565;
+ uint16_t rgb5a1;
+ uint16_t rgba4;
+ uint8_t __pad0[2];
+ uint8_t ui8[4];
+ int8_t si8[4];
+ uint32_t rgb10a2;
+ uint32_t z24; /* also s8? */
+ uint16_t srgb[4]; /* appears to duplicate fp16[], but clamped, used for srgb */
+ uint8_t __pad1[56];
+} border_color[] = {
+ [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = {},
+ [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = {},
+ [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = {
+ .fp32[3] = 0x3f800000,
+ .ui16[3] = 0xffff,
+ .si16[3] = 0x7fff,
+ .fp16[3] = 0x3c00,
+ .rgb5a1 = 0x8000,
+ .rgba4 = 0xf000,
+ .ui8[3] = 0xff,
+ .si8[3] = 0x7f,
+ .rgb10a2 = 0xc0000000,
+ .srgb[3] = 0x3c00,
+ },
+ [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = {
+ .fp32[3] = 1,
+ .fp16[3] = 1,
+ },
+ [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = {
+ .fp32[0 ... 3] = 0x3f800000,
+ .ui16[0 ... 3] = 0xffff,
+ .si16[0 ... 3] = 0x7fff,
+ .fp16[0 ... 3] = 0x3c00,
+ .rgb565 = 0xffff,
+ .rgb5a1 = 0xffff,
+ .rgba4 = 0xffff,
+ .ui8[0 ... 3] = 0xff,
+ .si8[0 ... 3] = 0x7f,
+ .rgb10a2 = 0xffffffff,
+ .z24 = 0xffffff,
+ .srgb[0 ... 3] = 0x3c00,
+ },
+ [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = {
+ .fp32[0 ... 3] = 1,
+ .fp16[0 ... 3] = 1,
+ },
+};
+
+
VkResult
tu_CreateDevice(VkPhysicalDevice physicalDevice,
const VkDeviceCreateInfo *pCreateInfo,
8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!device->queues[qfi]) {
result = VK_ERROR_OUT_OF_HOST_MEMORY;
- goto fail;
+ goto fail_queues;
}
memset(device->queues[qfi], 0,
result = tu_queue_init(device, &device->queues[qfi][q], qfi, q,
queue_create->flags);
if (result != VK_SUCCESS)
- goto fail;
+ goto fail_queues;
}
}
+ device->compiler = ir3_compiler_create(NULL, physical_device->gpu_id);
+ if (!device->compiler) {
+ result = VK_ERROR_INITIALIZATION_FAILED;
+ goto fail_queues;
+ }
+
+#define VSC_DATA_SIZE(pitch) ((pitch) * 32 + 0x100) /* extra size to store VSC_SIZE */
+#define VSC_DATA2_SIZE(pitch) ((pitch) * 32)
+
+ device->vsc_data_pitch = 0x440 * 4;
+ device->vsc_data2_pitch = 0x1040 * 4;
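+ /* For reference, with these pitches the BOs below come out to
+ * VSC_DATA_SIZE(0x1100) = 0x1100 * 32 + 0x100 = 0x22100 bytes and
+ * VSC_DATA2_SIZE(0x4100) = 0x4100 * 32 = 0x82000 bytes, presumably one
+ * pitch-sized slice per VSC pipe (the hardware has 32 of them).
+ */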
+
+ result = tu_bo_init_new(device, &device->vsc_data, VSC_DATA_SIZE(device->vsc_data_pitch));
+ if (result != VK_SUCCESS)
+ goto fail_vsc_data;
+
+ result = tu_bo_init_new(device, &device->vsc_data2, VSC_DATA2_SIZE(device->vsc_data2_pitch));
+ if (result != VK_SUCCESS)
+ goto fail_vsc_data2;
+
+ STATIC_ASSERT(sizeof(struct bcolor_entry) == 128);
+ result = tu_bo_init_new(device, &device->border_color, sizeof(border_color));
+ if (result != VK_SUCCESS)
+ goto fail_border_color;
+
+ result = tu_bo_map(device, &device->border_color);
+ if (result != VK_SUCCESS)
+ goto fail_border_color_map;
+
+ memcpy(device->border_color.map, border_color, sizeof(border_color));
+
VkPipelineCacheCreateInfo ci;
ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
ci.pNext = NULL;
result =
tu_CreatePipelineCache(tu_device_to_handle(device), &ci, NULL, &pc);
if (result != VK_SUCCESS)
- goto fail;
+ goto fail_pipeline_cache;
device->mem_cache = tu_pipeline_cache_from_handle(pc);
*pDevice = tu_device_to_handle(device);
return VK_SUCCESS;
-fail:
+fail_pipeline_cache:
+fail_border_color_map:
+ tu_bo_finish(device, &device->border_color);
+
+fail_border_color:
+ tu_bo_finish(device, &device->vsc_data2);
+
+fail_vsc_data2:
+ tu_bo_finish(device, &device->vsc_data);
+
+fail_vsc_data:
+ ralloc_free(device->compiler);
+
+fail_queues:
for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
for (unsigned q = 0; q < device->queue_count[i]; q++)
tu_queue_finish(&device->queues[i][q]);
if (!device)
return;
+ tu_bo_finish(device, &device->vsc_data);
+ tu_bo_finish(device, &device->vsc_data2);
+ tu_bo_finish(device, &device->border_color);
+
for (unsigned i = 0; i < TU_MAX_QUEUE_FAMILIES; i++) {
for (unsigned q = 0; q < device->queue_count[i]; q++)
tu_queue_finish(&device->queues[i][q]);
vk_free(&device->alloc, device->queues[i]);
}
+ /* the compiler does not use pAllocator */
+ ralloc_free(device->compiler);
+
VkPipelineCache pc = tu_pipeline_cache_to_handle(device->mem_cache);
tu_DestroyPipelineCache(tu_device_to_handle(device), pc, NULL);
tu_bo_list_init(&bo_list);
uint32_t entry_count = 0;
- for(uint32_t j = 0; j < submit->commandBufferCount; ++j) {
+ for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
entry_count += cmdbuf->cs.entry_count;
}
struct drm_msm_gem_submit_cmd cmds[entry_count];
uint32_t entry_idx = 0;
- for(uint32_t j = 0; j < submit->commandBufferCount; ++j) {
+ for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
- struct tu_cmd_stream *stream = &cmdbuf->cs;
- for (unsigned i = 0; i < stream->entry_count; ++i, ++entry_idx) {
+ struct tu_cs *cs = &cmdbuf->cs;
+ for (unsigned i = 0; i < cs->entry_count; ++i, ++entry_idx) {
cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
- cmds[entry_idx].submit_idx = tu_bo_list_add(&bo_list, stream->entries[i].bo);
- cmds[entry_idx].submit_offset = stream->entries[i].offset;
- cmds[entry_idx].size = stream->entries[i].size;
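+ /* The GPU only reads the command stream BOs; MSM_SUBMIT_BO_DUMP
+ * additionally asks the kernel to include them in a crash dump if
+ * the submit faults.
+ */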
+ cmds[entry_idx].submit_idx =
+ tu_bo_list_add(&bo_list, cs->entries[i].bo,
+ MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
+ cmds[entry_idx].submit_offset = cs->entries[i].offset;
+ cmds[entry_idx].size = cs->entries[i].size;
cmds[entry_idx].pad = 0;
cmds[entry_idx].nr_relocs = 0;
cmds[entry_idx].relocs = 0;
-
}
- }
- struct drm_msm_gem_submit_bo bos[bo_list.count];
- for (unsigned i = 0; i < bo_list.count; ++i) {
- bos[i].flags = MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE;
- bos[i].handle = bo_list.handles[i];
- bos[i].presumed = 0;
+ tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
}
uint32_t flags = MSM_PIPE_3D0;
struct drm_msm_gem_submit req = {
.flags = flags,
.queueid = queue->msm_queue_id,
- .bos = (uint64_t)(uintptr_t)bos,
+ .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
.nr_bos = bo_list.count,
.cmds = (uint64_t)(uintptr_t)cmds,
.nr_cmds = entry_count,
if (last_submit) {
/* no need to merge fences as queue execution is serialized */
- if (queue->submit_fence_fd >= 0) {
- close(queue->submit_fence_fd);
- }
- queue->submit_fence_fd = req.fence_fd;
+ tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
}
}
+
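+ /* tu_fence_copy hands the pending state over by dup'ing the submit
+ * fence's fd into the user's fence, so waiting on that fence waits for
+ * all work submitted to this queue so far.
+ */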
+ if (_fence != VK_NULL_HANDLE) {
+ TU_FROM_HANDLE(tu_fence, fence, _fence);
+ tu_fence_copy(fence, &queue->submit_fence);
+ }
+
return VK_SUCCESS;
}
{
TU_FROM_HANDLE(tu_queue, queue, _queue);
- if (queue->submit_fence_fd >= 0) {
- int ret = sync_wait(queue->submit_fence_fd, -1);
- if (ret)
- tu_loge("sync_wait on fence fd %d failed", queue->submit_fence_fd);
-
- close(queue->submit_fence_fd);
- queue->submit_fence_fd = -1;
- }
+ tu_fence_wait_idle(&queue->submit_fence);
return VK_SUCCESS;
}
return VK_SUCCESS;
}
+VkResult
+tu_ImportSemaphoreFdKHR(VkDevice _device,
+ const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
+{
+ tu_stub();
+
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_GetSemaphoreFdKHR(VkDevice _device,
+ const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
+ int *pFd)
+{
+ tu_stub();
+
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_ImportFenceFdKHR(VkDevice _device,
+ const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
+{
+ tu_stub();
+
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_GetFenceFdKHR(VkDevice _device,
+ const VkFenceGetFdInfoKHR *pGetFdInfo,
+ int *pFd)
+{
+ tu_stub();
+
+ return VK_SUCCESS;
+}
+
VkResult
tu_EnumerateInstanceExtensionProperties(const char *pLayerName,
uint32_t *pPropertyCount,
if (mem == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
- result = tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
+ const VkImportMemoryFdInfoKHR *fd_info =
+ vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
+ if (fd_info && !fd_info->handleType)
+ fd_info = NULL;
+
+ if (fd_info) {
+ assert(fd_info->handleType ==
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
+ fd_info->handleType ==
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
+
+ /*
+ * TODO Importing the same fd twice gives us the same handle without
+ * reference counting. We need to maintain a per-instance handle-to-bo
+ * table and add reference count to tu_bo.
+ */
+ result = tu_bo_init_dmabuf(device, &mem->bo,
+ pAllocateInfo->allocationSize, fd_info->fd);
+ if (result == VK_SUCCESS) {
+ /* take ownership and close the fd */
+ close(fd_info->fd);
+ }
+ } else {
+ result =
+ tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
+ }
+
if (result != VK_SUCCESS) {
vk_free2(&device->alloc, pAllocator, mem);
return result;
TU_FROM_HANDLE(tu_buffer, buffer, _buffer);
pMemoryRequirements->memoryTypeBits = 1;
- pMemoryRequirements->alignment = 16;
+ pMemoryRequirements->alignment = 64;
pMemoryRequirements->size =
align64(buffer->size, pMemoryRequirements->alignment);
}
void
tu_GetBufferMemoryRequirements2(
VkDevice device,
- const VkBufferMemoryRequirementsInfo2KHR *pInfo,
- VkMemoryRequirements2KHR *pMemoryRequirements)
+ const VkBufferMemoryRequirementsInfo2 *pInfo,
+ VkMemoryRequirements2 *pMemoryRequirements)
{
tu_GetBufferMemoryRequirements(device, pInfo->buffer,
&pMemoryRequirements->memoryRequirements);
TU_FROM_HANDLE(tu_image, image, _image);
pMemoryRequirements->memoryTypeBits = 1;
- pMemoryRequirements->size = image->size;
- pMemoryRequirements->alignment = image->alignment;
+ pMemoryRequirements->size = image->layout.size;
+ pMemoryRequirements->alignment = image->layout.base_align;
}
void
tu_GetImageMemoryRequirements2(VkDevice device,
- const VkImageMemoryRequirementsInfo2KHR *pInfo,
- VkMemoryRequirements2KHR *pMemoryRequirements)
+ const VkImageMemoryRequirementsInfo2 *pInfo,
+ VkMemoryRequirements2 *pMemoryRequirements)
{
tu_GetImageMemoryRequirements(device, pInfo->image,
&pMemoryRequirements->memoryRequirements);
void
tu_GetImageSparseMemoryRequirements2(
VkDevice device,
- const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
+ const VkImageSparseMemoryRequirementsInfo2 *pInfo,
uint32_t *pSparseMemoryRequirementCount,
- VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements)
+ VkSparseImageMemoryRequirements2 *pSparseMemoryRequirements)
{
tu_stub();
}
VkResult
tu_BindBufferMemory2(VkDevice device,
uint32_t bindInfoCount,
- const VkBindBufferMemoryInfoKHR *pBindInfos)
+ const VkBindBufferMemoryInfo *pBindInfos)
{
+ for (uint32_t i = 0; i < bindInfoCount; ++i) {
+ TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
+ TU_FROM_HANDLE(tu_buffer, buffer, pBindInfos[i].buffer);
+
+ if (mem) {
+ buffer->bo = &mem->bo;
+ buffer->bo_offset = pBindInfos[i].memoryOffset;
+ } else {
+ buffer->bo = NULL;
+ }
+ }
return VK_SUCCESS;
}
VkDeviceMemory memory,
VkDeviceSize memoryOffset)
{
- const VkBindBufferMemoryInfoKHR info = {
- .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
+ const VkBindBufferMemoryInfo info = {
+ .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
.buffer = buffer,
.memory = memory,
.memoryOffset = memoryOffset
VkResult
tu_BindImageMemory2(VkDevice device,
uint32_t bindInfoCount,
- const VkBindImageMemoryInfoKHR *pBindInfos)
-{
+ const VkBindImageMemoryInfo *pBindInfos)
+{
+ for (uint32_t i = 0; i < bindInfoCount; ++i) {
+ TU_FROM_HANDLE(tu_image, image, pBindInfos[i].image);
+ TU_FROM_HANDLE(tu_device_memory, mem, pBindInfos[i].memory);
+
+ if (mem) {
+ image->bo = &mem->bo;
+ image->bo_offset = pBindInfos[i].memoryOffset;
+ } else {
+ image->bo = NULL;
+ image->bo_offset = 0;
+ }
+ }
+
return VK_SUCCESS;
}
VkDeviceMemory memory,
VkDeviceSize memoryOffset)
{
- const VkBindImageMemoryInfoKHR info = {
- .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
+ const VkBindImageMemoryInfo info = {
+ .sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
.image = image,
.memory = memory,
.memoryOffset = memoryOffset
return VK_SUCCESS;
}
-VkResult
-tu_CreateFence(VkDevice _device,
- const VkFenceCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkFence *pFence)
-{
- TU_FROM_HANDLE(tu_device, device, _device);
-
- struct tu_fence *fence =
- vk_alloc2(&device->alloc, pAllocator, sizeof(*fence), 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-
- if (!fence)
- return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
-
- *pFence = tu_fence_to_handle(fence);
-
- return VK_SUCCESS;
-}
-
-void
-tu_DestroyFence(VkDevice _device,
- VkFence _fence,
- const VkAllocationCallbacks *pAllocator)
-{
- TU_FROM_HANDLE(tu_device, device, _device);
- TU_FROM_HANDLE(tu_fence, fence, _fence);
-
- if (!fence)
- return;
-
- vk_free2(&device->alloc, pAllocator, fence);
-}
-
-VkResult
-tu_WaitForFences(VkDevice _device,
- uint32_t fenceCount,
- const VkFence *pFences,
- VkBool32 waitAll,
- uint64_t timeout)
-{
- return VK_SUCCESS;
-}
-
-VkResult
-tu_ResetFences(VkDevice _device, uint32_t fenceCount, const VkFence *pFences)
-{
- return VK_SUCCESS;
-}
-
-VkResult
-tu_GetFenceStatus(VkDevice _device, VkFence _fence)
-{
- return VK_SUCCESS;
-}
-
// Queue semaphore functions
VkResult
if (!event)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
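+ /* Back the event with a page-sized BO of its own, so its state can be
+ * read and written both from the CPU (vkSet/Reset/GetEventStatus) and
+ * by the GPU when the event is set or waited on from a command buffer.
+ */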
+ VkResult result = tu_bo_init_new(device, &event->bo, 0x1000);
+ if (result != VK_SUCCESS)
+ goto fail_alloc;
+
+ result = tu_bo_map(device, &event->bo);
+ if (result != VK_SUCCESS)
+ goto fail_map;
+
*pEvent = tu_event_to_handle(event);
return VK_SUCCESS;
+
+fail_map:
+ tu_bo_finish(device, &event->bo);
+fail_alloc:
+ vk_free2(&device->alloc, pAllocator, event);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
}
void
if (!event)
return;
+
+ tu_bo_finish(device, &event->bo);
vk_free2(&device->alloc, pAllocator, event);
}
{
TU_FROM_HANDLE(tu_event, event, _event);
- if (*event->map == 1)
+ if (*(uint64_t*) event->bo.map == 1)
return VK_EVENT_SET;
return VK_EVENT_RESET;
}
tu_SetEvent(VkDevice _device, VkEvent _event)
{
TU_FROM_HANDLE(tu_event, event, _event);
- *event->map = 1;
+ *(uint64_t*) event->bo.map = 1;
return VK_SUCCESS;
}
tu_ResetEvent(VkDevice _device, VkEvent _event)
{
TU_FROM_HANDLE(tu_event, event, _event);
- *event->map = 0;
+ *(uint64_t*) event->bo.map = 0;
return VK_SUCCESS;
}
vk_free2(&device->alloc, pAllocator, buffer);
}
-static uint32_t
-tu_surface_max_layer_count(struct tu_image_view *iview)
-{
- return iview->type == VK_IMAGE_VIEW_TYPE_3D
- ? iview->extent.depth
- : (iview->base_layer + iview->layer_count);
-}
-
VkResult
tu_CreateFramebuffer(VkDevice _device,
const VkFramebufferCreateInfo *pCreateInfo,
VkImageView _iview = pCreateInfo->pAttachments[i];
struct tu_image_view *iview = tu_image_view_from_handle(_iview);
framebuffer->attachments[i].attachment = iview;
-
- framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
- framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
- framebuffer->layers =
- MIN2(framebuffer->layers, tu_surface_max_layer_count(iview));
}
*pFramebuffer = tu_framebuffer_to_handle(framebuffer);
vk_free2(&device->alloc, pAllocator, fb);
}
+static enum a6xx_tex_clamp
+tu6_tex_wrap(VkSamplerAddressMode address_mode)
+{
+ switch (address_mode) {
+ case VK_SAMPLER_ADDRESS_MODE_REPEAT:
+ return A6XX_TEX_REPEAT;
+ case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
+ return A6XX_TEX_MIRROR_REPEAT;
+ case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
+ return A6XX_TEX_CLAMP_TO_EDGE;
+ case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
+ return A6XX_TEX_CLAMP_TO_BORDER;
+ case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
+ /* only works for power-of-two sizes; anything else would need emulation */
+ return A6XX_TEX_MIRROR_CLAMP;
+ default:
+ unreachable("illegal tex wrap mode");
+ break;
+ }
+}
+
+static enum a6xx_tex_filter
+tu6_tex_filter(VkFilter filter, unsigned aniso)
+{
+ switch (filter) {
+ case VK_FILTER_NEAREST:
+ return A6XX_TEX_NEAREST;
+ case VK_FILTER_LINEAR:
+ return aniso ? A6XX_TEX_ANISO : A6XX_TEX_LINEAR;
+ case VK_FILTER_CUBIC_EXT:
+ return A6XX_TEX_CUBIC;
+ default:
+ unreachable("illegal texture filter");
+ break;
+ }
+}
+
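+/* VkCompareOp and adreno_compare_func share the same numerical ordering
+ * (NEVER = 0 through ALWAYS = 7, both following GL), so the conversion
+ * below is a plain cast.
+ */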
+static inline enum adreno_compare_func
+tu6_compare_func(VkCompareOp op)
+{
+ return (enum adreno_compare_func) op;
+}
+
static void
tu_init_sampler(struct tu_device *device,
struct tu_sampler *sampler,
const VkSamplerCreateInfo *pCreateInfo)
{
+ const struct VkSamplerReductionModeCreateInfo *reduction =
+ vk_find_struct_const(pCreateInfo->pNext, SAMPLER_REDUCTION_MODE_CREATE_INFO);
+
+ unsigned aniso = pCreateInfo->anisotropyEnable ?
+ util_last_bit(MIN2((uint32_t)pCreateInfo->maxAnisotropy >> 1, 8)) : 0;
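+ /* aniso is the log2 of the effective sample count: maxAnisotropy of
+ * 1/2/4/8/16 maps to 0/1/2/3/4 (A6XX_TEX_ANISO_1..A6XX_TEX_ANISO_16).
+ */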
+ bool miplinear = (pCreateInfo->mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR);
+
+ sampler->descriptor[0] =
+ COND(miplinear, A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR) |
+ A6XX_TEX_SAMP_0_XY_MAG(tu6_tex_filter(pCreateInfo->magFilter, aniso)) |
+ A6XX_TEX_SAMP_0_XY_MIN(tu6_tex_filter(pCreateInfo->minFilter, aniso)) |
+ A6XX_TEX_SAMP_0_ANISO(aniso) |
+ A6XX_TEX_SAMP_0_WRAP_S(tu6_tex_wrap(pCreateInfo->addressModeU)) |
+ A6XX_TEX_SAMP_0_WRAP_T(tu6_tex_wrap(pCreateInfo->addressModeV)) |
+ A6XX_TEX_SAMP_0_WRAP_R(tu6_tex_wrap(pCreateInfo->addressModeW)) |
+ A6XX_TEX_SAMP_0_LOD_BIAS(pCreateInfo->mipLodBias);
+ sampler->descriptor[1] =
+ /* COND(!cso->seamless_cube_map, A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF) | */
+ COND(pCreateInfo->unnormalizedCoordinates, A6XX_TEX_SAMP_1_UNNORM_COORDS) |
+ A6XX_TEX_SAMP_1_MIN_LOD(pCreateInfo->minLod) |
+ A6XX_TEX_SAMP_1_MAX_LOD(pCreateInfo->maxLod) |
+ COND(pCreateInfo->compareEnable,
+ A6XX_TEX_SAMP_1_COMPARE_FUNC(tu6_compare_func(pCreateInfo->compareOp)));
+ /* This is an offset into the border_color BO, which we fill with all the
+ * possible Vulkan border colors in the correct order, so we can just use
+ * the Vulkan enum with no translation necessary.
+ */
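+ /* e.g. VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE (= 4) selects the entry at
+ * byte offset 4 * 128 = 512 in the BO filled in tu_CreateDevice.
+ */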
+ sampler->descriptor[2] =
+ A6XX_TEX_SAMP_2_BCOLOR_OFFSET((unsigned) pCreateInfo->borderColor *
+ sizeof(struct bcolor_entry));
+ sampler->descriptor[3] = 0;
+
+ if (reduction) {
+ /* note: vulkan enum matches hw */
+ sampler->descriptor[2] |= A6XX_TEX_SAMP_2_REDUCTION_MODE(reduction->reductionMode);
+ }
+
+ /* TODO: A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR disables mipmapping, but
+ * Vulkan has no NONE mipmap mode to map it to.
+ */
}
VkResult
return VK_SUCCESS;
}
+VkResult
+tu_GetMemoryFdKHR(VkDevice _device,
+ const VkMemoryGetFdInfoKHR *pGetFdInfo,
+ int *pFd)
+{
+ TU_FROM_HANDLE(tu_device, device, _device);
+ TU_FROM_HANDLE(tu_device_memory, memory, pGetFdInfo->memory);
+
+ assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
+
+ /* At the moment, we support only the below handle types. */
+ assert(pGetFdInfo->handleType ==
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
+ pGetFdInfo->handleType ==
+ VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
+
+ int prime_fd = tu_bo_export_dmabuf(device, &memory->bo);
+ if (prime_fd < 0)
+ return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+
+ *pFd = prime_fd;
+ return VK_SUCCESS;
+}
+
+VkResult
+tu_GetMemoryFdPropertiesKHR(VkDevice _device,
+ VkExternalMemoryHandleTypeFlagBits handleType,
+ int fd,
+ VkMemoryFdPropertiesKHR *pMemoryFdProperties)
+{
+ assert(handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
+ pMemoryFdProperties->memoryTypeBits = 1;
+ return VK_SUCCESS;
+}
+
void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceExternalSemaphoreInfoKHR *pExternalSemaphoreInfo,
- VkExternalSemaphorePropertiesKHR *pExternalSemaphoreProperties)
+ const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
+ VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
{
pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
pExternalSemaphoreProperties->compatibleHandleTypes = 0;
void
tu_GetPhysicalDeviceExternalFenceProperties(
VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceExternalFenceInfoKHR *pExternalFenceInfo,
- VkExternalFencePropertiesKHR *pExternalFenceProperties)
+ const VkPhysicalDeviceExternalFenceInfo *pExternalFenceInfo,
+ VkExternalFenceProperties *pExternalFenceProperties)
{
pExternalFenceProperties->exportFromImportedHandleTypes = 0;
pExternalFenceProperties->compatibleHandleTypes = 0;
VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
}
+
+void tu_GetPhysicalDeviceMultisamplePropertiesEXT(
+ VkPhysicalDevice physicalDevice,
+ VkSampleCountFlagBits samples,
+ VkMultisamplePropertiesEXT* pMultisampleProperties)
+{
+ TU_FROM_HANDLE(tu_physical_device, pdevice, physicalDevice);
+
+ if (samples <= VK_SAMPLE_COUNT_4_BIT && pdevice->supported_extensions.EXT_sample_locations)
+ pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 1, 1 };
+ else
+ pMultisampleProperties->maxSampleLocationGridSize = (VkExtent2D){ 0, 0 };
+}