* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
*/
#include "tu_private.h"
+
#include "util/debug.h"
#include "util/u_atomic.h"
#include "vk_format.h"
#include "vk_util.h"
+/* Return true if the given mip level must be stored linearly rather than
+ * tiled: levels narrower than 16 pixels are kept linear — presumably the
+ * minimum width the tiling hardware supports (TODO confirm against the
+ * a6xx tiling docs).  Note this reads image->extent, so the caller must
+ * have filled it in before laying out slices.
+ */
+static inline bool
+image_level_linear(struct tu_image *image, int level)
+{
+ unsigned w = u_minify(image->extent.width, level);
+ return w < 16;
+}
+
+/* Tile-layout alignment requirements, indexed by bytes per pixel (cpp):
+ * .pitchalign is the required pitch alignment in pixels, .heightalign the
+ * required height alignment in rows.  NOTE(review): cpp values without an
+ * entry (e.g. 5, 6, 7) fall into zero-initialized gaps, which would yield
+ * an alignment of 0 — verify callers never index with such sizes.
+ */
+static const struct
+{
+ unsigned pitchalign;
+ unsigned heightalign;
+} tile_alignment[] = {
+ [1] = { 128, 32 }, [2] = { 128, 16 }, [3] = { 128, 16 }, [4] = { 64, 16 },
+ [8] = { 64, 16 }, [12] = { 64, 16 }, [16] = { 64, 16 },
+};
+
+/* Compute the per-mip-level layout of the image: fills in
+ * image->levels[].offset/pitch/size and image->layer_size (the byte size
+ * of one array layer, i.e. the sum over all levels).  The caller must
+ * have set image->extent and image->tile_mode beforehand, since both are
+ * consulted here (via image_level_linear and the tiled-pitch path).
+ */
+static void
+setup_slices(struct tu_image *image, const VkImageCreateInfo *pCreateInfo)
+{
+ enum vk_format_layout layout =
+ vk_format_description(pCreateInfo->format)->layout;
+ uint32_t layer_size = 0;
+ uint32_t width = pCreateInfo->extent.width;
+ uint32_t height = pCreateInfo->extent.height;
+ uint32_t depth = pCreateInfo->extent.depth;
+ /* Non-3D images force every level to the same per-layer size (see the
+  * size selection below); 3D images instead align each level to 4K.
+  */
+ bool layer_first = pCreateInfo->imageType != VK_IMAGE_TYPE_3D;
+ uint32_t alignment = pCreateInfo->imageType == VK_IMAGE_TYPE_3D ? 4096 : 1;
+ uint32_t cpp = vk_format_get_blocksize(pCreateInfo->format);
+
+ uint32_t heightalign = tile_alignment[cpp].heightalign;
+
+ for (unsigned level = 0; level < pCreateInfo->mipLevels; level++) {
+ struct tu_image_level *slice = &image->levels[level];
+ bool linear_level = image_level_linear(image, level);
+ uint32_t aligned_height = height;
+ uint32_t blocks;
+ uint32_t pitchalign;
+
+ if (image->tile_mode && !linear_level) {
+ pitchalign = tile_alignment[cpp].pitchalign;
+ aligned_height = align(aligned_height, heightalign);
+ } else {
+ pitchalign = 64;
+
+ /* The blits used for mem<->gmem work at a granularity of
+ * 32x32, which can cause faults due to over-fetch on the
+ * last level. The simple solution is to over-allocate a
+ * bit the last level to ensure any over-fetch is harmless.
+ * The pitch is already sufficiently aligned, but height
+ * may not be:
+ */
+ if ((level + 1 == pCreateInfo->mipLevels))
+ aligned_height = align(aligned_height, 32);
+ }
+
+ /* ASTC block widths are not powers of two, so the pitch must be
+  * aligned to a non-power-of-two multiple (pitchalign * blockwidth).
+  */
+ if (layout == VK_FORMAT_LAYOUT_ASTC)
+ slice->pitch = util_align_npot(
+ width,
+ pitchalign * vk_format_get_blockwidth(pCreateInfo->format));
+ else
+ slice->pitch = align(width, pitchalign);
+
+ slice->offset = layer_size;
+ blocks = vk_format_get_block_count(pCreateInfo->format, slice->pitch,
+ aligned_height);
+
+ /* 1d array and 2d array textures must all have the same layer size
+ * for each miplevel on a3xx. 3d textures can have different layer
+ * sizes for high levels, but the hw auto-sizer is buggy (or at least
+ * different than what this code does), so as soon as a level's size
+ * drops into range (<= 0xf000 bytes), we stop reducing it and reuse
+ * the previous level's size instead.
+ */
+ if (pCreateInfo->imageType == VK_IMAGE_TYPE_3D &&
+ (level == 1 ||
+ (level > 1 && image->levels[level - 1].size > 0xf000)))
+ slice->size = align(blocks * cpp, alignment);
+ else if (level == 0 || layer_first || alignment == 1)
+ slice->size = align(blocks * cpp, alignment);
+ else
+ slice->size = image->levels[level - 1].size;
+
+ layer_size += slice->size * depth;
+
+ width = u_minify(width, 1);
+ height = u_minify(height, 1);
+ depth = u_minify(depth, 1);
+ }
+
+ image->layer_size = layer_size;
+}
+
VkResult
tu_image_create(VkDevice _device,
const struct tu_image_create_info *create_info,
tu_assert(pCreateInfo->extent.height > 0);
tu_assert(pCreateInfo->extent.depth > 0);
- image = vk_zalloc2(&device->alloc,
- alloc,
- sizeof(*image),
- 8,
+ image = vk_zalloc2(&device->alloc, alloc, sizeof(*image), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!image)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
image->tiling = pCreateInfo->tiling;
image->usage = pCreateInfo->usage;
image->flags = pCreateInfo->flags;
+ image->extent = pCreateInfo->extent;
+ image->level_count = pCreateInfo->mipLevels;
+ image->layer_count = pCreateInfo->arrayLayers;
image->exclusive = pCreateInfo->sharingMode == VK_SHARING_MODE_EXCLUSIVE;
if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT) {
for (uint32_t i = 0; i < pCreateInfo->queueFamilyIndexCount; ++i)
if (pCreateInfo->pQueueFamilyIndices[i] ==
- VK_QUEUE_FAMILY_EXTERNAL_KHR)
+ VK_QUEUE_FAMILY_EXTERNAL)
image->queue_family_mask |= (1u << TU_MAX_QUEUE_FAMILIES) - 1u;
else
- image->queue_family_mask |= 1u
- << pCreateInfo->pQueueFamilyIndices[i];
+ image->queue_family_mask |=
+ 1u << pCreateInfo->pQueueFamilyIndices[i];
}
image->shareable =
- vk_find_struct_const(pCreateInfo->pNext,
- EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR) != NULL;
+ vk_find_struct_const(pCreateInfo->pNext,
+ EXTERNAL_MEMORY_IMAGE_CREATE_INFO) != NULL;
+ image->tile_mode = pCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ? 3 : 0;
+ setup_slices(image, pCreateInfo);
+
+ image->size = image->layer_size * pCreateInfo->arrayLayers;
*pImage = tu_image_to_handle(image);
return VK_SUCCESS;
struct tu_device *device,
const VkImageViewCreateInfo *pCreateInfo)
{
+ TU_FROM_HANDLE(tu_image, image, pCreateInfo->image);
+ const VkImageSubresourceRange *range = &pCreateInfo->subresourceRange;
+
+ switch (image->type) {
+ case VK_IMAGE_TYPE_1D:
+ case VK_IMAGE_TYPE_2D:
+ assert(range->baseArrayLayer + tu_get_layerCount(image, range) <=
+ image->layer_count);
+ break;
+ case VK_IMAGE_TYPE_3D:
+ assert(range->baseArrayLayer + tu_get_layerCount(image, range) <=
+ tu_minify(image->extent.depth, range->baseMipLevel));
+ break;
+ default:
+ unreachable("bad VkImageType");
+ }
+
+ iview->image = image;
+ iview->type = pCreateInfo->viewType;
+ iview->vk_format = pCreateInfo->format;
+ iview->aspect_mask = pCreateInfo->subresourceRange.aspectMask;
+
+ if (iview->aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
+ iview->vk_format = vk_format_stencil_only(iview->vk_format);
+ } else if (iview->aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) {
+ iview->vk_format = vk_format_depth_only(iview->vk_format);
+ }
+
+ // should we minify?
+ iview->extent = image->extent;
+
+ iview->base_layer = range->baseArrayLayer;
+ iview->layer_count = tu_get_layerCount(image, range);
+ iview->base_mip = range->baseMipLevel;
+ iview->level_count = tu_get_levelCount(image, range);
}
unsigned
{
if (!image->exclusive)
return image->queue_family_mask;
- if (family == VK_QUEUE_FAMILY_EXTERNAL_KHR)
+ if (family == VK_QUEUE_FAMILY_EXTERNAL)
return (1u << TU_MAX_QUEUE_FAMILIES) - 1u;
if (family == VK_QUEUE_FAMILY_IGNORED)
return 1u << queue_family;
{
#ifdef ANDROID
const VkNativeBufferANDROID *gralloc_info =
- vk_find_struct_const(pCreateInfo->pNext, NATIVE_BUFFER_ANDROID);
+ vk_find_struct_const(pCreateInfo->pNext, NATIVE_BUFFER_ANDROID);
if (gralloc_info)
- return tu_image_from_gralloc(
- device, pCreateInfo, gralloc_info, pAllocator, pImage);
+ return tu_image_from_gralloc(device, pCreateInfo, gralloc_info,
+ pAllocator, pImage);
#endif
return tu_image_create(device,
- &(struct tu_image_create_info) {
+ &(struct tu_image_create_info) {
.vk_info = pCreateInfo,
.scanout = false,
- },
- pAllocator,
- pImage);
+ },
+ pAllocator, pImage);
}
void
const VkImageSubresource *pSubresource,
VkSubresourceLayout *pLayout)
{
+ TU_FROM_HANDLE(tu_image, image, _image);
+
+ const uint32_t layer_offset = image->layer_size * pSubresource->arrayLayer;
+ const struct tu_image_level *level =
+ image->levels + pSubresource->mipLevel;
+
+ pLayout->offset = layer_offset + level->offset;
+ pLayout->size = level->size;
+ pLayout->rowPitch =
+ level->pitch * vk_format_get_blocksize(image->vk_format);
+ pLayout->arrayPitch = image->layer_size;
+ pLayout->depthPitch = level->size;
}
VkResult
TU_FROM_HANDLE(tu_device, device, _device);
struct tu_image_view *view;
- view = vk_alloc2(&device->alloc,
- pAllocator,
- sizeof(*view),
- 8,
+ view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (view == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
TU_FROM_HANDLE(tu_buffer, buffer, pCreateInfo->buffer);
view->range = pCreateInfo->range == VK_WHOLE_SIZE
- ? buffer->size - pCreateInfo->offset
- : pCreateInfo->range;
+ ? buffer->size - pCreateInfo->offset
+ : pCreateInfo->range;
view->vk_format = pCreateInfo->format;
}
TU_FROM_HANDLE(tu_device, device, _device);
struct tu_buffer_view *view;
- view = vk_alloc2(&device->alloc,
- pAllocator,
- sizeof(*view),
- 8,
+ view = vk_alloc2(&device->alloc, pAllocator, sizeof(*view), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!view)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);