#include "util/xmlconfig.h"
#include "vk_alloc.h"
#include "vk_debug_report.h"
+#include "vk_object.h"
/* Pre-declarations needed for WSI entrypoints */
struct wl_surface;
struct gen_aux_map_context;
struct gen_perf_config;
+struct gen_perf_counter_pass;
+struct gen_perf_query_result;
#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>
#define MAX_PUSH_DESCRIPTORS 32 /* Minimum requirement */
#define MAX_INLINE_UNIFORM_BLOCK_SIZE 4096
#define MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS 32
-#define ANV_UBO_BOUNDS_CHECK_ALIGNMENT 32
+/* We need 16 for UBO block reads to work and 32 for push UBOs. However, we
+ * use 64 here to avoid cache issues. This could most likely bring it back to
+ * 32 if we had different virtual addresses for the different views on a given
+ * GEM object.
+ */
+#define ANV_UBO_ALIGNMENT 64
#define ANV_SSBO_BOUNDS_CHECK_ALIGNMENT 4
#define MAX_VIEWS_FOR_PRIMITIVE_REPLICATION 16
*/
#define ANV_PREDICATE_RESULT_REG 0x2678 /* MI_ALU_REG15 */
+/* We reserve this MI ALU register to pass around an offset computed from
+ * VkPerformanceQuerySubmitInfoKHR::counterPassIndex (VK_KHR_performance_query).
+ * Other code which uses the MI ALU should leave it alone.
+ */
+#define ANV_PERF_QUERY_OFFSET_REG 0x2670 /* MI_ALU_REG14 */
+
/* For gen12 we set the streamout buffers using 4 separate commands
* (3DSTATE_SO_BUFFER_INDEX_*) instead of 3DSTATE_SO_BUFFER. However the layout
* of the 3DSTATE_SO_BUFFER_INDEX_* commands is identical to that of
struct anv_state_pool {
struct anv_block_pool block_pool;
+ /* Offset into the relevant state base address where the state pool starts
+ * allocating memory.
+ */
+ int32_t start_offset;
+
struct anv_state_table table;
/* The size of blocks which will be allocated from the block pool */
struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};
+/* A pool of `count` states carved out of a parent anv_state_pool.
+ *
+ * Blocks are handed out and recycled through the reserved_blocks free list;
+ * see anv_state_reserved_pool_init/alloc/free.
+ */
+struct anv_state_reserved_pool {
+   struct anv_state_pool *pool;     /* parent pool the states came from */
+   union anv_free_list reserved_blocks; /* free list of available states */
+   uint32_t count;                  /* number of states reserved at init */
+};
+
struct anv_state_stream {
struct anv_state_pool *state_pool;
VkResult anv_state_pool_init(struct anv_state_pool *pool,
struct anv_device *device,
- uint64_t start_address,
+ uint64_t base_address,
+ int32_t start_offset,
uint32_t block_size);
void anv_state_pool_finish(struct anv_state_pool *pool);
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
uint32_t size, uint32_t alignment);
+void anv_state_reserved_pool_init(struct anv_state_reserved_pool *pool,
+ struct anv_state_pool *parent,
+ uint32_t count, uint32_t size,
+ uint32_t alignment);
+void anv_state_reserved_pool_finish(struct anv_state_reserved_pool *pool);
+struct anv_state anv_state_reserved_pool_alloc(struct anv_state_reserved_pool *pool);
+void anv_state_reserved_pool_free(struct anv_state_reserved_pool *pool,
+ struct anv_state state);
+
VkResult anv_state_table_init(struct anv_state_table *table,
struct anv_device *device,
uint32_t initial_entries);
VkDeviceSize size;
VkMemoryHeapFlags flags;
- /* Driver-internal book-keeping */
- VkDeviceSize used;
+ /** Driver-internal book-keeping.
+ *
+ * Align it to 64 bits to make atomic operations faster on 32 bit platforms.
+ */
+ VkDeviceSize used __attribute__ ((aligned (8)));
};
struct anv_physical_device {
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
/* Link in anv_instance::physical_devices */
struct list_head link;
bool use_softpin;
bool always_use_bindless;
+ bool use_call_secondary;
/** True if we can access buffers using A64 messages */
bool has_a64_buffer_access;
/** True if we can use bindless access for samplers */
bool has_bindless_samplers;
+ /** True if we can read the GPU timestamp register
+ *
+ * When running in a virtual context, the timestamp register is unreadable
+ * on Gen12+.
+ */
+ bool has_reg_timestamp;
+
/** True if this device has implicit AUX
*
* If true, CCS is handled as an implicit attachment to the BO rather than
};
struct anv_instance {
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
VkAllocationCallbacks alloc;
*/
uintptr_t * fence_bos;
+ int perf_query_pass;
+
const VkAllocationCallbacks * alloc;
VkSystemAllocationScope alloc_scope;
};
struct anv_queue {
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
struct anv_device * device;
};
struct anv_pipeline_cache {
+ struct vk_object_base base;
struct anv_device * device;
pthread_mutex_t mutex;
struct hash_table * nir_cache;
struct hash_table * cache;
+
+ bool external_sync;
};
struct nir_xfb_info;
void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
struct anv_device *device,
- bool cache_enabled);
+ bool cache_enabled,
+ bool external_sync);
void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);
struct anv_shader_bin *
const struct nir_shader *nir,
unsigned char sha1_key[20]);
-struct anv_device {
- VK_LOADER_DATA _loader_data;
+/* A GPU address expressed as a byte offset into a BO. */
+struct anv_address {
+   struct anv_bo *bo;
+   uint32_t offset;
+};
- VkAllocationCallbacks alloc;
+struct anv_device {
+ struct vk_device vk;
struct anv_physical_device * physical;
bool no_hw;
struct anv_state_pool binding_table_pool;
struct anv_state_pool surface_state_pool;
+ struct anv_state_reserved_pool custom_border_colors;
+
+ /** BO used for various workarounds
+ *
+ * There are a number of workarounds on our hardware which require writing
+ * data somewhere and it doesn't really matter where. For that, we use
+ * this BO and just write to the first dword or so.
+ *
+ * We also need to be able to handle NULL buffers bound as pushed UBOs.
+ * For that, we use the high bytes (>= 1024) of the workaround BO.
+ */
struct anv_bo * workaround_bo;
+ struct anv_address workaround_address;
+
struct anv_bo * trivial_batch_bo;
struct anv_bo * hiz_clear_bo;
+ struct anv_state null_surface_state;
struct anv_pipeline_cache default_pipeline_cache;
struct blorp_context blorp;
int anv_gem_get_param(int fd, uint32_t param);
int anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle);
bool anv_gem_get_bit6_swizzle(int fd, uint32_t tiling);
-int anv_gem_get_aperture(int fd, uint64_t *size);
int anv_gem_gpu_get_reset_stats(struct anv_device *device,
uint32_t *active, uint32_t *pending);
int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
-int anv_gem_reg_read(struct anv_device *device,
- uint32_t offset, uint64_t *result);
+int anv_gem_reg_read(int fd, uint32_t offset, uint64_t *result);
uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t caching);
int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
struct anv_batch {
const VkAllocationCallbacks * alloc;
+ struct anv_address start_addr;
+
void * start;
void * end;
void * next;
void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
void *location, struct anv_bo *bo, uint32_t offset);
+struct anv_address anv_batch_address(struct anv_batch *batch, void *batch_location);
+
+/* Point `batch` at the CPU mapping `map` of `size` bytes whose GPU address
+ * is `addr`, and reset the write cursor (next) to the start of the buffer.
+ */
+static inline void
+anv_batch_set_storage(struct anv_batch *batch, struct anv_address addr,
+                      void *map, size_t size)
+{
+   batch->start_addr = addr;
+   batch->next = batch->start = map;
+   batch->end = map + size;
+}
static inline VkResult
anv_batch_set_error(struct anv_batch *batch, VkResult error)
return batch->status != VK_SUCCESS;
}
-struct anv_address {
- struct anv_bo *bo;
- uint32_t offset;
-};
-
#define ANV_NULL_ADDRESS ((struct anv_address) { NULL, 0 })
static inline bool
_dst = NULL; \
}))
+/* #define __gen_get_batch_dwords anv_batch_emit_dwords */
+/* #define __gen_get_batch_address anv_batch_address */
+/* #define __gen_address_value anv_address_physical */
+/* #define __gen_address_offset anv_address_add */
+
struct anv_device_memory {
+ struct vk_object_base base;
+
struct list_head link;
struct anv_bo * bo;
bool sampler);
struct anv_descriptor_set_layout {
+ struct vk_object_base base;
+
/* Descriptor set layouts can be destroyed at almost any time */
uint32_t ref_cnt;
struct anv_descriptor_set_binding_layout binding[0];
};
+void anv_descriptor_set_layout_destroy(struct anv_device *device,
+ struct anv_descriptor_set_layout *layout);
+
static inline void
anv_descriptor_set_layout_ref(struct anv_descriptor_set_layout *layout)
{
{
assert(layout && layout->ref_cnt >= 1);
if (p_atomic_dec_zero(&layout->ref_cnt))
- vk_free(&device->alloc, layout);
+ anv_descriptor_set_layout_destroy(device, layout);
}
struct anv_descriptor {
};
struct anv_descriptor_set {
+ struct vk_object_base base;
+
struct anv_descriptor_pool *pool;
struct anv_descriptor_set_layout *layout;
uint32_t size;
};
struct anv_buffer_view {
+ struct vk_object_base base;
+
enum isl_format format; /**< VkBufferViewCreateInfo::format */
uint64_t range; /**< VkBufferViewCreateInfo::range */
};
struct anv_descriptor_pool {
+ struct vk_object_base base;
+
uint32_t size;
uint32_t next;
uint32_t free_list;
};
struct anv_descriptor_update_template {
+ struct vk_object_base base;
+
VkPipelineBindPoint bind_point;
/* The descriptor set this template corresponds to. This value is only
};
struct anv_pipeline_layout {
+ struct vk_object_base base;
+
struct {
struct anv_descriptor_set_layout *layout;
uint32_t dynamic_offset_start;
};
struct anv_buffer {
+ struct vk_object_base base;
+
struct anv_device * device;
VkDeviceSize size;
*/
struct anv_attachment_state {
enum isl_aux_usage aux_usage;
- enum isl_aux_usage input_aux_usage;
struct anv_surface_state color;
struct anv_surface_state input;
VkImageAspectFlags pending_load_aspects;
bool fast_clear;
VkClearValue clear_value;
- bool clear_color_is_zero_one;
- bool clear_color_is_zero;
/* When multiview is active, attachments with a renderpass clear
* operation have their respective layers cleared on the first
};
struct anv_cmd_pool {
+ struct vk_object_base base;
VkAllocationCallbacks alloc;
struct list_head cmd_buffers;
};
ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT,
ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,
+ ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN,
};
struct anv_cmd_buffer {
- VK_LOADER_DATA _loader_data;
+ struct vk_object_base base;
struct anv_device * device;
VkCommandBufferUsageFlags usage_flags;
VkCommandBufferLevel level;
+ struct anv_query_pool *perf_query_pool;
+
struct anv_cmd_state state;
+ struct anv_address return_addr;
+
/* Set by SetPerformanceMarkerINTEL, written into queries by CmdBeginQuery */
uint64_t intel_perf_marker;
};
const VkSemaphore *out_semaphores,
const uint64_t *out_signal_values,
uint32_t num_out_semaphores,
- VkFence fence);
+ VkFence fence,
+ int perf_query_pass);
VkResult anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer);
};
struct anv_fence {
+ struct vk_object_base base;
+
/* Permanent fence state. Every fence has some form of permanent state
* (type != ANV_SEMAPHORE_TYPE_NONE). This may be a BO to fence on (for
* cross-process fences) or it could just be a dummy for use internally.
struct anv_fence *fence);
struct anv_event {
+ struct vk_object_base base;
uint64_t semaphore;
struct anv_state state;
};
};
struct anv_semaphore {
+ struct vk_object_base base;
+
uint32_t refcount;
/* Permanent semaphore state. Every semaphore has some form of permanent
struct anv_semaphore *semaphore);
struct anv_shader_module {
+ struct vk_object_base base;
+
unsigned char sha1[20];
uint32_t size;
char data[0];
};
struct anv_pipeline {
+ struct vk_object_base base;
+
struct anv_device * device;
struct anv_batch batch;
}
VkResult
-anv_pipeline_init(struct anv_graphics_pipeline *pipeline, struct anv_device *device,
- struct anv_pipeline_cache *cache,
- const VkGraphicsPipelineCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *alloc);
+anv_pipeline_init(struct anv_pipeline *pipeline,
+ struct anv_device *device,
+ enum anv_pipeline_type type,
+ VkPipelineCreateFlags flags,
+ const VkAllocationCallbacks *pAllocator);
+
+void
+anv_pipeline_finish(struct anv_pipeline *pipeline,
+ struct anv_device *device,
+ const VkAllocationCallbacks *pAllocator);
+
+VkResult
+anv_graphics_pipeline_init(struct anv_graphics_pipeline *pipeline, struct anv_device *device,
+ struct anv_pipeline_cache *cache,
+ const VkGraphicsPipelineCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *alloc);
VkResult
anv_pipeline_compile_cs(struct anv_compute_pipeline *pipeline,
const char *entrypoint,
const VkSpecializationInfo *spec_info);
-uint32_t
-anv_cs_workgroup_size(const struct anv_compute_pipeline *pipeline);
+/* Compute-shader dispatch parameters, derived from a compute pipeline by
+ * anv_cs_parameters().
+ */
+struct anv_cs_parameters {
+   uint32_t group_size; /* total invocations per workgroup */
+   uint32_t simd_size;  /* SIMD width the CS was compiled for */
+   uint32_t threads;    /* hardware threads needed per workgroup */
+};
-uint32_t
-anv_cs_threads(const struct anv_compute_pipeline *pipeline);
+struct anv_cs_parameters
+anv_cs_parameters(const struct anv_compute_pipeline *pipeline);
struct anv_format_plane {
enum isl_format isl_format:16;
};
struct anv_image {
+ struct vk_object_base base;
+
VkImageType type; /**< VkImageCreateInfo::imageType */
/* The original VkFormat provided by the client. This may not match any
* of the actual surface formats.
}
static inline struct anv_address
-anv_image_get_clear_color_addr(const struct anv_device *device,
+anv_image_get_clear_color_addr(UNUSED const struct anv_device *device,
const struct anv_image *image,
VkImageAspectFlagBits aspect)
{
}
struct anv_image_view {
+ struct vk_object_base base;
+
const struct anv_image *image; /**< VkImageViewCreateInfo::image */
VkImageAspectFlags aspect_mask;
}
+/* Haswell border color is a bit of a disaster. Float and unorm formats use a
+ * straightforward 32-bit float color in the first 64 bytes. Instead of using
+ * a nice float/integer union like Gen8+, Haswell specifies the integer border
+ * color as a separate entry /after/ the float color. The layout of this entry
+ * also depends on the format's bpp (with extra hacks for RG32), and overlaps.
+ *
+ * Since we don't know the format/bpp, we can't make any of the border colors
+ * containing '1' work for all formats, as it would be in the wrong place for
+ * some of them. We opt to make 32-bit integers work as this seems like the
+ * most common option. Fortunately, transparent black works regardless, as
+ * all zeroes is the same in every bit-size.
+ */
+struct hsw_border_color {
+   float float32[4];     /* float/unorm color, at offset 0 */
+   uint32_t _pad0[12];   /* pads the float entry out to 64 bytes */
+   uint32_t uint32[4];   /* 32-bit integer color, at offset 64 */
+   uint32_t _pad1[108];  /* remainder of the (format-dependent) entry */
+};
+
+/* Gen8+ border color: float and integer views share the same 16 bytes. */
+struct gen8_border_color {
+   union {
+      float float32[4];
+      uint32_t uint32[4];
+   };
+   /* Pad out to 64 bytes */
+   uint32_t _pad[12];
+};
+
struct anv_ycbcr_conversion {
+ struct vk_object_base base;
+
const struct anv_format * format;
VkSamplerYcbcrModelConversion ycbcr_model;
VkSamplerYcbcrRange ycbcr_range;
};
struct anv_sampler {
+ struct vk_object_base base;
+
uint32_t state[3][4];
uint32_t n_planes;
struct anv_ycbcr_conversion *conversion;
* and with a 32-byte stride for use as bindless samplers.
*/
struct anv_state bindless_state;
+
+ struct anv_state custom_border_color;
};
struct anv_framebuffer {
+ struct vk_object_base base;
+
uint32_t width;
uint32_t height;
uint32_t layers;
};
struct anv_render_pass {
+ struct vk_object_base base;
+
uint32_t attachment_count;
uint32_t subpass_count;
/* An array of subpass_count+1 flushes, one per subpass boundary */
#define ANV_PIPELINE_STATISTICS_MASK 0x000007ff
+#define OA_SNAPSHOT_SIZE (256)
+#define ANV_KHR_PERF_QUERY_SIZE (ALIGN(sizeof(uint64_t), 64) + 2 * OA_SNAPSHOT_SIZE)
+
struct anv_query_pool {
+ struct vk_object_base base;
+
VkQueryType type;
VkQueryPipelineStatisticFlags pipeline_statistics;
/** Stride between slots, in bytes */
/** Number of slots in this query pool */
uint32_t slots;
struct anv_bo * bo;
+
+ /* Perf queries : */
+ struct anv_bo reset_bo;
+ uint32_t n_counters;
+ struct gen_perf_counter_pass *counter_pass;
+ uint32_t n_passes;
+ struct gen_perf_query_info **pass_query;
};
+/* Byte offset of counter pass `pass`'s preamble within a performance-query
+ * slot. The `pool` parameter is currently unused (the per-pass stride is the
+ * fixed ANV_KHR_PERF_QUERY_SIZE). NOTE(review): the +8 presumably skips an
+ * 8-byte availability/header word at the start of each pass — confirm against
+ * the query-slot layout in genX_query.c.
+ */
+static inline uint32_t khr_perf_query_preamble_offset(struct anv_query_pool *pool,
+                                                      uint32_t pass)
+{
+   return pass * ANV_KHR_PERF_QUERY_SIZE + 8;
+}
+
int anv_get_instance_entrypoint_index(const char *name);
int anv_get_device_entrypoint_index(const char *name);
int anv_get_physical_device_entrypoint_index(const char *name);
const struct anv_instance_extension_table *instance,
const struct anv_device_extension_table *device);
+void *anv_resolve_device_entrypoint(const struct gen_device_info *devinfo,
+ uint32_t index);
void *anv_lookup_entrypoint(const struct gen_device_info *devinfo,
const char *name);
struct gen_perf_config *anv_get_perf(const struct gen_device_info *devinfo, int fd);
void anv_device_perf_init(struct anv_device *device);
-
-#define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType) \
- \
- static inline struct __anv_type * \
- __anv_type ## _from_handle(__VkType _handle) \
- { \
- return (struct __anv_type *) _handle; \
- } \
- \
- static inline __VkType \
- __anv_type ## _to_handle(struct __anv_type *_obj) \
- { \
- return (__VkType) _obj; \
- }
-
-#define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType) \
- \
- static inline struct __anv_type * \
- __anv_type ## _from_handle(__VkType _handle) \
- { \
- return (struct __anv_type *)(uintptr_t) _handle; \
- } \
- \
- static inline __VkType \
- __anv_type ## _to_handle(struct __anv_type *_obj) \
- { \
- return (__VkType)(uintptr_t) _obj; \
- }
+void anv_perf_write_pass_results(struct gen_perf_config *perf,
+ struct anv_query_pool *pool, uint32_t pass,
+ const struct gen_perf_query_result *accumulated_results,
+ union VkPerformanceCounterResultKHR *results);
#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
- struct __anv_type *__name = __anv_type ## _from_handle(__handle)
-
-ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
-ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
-ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
-ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
-ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)
-
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_update_template, VkDescriptorUpdateTemplate)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView);
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_semaphore, VkSemaphore)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(vk_debug_report_callback, VkDebugReportCallbackEXT)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_ycbcr_conversion, VkSamplerYcbcrConversion)
+ VK_FROM_HANDLE(__anv_type, __name, __handle)
+
+VK_DEFINE_HANDLE_CASTS(anv_cmd_buffer, base, VkCommandBuffer,
+ VK_OBJECT_TYPE_COMMAND_BUFFER)
+VK_DEFINE_HANDLE_CASTS(anv_device, vk.base, VkDevice, VK_OBJECT_TYPE_DEVICE)
+VK_DEFINE_HANDLE_CASTS(anv_instance, base, VkInstance, VK_OBJECT_TYPE_INSTANCE)
+VK_DEFINE_HANDLE_CASTS(anv_physical_device, base, VkPhysicalDevice,
+ VK_OBJECT_TYPE_PHYSICAL_DEVICE)
+VK_DEFINE_HANDLE_CASTS(anv_queue, base, VkQueue, VK_OBJECT_TYPE_QUEUE)
+
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, base, VkCommandPool,
+ VK_OBJECT_TYPE_COMMAND_POOL)
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, base, VkBuffer,
+ VK_OBJECT_TYPE_BUFFER)
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, base, VkBufferView,
+ VK_OBJECT_TYPE_BUFFER_VIEW)
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, base, VkDescriptorPool,
+ VK_OBJECT_TYPE_DESCRIPTOR_POOL)
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, base, VkDescriptorSet,
+ VK_OBJECT_TYPE_DESCRIPTOR_SET)
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, base,
+ VkDescriptorSetLayout,
+ VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT)
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_update_template, base,
+ VkDescriptorUpdateTemplate,
+ VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE)
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, base, VkDeviceMemory,
+ VK_OBJECT_TYPE_DEVICE_MEMORY)
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, base, VkFence, VK_OBJECT_TYPE_FENCE)
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_event, base, VkEvent, VK_OBJECT_TYPE_EVENT)
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, base, VkFramebuffer,
+ VK_OBJECT_TYPE_FRAMEBUFFER)
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_image, base, VkImage, VK_OBJECT_TYPE_IMAGE)
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, base, VkImageView,
+ VK_OBJECT_TYPE_IMAGE_VIEW);
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, base, VkPipelineCache,
+ VK_OBJECT_TYPE_PIPELINE_CACHE)
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, base, VkPipeline,
+ VK_OBJECT_TYPE_PIPELINE)
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, base, VkPipelineLayout,
+ VK_OBJECT_TYPE_PIPELINE_LAYOUT)
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, base, VkQueryPool,
+ VK_OBJECT_TYPE_QUERY_POOL)
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, base, VkRenderPass,
+ VK_OBJECT_TYPE_RENDER_PASS)
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, base, VkSampler,
+ VK_OBJECT_TYPE_SAMPLER)
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_semaphore, base, VkSemaphore,
+ VK_OBJECT_TYPE_SEMAPHORE)
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, base, VkShaderModule,
+ VK_OBJECT_TYPE_SHADER_MODULE)
+VK_DEFINE_NONDISP_HANDLE_CASTS(anv_ycbcr_conversion, base,
+ VkSamplerYcbcrConversion,
+ VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION)
/* Gen-specific function declarations */
#ifdef genX