#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
+#ifndef NDEBUG
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
+#endif
#else
#define VG(x)
#endif
#include "common/gen_clflush.h"
+#include "common/gen_gem.h"
#include "dev/gen_device_info.h"
#include "blorp/blorp.h"
#include "compiler/brw_compiler.h"
#include "util/macros.h"
+#include "util/hash_table.h"
#include "util/list.h"
+#include "util/set.h"
#include "util/u_atomic.h"
#include "util/u_vector.h"
+#include "util/u_math.h"
+#include "util/vma.h"
#include "vk_alloc.h"
#include "vk_debug_report.h"
#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>
#include <vulkan/vk_icd.h>
-#include <vulkan/vk_android_native_buffer.h>
+#include "anv_android.h"
#include "anv_entrypoints.h"
#include "anv_extensions.h"
#include "isl/isl.h"
#include "common/intel_log.h"
#include "wsi_common.h"
+/* anv Virtual Memory Layout
+ * =========================
+ *
+ * When the anv driver is determining the virtual graphics addresses of memory
+ * objects itself using the softpin mechanism, the following memory ranges
+ * will be used.
+ *
+ * Three special considerations to note:
+ *
+ * (1) the dynamic state pool is located within the same 4 GiB as the low
+ * heap. This is to work around a VF cache issue described in a comment in
+ * anv_physical_device_init_heaps.
+ *
+ * (2) the binding table pool is located at lower addresses than the surface
+ * state pool, within a 4 GiB range. This allows surface state base addresses
+ * to cover both binding tables (16 bit offsets) and surface states (32 bit
+ * offsets).
+ *
+ * (3) the last 4 GiB of the address space is withheld from the high
+ * heap. Various hardware units will read past the end of an object for
+ * various reasons. This healthy margin prevents reads from wrapping around
+ * 48-bit addresses.
+ */
+#define LOW_HEAP_MIN_ADDRESS 0x000000001000ULL /* 4 KiB */
+#define LOW_HEAP_MAX_ADDRESS 0x0000bfffffffULL
+#define DYNAMIC_STATE_POOL_MIN_ADDRESS 0x0000c0000000ULL /* 3 GiB */
+#define DYNAMIC_STATE_POOL_MAX_ADDRESS 0x0000ffffffffULL
+#define BINDING_TABLE_POOL_MIN_ADDRESS 0x000100000000ULL /* 4 GiB */
+#define BINDING_TABLE_POOL_MAX_ADDRESS 0x00013fffffffULL
+#define SURFACE_STATE_POOL_MIN_ADDRESS 0x000140000000ULL /* 5 GiB */
+#define SURFACE_STATE_POOL_MAX_ADDRESS 0x00017fffffffULL
+#define INSTRUCTION_STATE_POOL_MIN_ADDRESS 0x000180000000ULL /* 6 GiB */
+#define INSTRUCTION_STATE_POOL_MAX_ADDRESS 0x0001bfffffffULL
+#define HIGH_HEAP_MIN_ADDRESS 0x0001c0000000ULL /* 7 GiB */
+#define HIGH_HEAP_MAX_ADDRESS 0xfffeffffffffULL
+
+#define LOW_HEAP_SIZE \
+ (LOW_HEAP_MAX_ADDRESS - LOW_HEAP_MIN_ADDRESS + 1)
+#define HIGH_HEAP_SIZE \
+ (HIGH_HEAP_MAX_ADDRESS - HIGH_HEAP_MIN_ADDRESS + 1)
+#define DYNAMIC_STATE_POOL_SIZE \
+ (DYNAMIC_STATE_POOL_MAX_ADDRESS - DYNAMIC_STATE_POOL_MIN_ADDRESS + 1)
+#define BINDING_TABLE_POOL_SIZE \
+ (BINDING_TABLE_POOL_MAX_ADDRESS - BINDING_TABLE_POOL_MIN_ADDRESS + 1)
+#define SURFACE_STATE_POOL_SIZE \
+ (SURFACE_STATE_POOL_MAX_ADDRESS - SURFACE_STATE_POOL_MIN_ADDRESS + 1)
+#define INSTRUCTION_STATE_POOL_SIZE \
+ (INSTRUCTION_STATE_POOL_MAX_ADDRESS - INSTRUCTION_STATE_POOL_MIN_ADDRESS + 1)
+
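+/* Illustrative sketch (not part of this header): with softpin, each global
+ * state pool is pinned at the start of its reserved range above, roughly:
+ *
+ *    anv_state_pool_init(&device->dynamic_state_pool, device,
+ *                        DYNAMIC_STATE_POOL_MIN_ADDRESS, 16384, bo_flags);
+ *    anv_state_pool_init(&device->surface_state_pool, device,
+ *                        SURFACE_STATE_POOL_MIN_ADDRESS, 4096, bo_flags);
+ *
+ * The block sizes here are hypothetical; see anv_CreateDevice for the real
+ * values.
+ */
+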
/* Allowing different clear colors requires us to perform a depth resolve at
* the end of certain render passes. This is because while slow clears store
* the clear color in the HiZ buffer, fast clears (without a resolve) don't.
#define MAX_SCISSORS 16
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_DYNAMIC_BUFFERS 16
-#define MAX_IMAGES 8
+#define MAX_IMAGES 64
+#define MAX_GEN8_IMAGES 8
#define MAX_PUSH_DESCRIPTORS 32 /* Minimum requirement */
/* The kernel relocation API has a limitation of a 32-bit delta value
#define ANV_SVGS_VB_INDEX MAX_VBS
#define ANV_DRAWID_VB_INDEX (MAX_VBS + 1)
+/* We reserve this MI ALU register for the purpose of handling predication.
+ * Other code which uses the MI ALU should leave it alone.
+ */
+#define ANV_PREDICATE_RESULT_REG MI_ALU_REG15
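+/* The MI ALU general-purpose registers are backed by the render command
+ * streamer's CS_GPR MMIO range (0x2600 + 8 * n), so MI_ALU_REG15 corresponds
+ * to CS_GPR15 at 0x2678. The predication result stored there is typically
+ * consumed by loading it into MI_PREDICATE_SRC0 and issuing MI_PREDICATE.
+ */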
+
#define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
static inline uint32_t
* propagating errors. Might be useful to plug in a stack trace here.
*/
+VkResult __vk_errorv(struct anv_instance *instance, const void *object,
+ VkDebugReportObjectTypeEXT type, VkResult error,
+ const char *file, int line, const char *format,
+ va_list args);
+
VkResult __vk_errorf(struct anv_instance *instance, const void *object,
VkDebugReportObjectTypeEXT type, VkResult error,
const char *file, int line, const char *format, ...);
#define vk_error(error) __vk_errorf(NULL, NULL,\
VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,\
error, __FILE__, __LINE__, NULL)
+#define vk_errorv(instance, obj, error, format, args)\
+ __vk_errorv(instance, obj, REPORT_OBJECT_TYPE(obj), error,\
+ __FILE__, __LINE__, format, args)
#define vk_errorf(instance, obj, error, format, ...)\
__vk_errorf(instance, obj, REPORT_OBJECT_TYPE(obj), error,\
__FILE__, __LINE__, format, ## __VA_ARGS__)
return anv_multialloc_alloc(ma, alloc ? alloc : parent_alloc, scope);
}
+/* Extra ANV-defined BO flags which won't be passed to the kernel */
+#define ANV_BO_EXTERNAL (1ull << 31)
+#define ANV_BO_FLAG_MASK (1ull << 31)
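+
+/* Sketch: these bits must be stripped before the flags reach the kernel,
+ * along the lines of:
+ *
+ *    obj->flags = bo->flags & ~ANV_BO_FLAG_MASK;
+ */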
+
struct anv_bo {
uint32_t gem_handle;
*/
union anv_free_list {
struct {
- int32_t offset;
+ uint32_t offset;
/* A simple count that is incremented every time the head changes. */
uint32_t count;
uint64_t u64;
};
-#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
+#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { UINT32_MAX, 0 } })
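+
+/* The count field exists to avoid ABA problems: offset and count are updated
+ * together by a single 64-bit compare-and-swap on u64. A minimal sketch of
+ * the pop pattern (the real code lives in anv_allocator.c; the helpers are
+ * hypothetical):
+ *
+ *    union anv_free_list current, old, new;
+ *    current.u64 = list->u64;
+ *    while (current.offset != UINT32_MAX) {
+ *       old = current;
+ *       new.offset = next_offset_of(old.offset);
+ *       new.count = old.count + 1;
+ *       current.u64 = __sync_val_compare_and_swap(&list->u64, old.u64,
+ *                                                 new.u64);
+ *       if (current.u64 == old.u64)
+ *          return entry_at(old.offset);
+ *    }
+ */
+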
struct anv_block_state {
union {
};
};
+#define anv_block_pool_foreach_bo(bo, pool) \
+ for (bo = (pool)->bos; bo != &(pool)->bos[(pool)->nbos]; bo++)
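+/* Usage sketch:
+ *
+ *    struct anv_bo *bo;
+ *    anv_block_pool_foreach_bo(bo, pool)
+ *       total_size += bo->size;
+ */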
+
+#define ANV_MAX_BLOCK_POOL_BOS 20
+
struct anv_block_pool {
struct anv_device *device;
uint64_t bo_flags;
- struct anv_bo bo;
+ struct anv_bo bos[ANV_MAX_BLOCK_POOL_BOS];
+ struct anv_bo *bo;
+ uint32_t nbos;
+
+ uint64_t size;
+
+ /* The address where the start of the pool is pinned. The various bos that
+ * are created as the pool grows will have addresses in the range
+ * [start_address, start_address + BLOCK_POOL_MEMFD_SIZE).
+ */
+ uint64_t start_address;
/* The offset from the start of the bo to the "center" of the block
* pool. Pointers to allocated blocks are given by
*/
uint32_t center_bo_offset;
- /* Current memory map of the block pool. This pointer may or may not
- * point to the actual beginning of the block pool memory. If
- * anv_block_pool_alloc_back has ever been called, then this pointer
- * will point to the "center" position of the buffer and all offsets
- * (negative or positive) given out by the block pool alloc functions
- * will be valid relative to this pointer.
- *
- * In particular, map == bo.map + center_offset
- */
- void *map;
int fd;
/**
int32_t offset;
uint32_t alloc_size;
void *map;
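+   /* Index into the owning anv_state_table; used by the free lists. */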
+ uint32_t idx;
};
#define ANV_STATE_NULL ((struct anv_state) { .alloc_size = 0 })
#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)
+struct anv_free_entry {
+ uint32_t next;
+ struct anv_state state;
+};
+
+struct anv_state_table {
+ struct anv_device *device;
+ int fd;
+ struct anv_free_entry *map;
+ uint32_t size;
+ struct anv_block_state state;
+ struct u_vector mmap_cleanups;
+};
+
struct anv_state_pool {
struct anv_block_pool block_pool;
+ struct anv_state_table table;
+
/* The size of blocks which will be allocated from the block pool */
uint32_t block_size;
*/
VkResult anv_block_pool_init(struct anv_block_pool *pool,
struct anv_device *device,
+ uint64_t start_address,
uint32_t initial_size,
uint64_t bo_flags);
void anv_block_pool_finish(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc(struct anv_block_pool *pool,
- uint32_t block_size);
+ uint32_t block_size, uint32_t *padding);
int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool,
uint32_t block_size);
+void* anv_block_pool_map(struct anv_block_pool *pool, int32_t offset);
VkResult anv_state_pool_init(struct anv_state_pool *pool,
struct anv_device *device,
+ uint64_t start_address,
uint32_t block_size,
uint64_t bo_flags);
void anv_state_pool_finish(struct anv_state_pool *pool);
struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
uint32_t size, uint32_t alignment);
+VkResult anv_state_table_init(struct anv_state_table *table,
+ struct anv_device *device,
+ uint32_t initial_entries);
+void anv_state_table_finish(struct anv_state_table *table);
+VkResult anv_state_table_add(struct anv_state_table *table, uint32_t *idx,
+ uint32_t count);
+void anv_free_list_push(union anv_free_list *list,
+ struct anv_state_table *table,
+ uint32_t idx, uint32_t count);
+struct anv_state* anv_free_list_pop(union anv_free_list *list,
+ struct anv_state_table *table);
+
+
+static inline struct anv_state *
+anv_state_table_get(struct anv_state_table *table, uint32_t idx)
+{
+ return &table->map[idx].state;
+}
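+
+/* Illustrative flow (the real logic lives in anv_allocator.c): entries are
+ * reserved in the table and later recycled through a free list by index:
+ *
+ *    uint32_t idx;
+ *    if (anv_state_table_add(&table, &idx, 1) == VK_SUCCESS) {
+ *       struct anv_state *state = anv_state_table_get(&table, idx);
+ *       state->idx = idx;
+ *       ...
+ *       anv_free_list_push(&free_list, &table, idx, 1);
+ *       state = anv_free_list_pop(&free_list, &table);
+ *    }
+ */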
/**
* Implements a pool of re-usable BOs. The interface is identical to that
* of block_pool except that each block is its own BO.
void anv_bo_cache_finish(struct anv_bo_cache *cache);
VkResult anv_bo_cache_alloc(struct anv_device *device,
struct anv_bo_cache *cache,
- uint64_t size, struct anv_bo **bo);
+ uint64_t size, uint64_t bo_flags,
+ struct anv_bo **bo);
VkResult anv_bo_cache_import(struct anv_device *device,
struct anv_bo_cache *cache,
- int fd, struct anv_bo **bo);
+ int fd, uint64_t bo_flags,
+ struct anv_bo **bo);
VkResult anv_bo_cache_export(struct anv_device *device,
struct anv_bo_cache *cache,
struct anv_bo *bo_in, int *fd_out);
bool no_hw;
char path[20];
const char * name;
+ struct {
+ uint16_t domain;
+ uint8_t bus;
+ uint8_t device;
+ uint8_t function;
+ } pci_info;
struct gen_device_info info;
/** Amount of "GPU memory" we want to advertise
*
bool has_syncobj;
bool has_syncobj_wait;
bool has_context_priority;
+ bool use_softpin;
+ bool has_context_isolation;
struct anv_device_extension_table supported_extensions;
struct anv_memory_heap heaps[VK_MAX_MEMORY_HEAPS];
} memory;
+ uint8_t driver_build_sha1[20];
uint8_t pipeline_cache_uuid[VK_UUID_SIZE];
uint8_t driver_uuid[VK_UUID_SIZE];
uint8_t device_uuid[VK_UUID_SIZE];
+ struct disk_cache * disk_cache;
+
struct wsi_device wsi_device;
int local_fd;
+ int master_fd;
+};
+
+struct anv_app_info {
+ const char* app_name;
+ uint32_t app_version;
+ const char* engine_name;
+ uint32_t engine_version;
+ uint32_t api_version;
};
struct anv_instance {
VkAllocationCallbacks alloc;
- uint32_t apiVersion;
+ struct anv_app_info app_info;
+
struct anv_instance_extension_table enabled_extensions;
- struct anv_dispatch_table dispatch;
+ struct anv_instance_dispatch_table dispatch;
+ struct anv_device_dispatch_table device_dispatch;
int physicalDeviceCount;
struct anv_physical_device physicalDevice;
+ bool pipeline_cache_enabled;
+
struct vk_debug_report_instance debug_report_callbacks;
};
struct anv_device * device;
- struct anv_state_pool * pool;
-
VkDeviceQueueCreateFlags flags;
};
struct anv_device * device;
pthread_mutex_t mutex;
+ struct hash_table * nir_cache;
+
struct hash_table * cache;
};
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
const void *key_data, uint32_t key_size,
const void *kernel_data, uint32_t kernel_size,
+ const void *constant_data,
+ uint32_t constant_data_size,
const struct brw_stage_prog_data *prog_data,
uint32_t prog_data_size,
const struct anv_pipeline_bind_map *bind_map);
+struct anv_shader_bin *
+anv_device_search_for_kernel(struct anv_device *device,
+ struct anv_pipeline_cache *cache,
+ const void *key_data, uint32_t key_size);
+
+struct anv_shader_bin *
+anv_device_upload_kernel(struct anv_device *device,
+ struct anv_pipeline_cache *cache,
+ const void *key_data, uint32_t key_size,
+ const void *kernel_data, uint32_t kernel_size,
+ const void *constant_data,
+ uint32_t constant_data_size,
+ const struct brw_stage_prog_data *prog_data,
+ uint32_t prog_data_size,
+ const struct anv_pipeline_bind_map *bind_map);
+
+struct nir_shader;
+struct nir_shader_compiler_options;
+
+struct nir_shader *
+anv_device_search_for_nir(struct anv_device *device,
+ struct anv_pipeline_cache *cache,
+ const struct nir_shader_compiler_options *nir_options,
+ unsigned char sha1_key[20],
+ void *mem_ctx);
+
+void
+anv_device_upload_nir(struct anv_device *device,
+ struct anv_pipeline_cache *cache,
+ const struct nir_shader *nir,
+ unsigned char sha1_key[20]);
+
struct anv_device {
VK_LOADER_DATA _loader_data;
bool can_chain_batches;
bool robust_buffer_access;
struct anv_device_extension_table enabled_extensions;
- struct anv_dispatch_table dispatch;
+ struct anv_device_dispatch_table dispatch;
+
+ pthread_mutex_t vma_mutex;
+ struct util_vma_heap vma_lo;
+ struct util_vma_heap vma_hi;
+ uint64_t vma_lo_available;
+ uint64_t vma_hi_available;
struct anv_bo_pool batch_bo_pool;
struct anv_state_pool dynamic_state_pool;
struct anv_state_pool instruction_state_pool;
+ struct anv_state_pool binding_table_pool;
struct anv_state_pool surface_state_pool;
struct anv_bo workaround_bo;
struct anv_bo trivial_batch_bo;
+ struct anv_bo hiz_clear_bo;
- struct anv_pipeline_cache blorp_shader_cache;
+ struct anv_pipeline_cache default_pipeline_cache;
struct blorp_context blorp;
struct anv_state border_colors;
struct anv_scratch_pool scratch_pool;
uint32_t default_mocs;
+ uint32_t external_mocs;
pthread_mutex_t mutex;
pthread_cond_t queue_submit;
- bool lost;
+ bool _lost;
};
-static void inline
-anv_state_flush(struct anv_device *device, struct anv_state state)
+static inline struct anv_state_pool *
+anv_binding_table_pool(struct anv_device *device)
{
- if (device->info.has_llc)
- return;
+ if (device->instance->physicalDevice.use_softpin)
+ return &device->binding_table_pool;
+ else
+ return &device->surface_state_pool;
+}
+
+static inline struct anv_state
+anv_binding_table_pool_alloc(struct anv_device *device) {
+ if (device->instance->physicalDevice.use_softpin)
+ return anv_state_pool_alloc(&device->binding_table_pool,
+ device->binding_table_pool.block_size, 0);
+ else
+ return anv_state_pool_alloc_back(&device->surface_state_pool);
+}
+
+static inline void
+anv_binding_table_pool_free(struct anv_device *device, struct anv_state state) {
+ anv_state_pool_free(anv_binding_table_pool(device), state);
+}
- gen_flush_range(state.map, state.alloc_size);
+static inline uint32_t
+anv_mocs_for_bo(const struct anv_device *device, const struct anv_bo *bo)
+{
+ if (bo->flags & ANV_BO_EXTERNAL)
+ return device->external_mocs;
+ else
+ return device->default_mocs;
}
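+
+/* e.g. (sketch): pick the MOCS value when filling out state that points at
+ * a BO:
+ *
+ *    uint32_t mocs = anv_mocs_for_bo(device, address.bo);
+ *
+ * External (imported or exported) BOs get the more conservative external
+ * entry so that scanout and other clients observe coherent data.
+ */
+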
void anv_device_init_blorp(struct anv_device *device);
void anv_device_finish_blorp(struct anv_device *device);
+VkResult _anv_device_set_lost(struct anv_device *device,
+ const char *file, int line,
+ const char *msg, ...);
+#define anv_device_set_lost(dev, ...) \
+ _anv_device_set_lost(dev, __FILE__, __LINE__, __VA_ARGS__)
+
+static inline bool
+anv_device_is_lost(struct anv_device *device)
+{
+ return unlikely(device->_lost);
+}
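+
+/* Usage sketch: a failed submission marks the device lost and the VkResult
+ * propagates out, e.g.
+ *
+ *    if (ret != 0)
+ *       return anv_device_set_lost(device, "execbuf2 failed: %m");
+ *
+ * while entry points can bail out early once anv_device_is_lost() returns
+ * true.
+ */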
+
VkResult anv_device_execbuf(struct anv_device *device,
struct drm_i915_gem_execbuffer2 *execbuf,
struct anv_bo **execbuf_bos);
int anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle);
bool anv_gem_get_bit6_swizzle(int fd, uint32_t tiling);
int anv_gem_get_aperture(int fd, uint64_t *size);
-bool anv_gem_supports_48b_addresses(int fd);
int anv_gem_gpu_get_reset_stats(struct anv_device *device,
uint32_t *active, uint32_t *pending);
int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
+int anv_gem_reg_read(struct anv_device *device,
+ uint32_t offset, uint64_t *result);
uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t caching);
int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
uint32_t *handles, uint32_t num_handles,
int64_t abs_timeout_ns, bool wait_all);
+bool anv_vma_alloc(struct anv_device *device, struct anv_bo *bo);
+void anv_vma_free(struct anv_device *device, struct anv_bo *bo);
+
VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);
struct anv_reloc_list {
uint32_t array_length;
struct drm_i915_gem_relocation_entry * relocs;
struct anv_bo ** reloc_bos;
+ struct set * deps;
};
VkResult anv_reloc_list_init(struct anv_reloc_list *list,
uint32_t offset;
};
+#define ANV_NULL_ADDRESS ((struct anv_address) { NULL, 0 })
+
+static inline bool
+anv_address_is_null(struct anv_address addr)
+{
+ return addr.bo == NULL && addr.offset == 0;
+}
+
+static inline uint64_t
+anv_address_physical(struct anv_address addr)
+{
+ if (addr.bo && (addr.bo->flags & EXEC_OBJECT_PINNED))
+ return gen_canonical_address(addr.bo->offset + addr.offset);
+ else
+ return gen_canonical_address(addr.offset);
+}
+
+static inline struct anv_address
+anv_address_add(struct anv_address addr, uint64_t offset)
+{
+ addr.offset += offset;
+ return addr;
+}
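+
+/* Example (sketch): compose an address and resolve it to a canonical 48-bit
+ * GPU address. With softpin (EXEC_OBJECT_PINNED) the BO's offset is its
+ * final GPU address; otherwise only the relative offset is meaningful until
+ * the kernel performs relocations:
+ *
+ *    struct anv_address addr = anv_address_add(base, 256);
+ *    uint64_t gpu_va = 0;
+ *    if (!anv_address_is_null(addr))
+ *       gpu_va = anv_address_physical(addr);
+ */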
+
+static inline void
+write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush)
+{
+ unsigned reloc_size = 0;
+ if (device->info.gen >= 8) {
+ reloc_size = sizeof(uint64_t);
+ *(uint64_t *)p = gen_canonical_address(v);
+ } else {
+ reloc_size = sizeof(uint32_t);
+ *(uint32_t *)p = v;
+ }
+
+ if (flush && !device->info.has_llc)
+ gen_flush_range(p, reloc_size);
+}
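+
+/* e.g. (sketch, names hypothetical): patching a pointer in a mapped batch:
+ *
+ *    write_reloc(device, batch_map + reloc_offset, target_address, true);
+ */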
+
static inline uint64_t
_anv_combine_address(struct anv_batch *batch, void *location,
const struct anv_address address, uint32_t delta)
_dst = NULL; \
}))
-#define GEN7_MOCS (struct GEN7_MEMORY_OBJECT_CONTROL_STATE) { \
- .GraphicsDataTypeGFDT = 0, \
- .LLCCacheabilityControlLLCCC = 0, \
- .L3CacheabilityControlL3CC = 1, \
-}
+/* MEMORY_OBJECT_CONTROL_STATE:
+ * .GraphicsDataTypeGFDT = 0,
+ * .LLCCacheabilityControlLLCCC = 0,
+ * .L3CacheabilityControlL3CC = 1,
+ */
+#define GEN7_MOCS 1
-#define GEN75_MOCS (struct GEN75_MEMORY_OBJECT_CONTROL_STATE) { \
- .LLCeLLCCacheabilityControlLLCCC = 0, \
- .L3CacheabilityControlL3CC = 1, \
-}
+/* MEMORY_OBJECT_CONTROL_STATE:
+ * .LLCeLLCCacheabilityControlLLCCC = 0,
+ * .L3CacheabilityControlL3CC = 1,
+ */
+#define GEN75_MOCS 1
-#define GEN8_MOCS (struct GEN8_MEMORY_OBJECT_CONTROL_STATE) { \
- .MemoryTypeLLCeLLCCacheabilityControl = WB, \
- .TargetCache = L3DefertoPATforLLCeLLCselection, \
- .AgeforQUADLRU = 0 \
- }
+/* MEMORY_OBJECT_CONTROL_STATE:
+ * .MemoryTypeLLCeLLCCacheabilityControl = WB,
+ * .TargetCache = L3DefertoPATforLLCeLLCselection,
+ * .AgeforQUADLRU = 0
+ */
+#define GEN8_MOCS 0x78
+
+/* MEMORY_OBJECT_CONTROL_STATE:
+ * .MemoryTypeLLCeLLCCacheabilityControl = UCwithFenceifcoherentcycle,
+ * .TargetCache = L3DefertoPATforLLCeLLCselection,
+ * .AgeforQUADLRU = 0
+ */
+#define GEN8_EXTERNAL_MOCS 0x18
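+
+/* The gen7/gen8 values above are raw MOCS field encodings. For gen8,
+ * bits 6:5 hold the memory type, bits 4:3 the target cache and bits 1:0 the
+ * age, so 0x78 = (WB = 3) << 5 | (L3DefertoPAT = 3) << 3 and
+ * 0x18 = (UCwithFence = 0) << 5 | (L3DefertoPAT = 3) << 3.
+ */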
/* Skylake: MOCS is now an index into an array of 62 different caching
* configurations programmed by the kernel.
*/
-#define GEN9_MOCS (struct GEN9_MEMORY_OBJECT_CONTROL_STATE) { \
- /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
- .IndextoMOCSTables = 2 \
- }
+/* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */
+#define GEN9_MOCS (2 << 1)
-#define GEN9_MOCS_PTE { \
- /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
- .IndextoMOCSTables = 1 \
- }
+/* TC=LLC/eLLC, LeCC=PTE, LRUM=3, L3CC=WB */
+#define GEN9_EXTERNAL_MOCS (1 << 1)
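+
+/* On gen9+ the MOCS field holds the table index in bits 6:1, hence the << 1
+ * shift. Index 2 is the kernel's writeback entry; index 1 follows the PTE,
+ * which is the safe choice for external BOs that may be scanned out.
+ */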
/* Cannonlake MOCS defines are duplicates of Skylake MOCS defines. */
-#define GEN10_MOCS (struct GEN10_MEMORY_OBJECT_CONTROL_STATE) { \
- /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
- .IndextoMOCSTables = 2 \
- }
-
-#define GEN10_MOCS_PTE { \
- /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
- .IndextoMOCSTables = 1 \
- }
+#define GEN10_MOCS GEN9_MOCS
+#define GEN10_EXTERNAL_MOCS GEN9_EXTERNAL_MOCS
/* Ice Lake MOCS defines are duplicates of Skylake MOCS defines. */
-#define GEN11_MOCS (struct GEN11_MEMORY_OBJECT_CONTROL_STATE) { \
- /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
- .IndextoMOCSTables = 2 \
- }
-
-#define GEN11_MOCS_PTE { \
- /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
- .IndextoMOCSTables = 1 \
- }
+#define GEN11_MOCS GEN9_MOCS
+#define GEN11_EXTERNAL_MOCS GEN9_EXTERNAL_MOCS
struct anv_device_memory {
struct anv_bo * bo;
struct anv_memory_type * type;
VkDeviceSize map_size;
void * map;
+
+   /* If set, we are holding a reference to an AHardwareBuffer which we
+    * must release when the memory is freed.
+    */
+ struct AHardwareBuffer * ahw;
};
/**
struct anv_buffer_view {
enum isl_format format; /**< VkBufferViewCreateInfo::format */
- struct anv_bo *bo;
- uint32_t offset; /**< Offset into bo. */
uint64_t range; /**< VkBufferViewCreateInfo::range */
+ struct anv_address address;
+
struct anv_state surface_state;
struct anv_state storage_surface_state;
struct anv_state writeonly_storage_surface_state;
/* The descriptor set this template corresponds to. This value is only
* valid if the template was created with the templateType
- * VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR.
+ * VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET.
*/
uint8_t set;
struct anv_descriptor_template_entry entries[0];
};
-size_t
-anv_descriptor_set_binding_layout_get_hw_size(const struct anv_descriptor_set_binding_layout *binding);
-
size_t
anv_descriptor_set_layout_size(const struct anv_descriptor_set_layout *layout);
struct anv_descriptor_pool *pool,
struct anv_descriptor_set *set);
+#define ANV_DESCRIPTOR_SET_SHADER_CONSTANTS (UINT8_MAX - 1)
#define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX
struct anv_pipeline_binding {
VkBufferUsageFlags usage;
/* Set when bound */
- struct anv_bo * bo;
- VkDeviceSize offset;
+ struct anv_address address;
};
static inline uint64_t
if (range == VK_WHOLE_SIZE) {
return buffer->size - offset;
} else {
- assert(range <= buffer->size);
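+      /* Make sure 'range + offset' doesn't overflow before checking it
+       * against the buffer size.
+       */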
+ assert(range + offset >= range);
+ assert(range + offset <= buffer->size);
return range;
}
}
* we would have to CS stall on every flush which could be bad.
*/
ANV_PIPE_NEEDS_CS_STALL_BIT = (1 << 21),
+
+ /* This bit does not exist directly in PIPE_CONTROL. It means that render
+ * target operations related to transfer commands with VkBuffer as
+ * destination are ongoing. Some operations like copies on the command
+ * streamer might need to be aware of this to trigger the appropriate stall
+ * before they can proceed with the copy.
+ */
+ ANV_PIPE_RENDER_TARGET_BUFFER_WRITES = (1 << 22),
};
#define ANV_PIPE_FLUSH_BITS ( \
for_each_bit(b, flags) {
switch ((VkAccessFlagBits)(1 << b)) {
case VK_ACCESS_SHADER_WRITE_BIT:
+         /* We're transitioning a buffer that was previously used as a write
+ * destination through the data port. To make its content available
+ * to future operations, flush the data cache.
+ */
pipe_bits |= ANV_PIPE_DATA_CACHE_FLUSH_BIT;
break;
case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
+         /* We're transitioning a buffer that was previously used as a render
+ * target. To make its content available to future operations, flush
+ * the render target cache.
+ */
pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
break;
case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
+         /* We're transitioning a buffer that was previously used as a depth
+ * buffer. To make its content available to future operations, flush
+ * the depth cache.
+ */
pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
break;
case VK_ACCESS_TRANSFER_WRITE_BIT:
+ /* We're transitioning a buffer that was previously used as a
+ * transfer write destination. Generic write operations include color
+          * & depth operations as well as buffer operations like:
+ * - vkCmdClearColorImage()
+ * - vkCmdClearDepthStencilImage()
+ * - vkCmdBlitImage()
+ * - vkCmdCopy*(), vkCmdUpdate*(), vkCmdFill*()
+ *
+ * Most of these operations are implemented using Blorp which writes
+ * through the render target, so flush that cache to make it visible
+          * to future operations. For depth-related operations, we also
+ * need to flush the depth cache.
+ */
pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
break;
+ case VK_ACCESS_MEMORY_WRITE_BIT:
+ /* We're transitioning a buffer for generic write operations. Flush
+ * all the caches.
+ */
+ pipe_bits |= ANV_PIPE_FLUSH_BITS;
+ break;
default:
break; /* Nothing to do */
}
for_each_bit(b, flags) {
switch ((VkAccessFlagBits)(1 << b)) {
case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
+         /* Indirect draw commands take a buffer as input that the command
+          * streamer reads to load some of the HW registers
+ * (see genX_cmd_buffer.c:load_indirect_parameters). This requires a
+ * command streamer stall so that all the cache flushes have
+ * completed before the command streamer loads from memory.
+ */
+ pipe_bits |= ANV_PIPE_CS_STALL_BIT;
+ /* Indirect draw commands also set gl_BaseVertex & gl_BaseIndex
+ * through a vertex buffer, so invalidate that cache.
+ */
+ pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
+         /* For vkCmdDispatchIndirect, we also load gl_NumWorkGroups through a
+          * UBO from the buffer, so we need to invalidate the constant cache.
+ */
+ pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
+ break;
case VK_ACCESS_INDEX_READ_BIT:
case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
+         /* We're transitioning a buffer to be used as input for vkCmdDraw*
+ * commands, so we invalidate the VF cache to make sure there is no
+ * stale data when we start rendering.
+ */
pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
break;
case VK_ACCESS_UNIFORM_READ_BIT:
+         /* We're transitioning a buffer to be used as uniform data. Because
+          * uniforms are accessed through the data port & sampler, we need to
+ * invalidate the texture cache (sampler) & constant cache (data
+ * port) to avoid stale data.
+ */
pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
break;
case VK_ACCESS_SHADER_READ_BIT:
case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
case VK_ACCESS_TRANSFER_READ_BIT:
+         /* We're transitioning a buffer to be read through the sampler, so
+          * invalidate the texture cache; we don't want any stale data.
+ */
pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
break;
+ case VK_ACCESS_MEMORY_READ_BIT:
+         /* We're transitioning a buffer for generic read; invalidate all the
+ * caches.
+ */
+ pipe_bits |= ANV_PIPE_INVALIDATE_BITS;
+ break;
+ case VK_ACCESS_MEMORY_WRITE_BIT:
+         /* Generic write; make sure all previously written data lands in
+ * memory.
+ */
+ pipe_bits |= ANV_PIPE_FLUSH_BITS;
+ break;
+ case VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT:
+         /* We're transitioning a buffer for conditional rendering. We'll load the
+ * content of this buffer into HW registers using the command
+ * streamer, so we need to stall the command streamer to make sure
+ * any in-flight flush operations have completed.
+ */
+ pipe_bits |= ANV_PIPE_CS_STALL_BIT;
+ break;
default:
break; /* Nothing to do */
}
uint32_t base_work_group_id[3];
/* Image data for image_load_store on pre-SKL */
- struct brw_image_param images[MAX_IMAGES];
+ struct brw_image_param images[MAX_GEN8_IMAGES];
};
struct anv_dynamic_state {
*
* This address is relative to the start of the BO.
*/
- uint64_t address;
+ struct anv_address address;
/* Address of the aux surface, if any
*
- * This field is 0 if and only if no aux surface exists.
+ * This field is ANV_NULL_ADDRESS if and only if no aux surface exists.
*
- * This address is relative to the start of the BO. With the exception of
- * gen8, the bottom 12 bits of this address include extra aux information.
+ * With the exception of gen8, the bottom 12 bits of this address' offset
+ * include extra aux information.
*/
- uint64_t aux_address;
+ struct anv_address aux_address;
/* Address of the clear color, if any
*
* This address is relative to the start of the BO.
*/
- uint64_t clear_address;
+ struct anv_address clear_address;
};
/**
*/
bool hiz_enabled;
+ bool conditional_render_enabled;
+
/**
* Array length is anv_cmd_state::pass::attachment_count. Array content is
* valid only when recording a render pass instance.
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer);
-void anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer);
-
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
+void anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer *cmd_buffer);
+
enum anv_fence_type {
ANV_FENCE_TYPE_NONE = 0,
ANV_FENCE_TYPE_BO,
ANV_FENCE_TYPE_SYNCOBJ,
+ ANV_FENCE_TYPE_WSI,
};
enum anv_bo_fence_state {
/** DRM syncobj handle for syncobj-based fences */
uint32_t syncobj;
+
+ /** WSI fence */
+ struct wsi_fence *fence_wsi;
};
};
struct anv_state kernel;
uint32_t kernel_size;
+ struct anv_state constant_data;
+ uint32_t constant_data_size;
+
const struct brw_stage_prog_data *prog_data;
uint32_t prog_data_size;
anv_shader_bin_create(struct anv_device *device,
const void *key, uint32_t key_size,
const void *kernel, uint32_t kernel_size,
+ const void *constant_data, uint32_t constant_data_size,
const struct brw_stage_prog_data *prog_data,
uint32_t prog_data_size, const void *prog_data_param,
const struct anv_pipeline_bind_map *bind_map);
struct anv_state blend_state;
uint32_t vb_used;
- uint32_t binding_stride[MAX_VBS];
- bool instancing_enable[MAX_VBS];
+ struct anv_pipeline_vertex_binding {
+ uint32_t stride;
+ bool instanced;
+ uint32_t instance_divisor;
+ } vb[MAX_VBS];
+
bool primitive_restart;
uint32_t topology;
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
struct anv_pipeline_cache *cache,
const VkComputePipelineCreateInfo *info,
- struct anv_shader_module *module,
+ const struct anv_shader_module *module,
const char *entrypoint,
const VkSpecializationInfo *spec_info);
/* How to map sampled ycbcr planes to a single 4 component element. */
struct isl_swizzle ycbcr_swizzle;
+
+ /* What aspect is associated to this plane */
+ VkImageAspectFlags aspect;
};
struct anv_format {
struct anv_format_plane planes[3];
+ VkFormat vk_format;
uint8_t n_planes;
bool can_ycbcr;
};
}
}
-static inline uint32_t
-anv_image_aspect_get_planes(VkImageAspectFlags aspect_mask)
-{
- uint32_t planes = 0;
-
- if (aspect_mask & (VK_IMAGE_ASPECT_COLOR_BIT |
- VK_IMAGE_ASPECT_DEPTH_BIT |
- VK_IMAGE_ASPECT_STENCIL_BIT |
- VK_IMAGE_ASPECT_PLANE_0_BIT))
- planes++;
- if (aspect_mask & VK_IMAGE_ASPECT_PLANE_1_BIT)
- planes++;
- if (aspect_mask & VK_IMAGE_ASPECT_PLANE_2_BIT)
- planes++;
-
- return planes;
-}
-
static inline VkImageAspectFlags
anv_plane_to_aspect(VkImageAspectFlags image_aspects,
uint32_t plane)
{
if (image_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
- if (_mesa_bitcount(image_aspects) > 1)
+ if (util_bitcount(image_aspects) > 1)
return VK_IMAGE_ASPECT_PLANE_0_BIT << plane;
return VK_IMAGE_ASPECT_COLOR_BIT;
}
* Subsurface of an anv_image.
*/
struct anv_surface {
- /** Valid only if isl_surf::size > 0. */
+ /** Valid only if isl_surf::size_B > 0. */
struct isl_surf isl;
/**
uint32_t samples; /**< VkImageCreateInfo::samples */
uint32_t n_planes;
VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
+ VkImageCreateFlags create_flags; /* Flags used when creating image. */
VkImageTiling tiling; /** VkImageCreateInfo::tiling */
/** True if this needs to be bound to an appropriately tiled BO.
*/
bool disjoint;
+ /* All the formats that can be used when creating views of this image
+ * are CCS_E compatible.
+ */
+ bool ccs_e_compatible;
+
+   /* Image was created with an external format. */
+ bool external_format;
+
/**
* Image subsurfaces
*
/**
* BO associated with this plane, set when bound.
*/
- struct anv_bo *bo;
- VkDeviceSize bo_offset;
+ struct anv_address address;
/**
* When destroying the image, also free the bo.
VkImageAspectFlagBits aspect)
{
uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
- return image->planes[plane].aux_surface.isl.size > 0 ?
+ return image->planes[plane].aux_surface.isl.size_B > 0 ?
image->planes[plane].aux_surface.isl.levels : 0;
}
assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
- return (struct anv_address) {
- .bo = image->planes[plane].bo,
- .offset = image->planes[plane].bo_offset +
- image->planes[plane].fast_clear_state_offset,
- };
+ return anv_address_add(image->planes[plane].address,
+ image->planes[plane].fast_clear_state_offset);
}
static inline struct anv_address
const unsigned clear_color_state_size = device->info.gen >= 10 ?
device->isl_dev.ss.clear_color_state_size :
device->isl_dev.ss.clear_value_size;
- addr.offset += clear_color_state_size;
- return addr;
+ return anv_address_add(addr, clear_color_state_size);
}
static inline struct anv_address
VkRect2D area,
float depth_value, uint8_t stencil_value);
void
+anv_image_msaa_resolve(struct anv_cmd_buffer *cmd_buffer,
+ const struct anv_image *src_image,
+ enum isl_aux_usage src_aux_usage,
+ uint32_t src_level, uint32_t src_base_layer,
+ const struct anv_image *dst_image,
+ enum isl_aux_usage dst_aux_usage,
+ uint32_t dst_level, uint32_t dst_base_layer,
+ VkImageAspectFlagBits aspect,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t dst_x, uint32_t dst_y,
+ uint32_t width, uint32_t height,
+ uint32_t layer_count,
+ enum blorp_filter filter);
+void
anv_image_hiz_op(struct anv_cmd_buffer *cmd_buffer,
const struct anv_image *image,
VkImageAspectFlagBits aspect, uint32_t level,
void
anv_image_mcs_op(struct anv_cmd_buffer *cmd_buffer,
const struct anv_image *image,
+ enum isl_format format,
VkImageAspectFlagBits aspect,
uint32_t base_layer, uint32_t layer_count,
- enum isl_aux_op mcs_op, bool predicate);
+ enum isl_aux_op mcs_op, union isl_color_value *clear_value,
+ bool predicate);
void
anv_image_ccs_op(struct anv_cmd_buffer *cmd_buffer,
const struct anv_image *image,
+ enum isl_format format,
VkImageAspectFlagBits aspect, uint32_t level,
uint32_t base_layer, uint32_t layer_count,
- enum isl_aux_op ccs_op, bool predicate);
+ enum isl_aux_op ccs_op, union isl_color_value *clear_value,
+ bool predicate);
void
anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
/* Color aspects are only compatible when the aspect counts match. */
if ((aspects1 & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) != 0 &&
(aspects2 & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) != 0 &&
- _mesa_bitcount(aspects1) == _mesa_bitcount(aspects2))
+ util_bitcount(aspects1) == util_bitcount(aspects2))
return true;
return false;
isl_surf_usage_flags_t isl_extra_usage_flags;
uint32_t stride;
+ bool external_format;
};
VkResult anv_image_create(VkDevice _device,
const VkAllocationCallbacks* alloc,
VkImage *pImage);
-#ifdef ANDROID
-VkResult anv_image_from_gralloc(VkDevice device_h,
- const VkImageCreateInfo *base_info,
- const VkNativeBufferANDROID *gralloc_info,
- const VkAllocationCallbacks *alloc,
- VkImage *pImage);
-#endif
-
const struct anv_surface *
anv_image_get_surface_for_aspect_mask(const struct anv_image *image,
VkImageAspectFlags aspect_mask);
}
}
+VkFormatFeatureFlags
+anv_get_image_format_features(const struct gen_device_info *devinfo,
+ VkFormat vk_format,
+ const struct anv_format *anv_format,
+ VkImageTiling vk_tiling);
void anv_fill_buffer_surface_state(struct anv_device *device,
struct anv_state state,
enum isl_format format,
- uint32_t offset, uint32_t range,
- uint32_t stride);
+ struct anv_address address,
+ uint32_t range, uint32_t stride);
static inline void
anv_clear_color_from_att_state(union isl_color_value *clear_color,
struct anv_subpass_attachment * color_attachments;
struct anv_subpass_attachment * resolve_attachments;
- struct anv_subpass_attachment depth_stencil_attachment;
+ struct anv_subpass_attachment * depth_stencil_attachment;
+ struct anv_subpass_attachment * ds_resolve_attachment;
+ VkResolveModeFlagBitsKHR depth_resolve_mode;
+ VkResolveModeFlagBitsKHR stencil_resolve_mode;
uint32_t view_mask;
/** Subpass has a depth/stencil self-dependency */
bool has_ds_self_dep;
- /** Subpass has at least one resolve attachment */
- bool has_resolve;
+ /** Subpass has at least one color resolve attachment */
+ bool has_color_resolve;
};
static inline unsigned
anv_subpass_view_count(const struct anv_subpass *subpass)
{
- return MAX2(1, _mesa_bitcount(subpass->view_mask));
+ return MAX2(1, util_bitcount(subpass->view_mask));
}
struct anv_render_pass_attachment {
struct anv_bo bo;
};
-int anv_get_entrypoint_index(const char *name);
+int anv_get_instance_entrypoint_index(const char *name);
+int anv_get_device_entrypoint_index(const char *name);
+
+bool
+anv_instance_entrypoint_is_enabled(int index, uint32_t core_version,
+ const struct anv_instance_extension_table *instance);
bool
-anv_entrypoint_is_enabled(int index, uint32_t core_version,
- const struct anv_instance_extension_table *instance,
- const struct anv_device_extension_table *device);
+anv_device_entrypoint_is_enabled(int index, uint32_t core_version,
+ const struct anv_instance_extension_table *instance,
+ const struct anv_device_extension_table *device);
void *anv_lookup_entrypoint(const struct gen_device_info *devinfo,
const char *name);
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_update_template, VkDescriptorUpdateTemplateKHR)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_update_template, VkDescriptorUpdateTemplate)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)