#include <stdbool.h>
#include <pthread.h>
#include <assert.h>
+#include <stdint.h>
#include <i915_drm.h>
#ifdef HAVE_VALGRIND
#include "util/macros.h"
#include "util/list.h"
+/* Forward declarations needed for WSI entrypoints */
+struct wl_surface;
+struct wl_display;
+typedef struct xcb_connection_t xcb_connection_t;
+typedef uint32_t xcb_visualid_t;
+typedef uint32_t xcb_window_t;
+
+#define VK_USE_PLATFORM_XCB_KHR
+#define VK_USE_PLATFORM_WAYLAND_KHR
+
#define VK_PROTOTYPES
#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>
-#include <vulkan/vk_ext_khr_swapchain.h>
-#include <vulkan/vk_ext_khr_device_swapchain.h>
#include "anv_entrypoints.h"
-
+#include "anv_gen_macros.h"
#include "brw_context.h"
+#include "isl.h"
#ifdef __cplusplus
extern "C" {
return (v + a - 1) & ~(a - 1);
}
+static inline uint64_t
+align_u64(uint64_t v, uint64_t a)
+{
+ return (v + a - 1) & ~(a - 1);
+}
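+
+/* Note: these align helpers assume the alignment is a power of two, e.g.
+ * align_u64(13, 8) == (13 + 7) & ~7 == 16.
+ */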
+
static inline int32_t
align_i32(int32_t v, int32_t a)
{
return MAX(n >> levels, 1);
}
+static inline float
+anv_clamp_f(float f, float min, float max)
+{
+ assert(min < max);
+
+ if (f > max)
+ return max;
+ else if (f < min)
+ return min;
+ else
+ return f;
+}
+
static inline bool
anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
{
__anv_vector_offset += (queue)->element_size)
struct anv_bo {
- int gem_handle;
+ uint32_t gem_handle;
/* Index into the current validation list. This is used by the
 * validation list building algorithm to track which buffers are already
struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};
+struct anv_state_stream_block;
+
struct anv_state_stream {
struct anv_block_pool *block_pool;
+
+ /* The current working block */
+ struct anv_state_stream_block *block;
+
+ /* Offset at which the current block starts */
+ uint32_t start;
+ /* Offset at which to allocate the next state */
uint32_t next;
- uint32_t current_block;
+ /* Offset at which the current block ends */
uint32_t end;
};
+#define CACHELINE_SIZE 64
+#define CACHELINE_MASK 63
+
+static inline void
+anv_state_clflush(struct anv_state state)
+{
+ /* state.map may not be cacheline aligned, so round down the start pointer
+ * to a cacheline boundary so we flush all cachelines that contain the state.
+ */
+ void *end = state.map + state.alloc_size;
+ void *p = (void *) (((uintptr_t) state.map) & ~CACHELINE_MASK);
+
+ __builtin_ia32_sfence();
+ while (p < end) {
+ __builtin_ia32_clflush(p);
+ p += CACHELINE_SIZE;
+ }
+}
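+
+/* Illustrative usage (the same pattern appears in anv_state_pool_emit
+ * below): on non-LLC platforms, CPU writes to state memory must be flushed
+ * before the GPU reads it:
+ *
+ *    if (!device->info.has_llc)
+ *       anv_state_clflush(state);
+ */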
+
void anv_block_pool_init(struct anv_block_pool *pool,
struct anv_device *device, uint32_t block_size);
void anv_block_pool_finish(struct anv_block_pool *pool);
dtable.func; \
})
+static inline void *
+anv_alloc(const VkAllocationCallbacks *alloc,
+ size_t size, size_t align,
+ VkSystemAllocationScope scope)
+{
+ return alloc->pfnAllocation(alloc->pUserData, size, align, scope);
+}
+
+static inline void *
+anv_realloc(const VkAllocationCallbacks *alloc,
+ void *ptr, size_t size, size_t align,
+ VkSystemAllocationScope scope)
+{
+ return alloc->pfnReallocation(alloc->pUserData, ptr, size, align, scope);
+}
+
+static inline void
+anv_free(const VkAllocationCallbacks *alloc, void *data)
+{
+ alloc->pfnFree(alloc->pUserData, data);
+}
+
+static inline void *
+anv_alloc2(const VkAllocationCallbacks *parent_alloc,
+ const VkAllocationCallbacks *alloc,
+ size_t size, size_t align,
+ VkSystemAllocationScope scope)
+{
+ if (alloc)
+ return anv_alloc(alloc, size, align, scope);
+ else
+ return anv_alloc(parent_alloc, size, align, scope);
+}
+
+static inline void
+anv_free2(const VkAllocationCallbacks *parent_alloc,
+ const VkAllocationCallbacks *alloc,
+ void *data)
+{
+ if (alloc)
+ anv_free(alloc, data);
+ else
+ anv_free(parent_alloc, data);
+}
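+
+/* Illustrative usage (hypothetical create/destroy pair, not part of this
+ * patch): objects prefer the allocator passed at create time and fall back
+ * to the parent's:
+ *
+ *    struct anv_sampler *sampler =
+ *       anv_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
+ *                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ *    ...
+ *    anv_free2(&device->alloc, pAllocator, sampler);
+ */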
struct anv_physical_device {
VK_LOADER_DATA _loader_data;
const struct brw_device_info * info;
uint64_t aperture_size;
struct brw_compiler * compiler;
+ struct isl_device isl_dev;
};
struct anv_instance {
VK_LOADER_DATA _loader_data;
- void * pAllocUserData;
- PFN_vkAllocFunction pfnAlloc;
- PFN_vkFreeFunction pfnFree;
+ VkAllocationCallbacks alloc;
+
uint32_t apiVersion;
int physicalDeviceCount;
struct anv_physical_device physicalDevice;
- struct anv_wsi_implementation * wsi_impl[VK_PLATFORM_NUM_KHR];
+ void * wayland_wsi;
};
VkResult anv_init_wsi(struct anv_instance *instance);
struct {
VkRenderPass render_pass;
+ /** Pipeline that blits from a 1D image. */
+ VkPipeline pipeline_1d_src;
+
/** Pipeline that blits from a 2D image. */
VkPipeline pipeline_2d_src;
struct anv_state_pool * pool;
};
+struct anv_pipeline_cache {
+ struct anv_device * device;
+ struct anv_state_stream program_stream;
+ pthread_mutex_t mutex;
+};
+
+void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
+ struct anv_device *device);
+void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);
+
struct anv_device {
VK_LOADER_DATA _loader_data;
+ VkAllocationCallbacks alloc;
+
struct anv_instance * instance;
uint32_t chipset_id;
struct brw_device_info info;
+ struct isl_device isl_dev;
int context_id;
int fd;
struct anv_state_pool dynamic_state_pool;
struct anv_block_pool instruction_block_pool;
+ struct anv_pipeline_cache default_pipeline_cache;
+
struct anv_block_pool surface_state_block_pool;
struct anv_state_pool surface_state_pool;
+ struct anv_bo workaround_bo;
+
struct anv_meta_state meta_state;
struct anv_state border_colors;
pthread_mutex_t mutex;
};
-void *
-anv_instance_alloc(struct anv_instance * instance,
- size_t size,
- size_t alignment,
- VkSystemAllocType allocType);
-
-void
-anv_instance_free(struct anv_instance * instance,
- void * mem);
-
-void *
-anv_device_alloc(struct anv_device * device,
- size_t size,
- size_t alignment,
- VkSystemAllocType allocType);
-
-void
-anv_device_free(struct anv_device * device,
- void * mem);
-
void* anv_gem_mmap(struct anv_device *device,
- uint32_t gem_handle, uint64_t offset, uint64_t size);
+ uint32_t gem_handle, uint64_t offset, uint64_t size,
+ uint32_t flags);
void anv_gem_munmap(void *p, uint64_t size);
uint32_t anv_gem_create(struct anv_device *device, size_t size);
-void anv_gem_close(struct anv_device *device, int gem_handle);
-int anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
-int anv_gem_wait(struct anv_device *device, int gem_handle, int64_t *timeout_ns);
+void anv_gem_close(struct anv_device *device, uint32_t gem_handle);
+uint32_t anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
+int anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns);
int anv_gem_execbuffer(struct anv_device *device,
struct drm_i915_gem_execbuffer2 *execbuf);
-int anv_gem_set_tiling(struct anv_device *device, int gem_handle,
+int anv_gem_set_tiling(struct anv_device *device, uint32_t gem_handle,
uint32_t stride, uint32_t tiling);
int anv_gem_create_context(struct anv_device *device);
int anv_gem_destroy_context(struct anv_device *device, int context);
int anv_gem_get_param(int fd, uint32_t param);
int anv_gem_get_aperture(int fd, uint64_t *size);
-int anv_gem_handle_to_fd(struct anv_device *device, int gem_handle);
-int anv_gem_fd_to_handle(struct anv_device *device, int fd);
-int anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
+int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
+uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
+int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t caching);
+int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
+ uint32_t read_domains, uint32_t write_domain);
VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);
};
VkResult anv_reloc_list_init(struct anv_reloc_list *list,
- struct anv_device *device);
+ const VkAllocationCallbacks *alloc);
void anv_reloc_list_finish(struct anv_reloc_list *list,
- struct anv_device *device);
+ const VkAllocationCallbacks *alloc);
uint64_t anv_reloc_list_add(struct anv_reloc_list *list,
- struct anv_device *device,
+ const VkAllocationCallbacks *alloc,
uint32_t offset, struct anv_bo *target_bo,
uint32_t delta);
};
struct anv_batch {
- struct anv_device * device;
+ const VkAllocationCallbacks * alloc;
void * start;
void * end;
}
}
-#include "gen7_pack.h"
-#include "gen75_pack.h"
-#undef GEN8_3DSTATE_MULTISAMPLE
-#include "gen8_pack.h"
-
-#define anv_batch_emit(batch, cmd, ...) do { \
- void *__dst = anv_batch_emit_dwords(batch, cmd ## _length); \
- struct cmd __template = { \
- cmd ## _header, \
- __VA_ARGS__ \
- }; \
- cmd ## _pack(batch, __dst, &__template); \
- VG(VALGRIND_CHECK_MEM_IS_DEFINED(__dst, cmd ## _length * 4)); \
+/* Wrapper macros needed to work around preprocessor argument issues. In
+ * particular, an argument is not macro-expanded when it is pasted with "##".
+ * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
+ * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
+ * We can work around this easily enough with these helpers.
+ */
+#define __anv_cmd_length(cmd) cmd ## _length
+#define __anv_cmd_length_bias(cmd) cmd ## _length_bias
+#define __anv_cmd_header(cmd) cmd ## _header
+#define __anv_cmd_pack(cmd) cmd ## _pack
+
+#define anv_batch_emit(batch, cmd, ...) do { \
+ void *__dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
+ struct cmd __template = { \
+ __anv_cmd_header(cmd), \
+ __VA_ARGS__ \
+ }; \
+ __anv_cmd_pack(cmd)(batch, __dst, &__template); \
+ VG(VALGRIND_CHECK_MEM_IS_DEFINED(__dst, __anv_cmd_length(cmd) * 4)); \
} while (0)
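+
+/* Illustrative usage (command and field names are examples from the genxml
+ * headers, not part of this patch): fields beyond the header are set with
+ * designated initializers:
+ *
+ *    anv_batch_emit(&cmd_buffer->batch, GEN8_PIPE_CONTROL,
+ *                   .CommandStreamerStallEnable = true);
+ */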
#define anv_batch_emitn(batch, n, cmd, ...) ({ \
void *__dst = anv_batch_emit_dwords(batch, n); \
struct cmd __template = { \
- cmd ## _header, \
- .DwordLength = n - cmd ## _length_bias, \
+ __anv_cmd_header(cmd), \
+ .DwordLength = n - __anv_cmd_length_bias(cmd), \
__VA_ARGS__ \
}; \
- cmd ## _pack(batch, __dst, &__template); \
+ __anv_cmd_pack(cmd)(batch, __dst, &__template); \
__dst; \
})
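+
+/* anv_batch_emitn evaluates to the destination pointer so the caller can
+ * fill in the remaining (n - length) dwords of a variable-length command. */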
do { \
uint32_t *dw; \
\
- assert(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1)); \
+ static_assert(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1), "array size mismatch"); \
dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
dw[i] = (dwords0)[i] | (dwords1)[i]; \
VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));\
} while (0)
-static const struct GEN7_MEMORY_OBJECT_CONTROL_STATE GEN7_MOCS = {
- .GraphicsDataTypeGFDT = 0,
- .LLCCacheabilityControlLLCCC = 0,
- .L3CacheabilityControlL3CC = 1
-};
+#define anv_state_pool_emit(pool, cmd, align, ...) ({ \
+ const uint32_t __size = __anv_cmd_length(cmd) * 4; \
+ struct anv_state __state = \
+ anv_state_pool_alloc((pool), __size, align); \
+ struct cmd __template = { \
+ __VA_ARGS__ \
+ }; \
+ __anv_cmd_pack(cmd)(NULL, __state.map, &__template); \
+ VG(VALGRIND_CHECK_MEM_IS_DEFINED(__state.map, __anv_cmd_length(cmd) * 4)); \
+ if (!(pool)->block_pool->device->info.has_llc) \
+ anv_state_clflush(__state); \
+ __state; \
+ })
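+
+/* Illustrative usage (state type and field are examples): pack a state
+ * object straight into a pool allocation:
+ *
+ *    struct anv_state cc =
+ *       anv_state_pool_emit(&device->dynamic_state_pool,
+ *                           GEN8_COLOR_CALC_STATE, 64,
+ *                           .BlendConstantColorRed = 1.0f);
+ */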
+
+#define GEN7_MOCS (struct GEN7_MEMORY_OBJECT_CONTROL_STATE) { \
+ .GraphicsDataTypeGFDT = 0, \
+ .LLCCacheabilityControlLLCCC = 0, \
+ .L3CacheabilityControlL3CC = 1, \
+}
+
+#define GEN75_MOCS (struct GEN75_MEMORY_OBJECT_CONTROL_STATE) { \
+ .LLCeLLCCacheabilityControlLLCCC = 0, \
+ .L3CacheabilityControlL3CC = 1, \
+}
#define GEN8_MOCS { \
.MemoryTypeLLCeLLCCacheabilityControl = WB, \
.AgeforQUADLRU = 0 \
}
+/* Skylake: MOCS is now an index into an array of 62 different caching
+ * configurations programmed by the kernel.
+ */
+
+#define GEN9_MOCS { \
+ /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
+ .IndextoMOCSTables = 2 \
+ }
+
+#define GEN9_MOCS_PTE { \
+ /* TC=LLC/eLLC, LeCC=PTE, LRUM=3, L3CC=WB */ \
+ .IndextoMOCSTables = 1 \
+ }
+
struct anv_device_memory {
struct anv_bo bo;
+ uint32_t type_index;
VkDeviceSize map_size;
void * map;
};
/* Number of array elements in this binding */
uint16_t array_size;
+ /* Index into the flattened descriptor set */
+ uint16_t descriptor_index;
+
/* Index into the dynamic state array for a dynamic buffer */
int16_t dynamic_offset_index;
+ /* Index into the descriptor set buffer views */
+ int16_t buffer_index;
+
struct {
/* Index into the binding table for the associated surface */
int16_t surface_index;
/* Index into the sampler table for the associated sampler */
int16_t sampler_index;
- } stage[VK_SHADER_STAGE_NUM];
+
+ /* Index into the image table for the associated image */
+ int16_t image_index;
+ } stage[MESA_SHADER_STAGES];
/* Immutable samplers (or NULL if no immutable samplers) */
struct anv_sampler **immutable_samplers;
/* Shader stages affected by this descriptor set */
uint16_t shader_stages;
+ /* Number of buffers in this descriptor set */
+ uint16_t buffer_count;
+
/* Number of dynamic offsets used by this descriptor set */
uint16_t dynamic_offset_count;
struct anv_descriptor_set_binding_layout binding[0];
};
-enum anv_descriptor_type {
- ANV_DESCRIPTOR_TYPE_EMPTY = 0,
- ANV_DESCRIPTOR_TYPE_BUFFER_VIEW,
- ANV_DESCRIPTOR_TYPE_BUFFER_AND_OFFSET,
- ANV_DESCRIPTOR_TYPE_IMAGE_VIEW,
- ANV_DESCRIPTOR_TYPE_SAMPLER,
- ANV_DESCRIPTOR_TYPE_IMAGE_VIEW_AND_SAMPLER,
-};
-
struct anv_descriptor {
- enum anv_descriptor_type type;
+ VkDescriptorType type;
union {
struct {
union {
- struct anv_buffer_view *buffer_view;
struct anv_image_view *image_view;
};
struct anv_sampler *sampler;
};
- struct {
- struct anv_buffer *buffer;
- uint64_t offset;
- uint64_t range;
- };
+ struct anv_buffer_view *buffer_view;
};
};
struct anv_descriptor_set {
const struct anv_descriptor_set_layout *layout;
+ struct anv_buffer_view *buffer_views;
struct anv_descriptor descriptors[0];
};
struct {
uint32_t surface_start;
uint32_t sampler_start;
- } stage[VK_SHADER_STAGE_NUM];
+ uint32_t image_start;
+ } stage[MESA_SHADER_STAGES];
} set[MAX_SETS];
uint32_t num_sets;
struct anv_pipeline_binding *surface_to_descriptor;
uint32_t sampler_count;
struct anv_pipeline_binding *sampler_to_descriptor;
- } stage[VK_SHADER_STAGE_NUM];
+ uint32_t image_count;
+ } stage[MESA_SHADER_STAGES];
struct anv_pipeline_binding entries[0];
};
struct anv_device * device;
VkDeviceSize size;
+ VkBufferUsageFlags usage;
+
/* Set when bound */
struct anv_bo * bo;
- VkDeviceSize offset;
+ VkDeviceSize offset;
};
enum anv_cmd_dirty_bits {
ANV_CMD_DIRTY_DYNAMIC_ALL = (1 << 9) - 1,
ANV_CMD_DIRTY_PIPELINE = 1 << 9,
ANV_CMD_DIRTY_INDEX_BUFFER = 1 << 10,
+ ANV_CMD_DIRTY_RENDER_TARGETS = 1 << 11,
};
typedef uint32_t anv_cmd_dirty_mask_t;
uint32_t base_vertex;
uint32_t base_instance;
- /* Offsets for dynamically bound buffers */
- uint32_t dynamic_offsets[MAX_DYNAMIC_BUFFERS];
+ /* Offsets and ranges for dynamically bound buffers */
+ struct {
+ uint32_t offset;
+ uint32_t range;
+ } dynamic[MAX_DYNAMIC_BUFFERS];
/* Image data for image_load_store on pre-SKL */
struct brw_image_param images[MAX_IMAGES];
struct {
float bias;
float clamp;
- float slope_scaled;
+ float slope;
} depth_bias;
float blend_constants[4];
uint32_t vb_dirty;
anv_cmd_dirty_mask_t dirty;
anv_cmd_dirty_mask_t compute_dirty;
+ uint32_t num_workgroups_offset;
+ struct anv_bo *num_workgroups_bo;
VkShaderStageFlags descriptors_dirty;
VkShaderStageFlags push_constants_dirty;
uint32_t scratch_size;
struct anv_framebuffer * framebuffer;
struct anv_render_pass * pass;
struct anv_subpass * subpass;
- uint32_t state_vf[GEN8_3DSTATE_VF_length];
+ uint32_t restart_index;
struct anv_vertex_binding vertex_bindings[MAX_VBS];
struct anv_descriptor_set * descriptors[MAX_SETS];
- struct anv_push_constants * push_constants[VK_SHADER_STAGE_NUM];
+ struct anv_push_constants * push_constants[MESA_SHADER_STAGES];
struct anv_dynamic_state dynamic;
struct {
};
struct anv_cmd_pool {
+ VkAllocationCallbacks alloc;
struct list_head cmd_buffers;
};
struct anv_device * device;
+ struct anv_cmd_pool * pool;
struct list_head pool_link;
struct anv_batch batch;
struct anv_state_stream surface_state_stream;
struct anv_state_stream dynamic_state_stream;
- VkCmdBufferOptimizeFlags opt_flags;
- VkCmdBufferLevel level;
+ VkCommandBufferUsageFlags usage_flags;
+ VkCommandBufferLevel level;
struct anv_cmd_state state;
};
unsigned stage, struct anv_state *bt_state);
VkResult anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
unsigned stage, struct anv_state *state);
-void anv_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer);
+void gen7_cmd_buffer_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer);
struct anv_state anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
- uint32_t *a, uint32_t dwords,
- uint32_t alignment);
+ const void *data, uint32_t size, uint32_t alignment);
struct anv_state anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
uint32_t *a, uint32_t *b,
uint32_t dwords, uint32_t alignment);
VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer);
-void anv_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer);
-void anv_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer);
+void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer);
+void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer);
void gen7_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);
+void gen75_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);
void gen8_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);
+void gen9_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);
void gen8_cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer,
struct anv_subpass *subpass);
+void gen9_cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer,
+ struct anv_subpass *subpass);
void anv_cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer,
struct anv_subpass *subpass);
struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
- VkShaderStage stage);
+ gl_shader_stage stage);
+struct anv_state
+anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_clear_attachments(struct anv_cmd_buffer *cmd_buffer,
struct anv_render_pass *pass,
bool ready;
};
+struct anv_event {
+ uint32_t semaphore;
+ struct anv_state state;
+};
+
struct nir_shader;
struct anv_shader_module {
char data[0];
};
-struct anv_shader {
- struct anv_shader_module * module;
- char entrypoint[0];
-};
+static inline gl_shader_stage
+vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
+{
+ assert(__builtin_popcount(vk_stage) == 1);
+ return ffs(vk_stage) - 1;
+}
+
+static inline VkShaderStageFlagBits
+mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
+{
+ return (1 << mesa_stage);
+}
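+
+/* For example, VK_SHADER_STAGE_FRAGMENT_BIT (1 << 4) maps to
+ * MESA_SHADER_FRAGMENT (4), and mesa_to_vk_shader_stage maps it back. */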
+
+#define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
+
+#define anv_foreach_stage(stage, stage_bits) \
+ for (gl_shader_stage stage, \
+ __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK); \
+ stage = __builtin_ffs(__tmp) - 1, __tmp; \
+ __tmp &= ~(1 << (stage)))
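+
+/* Illustrative usage (uses anv_cmd_state fields declared below): visit only
+ * the stages present in a VkShaderStageFlags mask:
+ *
+ *    anv_foreach_stage(s, cmd_buffer->state.descriptors_dirty) {
+ *       anv_cmd_buffer_emit_samplers(cmd_buffer, s, &sampler_state);
+ *    }
+ */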
struct anv_pipeline {
struct anv_device * device;
struct brw_gs_prog_data gs_prog_data;
struct brw_cs_prog_data cs_prog_data;
bool writes_point_size;
- struct brw_stage_prog_data * prog_data[VK_SHADER_STAGE_NUM];
- uint32_t scratch_start[VK_SHADER_STAGE_NUM];
+ struct brw_stage_prog_data * prog_data[MESA_SHADER_STAGES];
+ uint32_t scratch_start[MESA_SHADER_STAGES];
uint32_t total_scratch;
struct {
uint32_t vs_start;
} urb;
VkShaderStageFlags active_stages;
- struct anv_state_stream program_stream;
struct anv_state blend_state;
uint32_t vs_simd8;
uint32_t vs_vec4;
uint32_t ps_ksp2;
uint32_t ps_grf_start0;
uint32_t ps_grf_start2;
- uint32_t gs_vec4;
+ uint32_t gs_kernel;
uint32_t gs_vertex_count;
uint32_t cs_simd;
uint32_t cs_right_mask;
struct {
- uint32_t sf[GEN7_3DSTATE_SF_length];
- uint32_t depth_stencil_state[GEN7_DEPTH_STENCIL_STATE_length];
+ uint32_t sf[7];
+ uint32_t depth_stencil_state[3];
} gen7;
struct {
- uint32_t sf[GEN8_3DSTATE_SF_length];
- uint32_t vf[GEN8_3DSTATE_VF_length];
- uint32_t raster[GEN8_3DSTATE_RASTER_length];
- uint32_t wm_depth_stencil[GEN8_3DSTATE_WM_DEPTH_STENCIL_length];
+ uint32_t sf[4];
+ uint32_t raster[5];
+ uint32_t wm_depth_stencil[3];
} gen8;
+
+ struct {
+ uint32_t wm_depth_stencil[4];
+ } gen9;
};
struct anv_graphics_pipeline_create_info {
VkResult
anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
+ struct anv_pipeline_cache *cache,
const VkGraphicsPipelineCreateInfo *pCreateInfo,
- const struct anv_graphics_pipeline_create_info *extra);
+ const struct anv_graphics_pipeline_create_info *extra,
+ const VkAllocationCallbacks *alloc);
VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
+ struct anv_pipeline_cache *cache,
const VkComputePipelineCreateInfo *info,
- struct anv_shader *shader);
+ struct anv_shader_module *module,
+ const char *entrypoint_name);
VkResult
anv_graphics_pipeline_create(VkDevice device,
+ VkPipelineCache cache,
const VkGraphicsPipelineCreateInfo *pCreateInfo,
const struct anv_graphics_pipeline_create_info *extra,
+ const VkAllocationCallbacks *alloc,
VkPipeline *pPipeline);
VkResult
gen7_graphics_pipeline_create(VkDevice _device,
+ struct anv_pipeline_cache *cache,
const VkGraphicsPipelineCreateInfo *pCreateInfo,
const struct anv_graphics_pipeline_create_info *extra,
+ const VkAllocationCallbacks *alloc,
VkPipeline *pPipeline);
+VkResult
+gen75_graphics_pipeline_create(VkDevice _device,
+ struct anv_pipeline_cache *cache,
+ const VkGraphicsPipelineCreateInfo *pCreateInfo,
+ const struct anv_graphics_pipeline_create_info *extra,
+ const VkAllocationCallbacks *alloc,
+ VkPipeline *pPipeline);
+
VkResult
gen8_graphics_pipeline_create(VkDevice _device,
+ struct anv_pipeline_cache *cache,
+ const VkGraphicsPipelineCreateInfo *pCreateInfo,
+ const struct anv_graphics_pipeline_create_info *extra,
+ const VkAllocationCallbacks *alloc,
+ VkPipeline *pPipeline);
+VkResult
+gen9_graphics_pipeline_create(VkDevice _device,
+ struct anv_pipeline_cache *cache,
const VkGraphicsPipelineCreateInfo *pCreateInfo,
const struct anv_graphics_pipeline_create_info *extra,
+ const VkAllocationCallbacks *alloc,
VkPipeline *pPipeline);
VkResult
gen7_compute_pipeline_create(VkDevice _device,
+ struct anv_pipeline_cache *cache,
const VkComputePipelineCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *alloc,
VkPipeline *pPipeline);
+VkResult
+gen75_compute_pipeline_create(VkDevice _device,
+ struct anv_pipeline_cache *cache,
+ const VkComputePipelineCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *alloc,
+ VkPipeline *pPipeline);
VkResult
gen8_compute_pipeline_create(VkDevice _device,
+ struct anv_pipeline_cache *cache,
const VkComputePipelineCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *alloc,
+ VkPipeline *pPipeline);
+VkResult
+gen9_compute_pipeline_create(VkDevice _device,
+ struct anv_pipeline_cache *cache,
+ const VkComputePipelineCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *alloc,
VkPipeline *pPipeline);
struct anv_format {
const VkFormat vk_format;
const char *name;
- uint16_t surface_format; /**< RENDER_SURFACE_STATE.SurfaceFormat */
- uint8_t cpp; /**< Bytes-per-pixel of anv_format::surface_format. */
- uint8_t num_channels;
+ enum isl_format surface_format; /**< RENDER_SURFACE_STATE.SurfaceFormat */
+ const struct isl_format_layout *isl_layout;
uint16_t depth_format; /**< 3DSTATE_DEPTH_BUFFER.SurfaceFormat */
bool has_stencil;
};
-/**
- * Stencil formats are often a special case. To reduce the number of lookups
- * into the VkFormat-to-anv_format translation table when working with
- * stencil, here is the handle to the table's entry for VK_FORMAT_S8_UINT.
- */
-extern const struct anv_format *const anv_format_s8_uint;
-
const struct anv_format *
anv_format_for_vk_format(VkFormat format);
+enum isl_format
+anv_get_isl_format(VkFormat format, VkImageAspectFlags aspect,
+ VkImageTiling tiling);
+
static inline bool
anv_format_is_color(const struct anv_format *format)
{
return format->depth_format || format->has_stencil;
}
-struct anv_image_view_info {
- uint8_t surface_type; /**< RENDER_SURFACE_STATE.SurfaceType */
- bool is_array:1; /**< RENDER_SURFACE_STATE.SurfaceArray */
- bool is_cube:1; /**< RENDER_SURFACE_STATE.CubeFaceEnable* */
-};
-
-struct anv_image_view_info
-anv_image_view_info_for_vk_image_view_type(VkImageViewType type);
-
/**
- * A proxy for the color surfaces, depth surfaces, and stencil surfaces.
+ * Subsurface of an anv_image.
*/
struct anv_surface {
+ struct isl_surf isl;
+
/**
* Offset from VkImage's base address, as bound by vkBindImageMemory().
*/
uint32_t offset;
-
- uint32_t stride; /**< RENDER_SURFACE_STATE.SurfacePitch */
- uint16_t qpitch; /**< RENDER_SURFACE_STATE.QPitch */
-
- /**
- * \name Alignment of miptree images, in units of pixels.
- *
- * These fields contain the real alignment values, not the values to be
- * given to the GPU. For example, if h_align is 4, then program the GPU
- * with HALIGN_4.
- * \{
- */
- uint8_t h_align; /**< RENDER_SURFACE_STATE.SurfaceHorizontalAlignment */
- uint8_t v_align; /**< RENDER_SURFACE_STATE.SurfaceVerticalAlignment */
- /** \} */
-
- uint8_t tile_mode; /**< RENDER_SURFACE_STATE.TileMode */
};
struct anv_image {
VkImageType type;
+ /* The original VkFormat provided by the client. This may not match any
+ * of the actual surface formats.
+ */
+ VkFormat vk_format;
const struct anv_format *format;
VkExtent3D extent;
uint32_t levels;
uint32_t array_size;
VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
+ VkImageTiling tiling; /**< VkImageCreateInfo::tiling */
VkDeviceSize size;
uint32_t alignment;
struct anv_bo *bo;
VkDeviceSize offset;
- uint8_t surface_type; /**< RENDER_SURFACE_STATE.SurfaceType */
-
bool needs_nonrt_surface_state:1;
bool needs_color_rt_surface_state:1;
+ bool needs_storage_surface_state:1;
/**
* Image subsurfaces
};
};
-struct anv_buffer_view {
- struct anv_state surface_state; /**< RENDER_SURFACE_STATE */
- struct anv_bo *bo;
- uint32_t offset; /**< Offset into bo. */
- uint32_t range; /**< VkBufferViewCreateInfo::range */
- const struct anv_format *format; /**< VkBufferViewCreateInfo::format */
-};
-
struct anv_image_view {
const struct anv_image *image; /**< VkImageViewCreateInfo::image */
- const struct anv_format *format; /**< VkImageViewCreateInfo::format */
struct anv_bo *bo;
uint32_t offset; /**< Offset into bo. */
+
+ VkImageAspectFlags aspect_mask;
+ VkFormat vk_format;
+ enum isl_format format;
VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */
/** RENDER_SURFACE_STATE when using image as a color render target. */
/** RENDER_SURFACE_STATE when using image as a non render target. */
struct anv_state nonrt_surface_state;
+
+ /** RENDER_SURFACE_STATE when using image as a storage image. */
+ struct anv_state storage_surface_state;
};
struct anv_image_create_info {
const VkImageCreateInfo *vk_info;
- bool force_tile_mode;
- uint8_t tile_mode;
+ isl_tiling_flags_t isl_tiling_flags;
uint32_t stride;
};
VkResult anv_image_create(VkDevice _device,
const struct anv_image_create_info *info,
+ const VkAllocationCallbacks* alloc,
VkImage *pImage);
struct anv_surface *
const VkImageViewCreateInfo* pCreateInfo,
struct anv_cmd_buffer *cmd_buffer);
+void
+gen75_image_view_init(struct anv_image_view *iview,
+ struct anv_device *device,
+ const VkImageViewCreateInfo* pCreateInfo,
+ struct anv_cmd_buffer *cmd_buffer);
+
void
gen8_image_view_init(struct anv_image_view *iview,
struct anv_device *device,
const VkImageViewCreateInfo* pCreateInfo,
struct anv_cmd_buffer *cmd_buffer);
-VkResult anv_buffer_view_create(struct anv_device *device,
- const VkBufferViewCreateInfo *pCreateInfo,
- struct anv_buffer_view **bview_out);
+void
+gen9_image_view_init(struct anv_image_view *iview,
+ struct anv_device *device,
+ const VkImageViewCreateInfo* pCreateInfo,
+ struct anv_cmd_buffer *cmd_buffer);
+
-void anv_fill_buffer_surface_state(struct anv_device *device, void *state,
- const struct anv_format *format,
- uint32_t offset, uint32_t range);
+struct anv_buffer_view {
+ enum isl_format format; /**< VkBufferViewCreateInfo::format */
+ struct anv_bo *bo;
+ uint32_t offset; /**< Offset into bo. */
+ uint64_t range; /**< VkBufferViewCreateInfo::range */
-void gen7_fill_buffer_surface_state(void *state, const struct anv_format *format,
- uint32_t offset, uint32_t range);
-void gen8_fill_buffer_surface_state(void *state, const struct anv_format *format,
- uint32_t offset, uint32_t range);
+ struct anv_state surface_state;
+ struct anv_state storage_surface_state;
+};
+
+const struct anv_format *
+anv_format_for_descriptor_type(VkDescriptorType type);
+
+void anv_fill_buffer_surface_state(struct anv_device *device, void *state,
+ enum isl_format format,
+ uint32_t offset, uint32_t range,
+ uint32_t stride);
+
+void gen7_fill_buffer_surface_state(void *state, enum isl_format format,
+ uint32_t offset, uint32_t range,
+ uint32_t stride);
+void gen75_fill_buffer_surface_state(void *state, enum isl_format format,
+ uint32_t offset, uint32_t range,
+ uint32_t stride);
+void gen8_fill_buffer_surface_state(void *state, enum isl_format format,
+ uint32_t offset, uint32_t range,
+ uint32_t stride);
+void gen9_fill_buffer_surface_state(void *state, enum isl_format format,
+ uint32_t offset, uint32_t range,
+ uint32_t stride);
+
+void anv_image_view_fill_image_param(struct anv_device *device,
+ struct anv_image_view *view,
+ struct brw_image_param *param);
+void anv_buffer_view_fill_image_param(struct anv_device *device,
+ struct anv_buffer_view *view,
+ struct brw_image_param *param);
struct anv_sampler {
uint32_t state[4];
struct anv_render_pass {
uint32_t attachment_count;
uint32_t subpass_count;
+ uint32_t * subpass_attachments;
struct anv_render_pass_attachment * attachments;
struct anv_subpass subpasses[0];
};
struct anv_bo bo;
};
-void anv_device_init_meta(struct anv_device *device);
+VkResult anv_device_init_meta(struct anv_device *device);
void anv_device_finish_meta(struct anv_device *device);
void *anv_lookup_entrypoint(const char *name);
static inline struct __anv_type * \
__anv_type ## _from_handle(__VkType _handle) \
{ \
- return (struct __anv_type *) _handle.handle; \
+ return (struct __anv_type *)(uintptr_t) _handle; \
} \
\
static inline __VkType \
__anv_type ## _to_handle(struct __anv_type *_obj) \
{ \
- return (__VkType) { .handle = (uint64_t) _obj }; \
+ return (__VkType)(uintptr_t) _obj; \
}
#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
struct __anv_type *__name = __anv_type ## _from_handle(__handle)
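+
+/* Illustrative usage at an entrypoint boundary (body elided):
+ *
+ *    void anv_DestroySampler(VkDevice _device, VkSampler _sampler,
+ *                            const VkAllocationCallbacks *pAllocator)
+ *    {
+ *       ANV_FROM_HANDLE(anv_device, device, _device);
+ *       ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);
+ *       ...
+ *    }
+ */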
-ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCmdBuffer)
+ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCmdPool)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView);
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView);
+ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
-ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader, VkShader)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)
#define ANV_DEFINE_STRUCT_CASTS(__anv_type, __VkType) \