RADV_MEM_TYPE_COUNT
};
+/* Driver-internal allocation flag bits for radv_alloc_memory(); not part of
+ * the Vulkan API surface.
+ */
+enum radv_mem_flags_bits {
+ /* enable implicit synchronization when accessing the underlying bo */
+ RADV_MEM_IMPLICIT_SYNC = 1 << 0,
+};
+
#define radv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
static inline uint32_t
void *radv_lookup_entrypoint(const char *name);
-struct radv_extensions {
- VkExtensionProperties *ext_array;
- uint32_t num_ext;
-};
-
struct radv_physical_device {
VK_LOADER_DATA _loader_data;
int local_fd;
struct wsi_device wsi_device;
- struct radv_extensions extensions;
bool has_rbplus; /* if RB+ register exist */
bool rbplus_allowed; /* if RB+ is allowed */
-
+ bool has_clear_state;
/* This is the drivers on-disk cache used as a fallback as opposed to
* the pipeline cache defined by apps.
*/
struct disk_cache * disk_cache;
+
+ VkPhysicalDeviceMemoryProperties memory_properties;
+ enum radv_mem_type mem_type_indices[RADV_MEM_TYPE_COUNT];
};
struct radv_instance {
VkResult radv_init_wsi(struct radv_physical_device *physical_device);
void radv_finish_wsi(struct radv_physical_device *physical_device);
+bool radv_instance_extension_supported(const char *name);
+uint32_t radv_physical_device_api_version(struct radv_physical_device *dev);
+bool radv_physical_device_extension_supported(struct radv_physical_device *dev,
+ const char *name);
+
struct cache_entry;
struct radv_pipeline_cache {
VkAllocationCallbacks alloc;
};
+/* Non-shader pipeline state that influences generated shader code; hashed
+ * together with the shader stages in radv_hash_shaders() to form the
+ * pipeline-cache key. Field semantics match their names — e.g.
+ * instance_rate_inputs is presumably a per-attribute bitmask of
+ * instance-rate vertex inputs; confirm at the points where the key is built.
+ */
+struct radv_pipeline_key {
+ uint32_t instance_rate_inputs;
+ unsigned tess_input_vertices;
+ uint32_t col_format;
+ uint32_t is_int8;
+ uint32_t is_int10;
+ uint32_t multisample : 1;
+ uint32_t has_multiview_view_index : 1;
+};
+
void
radv_pipeline_cache_init(struct radv_pipeline_cache *cache,
struct radv_device *device);
radv_pipeline_cache_load(struct radv_pipeline_cache *cache,
const void *data, size_t size);
-struct radv_shader_variant *
-radv_create_shader_variant_from_pipeline_cache(struct radv_device *device,
- struct radv_pipeline_cache *cache,
- const unsigned char *sha1);
+struct radv_shader_variant;
-struct radv_shader_variant *
-radv_pipeline_cache_insert_shader(struct radv_device *device,
- struct radv_pipeline_cache *cache,
- const unsigned char *sha1,
- struct radv_shader_variant *variant,
- const void *code, unsigned code_size);
+bool
+radv_create_shader_variants_from_pipeline_cache(struct radv_device *device,
+ struct radv_pipeline_cache *cache,
+ const unsigned char *sha1,
+ struct radv_shader_variant **variants);
+
+void
+radv_pipeline_cache_insert_shaders(struct radv_device *device,
+ struct radv_pipeline_cache *cache,
+ const unsigned char *sha1,
+ struct radv_shader_variant **variants,
+ const void *const *codes,
+ const unsigned *code_sizes);
struct radv_meta_state {
VkAllocationCallbacks alloc;
VK_LOADER_DATA _loader_data;
struct radv_device * device;
struct radeon_winsys_ctx *hw_ctx;
+ enum radeon_ctx_priority priority;
int queue_family_index;
int queue_idx;
bool llvm_supports_spill;
bool has_distributed_tess;
+ bool dfsm_allowed;
uint32_t tess_offchip_block_dw_size;
uint32_t scratch_waves;
struct radeon_winsys_bo *trace_bo;
uint32_t *trace_id_ptr;
+ /* Whether to keep shader debug info, for tracing or VK_AMD_shader_info */
+ bool keep_shader_info;
+
struct radv_physical_device *physical_device;
/* Backup in-memory cache to be used if the app doesn't provide one */
/* Set when bound */
struct radeon_winsys_bo * bo;
VkDeviceSize offset;
+
+ bool shareable;
};
RADV_CMD_DIRTY_DYNAMIC_ALL = (1 << 9) - 1,
RADV_CMD_DIRTY_PIPELINE = 1 << 9,
RADV_CMD_DIRTY_INDEX_BUFFER = 1 << 10,
- RADV_CMD_DIRTY_RENDER_TARGETS = 1 << 11,
+ RADV_CMD_DIRTY_FRAMEBUFFER = 1 << 11,
};
typedef uint32_t radv_cmd_dirty_mask_t;
};
struct radv_dynamic_state {
+ /**
+ * Bitmask of (1 << VK_DYNAMIC_STATE_*).
+ * Defines the set of saved dynamic state.
+ */
+ uint32_t mask;
+
struct radv_viewport_state viewport;
struct radv_scissor_state scissor;
struct radv_descriptor_set * descriptors[MAX_SETS];
struct radv_attachment_state * attachments;
VkRect2D render_area;
+
+ /* Index buffer */
+ struct radv_buffer *index_buffer;
+ uint64_t index_offset;
uint32_t index_type;
uint32_t max_index_count;
uint64_t index_va;
+
int32_t last_primitive_reset_en;
uint32_t last_primitive_reset_index;
enum radv_cmd_flush_bits flush_bits;
void si_cp_dma_clear_buffer(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
uint64_t size, unsigned value);
void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer);
-void radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
- struct radv_descriptor_set *set,
- unsigned idx);
bool
radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer,
unsigned size,
void radv_set_dcc_need_cmask_elim_pred(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image,
bool value);
-void radv_fill_buffer(struct radv_cmd_buffer *cmd_buffer,
- struct radeon_winsys_bo *bo,
- uint64_t offset, uint64_t size, uint32_t value);
+uint32_t radv_fill_buffer(struct radv_cmd_buffer *cmd_buffer,
+ struct radeon_winsys_bo *bo,
+ uint64_t offset, uint64_t size, uint32_t value);
void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer);
bool radv_get_memory_fd(struct radv_device *device,
struct radv_device_memory *memory,
int *pFD);
+/* Memory-allocation entry point mirroring the vkAllocateMemory signature with
+ * an extra driver-internal `flags` parameter (radv_mem_flags_bits values,
+ * e.g. RADV_MEM_IMPLICIT_SYNC). Presumably the shared backing implementation
+ * of vkAllocateMemory — confirm at the definition.
+ */
+VkResult radv_alloc_memory(VkDevice _device,
+ const VkMemoryAllocateInfo* pAllocateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ enum radv_mem_flags_bits flags,
+ VkDeviceMemory* pMem);
+
/*
* Takes x,y,z as exact numbers of invocations, instead of blocks.
*
};
struct radv_shader_module;
-struct ac_shader_variant_key;
+/* Flag bits for the `flags` argument of radv_hash_shaders(); compile options
+ * that must be folded into the shader-cache hash.
+ */
+#define RADV_HASH_SHADER_IS_GEOM_COPY_SHADER (1 << 0)
+#define RADV_HASH_SHADER_SISCHED (1 << 1)
+#define RADV_HASH_SHADER_UNSAFE_MATH (1 << 2)
void
-radv_hash_shader(unsigned char *hash, struct radv_shader_module *module,
- const char *entrypoint,
- const VkSpecializationInfo *spec_info,
- const struct radv_pipeline_layout *layout,
- const struct ac_shader_variant_key *key,
- uint32_t is_geom_copy_shader);
+radv_hash_shaders(unsigned char *hash,
+ const VkPipelineShaderStageCreateInfo **stages,
+ const struct radv_pipeline_layout *layout,
+ const struct radv_pipeline_key *key,
+ uint32_t flags);
static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
uint32_t tf_param;
};
+/* Derived geometry-shader configuration stored per pipeline (see the
+ * radv_pipeline::gs member). The vgt_* field names mirror the VGT_GS_*
+ * hardware registers they appear intended for — confirm against the
+ * register-emit code.
+ */
+struct radv_gs_state {
+ uint32_t vgt_gs_onchip_cntl;
+ uint32_t vgt_gs_max_prims_per_subgroup;
+ uint32_t vgt_esgs_ring_itemsize;
+ uint32_t lds_size;
+};
+
struct radv_vertex_elements_info {
uint32_t rsrc_word3[MAX_VERTEX_ATTRIBS];
uint32_t format_size[MAX_VERTEX_ATTRIBS];
struct radv_pipeline {
struct radv_device * device;
- uint32_t dynamic_state_mask;
struct radv_dynamic_state dynamic_state;
struct radv_pipeline_layout * layout;
struct radv_raster_state raster;
struct radv_multisample_state ms;
struct radv_tessellation_state tess;
+ struct radv_gs_state gs;
uint32_t db_shader_control;
uint32_t shader_z_format;
unsigned prim;
static inline bool radv_pipeline_has_tess(struct radv_pipeline *pipeline)
{
- return pipeline->shaders[MESA_SHADER_TESS_EVAL] ? true : false;
+ /* NOTE(review): the check moves from TESS_EVAL to TESS_CTRL — presumably
+  * TESS_CTRL is the stage guaranteed present in shaders[] whenever
+  * tessellation is enabled (TESS_EVAL may be merged into another stage);
+  * confirm against pipeline creation.
+  */
+ return pipeline->shaders[MESA_SHADER_TESS_CTRL] ? true : false;
}
struct ac_userdata_info *radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
gl_shader_stage stage,
int idx);
+struct radv_shader_variant *radv_get_vertex_shader(struct radv_pipeline *pipeline);
+
struct radv_graphics_pipeline_create_info {
bool use_rectlist;
bool db_depth_clear;
unsigned base_address_reg;
};
-struct r600_htile_info {
- uint64_t offset;
- uint64_t size;
- unsigned pitch;
- unsigned height;
- unsigned xalign;
- unsigned yalign;
-};
-
struct radv_image {
VkImageType type;
/* The original VkFormat provided by the client. This may not match any
/* Set when bound */
struct radeon_winsys_bo *bo;
VkDeviceSize offset;
- uint32_t dcc_offset;
- uint32_t htile_offset;
+ uint64_t dcc_offset;
+ uint64_t htile_offset;
bool tc_compatible_htile;
struct radeon_surf surface;
struct radv_fmask_info fmask;
struct radv_cmask_info cmask;
- uint32_t clear_value_offset;
- uint32_t dcc_pred_offset;
+ uint64_t clear_value_offset;
+ uint64_t dcc_pred_offset;
};
/* Whether the image has a htile that is known consistent with the contents of