struct spirv_supported_capabilities {
bool address;
bool atomic_storage;
+ bool demote_to_helper_invocation;
bool derivative_group;
bool descriptor_array_dynamic_indexing;
bool descriptor_array_non_uniform_indexing;
bool physical_storage_buffer_address;
bool post_depth_coverage;
bool runtime_descriptor_array;
+ bool float_controls;
+ bool shader_clock;
bool shader_viewport_index_layer;
bool stencil_export;
bool storage_8bit;
unsigned num_ssbos;
/* Number of images used by this shader */
unsigned num_images;
+ /* Index of the last MSAA image. */
+ int last_msaa_image;
/* Which inputs are actually read */
uint64_t inputs_read;
/** Was this shader linked with any transform feedback varyings? */
bool has_transform_feedback_varyings;
+ /* SPV_KHR_float_controls: execution mode for floating point ops */
+ unsigned float_controls_execution_mode;
+
union {
struct {
/* Which inputs are doubles */
uint64_t double_inputs;
+ /* For AMD-specific driver-internal shaders. It replaces vertex
+ * buffer loads with code generating VS inputs from scalar registers.
+ *
+ * Valid values: SI_VS_BLIT_SGPRS_POS_*
+ */
+ unsigned blit_sgprs_amd;
+
/* True if the shader writes position in window space coordinates pre-transform */
bool window_space_position;
} vs;
struct {
bool uses_discard;
+ /**
+ * True if this fragment shader requires helper invocations. This
+ * can be caused by the use of ALU derivative ops, texture
+ * instructions which do implicit derivatives, and the use of quad
+ * subgroup operations.
+ */
+ bool needs_helper_invocations;
+
/**
 * Size of the fixed local workgroup, in invocations, for each
 * dimension (x, y, z).
 */
unsigned local_size[3];
bool local_size_variable;
+ char user_data_components_amd;
/**
* Size of shared variables accessed by the compute shader.