/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#ifndef SHADER_INFO_H
#define SHADER_INFO_H

#include <stdbool.h>
#include <stdint.h>

#include "shader_enums.h"
/**
 * Per-capability feature flags: each member indicates whether the
 * corresponding SPIR-V capability is supported.
 *
 * NOTE(review): the original line numbering in this copy has gaps, so
 * additional capability flags may have been lost in extraction — compare
 * against the upstream header before relying on this list being complete.
 */
struct spirv_supported_capabilities {
   bool demote_to_helper_invocation;
   bool derivative_group;
   bool descriptor_array_dynamic_indexing;
   bool descriptor_array_non_uniform_indexing;
   bool descriptor_indexing;
   bool fragment_shader_sample_interlock;
   bool fragment_shader_pixel_interlock;
   bool geometry_streams;
   bool image_read_without_format;
   bool image_write_without_format;
   bool physical_storage_buffer_address;
   bool post_depth_coverage;
   bool runtime_descriptor_array;
   bool shader_viewport_index_layer;
   bool storage_image_ms;
   bool subgroup_arithmetic;
   bool subgroup_shuffle;
   bool transform_feedback;
   bool variable_pointers;
   bool vk_memory_model_device_scope;

   /* AMD vendor-extension capabilities. */
   bool amd_shader_ballot;
   bool amd_trinary_minmax;
   bool amd_image_read_write_lod;
};
87 typedef struct shader_info
{
90 /* Descriptive name provided by the client; may be NULL */
93 /** The shader stage, such as MESA_SHADER_VERTEX. */
94 gl_shader_stage stage
:8;
96 /** The shader stage in a non SSO linked program that follows this stage,
97 * such as MESA_SHADER_FRAGMENT.
99 gl_shader_stage next_stage
:8;
101 /* Number of textures used by this shader */
102 uint8_t num_textures
;
103 /* Number of uniform buffers used by this shader */
105 /* Number of atomic buffers used by this shader */
107 /* Number of shader storage buffers (max .driver_location + 1) used by this
108 * shader. In the case of nir_lower_atomics_to_ssbo being used, this will
109 * be the number of actual SSBOs in gl_program->info, and the lowered SSBOs
110 * and atomic counters in nir_shader->info.
113 /* Number of images used by this shader */
115 /* Index of the last MSAA image. */
116 int8_t last_msaa_image
;
118 /* Which inputs are actually read */
119 uint64_t inputs_read
;
120 /* Which outputs are actually written */
121 uint64_t outputs_written
;
122 /* Which outputs are actually read */
123 uint64_t outputs_read
;
124 /* Which system values are actually read */
125 uint64_t system_values_read
;
127 /* Which patch inputs are actually read */
128 uint32_t patch_inputs_read
;
129 /* Which patch outputs are actually written */
130 uint32_t patch_outputs_written
;
131 /* Which patch outputs are read */
132 uint32_t patch_outputs_read
;
134 /** Bitfield of which textures are used */
135 uint32_t textures_used
;
137 /** Bitfield of which textures are used by texelFetch() */
138 uint32_t textures_used_by_txf
;
140 /* SPV_KHR_float_controls: execution mode for floating point ops */
141 uint16_t float_controls_execution_mode
;
143 /* The size of the gl_ClipDistance[] array, if declared. */
144 uint8_t clip_distance_array_size
:4;
146 /* The size of the gl_CullDistance[] array, if declared. */
147 uint8_t cull_distance_array_size
:4;
149 /* Whether or not this shader ever uses textureGather() */
150 bool uses_texture_gather
:1;
153 * True if this shader uses the fddx/fddy opcodes.
155 * Note that this does not include the "fine" and "coarse" variants.
157 bool uses_fddx_fddy
:1;
160 * True if this shader uses 64-bit ALU operations
164 /* Whether the first UBO is the default uniform buffer, i.e. uniforms. */
165 bool first_ubo_is_default_ubo
:1;
167 /* Whether or not separate shader objects were used */
168 bool separate_shader
:1;
170 /** Was this shader linked with any transform feedback varyings? */
171 bool has_transform_feedback_varyings
:1;
173 /* Whether flrp has been lowered. */
178 /* Which inputs are doubles */
179 uint64_t double_inputs
;
181 /* For AMD-specific driver-internal shaders. It replaces vertex
182 * buffer loads with code generating VS inputs from scalar registers.
184 * Valid values: SI_VS_BLIT_SGPRS_POS_*
186 uint8_t blit_sgprs_amd
:4;
188 /* True if the shader writes position in window space coordinates pre-transform */
189 bool window_space_position
:1;
193 /** The output primitive type (GL enum value) */
194 uint16_t output_primitive
;
196 /** The input primitive type (GL enum value) */
197 uint16_t input_primitive
;
199 /** The maximum number of vertices the geometry shader might write. */
200 uint16_t vertices_out
;
202 /** 1 .. MAX_GEOMETRY_SHADER_INVOCATIONS */
205 /** The number of vertices recieves per input primitive (max. 6) */
206 uint8_t vertices_in
:3;
208 /** Whether or not this shader uses EndPrimitive */
209 bool uses_end_primitive
:1;
211 /** Whether or not this shader uses non-zero streams */
219 * True if this fragment shader requires helper invocations. This
220 * can be caused by the use of ALU derivative ops, texture
221 * instructions which do implicit derivatives, and the use of quad
222 * subgroup operations.
224 bool needs_helper_invocations
:1;
227 * Whether any inputs are declared with the "sample" qualifier.
229 bool uses_sample_qualifier
:1;
232 * Whether early fragment tests are enabled as defined by
233 * ARB_shader_image_load_store.
235 bool early_fragment_tests
:1;
238 * Defined by INTEL_conservative_rasterization.
240 bool inner_coverage
:1;
242 bool post_depth_coverage
:1;
245 * \name ARB_fragment_coord_conventions
248 bool pixel_center_integer
:1;
249 bool origin_upper_left
:1;
252 bool pixel_interlock_ordered
:1;
253 bool pixel_interlock_unordered
:1;
254 bool sample_interlock_ordered
:1;
255 bool sample_interlock_unordered
:1;
258 * Flags whether NIR's base types on the FS color outputs should be
261 * GLSL requires that fragment shader output base types match the
262 * render target's base types for the behavior to be defined. From
265 * "If the values written by the fragment shader do not match the
266 * format(s) of the corresponding color buffer(s), the result is
269 * However, for NIR shaders translated from TGSI, we don't have the
270 * output types any more, so the driver will need to do whatever
271 * fixups are necessary to handle effectively untyped data being
272 * output from the FS.
274 bool untyped_color_outputs
:1;
276 /** gl_FragDepth layout for ARB_conservative_depth. */
277 enum gl_frag_depth_layout depth_layout
:3;
281 uint16_t local_size
[3];
283 bool local_size_variable
:1;
284 uint8_t user_data_components_amd
:3;
287 * Arrangement of invocations used to calculate derivatives in a compute
288 * shader. From NV_compute_shader_derivatives.
290 enum gl_derivative_group derivative_group
:2;
293 * Size of shared variables accessed by the compute shader.
295 unsigned shared_size
;
299 * AddressingModelLogical: 0 (default)
300 * AddressingModelPhysical32: 32
301 * AddressingModelPhysical64: 64
306 /* Applies to both TCS and TES. */
308 uint16_t primitive_mode
; /* GL_TRIANGLES, GL_QUADS or GL_ISOLINES */
310 /** The number of vertices in the TCS output patch. */
311 uint8_t tcs_vertices_out
;
312 enum gl_tess_spacing spacing
:2;
314 /** Is the vertex order counterclockwise? */
325 #endif /* SHADER_INFO_H */