2 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * Rob Clark <robclark@freedesktop.org>
32 #include "c11/threads.h"
33 #include "compiler/shader_enums.h"
34 #include "compiler/nir/nir.h"
35 #include "util/bitscan.h"
36 #include "util/disk_cache.h"
38 #include "ir3_compiler.h"
/* driver param indices: */
enum ir3_driver_param {
   /* compute shader driver params: */
   IR3_DP_NUM_WORK_GROUPS_X = 0,
   IR3_DP_NUM_WORK_GROUPS_Y = 1,
   IR3_DP_NUM_WORK_GROUPS_Z = 2,
   IR3_DP_LOCAL_GROUP_SIZE_X = 4,
   IR3_DP_LOCAL_GROUP_SIZE_Y = 5,
   IR3_DP_LOCAL_GROUP_SIZE_Z = 6,
   /* NOTE: gl_NumWorkGroups should be vec4 aligned because
    * glDispatchComputeIndirect() needs to load these from
    * the info->indirect buffer. Keep that in mind when/if
    * adding any addition CS driver params.
    */
   IR3_DP_CS_COUNT = 8, /* must be aligned to vec4 */

   /* vertex shader driver params: */
   /* NOTE(review): index 0 of the VS section is unaccounted for in the
    * visible source (a line was lost) -- confirm against upstream.
    */
   IR3_DP_VTXID_BASE = 1,
   IR3_DP_INSTID_BASE = 2,
   IR3_DP_VTXCNT_MAX = 3,
   /* user-clip-plane components, up to 8x vec4's: */
   IR3_DP_UCP0_X = 4,
   /* .... */
   IR3_DP_UCP7_W = 35,
   IR3_DP_VS_COUNT = 36 /* must be aligned to vec4 */
};
/* Fixed upper bounds baked into the compiler's data structures: */
#define IR3_MAX_SHADER_BUFFERS  32
#define IR3_MAX_SHADER_IMAGES   32
#define IR3_MAX_SO_BUFFERS      4
#define IR3_MAX_SO_STREAMS      4
#define IR3_MAX_SO_OUTPUTS      64
#define IR3_MAX_UBO_PUSH_RANGES 32
77 /* mirrors SYSTEM_VALUE_BARYCENTRIC_ but starting from 0 */
/**
 * Description of a lowered UBO.
 */
struct ir3_ubo_info {
   uint32_t block;         /* Which constant block */
   uint16_t bindless_base; /* For bindless, which base register is used */
   /* NOTE(review): a line between bindless_base and the closing brace was
    * lost in the source; reconstructed as the bindless flag -- verify.
    */
   bool bindless;
};
99 * Description of a range of a lowered UBO access.
101 * Drivers should not assume that there are not multiple disjoint
102 * lowered ranges of a single UBO.
104 struct ir3_ubo_range
{
105 struct ir3_ubo_info ubo
;
106 uint32_t offset
; /* start offset to push in the const register file */
107 uint32_t start
, end
; /* range of block that's actually used */
110 struct ir3_ubo_analysis_state
{
111 struct ir3_ubo_range range
[IR3_MAX_UBO_PUSH_RANGES
];
112 uint32_t num_enabled
;
114 uint32_t cmdstream_size
; /* for per-gen backend to stash required cmdstream size */
118 * Describes the layout of shader consts. This includes:
119 * + User consts + driver lowered UBO ranges
121 * + Image sizes/dimensions
122 * + Driver params (ie. IR3_DP_*)
123 * + TFBO addresses (for generations that do not have hardware streamout)
124 * + Lowered immediates
126 * For consts needed to pass internal values to shader which may or may not
127 * be required, rather than allocating worst-case const space, we scan the
128 * shader and allocate consts as-needed:
130 * + SSBO sizes: only needed if shader has a get_buffer_size intrinsic
133 * + Image dimensions: needed to calculate pixel offset, but only for
134 * images that have a image_store intrinsic
136 * Layout of constant registers, each section aligned to vec4. Note
137 * that pointer size (ubo, etc) changes depending on generation.
142 * if (vertex shader) {
143 * driver params (IR3_DP_*)
144 * if (stream_output.num_outputs > 0)
145 * stream-out addresses
146 * } else if (compute_shader) {
147 * driver params (IR3_DP_*)
151 * Immediates go last mostly because they are inserted in the CP pass
152 * after the nir -> ir3 frontend.
154 * Note UBO size in bytes should be aligned to vec4
156 struct ir3_const_state
{
158 unsigned num_driver_params
; /* scalar */
161 /* user const start at zero */
163 /* NOTE that a3xx might need a section for SSBO addresses too */
166 unsigned driver_param
;
168 unsigned primitive_param
;
169 unsigned primitive_map
;
174 uint32_t mask
; /* bitmask of SSBOs that have get_buffer_size */
175 uint32_t count
; /* number of consts allocated */
176 /* one const allocated per SSBO which has get_buffer_size,
177 * ssbo_sizes.off[ssbo_id] is offset from start of ssbo_sizes
180 uint32_t off
[IR3_MAX_SHADER_BUFFERS
];
184 uint32_t mask
; /* bitmask of images that have image_store */
185 uint32_t count
; /* number of consts allocated */
186 /* three const allocated per image which has image_store:
187 * + cpp (bytes per pixel)
189 * + array_pitch (z pitch)
191 uint32_t off
[IR3_MAX_SHADER_IMAGES
];
194 unsigned immediates_count
;
195 unsigned immediates_size
;
196 uint32_t *immediates
;
198 /* State of ubo access lowered to push consts: */
199 struct ir3_ubo_analysis_state ubo_state
;
/**
 * A single output for vertex transform feedback.
 */
struct ir3_stream_output {
   unsigned register_index : 6;  /**< 0 to 63 (OUT index) */
   unsigned start_component : 2; /** 0 to 3 */
   unsigned num_components : 3;  /** 1 to 4 */
   unsigned output_buffer : 3;   /**< 0 to PIPE_MAX_SO_BUFFERS */
   unsigned dst_offset : 16;     /**< offset into the buffer in dwords */
   unsigned stream : 2;          /**< 0 to 3 */
};
215 * Stream output for vertex transform feedback.
217 struct ir3_stream_output_info
{
218 unsigned num_outputs
;
219 /** stride for an entire vertex for each buffer in dwords */
220 uint16_t stride
[IR3_MAX_SO_BUFFERS
];
223 * Array of stream outputs, in the order they are to be written in.
224 * Selected components are tightly packed into the output buffer.
226 struct ir3_stream_output output
[IR3_MAX_SO_OUTPUTS
];
/**
 * Starting from a4xx, HW supports pre-dispatching texture sampling
 * instructions prior to scheduling a shader stage, when the
 * coordinate maps exactly to an output of the previous stage.
 */

/* There is a limit in the number of pre-dispatches allowed for any
 * given stage.
 */
#define IR3_MAX_SAMPLER_PREFETCH 4

/* This is the output stream value for 'cmd', as used by blob. It may
 * encode the return type (in 3 bits) but it hasn't been verified yet.
 */
#define IR3_SAMPLER_PREFETCH_CMD          0x4
#define IR3_SAMPLER_BINDLESS_PREFETCH_CMD 0x6
/**
 * Stream output for texture sampling pre-dispatches.
 *
 * NOTE(review): several field lines were lost in the source; the fields
 * below other than samp_bindless_id/tex_bindless_id/half_precision were
 * reconstructed -- verify against the a6xx prefetch emit code.
 */
struct ir3_sampler_prefetch {
   uint8_t src;
   uint8_t samp_id;
   uint8_t tex_id;
   uint16_t samp_bindless_id;
   uint16_t tex_bindless_id;
   uint8_t dst;
   uint8_t wrmask;
   uint8_t half_precision;
   uint8_t cmd;
};
/* Configuration key used to identify a shader variant.. different
 * shader variants can be used to implement features not supported
 * in hw (two sided color), binning-pass vertex shader, etc.
 *
 * When adding to this struct, please update ir3_shader_variant()'s debug
 * output.
 */
struct ir3_shader_key {
   /* The bitfields below are unioned with `global` so that key equality
    * can compare them as a single word (see ir3_shader_key_equal()).
    */
   union {
      struct {
         /*
          * Combined Vertex/Fragment shader parameters:
          */
         unsigned ucp_enables : 8;

         /* do we need to check {v,f}saturate_{s,t,r}? */
         unsigned has_per_samp : 1;

         /*
          * Vertex shader variant parameters:
          */
         unsigned vclamp_color : 1;

         /*
          * Fragment shader variant parameters:
          */
         unsigned sample_shading : 1;
         /* NOTE(review): one bitfield line between sample_shading and
          * color_two_side was lost; reconstructed as msaa -- verify.
          */
         unsigned msaa : 1;
         unsigned color_two_side : 1;
         /* used when shader needs to handle flat varyings (a4xx)
          * for front/back color inputs to frag shader:
          */
         unsigned rasterflat : 1;
         unsigned fclamp_color : 1;

         /* Indicates that this is a tessellation pipeline which requires a
          * whole different kind of vertex shader. In case of
          * tessellation, this field also tells us which kind of output
          * topology the TES uses, which the TCS needs to know.
          */
#define IR3_TESS_NONE      0
#define IR3_TESS_TRIANGLES 1
#define IR3_TESS_QUADS     2
#define IR3_TESS_ISOLINES  3
         unsigned tessellation : 2;

         unsigned has_gs : 1;

         /* Whether this variant sticks to the "safe" maximum constlen,
          * which guarantees that the combined stages will never go over
          * the safe limit:
          */
         unsigned safe_constlen : 1;

         /* Whether gl_Layer must be forced to 0 because it isn't written. */
         unsigned layer_zero : 1;
      };
      uint32_t global;
   };

   /* bitmask of sampler which needs coords clamped for vertex
    * shader:
    */
   uint16_t vsaturate_s, vsaturate_t, vsaturate_r;

   /* bitmask of sampler which needs coords clamped for frag
    * shader:
    */
   uint16_t fsaturate_s, fsaturate_t, fsaturate_r;

   /* bitmask of ms shifts */
   uint32_t vsamples, fsamples;

   /* bitmask of samplers which need astc srgb workaround: */
   uint16_t vastc_srgb, fastc_srgb;
};
342 static inline unsigned
343 ir3_tess_mode(unsigned gl_tess_mode
)
345 switch (gl_tess_mode
) {
347 return IR3_TESS_ISOLINES
;
349 return IR3_TESS_TRIANGLES
;
351 return IR3_TESS_QUADS
;
353 unreachable("bad tessmode");
358 ir3_shader_key_equal(const struct ir3_shader_key
*a
, const struct ir3_shader_key
*b
)
360 /* slow-path if we need to check {v,f}saturate_{s,t,r} */
361 if (a
->has_per_samp
|| b
->has_per_samp
)
362 return memcmp(a
, b
, sizeof(struct ir3_shader_key
)) == 0;
363 return a
->global
== b
->global
;
366 /* will the two keys produce different lowering for a fragment shader? */
368 ir3_shader_key_changes_fs(struct ir3_shader_key
*key
, struct ir3_shader_key
*last_key
)
370 if (last_key
->has_per_samp
|| key
->has_per_samp
) {
371 if ((last_key
->fsaturate_s
!= key
->fsaturate_s
) ||
372 (last_key
->fsaturate_t
!= key
->fsaturate_t
) ||
373 (last_key
->fsaturate_r
!= key
->fsaturate_r
) ||
374 (last_key
->fsamples
!= key
->fsamples
) ||
375 (last_key
->fastc_srgb
!= key
->fastc_srgb
))
379 if (last_key
->fclamp_color
!= key
->fclamp_color
)
382 if (last_key
->color_two_side
!= key
->color_two_side
)
385 if (last_key
->rasterflat
!= key
->rasterflat
)
388 if (last_key
->layer_zero
!= key
->layer_zero
)
391 if (last_key
->ucp_enables
!= key
->ucp_enables
)
394 if (last_key
->safe_constlen
!= key
->safe_constlen
)
400 /* will the two keys produce different lowering for a vertex shader? */
402 ir3_shader_key_changes_vs(struct ir3_shader_key
*key
, struct ir3_shader_key
*last_key
)
404 if (last_key
->has_per_samp
|| key
->has_per_samp
) {
405 if ((last_key
->vsaturate_s
!= key
->vsaturate_s
) ||
406 (last_key
->vsaturate_t
!= key
->vsaturate_t
) ||
407 (last_key
->vsaturate_r
!= key
->vsaturate_r
) ||
408 (last_key
->vsamples
!= key
->vsamples
) ||
409 (last_key
->vastc_srgb
!= key
->vastc_srgb
))
413 if (last_key
->vclamp_color
!= key
->vclamp_color
)
416 if (last_key
->ucp_enables
!= key
->ucp_enables
)
419 if (last_key
->safe_constlen
!= key
->safe_constlen
)
426 * On a4xx+a5xx, Images share state with textures and SSBOs:
428 * + Uses texture (cat5) state/instruction (isam) to read
429 * + Uses SSBO state and instructions (cat6) to write and for atomics
431 * Starting with a6xx, Images and SSBOs are basically the same thing,
432 * with texture state and isam also used for SSBO reads.
434 * On top of that, gallium makes the SSBO (shader_buffers) state semi
435 * sparse, with the first half of the state space used for atomic
436 * counters lowered to atomic buffers. We could ignore this, but I
437 * don't think we could *really* handle the case of a single shader
438 * that used the max # of textures + images + SSBOs. And once we are
439 * offsetting images by num_ssbos (or visa versa) to map them into
440 * the same hardware state, the hardware state has become coupled to
441 * the shader state, so at this point we might as well just use a
442 * mapping table to remap things from image/SSBO idx to hw idx.
444 * To make things less (more?) confusing, for the hw "SSBO" state
445 * (since it is really both SSBO and Image) I'll use the name "IBO"
447 struct ir3_ibo_mapping
{
448 #define IBO_INVALID 0xff
449 /* Maps logical SSBO state to hw tex state: */
450 uint8_t ssbo_to_tex
[IR3_MAX_SHADER_BUFFERS
];
452 /* Maps logical Image state to hw tex state: */
453 uint8_t image_to_tex
[IR3_MAX_SHADER_IMAGES
];
455 /* Maps hw state back to logical SSBO or Image state:
457 * note IBO_SSBO ORd into values to indicate that the
458 * hw slot is used for SSBO state vs Image state.
460 #define IBO_SSBO 0x80
461 uint8_t tex_to_image
[32];
463 uint8_t num_tex
; /* including real textures */
464 uint8_t tex_base
; /* the number of real textures, ie. image/ssbo start here */
/* Represents half register in regid */
#define HALF_REG_ID 0x100
471 * Shader variant which contains the actual hw shader instructions,
472 * and necessary info for shader state setup.
474 struct ir3_shader_variant
{
477 /* variant id (for debug) */
480 struct ir3_shader_key key
;
482 /* vertex shaders can have an extra version for hwbinning pass,
483 * which is pointed to by so->binning:
487 struct ir3_shader_variant
*binning
;
488 struct ir3_shader_variant
*nonbinning
;
491 struct ir3
*ir
; /* freed after assembling machine instructions */
493 /* shader variants form a linked list: */
494 struct ir3_shader_variant
*next
;
496 /* replicated here to avoid passing extra ptrs everywhere: */
497 gl_shader_stage type
;
498 struct ir3_shader
*shader
;
501 * Below here is serialized when written to disk cache:
504 /* The actual binary shader instructions, size given by info.sizedwords: */
507 struct ir3_const_state
*const_state
;
510 * The following macros are used by the shader disk cache save/
511 * restore paths to serialize/deserialize the variant. Any
512 * pointers that require special handling in store_variant()
513 * and retrieve_variant() should go above here.
515 #define VARIANT_CACHE_START offsetof(struct ir3_shader_variant, info)
516 #define VARIANT_CACHE_PTR(v) (((char *)v) + VARIANT_CACHE_START)
517 #define VARIANT_CACHE_SIZE (sizeof(struct ir3_shader_variant) - VARIANT_CACHE_START)
519 struct ir3_info info
;
521 /* Levels of nesting of flow control:
523 unsigned branchstack
;
528 /* the instructions length is in units of instruction groups
529 * (4 instructions for a3xx, 16 instructions for a4xx.. each
530 * instruction is 2 dwords):
534 /* the constants length is in units of vec4's, and is the sum of
535 * the uniforms and the built-in compiler constants
540 * + Let the frag shader determine the position/compmask for the
541 * varyings, since it is the place where we know if the varying
542 * is actually used, and if so, which components are used. So
543 * what the hw calls "outloc" is taken from the "inloc" of the
545 * + From the vert shader, we only need the output regid
548 bool frag_face
, color0_mrt
;
549 uint8_t fragcoord_compmask
;
551 /* NOTE: for input/outputs, slot is:
552 * gl_vert_attrib - for VS inputs
553 * gl_varying_slot - for VS output / FS input
554 * gl_frag_result - for FS output
557 /* varyings/outputs: */
558 unsigned outputs_count
;
563 } outputs
[32 + 2]; /* +POSITION +PSIZE */
564 bool writes_pos
, writes_smask
, writes_psize
, writes_stencilref
;
566 /* Size in dwords of all outputs for VS, size of entire patch for HS. */
567 uint32_t output_size
;
569 /* Map from driver_location to byte offset in per-primitive storage */
570 unsigned output_loc
[32];
572 /* attributes (VS) / varyings (FS):
573 * Note that sysval's should come *after* normal inputs.
575 unsigned inputs_count
;
580 /* location of input (ie. offset passed to bary.f, etc). This
581 * matches the SP_VS_VPC_DST_REG.OUTLOCn value (a3xx and a4xx
582 * have the OUTLOCn value offset by 8, presumably to account
583 * for gl_Position/gl_PointSize)
586 /* vertex shader specific: */
587 bool sysval
: 1; /* slot is a gl_system_value */
588 /* fragment shader specific: */
589 bool bary
: 1; /* fetched varying (vs one loaded into reg) */
590 bool rasterflat
: 1; /* special handling for emit->rasterflat */
591 bool use_ldlv
: 1; /* internal to ir3_compiler_nir */
593 enum glsl_interp_mode interpolate
;
594 } inputs
[32 + 2]; /* +POSITION +FACE */
596 /* sum of input components (scalar). For frag shaders, it only counts
597 * the varying inputs:
601 /* For frag shaders, the total number of inputs (not scalar,
602 * ie. SP_VS_PARAM_REG.TOTALVSOUTVAR)
606 /* Remapping table to map Image and SSBO to hw state: */
607 struct ir3_ibo_mapping image_mapping
;
609 /* number of samplers/textures (which are currently 1:1): */
612 /* is there an implicit sampler to read framebuffer (FS only).. if
613 * so the sampler-idx is 'num_samp - 1' (ie. it is appended after
614 * the last "real" texture)
618 /* do we have one or more SSBO instructions: */
621 /* Which bindless resources are used, for filling out sp_xs_config */
627 /* do we need derivatives: */
630 bool need_fine_derivatives
;
632 /* do we have image write, etc (which prevents early-z): */
635 /* do we have kill, which also prevents early-z, but not necessarily
636 * early-lrz (as long as lrz-write is disabled, which must be handled
637 * outside of ir3. Unlike other no_earlyz cases, kill doesn't have
638 * side effects that prevent early-lrz discard.
644 /* Are we using split or merged register file? */
647 /* for astc srgb workaround, the number/base of additional
648 * alpha tex states we need, and index of original tex states
651 unsigned base
, count
;
652 unsigned orig_idx
[16];
655 /* texture sampler pre-dispatches */
656 uint32_t num_sampler_prefetch
;
657 struct ir3_sampler_prefetch sampler_prefetch
[IR3_MAX_SAMPLER_PREFETCH
];
660 static inline const char *
661 ir3_shader_stage(struct ir3_shader_variant
*v
)
664 case MESA_SHADER_VERTEX
: return v
->binning_pass
? "BVERT" : "VERT";
665 case MESA_SHADER_TESS_CTRL
: return "TCS";
666 case MESA_SHADER_TESS_EVAL
: return "TES";
667 case MESA_SHADER_GEOMETRY
: return "GEOM";
668 case MESA_SHADER_FRAGMENT
: return "FRAG";
669 case MESA_SHADER_COMPUTE
: return "CL";
671 unreachable("invalid type");
676 /* Currently we do not do binning for tess. And for GS there is no
677 * cross-stage VS+GS optimization, so the full VS+GS is used in
681 ir3_has_binning_vs(const struct ir3_shader_key
*key
)
683 if (key
->tessellation
|| key
->has_gs
)
689 * Represents a shader at the API level, before state-specific variants are
693 gl_shader_stage type
;
695 /* shader id (for debug): */
697 uint32_t variant_count
;
699 /* Set by freedreno after shader_state_create, so we can emit debug info
700 * when recompiling a shader at draw time.
702 bool initial_variants_done
;
704 struct ir3_compiler
*compiler
;
706 unsigned num_reserved_user_consts
;
709 struct nir_shader
*nir
;
710 struct ir3_stream_output_info stream_output
;
712 struct ir3_shader_variant
*variants
;
715 cache_key cache_key
; /* shader disk-cache key */
717 /* Bitmask of bits of the shader key used by this shader. Used to avoid
718 * recompiles for GL NOS that doesn't actually apply to the shader.
720 struct ir3_shader_key key_mask
;
724 * In order to use the same cmdstream, in particular constlen setup and const
725 * emit, for both binning and draw pass (a6xx+), the binning pass re-uses it's
726 * corresponding draw pass shaders const_state.
728 static inline struct ir3_const_state
*
729 ir3_const_state(const struct ir3_shader_variant
*v
)
732 return v
->nonbinning
->const_state
;
733 return v
->const_state
;
736 /* Given a variant, calculate the maximum constlen it can have.
739 static inline unsigned
740 ir3_max_const(const struct ir3_shader_variant
*v
)
742 const struct ir3_compiler
*compiler
= v
->shader
->compiler
;
744 if (v
->shader
->type
== MESA_SHADER_COMPUTE
) {
745 return compiler
->max_const_compute
;
746 } else if (v
->key
.safe_constlen
) {
747 return compiler
->max_const_safe
;
748 } else if (v
->shader
->type
== MESA_SHADER_FRAGMENT
) {
749 return compiler
->max_const_frag
;
751 return compiler
->max_const_geom
;
755 void * ir3_shader_assemble(struct ir3_shader_variant
*v
);
756 struct ir3_shader_variant
* ir3_shader_get_variant(struct ir3_shader
*shader
,
757 const struct ir3_shader_key
*key
, bool binning_pass
, bool *created
);
758 struct ir3_shader
* ir3_shader_from_nir(struct ir3_compiler
*compiler
, nir_shader
*nir
,
759 unsigned reserved_user_consts
, struct ir3_stream_output_info
*stream_output
);
760 uint32_t ir3_trim_constlen(struct ir3_shader_variant
**variants
,
761 const struct ir3_compiler
*compiler
);
762 void ir3_shader_destroy(struct ir3_shader
*shader
);
763 void ir3_shader_disasm(struct ir3_shader_variant
*so
, uint32_t *bin
, FILE *out
);
764 uint64_t ir3_shader_outputs(const struct ir3_shader
*so
);
767 ir3_glsl_type_size(const struct glsl_type
*type
, bool bindless
);
773 /* clears shader-key flags which don't apply to the given shader.
776 ir3_key_clear_unused(struct ir3_shader_key
*key
, struct ir3_shader
*shader
)
778 uint32_t *key_bits
= (uint32_t *)key
;
779 uint32_t *key_mask
= (uint32_t *)&shader
->key_mask
;
780 STATIC_ASSERT(sizeof(*key
) % 4 == 0);
781 for (int i
= 0; i
< sizeof(*key
) >> 2; i
++)
782 key_bits
[i
] &= key_mask
[i
];
786 ir3_find_output(const struct ir3_shader_variant
*so
, gl_varying_slot slot
)
790 for (j
= 0; j
< so
->outputs_count
; j
++)
791 if (so
->outputs
[j
].slot
== slot
)
794 /* it seems optional to have a OUT.BCOLOR[n] for each OUT.COLOR[n]
795 * in the vertex shader.. but the fragment shader doesn't know this
796 * so it will always have both IN.COLOR[n] and IN.BCOLOR[n]. So
797 * at link time if there is no matching OUT.BCOLOR[n], we must map
798 * OUT.COLOR[n] to IN.BCOLOR[n]. And visa versa if there is only
799 * a OUT.BCOLOR[n] but no matching OUT.COLOR[n]
801 if (slot
== VARYING_SLOT_BFC0
) {
802 slot
= VARYING_SLOT_COL0
;
803 } else if (slot
== VARYING_SLOT_BFC1
) {
804 slot
= VARYING_SLOT_COL1
;
805 } else if (slot
== VARYING_SLOT_COL0
) {
806 slot
= VARYING_SLOT_BFC0
;
807 } else if (slot
== VARYING_SLOT_COL1
) {
808 slot
= VARYING_SLOT_BFC1
;
813 for (j
= 0; j
< so
->outputs_count
; j
++)
814 if (so
->outputs
[j
].slot
== slot
)
823 ir3_next_varying(const struct ir3_shader_variant
*so
, int i
)
825 while (++i
< so
->inputs_count
)
826 if (so
->inputs
[i
].compmask
&& so
->inputs
[i
].bary
)
/* VS output <-> FS input linkage map, built by ir3_link_shaders().
 *
 * NOTE(review): the field declarations were lost in the source; they are
 * reconstructed here from their uses in ir3_link_add()/ir3_link_shaders()
 * -- verify exact types/sizes against upstream.
 */
struct ir3_shader_linkage {
   /* Maximum location either consumed by the fragment shader or produced by
    * the last geometry stage, i.e. the size required for each vertex in the
    * VPC in dwords.
    */
   uint8_t max_loc;

   /* Number of entries in var. */
   uint8_t cnt;

   /* Bitset of locations used, including ones which are only used by the FS.
    */
   uint32_t varmask[4];

   /* Map from VS output to location. */
   struct {
      uint8_t regid;
      uint8_t compmask;
      uint8_t loc;
   } var[32];

   /* location for fixed-function gl_PrimitiveID passthrough */
   uint8_t primid_loc;

   /* location for fixed-function gl_ViewIndex passthrough */
   uint8_t viewid_loc;
};
860 ir3_link_add(struct ir3_shader_linkage
*l
, uint8_t regid_
, uint8_t compmask
, uint8_t loc
)
862 for (int j
= 0; j
< util_last_bit(compmask
); j
++) {
863 uint8_t comploc
= loc
+ j
;
864 l
->varmask
[comploc
/ 32] |= 1 << (comploc
% 32);
867 l
->max_loc
= MAX2(l
->max_loc
, loc
+ util_last_bit(compmask
));
869 if (regid_
!= regid(63, 0)) {
871 debug_assert(i
< ARRAY_SIZE(l
->var
));
873 l
->var
[i
].regid
= regid_
;
874 l
->var
[i
].compmask
= compmask
;
880 ir3_link_shaders(struct ir3_shader_linkage
*l
,
881 const struct ir3_shader_variant
*vs
,
882 const struct ir3_shader_variant
*fs
,
885 /* On older platforms, varmask isn't programmed at all, and it appears
886 * that the hardware generates a mask of used VPC locations using the VS
887 * output map, and hangs if a FS bary instruction references a location
888 * not in the list. This means that we need to have a dummy entry in the
889 * VS out map for things like gl_PointCoord which aren't written by the
890 * VS. Furthermore we can't use r63.x, so just pick a random register to
891 * use if there is no VS output.
893 const unsigned default_regid
= pack_vs_out
? regid(63, 0) : regid(0, 0);
896 l
->primid_loc
= 0xff;
897 l
->viewid_loc
= 0xff;
899 while (l
->cnt
< ARRAY_SIZE(l
->var
)) {
900 j
= ir3_next_varying(fs
, j
);
902 if (j
>= fs
->inputs_count
)
905 if (fs
->inputs
[j
].inloc
>= fs
->total_in
)
908 k
= ir3_find_output(vs
, fs
->inputs
[j
].slot
);
910 if (k
< 0 && fs
->inputs
[j
].slot
== VARYING_SLOT_PRIMITIVE_ID
) {
911 l
->primid_loc
= fs
->inputs
[j
].inloc
;
914 if (fs
->inputs
[j
].slot
== VARYING_SLOT_VIEW_INDEX
) {
916 l
->viewid_loc
= fs
->inputs
[j
].inloc
;
919 ir3_link_add(l
, k
>= 0 ? vs
->outputs
[k
].regid
: default_regid
,
920 fs
->inputs
[j
].compmask
, fs
->inputs
[j
].inloc
);
924 static inline uint32_t
925 ir3_find_output_regid(const struct ir3_shader_variant
*so
, unsigned slot
)
928 for (j
= 0; j
< so
->outputs_count
; j
++)
929 if (so
->outputs
[j
].slot
== slot
) {
930 uint32_t regid
= so
->outputs
[j
].regid
;
931 if (so
->outputs
[j
].half
)
932 regid
|= HALF_REG_ID
;
/* ir3-internal varying slots, allocated past the last gallium slot: */
#define VARYING_SLOT_GS_HEADER_IR3       (VARYING_SLOT_MAX + 0)
#define VARYING_SLOT_GS_VERTEX_FLAGS_IR3 (VARYING_SLOT_MAX + 1)
#define VARYING_SLOT_TCS_HEADER_IR3      (VARYING_SLOT_MAX + 2)
943 static inline uint32_t
944 ir3_find_sysval_regid(const struct ir3_shader_variant
*so
, unsigned slot
)
947 for (j
= 0; j
< so
->inputs_count
; j
++)
948 if (so
->inputs
[j
].sysval
&& (so
->inputs
[j
].slot
== slot
))
949 return so
->inputs
[j
].regid
;
953 /* calculate register footprint in terms of half-regs (ie. one full
954 * reg counts as two half-regs).
956 static inline uint32_t
957 ir3_shader_halfregs(const struct ir3_shader_variant
*v
)
959 return (2 * (v
->info
.max_reg
+ 1)) + (v
->info
.max_half_reg
+ 1);
962 static inline uint32_t
963 ir3_shader_nibo(const struct ir3_shader_variant
*v
)
965 /* The dummy variant used in binning mode won't have an actual shader. */
969 return v
->shader
->nir
->info
.num_ssbos
+ v
->shader
->nir
->info
.num_images
;
972 #endif /* IR3_SHADER_H_ */