freedreno/ir3: shuffle some variant fields
[mesa.git] / src / freedreno / ir3 / ir3_shader.h
1 /*
2 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #ifndef IR3_SHADER_H_
28 #define IR3_SHADER_H_
29
30 #include <stdio.h>
31
32 #include "c11/threads.h"
33 #include "compiler/shader_enums.h"
34 #include "compiler/nir/nir.h"
35 #include "util/bitscan.h"
36
37 #include "ir3_compiler.h"
38
39 struct glsl_type;
40
/* driver param indices: */
enum ir3_driver_param {
	/* compute shader driver params: */
	IR3_DP_NUM_WORK_GROUPS_X = 0,
	IR3_DP_NUM_WORK_GROUPS_Y = 1,
	IR3_DP_NUM_WORK_GROUPS_Z = 2,
	/* index 3 is intentionally unused so gl_NumWorkGroups stays
	 * vec4 aligned (see NOTE below):
	 */
	IR3_DP_LOCAL_GROUP_SIZE_X = 4,
	IR3_DP_LOCAL_GROUP_SIZE_Y = 5,
	IR3_DP_LOCAL_GROUP_SIZE_Z = 6,
	/* NOTE: gl_NumWorkGroups should be vec4 aligned because
	 * glDispatchComputeIndirect() needs to load these from
	 * the info->indirect buffer.  Keep that in mind when/if
	 * adding any additional CS driver params.
	 */
	IR3_DP_CS_COUNT   = 8,   /* must be aligned to vec4 */

	/* vertex shader driver params: */
	IR3_DP_DRAWID = 0,
	IR3_DP_VTXID_BASE = 1,
	IR3_DP_INSTID_BASE = 2,
	IR3_DP_VTXCNT_MAX = 3,
	/* user-clip-plane components, up to 8x vec4's: */
	IR3_DP_UCP0_X = 4,
	/* .... */
	IR3_DP_UCP7_W = 35,
	IR3_DP_VS_COUNT   = 36   /* must be aligned to vec4 */
};
68
69 #define IR3_MAX_SHADER_BUFFERS 32
70 #define IR3_MAX_SHADER_IMAGES 32
71 #define IR3_MAX_SO_BUFFERS 4
72 #define IR3_MAX_SO_STREAMS 4
73 #define IR3_MAX_SO_OUTPUTS 64
74 #define IR3_MAX_UBO_PUSH_RANGES 32
75
/**
 * Description of a lowered UBO.
 */
struct ir3_ubo_info {
	uint32_t block;         /* Which constant block */
	uint16_t bindless_base; /* For bindless, which base register is used */
	bool bindless;          /* true if accessed through the bindless path */
};
84
/**
 * Description of a range of a lowered UBO access.
 *
 * Drivers should not assume that there are not multiple disjoint
 * lowered ranges of a single UBO.
 */
struct ir3_ubo_range {
	struct ir3_ubo_info ubo;
	uint32_t offset;     /* start offset to push in the const register file */
	uint32_t start, end; /* range of block that's actually used */
};
96
/* Result of the UBO-to-push-const lowering analysis pass. */
struct ir3_ubo_analysis_state {
	struct ir3_ubo_range range[IR3_MAX_UBO_PUSH_RANGES];
	uint32_t num_enabled;    /* number of valid entries in range[] */
	uint32_t size;           /* total push-const space consumed, in bytes */
	uint32_t cmdstream_size; /* for per-gen backend to stash required cmdstream size */
};
103
104 /**
105 * Describes the layout of shader consts. This includes:
106 * + User consts + driver lowered UBO ranges
107 * + SSBO sizes
108 * + Image sizes/dimensions
109 * + Driver params (ie. IR3_DP_*)
110 * + TFBO addresses (for generations that do not have hardware streamout)
111 * + Lowered immediates
112 *
113 * For consts needed to pass internal values to shader which may or may not
114 * be required, rather than allocating worst-case const space, we scan the
115 * shader and allocate consts as-needed:
116 *
117 * + SSBO sizes: only needed if shader has a get_buffer_size intrinsic
118 * for a given SSBO
119 *
120 * + Image dimensions: needed to calculate pixel offset, but only for
121 * images that have a image_store intrinsic
122 *
123 * Layout of constant registers, each section aligned to vec4. Note
124 * that pointer size (ubo, etc) changes depending on generation.
125 *
126 * user consts
127 * UBO addresses
128 * SSBO sizes
129 * if (vertex shader) {
130 * driver params (IR3_DP_*)
131 * if (stream_output.num_outputs > 0)
132 * stream-out addresses
133 * } else if (compute_shader) {
134 * driver params (IR3_DP_*)
135 * }
136 * immediates
137 *
138 * Immediates go last mostly because they are inserted in the CP pass
139 * after the nir -> ir3 frontend.
140 *
141 * Note UBO size in bytes should be aligned to vec4
142 */
struct ir3_const_state {
	unsigned num_ubos;
	unsigned num_driver_params;   /* scalar */

	/* Offsets (in vec4 units) of each section within the const file;
	 * layout described in the comment block above:
	 */
	struct {
		/* user const start at zero */
		unsigned ubo;
		/* NOTE that a3xx might need a section for SSBO addresses too */
		unsigned ssbo_sizes;
		unsigned image_dims;
		unsigned driver_param;
		unsigned tfbo;
		unsigned primitive_param;
		unsigned primitive_map;
		unsigned immediate;
	} offsets;

	struct {
		uint32_t mask;  /* bitmask of SSBOs that have get_buffer_size */
		uint32_t count; /* number of consts allocated */
		/* one const allocated per SSBO which has get_buffer_size,
		 * ssbo_sizes.off[ssbo_id] is offset from start of ssbo_sizes
		 * consts:
		 */
		uint32_t off[IR3_MAX_SHADER_BUFFERS];
	} ssbo_size;

	struct {
		uint32_t mask;  /* bitmask of images that have image_store */
		uint32_t count; /* number of consts allocated */
		/* three const allocated per image which has image_store:
		 *  + cpp         (bytes per pixel)
		 *  + pitch       (y pitch)
		 *  + array_pitch (z pitch)
		 */
		uint32_t off[IR3_MAX_SHADER_IMAGES];
	} image_dims;

	/* Lowered immediates, packed four scalars per vec4 entry: */
	unsigned immediate_idx;
	unsigned immediates_count;
	unsigned immediates_size;
	struct {
		uint32_t val[4];
	} *immediates;

	/* State of ubo access lowered to push consts: */
	struct ir3_ubo_analysis_state ubo_state;
};
191
/**
 * A single output for vertex transform feedback.
 */
struct ir3_stream_output {
	unsigned register_index:6;  /**< 0 to 63 (OUT index) */
	unsigned start_component:2; /**< 0 to 3 */
	unsigned num_components:3;  /**< 1 to 4 */
	unsigned output_buffer:3;   /**< 0 to PIPE_MAX_SO_BUFFERS */
	unsigned dst_offset:16;     /**< offset into the buffer in dwords */
	unsigned stream:2;          /**< 0 to 3 */
};
203
/**
 * Stream output for vertex transform feedback.
 */
struct ir3_stream_output_info {
	unsigned num_outputs;
	/** stride for an entire vertex for each buffer in dwords */
	uint16_t stride[IR3_MAX_SO_BUFFERS];

	/**
	 * Array of stream outputs, in the order they are to be written in.
	 * Selected components are tightly packed into the output buffer.
	 */
	struct ir3_stream_output output[IR3_MAX_SO_OUTPUTS];
};
218
219
220 /**
221 * Starting from a4xx, HW supports pre-dispatching texture sampling
222 * instructions prior to scheduling a shader stage, when the
223 * coordinate maps exactly to an output of the previous stage.
224 */
225
226 /**
227 * There is a limit in the number of pre-dispatches allowed for any
228 * given stage.
229 */
230 #define IR3_MAX_SAMPLER_PREFETCH 4
231
232 /**
233 * This is the output stream value for 'cmd', as used by blob. It may
234 * encode the return type (in 3 bits) but it hasn't been verified yet.
235 */
236 #define IR3_SAMPLER_PREFETCH_CMD 0x4
237 #define IR3_SAMPLER_BINDLESS_PREFETCH_CMD 0x6
238
/**
 * Stream output for texture sampling pre-dispatches.
 */
struct ir3_sampler_prefetch {
	uint8_t src;              /* source (NOTE(review): presumably the input/inloc sampled from — confirm) */
	uint8_t samp_id;          /* non-bindless sampler index */
	uint8_t tex_id;           /* non-bindless texture index */
	uint16_t samp_bindless_id; /* bindless sampler handle */
	uint16_t tex_bindless_id;  /* bindless texture handle */
	uint8_t dst;              /* destination register */
	uint8_t wrmask;           /* destination write-mask */
	uint8_t half_precision;   /* result written at half precision */
	uint8_t cmd;              /* IR3_SAMPLER_PREFETCH_CMD or IR3_SAMPLER_BINDLESS_PREFETCH_CMD */
};
253
254
/* Configuration key used to identify a shader variant.. different
 * shader variants can be used to implement features not supported
 * in hw (two sided color), binning-pass vertex shader, etc.
 *
 * When adding to this struct, please update ir3_shader_variant()'s debug
 * output.
 */
struct ir3_shader_key {
	union {
		struct {
			/*
			 * Combined Vertex/Fragment shader parameters:
			 */
			unsigned ucp_enables : 8;

			/* do we need to check {v,f}saturate_{s,t,r}? */
			unsigned has_per_samp : 1;

			/*
			 * Vertex shader variant parameters:
			 */
			unsigned vclamp_color : 1;

			/*
			 * Fragment shader variant parameters:
			 */
			unsigned sample_shading : 1;
			unsigned msaa : 1;
			unsigned color_two_side : 1;
			/* used when shader needs to handle flat varyings (a4xx)
			 * for front/back color inputs to frag shader:
			 */
			unsigned rasterflat : 1;
			unsigned fclamp_color : 1;

			/* Indicates that this is a tessellation pipeline which requires a
			 * whole different kind of vertex shader.  In case of
			 * tessellation, this field also tells us which kind of output
			 * topology the TES uses, which the TCS needs to know.
			 */
#define IR3_TESS_NONE		0
#define IR3_TESS_TRIANGLES	1
#define IR3_TESS_QUADS		2
#define IR3_TESS_ISOLINES	3
			unsigned tessellation : 2;

			unsigned has_gs : 1;

			/* Whether this variant sticks to the "safe" maximum constlen,
			 * which guarantees that the combined stages will never go over
			 * the limit:
			 */
			unsigned safe_constlen : 1;
		};
		/* all the bitfields above, viewed as one word for fast compare
		 * (see ir3_shader_key_equal()):
		 */
		uint32_t global;
	};

	/* bitmask of sampler which needs coords clamped for vertex
	 * shader:
	 */
	uint16_t vsaturate_s, vsaturate_t, vsaturate_r;

	/* bitmask of sampler which needs coords clamped for frag
	 * shader:
	 */
	uint16_t fsaturate_s, fsaturate_t, fsaturate_r;

	/* bitmask of ms shifts */
	uint32_t vsamples, fsamples;

	/* bitmask of samplers which need astc srgb workaround: */
	uint16_t vastc_srgb, fastc_srgb;
};
328
329 static inline unsigned
330 ir3_tess_mode(unsigned gl_tess_mode)
331 {
332 switch (gl_tess_mode) {
333 case GL_ISOLINES:
334 return IR3_TESS_ISOLINES;
335 case GL_TRIANGLES:
336 return IR3_TESS_TRIANGLES;
337 case GL_QUADS:
338 return IR3_TESS_QUADS;
339 default:
340 unreachable("bad tessmode");
341 }
342 }
343
344 static inline bool
345 ir3_shader_key_equal(const struct ir3_shader_key *a, const struct ir3_shader_key *b)
346 {
347 /* slow-path if we need to check {v,f}saturate_{s,t,r} */
348 if (a->has_per_samp || b->has_per_samp)
349 return memcmp(a, b, sizeof(struct ir3_shader_key)) == 0;
350 return a->global == b->global;
351 }
352
353 /* will the two keys produce different lowering for a fragment shader? */
354 static inline bool
355 ir3_shader_key_changes_fs(struct ir3_shader_key *key, struct ir3_shader_key *last_key)
356 {
357 if (last_key->has_per_samp || key->has_per_samp) {
358 if ((last_key->fsaturate_s != key->fsaturate_s) ||
359 (last_key->fsaturate_t != key->fsaturate_t) ||
360 (last_key->fsaturate_r != key->fsaturate_r) ||
361 (last_key->fsamples != key->fsamples) ||
362 (last_key->fastc_srgb != key->fastc_srgb))
363 return true;
364 }
365
366 if (last_key->fclamp_color != key->fclamp_color)
367 return true;
368
369 if (last_key->color_two_side != key->color_two_side)
370 return true;
371
372 if (last_key->rasterflat != key->rasterflat)
373 return true;
374
375 if (last_key->ucp_enables != key->ucp_enables)
376 return true;
377
378 if (last_key->safe_constlen != key->safe_constlen)
379 return true;
380
381 return false;
382 }
383
384 /* will the two keys produce different lowering for a vertex shader? */
385 static inline bool
386 ir3_shader_key_changes_vs(struct ir3_shader_key *key, struct ir3_shader_key *last_key)
387 {
388 if (last_key->has_per_samp || key->has_per_samp) {
389 if ((last_key->vsaturate_s != key->vsaturate_s) ||
390 (last_key->vsaturate_t != key->vsaturate_t) ||
391 (last_key->vsaturate_r != key->vsaturate_r) ||
392 (last_key->vsamples != key->vsamples) ||
393 (last_key->vastc_srgb != key->vastc_srgb))
394 return true;
395 }
396
397 if (last_key->vclamp_color != key->vclamp_color)
398 return true;
399
400 if (last_key->ucp_enables != key->ucp_enables)
401 return true;
402
403 if (last_key->safe_constlen != key->safe_constlen)
404 return true;
405
406 return false;
407 }
408
409 /**
410 * On a4xx+a5xx, Images share state with textures and SSBOs:
411 *
412 * + Uses texture (cat5) state/instruction (isam) to read
413 * + Uses SSBO state and instructions (cat6) to write and for atomics
414 *
415 * Starting with a6xx, Images and SSBOs are basically the same thing,
416 * with texture state and isam also used for SSBO reads.
417 *
418 * On top of that, gallium makes the SSBO (shader_buffers) state semi
419 * sparse, with the first half of the state space used for atomic
420 * counters lowered to atomic buffers. We could ignore this, but I
421 * don't think we could *really* handle the case of a single shader
422 * that used the max # of textures + images + SSBOs. And once we are
 * offsetting images by num_ssbos (or vice versa) to map them into
424 * the same hardware state, the hardware state has become coupled to
425 * the shader state, so at this point we might as well just use a
426 * mapping table to remap things from image/SSBO idx to hw idx.
427 *
428 * To make things less (more?) confusing, for the hw "SSBO" state
429 * (since it is really both SSBO and Image) I'll use the name "IBO"
430 */
struct ir3_ibo_mapping {
#define IBO_INVALID 0xff
	/* Maps logical SSBO state to hw tex state: */
	uint8_t ssbo_to_tex[IR3_MAX_SHADER_BUFFERS];

	/* Maps logical Image state to hw tex state: */
	uint8_t image_to_tex[IR3_MAX_SHADER_IMAGES];

	/* Maps hw state back to logical SSBO or Image state:
	 *
	 * note IBO_SSBO ORd into values to indicate that the
	 * hw slot is used for SSBO state vs Image state.
	 */
#define IBO_SSBO    0x80
	uint8_t tex_to_image[32];

	uint8_t num_tex;    /* including real textures */
	uint8_t tex_base;   /* the number of real textures, ie. image/ssbo start here */
};
450
451 /* Represents half register in regid */
452 #define HALF_REG_ID 0x100
453
/**
 * Shader variant which contains the actual hw shader instructions,
 * and necessary info for shader state setup.
 */
struct ir3_shader_variant {
	struct fd_bo *bo;

	/* variant id (for debug) */
	uint32_t id;

	/* the key this variant was compiled for: */
	struct ir3_shader_key key;

	/* vertex shaders can have an extra version for hwbinning pass,
	 * which is pointed to by so->binning:
	 */
	bool binning_pass;
//	union {
		struct ir3_shader_variant *binning;
		struct ir3_shader_variant *nonbinning;
//	};

	struct ir3 *ir;     /* freed after assembling machine instructions */

	/* shader variants form a linked list: */
	struct ir3_shader_variant *next;

	/* replicated here to avoid passing extra ptrs everywhere: */
	gl_shader_stage type;
	struct ir3_shader *shader;

	/* The actual binary shader instructions, size given by info.sizedwords: */
	uint32_t *bin;

	/* const-file layout; NULL'd for binning variants which share the
	 * draw-pass variant's state (see ir3_const_state()):
	 */
	struct ir3_const_state *const_state;

	struct ir3_info info;

	/* Levels of nesting of flow control:
	 */
	unsigned branchstack;

	/* NOTE(review): compile statistics — exact meaning of max_sun not
	 * evident from this header; see the compiler for details.
	 */
	unsigned max_sun;
	unsigned loops;

	/* the instructions length is in units of instruction groups
	 * (4 instructions for a3xx, 16 instructions for a4xx.. each
	 * instruction is 2 dwords):
	 */
	unsigned instrlen;

	/* the constants length is in units of vec4's, and is the sum of
	 * the uniforms and the built-in compiler constants
	 */
	unsigned constlen;

	/* About Linkage:
	 *   + Let the frag shader determine the position/compmask for the
	 *     varyings, since it is the place where we know if the varying
	 *     is actually used, and if so, which components are used.  So
	 *     what the hw calls "outloc" is taken from the "inloc" of the
	 *     frag shader.
	 *   + From the vert shader, we only need the output regid
	 */

	bool frag_face, color0_mrt;
	uint8_t fragcoord_compmask;

	/* NOTE: for input/outputs, slot is:
	 *   gl_vert_attrib  - for VS inputs
	 *   gl_varying_slot - for VS output / FS input
	 *   gl_frag_result  - for FS output
	 */

	/* varyings/outputs: */
	unsigned outputs_count;
	struct {
		uint8_t slot;
		uint8_t regid;
		bool    half : 1;
	} outputs[32 + 2];  /* +POSITION +PSIZE */
	bool writes_pos, writes_smask, writes_psize;

	/* Size in dwords of all outputs for VS, size of entire patch for HS. */
	uint32_t output_size;

	/* Map from driver_location to byte offset in per-primitive storage */
	unsigned output_loc[32];

	/* attributes (VS) / varyings (FS):
	 * Note that sysval's should come *after* normal inputs.
	 */
	unsigned inputs_count;
	struct {
		uint8_t slot;
		uint8_t regid;
		uint8_t compmask;
		/* location of input (ie. offset passed to bary.f, etc).  This
		 * matches the SP_VS_VPC_DST_REG.OUTLOCn value (a3xx and a4xx
		 * have the OUTLOCn value offset by 8, presumably to account
		 * for gl_Position/gl_PointSize)
		 */
		uint8_t inloc;
		/* vertex shader specific: */
		bool    sysval     : 1;   /* slot is a gl_system_value */
		/* fragment shader specific: */
		bool    bary       : 1;   /* fetched varying (vs one loaded into reg) */
		bool    rasterflat : 1;   /* special handling for emit->rasterflat */
		bool    use_ldlv   : 1;   /* internal to ir3_compiler_nir */
		bool    half       : 1;
		enum glsl_interp_mode interpolate;
	} inputs[32 + 2];  /* +POSITION +FACE */

	/* sum of input components (scalar).  For frag shaders, it only counts
	 * the varying inputs:
	 */
	unsigned total_in;

	/* For frag shaders, the total number of inputs (not scalar,
	 * ie. SP_VS_PARAM_REG.TOTALVSOUTVAR)
	 */
	unsigned varying_in;

	/* Remapping table to map Image and SSBO to hw state: */
	struct ir3_ibo_mapping image_mapping;

	/* number of samplers/textures (which are currently 1:1): */
	int num_samp;

	/* is there an implicit sampler to read framebuffer (FS only).. if
	 * so the sampler-idx is 'num_samp - 1' (ie. it is appended after
	 * the last "real" texture)
	 */
	bool fb_read;

	/* do we have one or more SSBO instructions: */
	bool has_ssbo;

	/* Which bindless resources are used, for filling out sp_xs_config */
	bool bindless_tex;
	bool bindless_samp;
	bool bindless_ibo;
	bool bindless_ubo;

	/* do we need derivatives: */
	bool need_pixlod;

	bool need_fine_derivatives;

	/* do we have image write, etc (which prevents early-z): */
	bool no_earlyz;

	/* do we have kill, which also prevents early-z, but not necessarily
	 * early-lrz (as long as lrz-write is disabled, which must be handled
	 * outside of ir3.  Unlike other no_earlyz cases, kill doesn't have
	 * side effects that prevent early-lrz discard.
	 */
	bool has_kill;

	bool per_samp;

	/* Are we using split or merged register file? */
	bool mergedregs;

	/* for astc srgb workaround, the number/base of additional
	 * alpha tex states we need, and index of original tex states
	 */
	struct {
		unsigned base, count;
		unsigned orig_idx[16];
	} astc_srgb;

	/* texture sampler pre-dispatches */
	uint32_t num_sampler_prefetch;
	struct ir3_sampler_prefetch sampler_prefetch[IR3_MAX_SAMPLER_PREFETCH];
};
629
630 static inline const char *
631 ir3_shader_stage(struct ir3_shader_variant *v)
632 {
633 switch (v->type) {
634 case MESA_SHADER_VERTEX: return v->binning_pass ? "BVERT" : "VERT";
635 case MESA_SHADER_TESS_CTRL: return "TCS";
636 case MESA_SHADER_TESS_EVAL: return "TES";
637 case MESA_SHADER_GEOMETRY: return "GEOM";
638 case MESA_SHADER_FRAGMENT: return "FRAG";
639 case MESA_SHADER_COMPUTE: return "CL";
640 default:
641 unreachable("invalid type");
642 return NULL;
643 }
644 }
645
646
/**
 * Represents a shader at the API level, before state-specific variants are
 * generated.
 */
struct ir3_shader {
	gl_shader_stage type;

	/* shader id (for debug): */
	uint32_t id;
	uint32_t variant_count;

	/* Set by freedreno after shader_state_create, so we can emit debug info
	 * when recompiling a shader at draw time.
	 */
	bool initial_variants_done;

	struct ir3_compiler *compiler;

	/* const space reserved for the driver (ahead of user uniforms): */
	unsigned num_reserved_user_consts;

	struct nir_shader *nir;
	struct ir3_stream_output_info stream_output;

	/* linked list of compiled variants (see ir3_shader_variant::next),
	 * guarded by variants_lock:
	 */
	struct ir3_shader_variant *variants;
	mtx_t variants_lock;

	/* Bitmask of bits of the shader key used by this shader.  Used to avoid
	 * recompiles for GL NOS that doesn't actually apply to the shader.
	 */
	struct ir3_shader_key key_mask;
};
678
679 /**
680 * In order to use the same cmdstream, in particular constlen setup and const
681 * emit, for both binning and draw pass (a6xx+), the binning pass re-uses it's
682 * corresponding draw pass shaders const_state.
683 */
684 static inline struct ir3_const_state *
685 ir3_const_state(const struct ir3_shader_variant *v)
686 {
687 if (v->binning_pass)
688 return v->nonbinning->const_state;
689 return v->const_state;
690 }
691
692 /* Given a variant, calculate the maximum constlen it can have.
693 */
694
695 static inline unsigned
696 ir3_max_const(const struct ir3_shader_variant *v)
697 {
698 const struct ir3_compiler *compiler = v->shader->compiler;
699
700 if (v->shader->type == MESA_SHADER_COMPUTE) {
701 return compiler->max_const_compute;
702 } else if (v->key.safe_constlen) {
703 return compiler->max_const_safe;
704 } else if (v->shader->type == MESA_SHADER_FRAGMENT) {
705 return compiler->max_const_frag;
706 } else {
707 return compiler->max_const_geom;
708 }
709 }
710
711 void * ir3_shader_assemble(struct ir3_shader_variant *v);
712 struct ir3_shader_variant * ir3_shader_get_variant(struct ir3_shader *shader,
713 const struct ir3_shader_key *key, bool binning_pass, bool *created);
714 struct ir3_shader * ir3_shader_from_nir(struct ir3_compiler *compiler, nir_shader *nir,
715 unsigned reserved_user_consts, struct ir3_stream_output_info *stream_output);
716 uint32_t ir3_trim_constlen(struct ir3_shader_variant **variants,
717 const struct ir3_compiler *compiler);
718 void ir3_shader_destroy(struct ir3_shader *shader);
719 void ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin, FILE *out);
720 uint64_t ir3_shader_outputs(const struct ir3_shader *so);
721
722 int
723 ir3_glsl_type_size(const struct glsl_type *type, bool bindless);
724
725 /*
726 * Helper/util:
727 */
728
729 /* clears shader-key flags which don't apply to the given shader.
730 */
731 static inline void
732 ir3_key_clear_unused(struct ir3_shader_key *key, struct ir3_shader *shader)
733 {
734 uint32_t *key_bits = (uint32_t *)key;
735 uint32_t *key_mask = (uint32_t *)&shader->key_mask;
736 STATIC_ASSERT(sizeof(*key) % 4 == 0);
737 for (int i = 0; i < sizeof(*key) >> 2; i++)
738 key_bits[i] &= key_mask[i];
739 }
740
741 static inline int
742 ir3_find_output(const struct ir3_shader_variant *so, gl_varying_slot slot)
743 {
744 int j;
745
746 for (j = 0; j < so->outputs_count; j++)
747 if (so->outputs[j].slot == slot)
748 return j;
749
750 /* it seems optional to have a OUT.BCOLOR[n] for each OUT.COLOR[n]
751 * in the vertex shader.. but the fragment shader doesn't know this
752 * so it will always have both IN.COLOR[n] and IN.BCOLOR[n]. So
753 * at link time if there is no matching OUT.BCOLOR[n], we must map
754 * OUT.COLOR[n] to IN.BCOLOR[n]. And visa versa if there is only
755 * a OUT.BCOLOR[n] but no matching OUT.COLOR[n]
756 */
757 if (slot == VARYING_SLOT_BFC0) {
758 slot = VARYING_SLOT_COL0;
759 } else if (slot == VARYING_SLOT_BFC1) {
760 slot = VARYING_SLOT_COL1;
761 } else if (slot == VARYING_SLOT_COL0) {
762 slot = VARYING_SLOT_BFC0;
763 } else if (slot == VARYING_SLOT_COL1) {
764 slot = VARYING_SLOT_BFC1;
765 } else {
766 return -1;
767 }
768
769 for (j = 0; j < so->outputs_count; j++)
770 if (so->outputs[j].slot == slot)
771 return j;
772
773 debug_assert(0);
774
775 return -1;
776 }
777
778 static inline int
779 ir3_next_varying(const struct ir3_shader_variant *so, int i)
780 {
781 while (++i < so->inputs_count)
782 if (so->inputs[i].compmask && so->inputs[i].bary)
783 break;
784 return i;
785 }
786
/* Accumulated VS->FS linkage state, built up via ir3_link_shaders() /
 * ir3_link_add().
 */
struct ir3_shader_linkage {
	/* Maximum location either consumed by the fragment shader or produced by
	 * the last geometry stage, i.e. the size required for each vertex in the
	 * VPC in DWORD's.
	 */
	uint8_t max_loc;

	/* Number of entries in var. */
	uint8_t cnt;

	/* Bitset of locations used, including ones which are only used by the FS.
	 */
	uint32_t varmask[4];

	/* Map from VS output to location. */
	struct {
		uint8_t regid;
		uint8_t compmask;
		uint8_t loc;
	} var[32];

	/* location for fixed-function gl_PrimitiveID passthrough */
	uint8_t primid_loc;
};
811
812 static inline void
813 ir3_link_add(struct ir3_shader_linkage *l, uint8_t regid_, uint8_t compmask, uint8_t loc)
814 {
815 for (int j = 0; j < util_last_bit(compmask); j++) {
816 uint8_t comploc = loc + j;
817 l->varmask[comploc / 32] |= 1 << (comploc % 32);
818 }
819
820 l->max_loc = MAX2(l->max_loc, loc + util_last_bit(compmask));
821
822 if (regid_ != regid(63, 0)) {
823 int i = l->cnt++;
824 debug_assert(i < ARRAY_SIZE(l->var));
825
826 l->var[i].regid = regid_;
827 l->var[i].compmask = compmask;
828 l->var[i].loc = loc;
829 }
830 }
831
832 static inline void
833 ir3_link_shaders(struct ir3_shader_linkage *l,
834 const struct ir3_shader_variant *vs,
835 const struct ir3_shader_variant *fs,
836 bool pack_vs_out)
837 {
838 /* On older platforms, varmask isn't programmed at all, and it appears
839 * that the hardware generates a mask of used VPC locations using the VS
840 * output map, and hangs if a FS bary instruction references a location
841 * not in the list. This means that we need to have a dummy entry in the
842 * VS out map for things like gl_PointCoord which aren't written by the
843 * VS. Furthermore we can't use r63.x, so just pick a random register to
844 * use if there is no VS output.
845 */
846 const unsigned default_regid = pack_vs_out ? regid(63, 0) : regid(0, 0);
847 int j = -1, k;
848
849 l->primid_loc = 0xff;
850
851 while (l->cnt < ARRAY_SIZE(l->var)) {
852 j = ir3_next_varying(fs, j);
853
854 if (j >= fs->inputs_count)
855 break;
856
857 if (fs->inputs[j].inloc >= fs->total_in)
858 continue;
859
860 k = ir3_find_output(vs, fs->inputs[j].slot);
861
862 if (k < 0 && fs->inputs[j].slot == VARYING_SLOT_PRIMITIVE_ID) {
863 l->primid_loc = fs->inputs[j].inloc;
864 }
865
866 ir3_link_add(l, k >= 0 ? vs->outputs[k].regid : default_regid,
867 fs->inputs[j].compmask, fs->inputs[j].inloc);
868 }
869 }
870
871 static inline uint32_t
872 ir3_find_output_regid(const struct ir3_shader_variant *so, unsigned slot)
873 {
874 int j;
875 for (j = 0; j < so->outputs_count; j++)
876 if (so->outputs[j].slot == slot) {
877 uint32_t regid = so->outputs[j].regid;
878 if (so->outputs[j].half)
879 regid |= HALF_REG_ID;
880 return regid;
881 }
882 return regid(63, 0);
883 }
884
885 #define VARYING_SLOT_GS_HEADER_IR3 (VARYING_SLOT_MAX + 0)
886 #define VARYING_SLOT_GS_VERTEX_FLAGS_IR3 (VARYING_SLOT_MAX + 1)
887 #define VARYING_SLOT_TCS_HEADER_IR3 (VARYING_SLOT_MAX + 2)
888
889
890 static inline uint32_t
891 ir3_find_sysval_regid(const struct ir3_shader_variant *so, unsigned slot)
892 {
893 int j;
894 for (j = 0; j < so->inputs_count; j++)
895 if (so->inputs[j].sysval && (so->inputs[j].slot == slot))
896 return so->inputs[j].regid;
897 return regid(63, 0);
898 }
899
900 /* calculate register footprint in terms of half-regs (ie. one full
901 * reg counts as two half-regs).
902 */
903 static inline uint32_t
904 ir3_shader_halfregs(const struct ir3_shader_variant *v)
905 {
906 return (2 * (v->info.max_reg + 1)) + (v->info.max_half_reg + 1);
907 }
908
909 static inline uint32_t
910 ir3_shader_nibo(const struct ir3_shader_variant *v)
911 {
912 /* The dummy variant used in binning mode won't have an actual shader. */
913 if (!v->shader)
914 return 0;
915
916 return v->shader->nir->info.num_ssbos + v->shader->nir->info.num_images;
917 }
918
919 #endif /* IR3_SHADER_H_ */