freedreno/ir3: move nir finalization to after cache miss
[mesa.git] / src / freedreno / ir3 / ir3_shader.h
1 /*
2 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #ifndef IR3_SHADER_H_
28 #define IR3_SHADER_H_
29
30 #include <stdio.h>
31
32 #include "c11/threads.h"
33 #include "compiler/shader_enums.h"
34 #include "compiler/nir/nir.h"
35 #include "util/bitscan.h"
36 #include "util/disk_cache.h"
37
38 #include "ir3_compiler.h"
39
40 struct glsl_type;
41
/* driver param indices: */
enum ir3_driver_param {
	/* compute shader driver params: */
	IR3_DP_NUM_WORK_GROUPS_X = 0,
	IR3_DP_NUM_WORK_GROUPS_Y = 1,
	IR3_DP_NUM_WORK_GROUPS_Z = 2,
	IR3_DP_LOCAL_GROUP_SIZE_X = 4,
	IR3_DP_LOCAL_GROUP_SIZE_Y = 5,
	IR3_DP_LOCAL_GROUP_SIZE_Z = 6,
	/* NOTE: gl_NumWorkGroups should be vec4 aligned because
	 * glDispatchComputeIndirect() needs to load these from
	 * the info->indirect buffer.  Keep that in mind when/if
	 * adding any additional CS driver params.
	 */
	IR3_DP_CS_COUNT   = 8,   /* must be aligned to vec4 */

	/* vertex shader driver params: */
	IR3_DP_DRAWID = 0,
	IR3_DP_VTXID_BASE = 1,
	IR3_DP_INSTID_BASE = 2,
	IR3_DP_VTXCNT_MAX = 3,
	/* user-clip-plane components, up to 8x vec4's: */
	IR3_DP_UCP0_X = 4,
	/* .... */
	IR3_DP_UCP7_W = 35,
	IR3_DP_VS_COUNT = 36   /* must be aligned to vec4 */
};
69
70 #define IR3_MAX_SHADER_BUFFERS 32
71 #define IR3_MAX_SHADER_IMAGES 32
72 #define IR3_MAX_SO_BUFFERS 4
73 #define IR3_MAX_SO_STREAMS 4
74 #define IR3_MAX_SO_OUTPUTS 64
75 #define IR3_MAX_UBO_PUSH_RANGES 32
76
/**
 * Description of a lowered UBO.
 */
struct ir3_ubo_info {
	uint32_t block;         /* which constant block */
	uint16_t bindless_base; /* for bindless, which base register is used */
	bool bindless;          /* true if accessed via the bindless path */
};
85
/**
 * Description of a range of a lowered UBO access.
 *
 * Drivers should not assume that there are not multiple disjoint
 * lowered ranges of a single UBO.
 */
struct ir3_ubo_range {
	struct ir3_ubo_info ubo;
	uint32_t offset;     /* start offset to push in the const register file */
	uint32_t start, end; /* range of block that's actually used */
};
97
/* Result of scanning a shader's UBO accesses for push-const lowering. */
struct ir3_ubo_analysis_state {
	struct ir3_ubo_range range[IR3_MAX_UBO_PUSH_RANGES];
	uint32_t num_enabled;    /* number of valid entries in range[] */
	uint32_t size;
	uint32_t cmdstream_size; /* for per-gen backend to stash required cmdstream size */
};
104
/**
 * Describes the layout of shader consts.  This includes:
 * + User consts + driver lowered UBO ranges
 * + SSBO sizes
 * + Image sizes/dimensions
 * + Driver params (ie. IR3_DP_*)
 * + TFBO addresses (for generations that do not have hardware streamout)
 * + Lowered immediates
 *
 * For consts needed to pass internal values to shader which may or may not
 * be required, rather than allocating worst-case const space, we scan the
 * shader and allocate consts as-needed:
 *
 * + SSBO sizes: only needed if shader has a get_buffer_size intrinsic
 *   for a given SSBO
 *
 * + Image dimensions: needed to calculate pixel offset, but only for
 *   images that have a image_store intrinsic
 *
 * Layout of constant registers, each section aligned to vec4.  Note
 * that pointer size (ubo, etc) changes depending on generation.
 *
 *    user consts
 *    UBO addresses
 *    SSBO sizes
 *    if (vertex shader) {
 *        driver params (IR3_DP_*)
 *        if (stream_output.num_outputs > 0)
 *           stream-out addresses
 *    } else if (compute_shader) {
 *        driver params (IR3_DP_*)
 *    }
 *    immediates
 *
 * Immediates go last mostly because they are inserted in the CP pass
 * after the nir -> ir3 frontend.
 *
 * Note UBO size in bytes should be aligned to vec4
 */
struct ir3_const_state {
	unsigned num_ubos;
	unsigned num_driver_params;   /* scalar */

	/* Start of each section in the layout described above: */
	struct {
		/* user const start at zero */
		unsigned ubo;
		/* NOTE that a3xx might need a section for SSBO addresses too */
		unsigned ssbo_sizes;
		unsigned image_dims;
		unsigned driver_param;
		unsigned tfbo;
		unsigned primitive_param;
		unsigned primitive_map;
		unsigned immediate;
	} offsets;

	struct {
		uint32_t mask;  /* bitmask of SSBOs that have get_buffer_size */
		uint32_t count; /* number of consts allocated */
		/* one const allocated per SSBO which has get_buffer_size,
		 * ssbo_sizes.off[ssbo_id] is offset from start of ssbo_sizes
		 * consts:
		 */
		uint32_t off[IR3_MAX_SHADER_BUFFERS];
	} ssbo_size;

	struct {
		uint32_t mask;  /* bitmask of images that have image_store */
		uint32_t count; /* number of consts allocated */
		/* three const allocated per image which has image_store:
		 *  + cpp         (bytes per pixel)
		 *  + pitch       (y pitch)
		 *  + array_pitch (z pitch)
		 */
		uint32_t off[IR3_MAX_SHADER_IMAGES];
	} image_dims;

	unsigned immediate_idx;
	unsigned immediates_count;
	unsigned immediates_size;
	struct {
		uint32_t val[4];
	} *immediates;

	/* State of ubo access lowered to push consts: */
	struct ir3_ubo_analysis_state ubo_state;
};
192
/**
 * A single output for vertex transform feedback.
 */
struct ir3_stream_output {
	unsigned register_index:6;  /**< 0 to 63 (OUT index) */
	unsigned start_component:2; /**< 0 to 3 */
	unsigned num_components:3;  /**< 1 to 4 */
	unsigned output_buffer:3;   /**< 0 to PIPE_MAX_SO_BUFFERS */
	unsigned dst_offset:16;     /**< offset into the buffer in dwords */
	unsigned stream:2;          /**< 0 to 3 */
};
204
/**
 * Stream output for vertex transform feedback.
 */
struct ir3_stream_output_info {
	unsigned num_outputs;
	/** stride for an entire vertex for each buffer in dwords */
	uint16_t stride[IR3_MAX_SO_BUFFERS];

	/**
	 * Array of stream outputs, in the order they are to be written in.
	 * Selected components are tightly packed into the output buffer.
	 */
	struct ir3_stream_output output[IR3_MAX_SO_OUTPUTS];
};
219
220
221 /**
222 * Starting from a4xx, HW supports pre-dispatching texture sampling
223 * instructions prior to scheduling a shader stage, when the
224 * coordinate maps exactly to an output of the previous stage.
225 */
226
227 /**
228 * There is a limit in the number of pre-dispatches allowed for any
229 * given stage.
230 */
231 #define IR3_MAX_SAMPLER_PREFETCH 4
232
233 /**
234 * This is the output stream value for 'cmd', as used by blob. It may
235 * encode the return type (in 3 bits) but it hasn't been verified yet.
236 */
237 #define IR3_SAMPLER_PREFETCH_CMD 0x4
238 #define IR3_SAMPLER_BINDLESS_PREFETCH_CMD 0x6
239
/**
 * Stream output for texture sampling pre-dispatches.
 */
struct ir3_sampler_prefetch {
	uint8_t src;
	uint8_t samp_id;
	uint8_t tex_id;
	uint16_t samp_bindless_id;
	uint16_t tex_bindless_id;
	uint8_t dst;
	uint8_t wrmask;
	uint8_t half_precision;
	/* IR3_SAMPLER_PREFETCH_CMD or IR3_SAMPLER_BINDLESS_PREFETCH_CMD: */
	uint8_t cmd;
};
254
255
/* Configuration key used to identify a shader variant..  different
 * shader variants can be used to implement features not supported
 * in hw (two sided color), binning-pass vertex shader, etc.
 *
 * When adding to this struct, please update ir3_shader_variant()'s debug
 * output.
 */
struct ir3_shader_key {
	union {
		struct {
			/*
			 * Combined Vertex/Fragment shader parameters:
			 */
			unsigned ucp_enables : 8;

			/* do we need to check {v,f}saturate_{s,t,r}? */
			unsigned has_per_samp : 1;

			/*
			 * Vertex shader variant parameters:
			 */
			unsigned vclamp_color : 1;

			/*
			 * Fragment shader variant parameters:
			 */
			unsigned sample_shading : 1;
			unsigned msaa : 1;
			unsigned color_two_side : 1;
			/* used when shader needs to handle flat varyings (a4xx)
			 * for front/back color inputs to frag shader:
			 */
			unsigned rasterflat : 1;
			unsigned fclamp_color : 1;

			/* Indicates that this is a tessellation pipeline which requires a
			 * whole different kind of vertex shader.  In case of
			 * tessellation, this field also tells us which kind of output
			 * topology the TES uses, which the TCS needs to know.
			 */
#define IR3_TESS_NONE		0
#define IR3_TESS_TRIANGLES	1
#define IR3_TESS_QUADS		2
#define IR3_TESS_ISOLINES	3
			unsigned tessellation : 2;

			unsigned has_gs : 1;

			/* Whether this variant sticks to the "safe" maximum constlen,
			 * which guarantees that the combined stages will never go over
			 * the limit:
			 */
			unsigned safe_constlen : 1;
		};
		/* the bitfields above viewed as a single word, used for the
		 * fast-path comparison in ir3_shader_key_equal():
		 */
		uint32_t global;
	};

	/* bitmask of sampler which needs coords clamped for vertex
	 * shader:
	 */
	uint16_t vsaturate_s, vsaturate_t, vsaturate_r;

	/* bitmask of sampler which needs coords clamped for frag
	 * shader:
	 */
	uint16_t fsaturate_s, fsaturate_t, fsaturate_r;

	/* bitmask of ms shifts */
	uint32_t vsamples, fsamples;

	/* bitmask of samplers which need astc srgb workaround: */
	uint16_t vastc_srgb, fastc_srgb;
};
329
330 static inline unsigned
331 ir3_tess_mode(unsigned gl_tess_mode)
332 {
333 switch (gl_tess_mode) {
334 case GL_ISOLINES:
335 return IR3_TESS_ISOLINES;
336 case GL_TRIANGLES:
337 return IR3_TESS_TRIANGLES;
338 case GL_QUADS:
339 return IR3_TESS_QUADS;
340 default:
341 unreachable("bad tessmode");
342 }
343 }
344
345 static inline bool
346 ir3_shader_key_equal(const struct ir3_shader_key *a, const struct ir3_shader_key *b)
347 {
348 /* slow-path if we need to check {v,f}saturate_{s,t,r} */
349 if (a->has_per_samp || b->has_per_samp)
350 return memcmp(a, b, sizeof(struct ir3_shader_key)) == 0;
351 return a->global == b->global;
352 }
353
354 /* will the two keys produce different lowering for a fragment shader? */
355 static inline bool
356 ir3_shader_key_changes_fs(struct ir3_shader_key *key, struct ir3_shader_key *last_key)
357 {
358 if (last_key->has_per_samp || key->has_per_samp) {
359 if ((last_key->fsaturate_s != key->fsaturate_s) ||
360 (last_key->fsaturate_t != key->fsaturate_t) ||
361 (last_key->fsaturate_r != key->fsaturate_r) ||
362 (last_key->fsamples != key->fsamples) ||
363 (last_key->fastc_srgb != key->fastc_srgb))
364 return true;
365 }
366
367 if (last_key->fclamp_color != key->fclamp_color)
368 return true;
369
370 if (last_key->color_two_side != key->color_two_side)
371 return true;
372
373 if (last_key->rasterflat != key->rasterflat)
374 return true;
375
376 if (last_key->ucp_enables != key->ucp_enables)
377 return true;
378
379 if (last_key->safe_constlen != key->safe_constlen)
380 return true;
381
382 return false;
383 }
384
385 /* will the two keys produce different lowering for a vertex shader? */
386 static inline bool
387 ir3_shader_key_changes_vs(struct ir3_shader_key *key, struct ir3_shader_key *last_key)
388 {
389 if (last_key->has_per_samp || key->has_per_samp) {
390 if ((last_key->vsaturate_s != key->vsaturate_s) ||
391 (last_key->vsaturate_t != key->vsaturate_t) ||
392 (last_key->vsaturate_r != key->vsaturate_r) ||
393 (last_key->vsamples != key->vsamples) ||
394 (last_key->vastc_srgb != key->vastc_srgb))
395 return true;
396 }
397
398 if (last_key->vclamp_color != key->vclamp_color)
399 return true;
400
401 if (last_key->ucp_enables != key->ucp_enables)
402 return true;
403
404 if (last_key->safe_constlen != key->safe_constlen)
405 return true;
406
407 return false;
408 }
409
/**
 * On a4xx+a5xx, Images share state with textures and SSBOs:
 *
 *   + Uses texture (cat5) state/instruction (isam) to read
 *   + Uses SSBO state and instructions (cat6) to write and for atomics
 *
 * Starting with a6xx, Images and SSBOs are basically the same thing,
 * with texture state and isam also used for SSBO reads.
 *
 * On top of that, gallium makes the SSBO (shader_buffers) state semi
 * sparse, with the first half of the state space used for atomic
 * counters lowered to atomic buffers.  We could ignore this, but I
 * don't think we could *really* handle the case of a single shader
 * that used the max # of textures + images + SSBOs.  And once we are
 * offsetting images by num_ssbos (or vice versa) to map them into
 * the same hardware state, the hardware state has become coupled to
 * the shader state, so at this point we might as well just use a
 * mapping table to remap things from image/SSBO idx to hw idx.
 *
 * To make things less (more?) confusing, for the hw "SSBO" state
 * (since it is really both SSBO and Image) I'll use the name "IBO"
 */
struct ir3_ibo_mapping {
#define IBO_INVALID 0xff
	/* Maps logical SSBO state to hw tex state: */
	uint8_t ssbo_to_tex[IR3_MAX_SHADER_BUFFERS];

	/* Maps logical Image state to hw tex state: */
	uint8_t image_to_tex[IR3_MAX_SHADER_IMAGES];

	/* Maps hw state back to logical SSBO or Image state:
	 *
	 * note IBO_SSBO ORd into values to indicate that the
	 * hw slot is used for SSBO state vs Image state.
	 */
#define IBO_SSBO    0x80
	uint8_t tex_to_image[32];

	uint8_t num_tex;    /* including real textures */
	uint8_t tex_base;   /* the number of real textures, ie. image/ssbo start here */
};
451
452 /* Represents half register in regid */
453 #define HALF_REG_ID 0x100
454
/**
 * Shader variant which contains the actual hw shader instructions,
 * and necessary info for shader state setup.
 */
struct ir3_shader_variant {
	struct fd_bo *bo;

	/* variant id (for debug) */
	uint32_t id;

	/* key this variant was compiled for (see ir3_shader_key_equal()): */
	struct ir3_shader_key key;

	/* vertex shaders can have an extra version for hwbinning pass,
	 * which is pointed to by so->binning:
	 */
	bool binning_pass;
//	union {
		struct ir3_shader_variant *binning;
		struct ir3_shader_variant *nonbinning;
//	};

	struct ir3 *ir;     /* freed after assembling machine instructions */

	/* shader variants form a linked list: */
	struct ir3_shader_variant *next;

	/* replicated here to avoid passing extra ptrs everywhere: */
	gl_shader_stage type;
	struct ir3_shader *shader;

	/*
	 * Below here is serialized when written to disk cache:
	 */

	/* The actual binary shader instructions, size given by info.sizedwords: */
	uint32_t *bin;

	struct ir3_const_state *const_state;

	/*
	 * The following macros are used by the shader disk cache save/
	 * restore paths to serialize/deserialize the variant.  Any
	 * pointers that require special handling in store_variant()
	 * and retrieve_variant() should go above here.
	 */
#define VARIANT_CACHE_START    offsetof(struct ir3_shader_variant, info)
#define VARIANT_CACHE_PTR(v)   (((char *)v) + VARIANT_CACHE_START)
#define VARIANT_CACHE_SIZE     (sizeof(struct ir3_shader_variant) - VARIANT_CACHE_START)

	struct ir3_info info;

	/* Levels of nesting of flow control:
	 */
	unsigned branchstack;

	unsigned max_sun;
	unsigned loops;

	/* the instructions length is in units of instruction groups
	 * (4 instructions for a3xx, 16 instructions for a4xx.. each
	 * instruction is 2 dwords):
	 */
	unsigned instrlen;

	/* the constants length is in units of vec4's, and is the sum of
	 * the uniforms and the built-in compiler constants
	 */
	unsigned constlen;

	/* About Linkage:
	 *   + Let the frag shader determine the position/compmask for the
	 *     varyings, since it is the place where we know if the varying
	 *     is actually used, and if so, which components are used.  So
	 *     what the hw calls "outloc" is taken from the "inloc" of the
	 *     frag shader.
	 *   + From the vert shader, we only need the output regid
	 */

	bool frag_face, color0_mrt;
	uint8_t fragcoord_compmask;

	/* NOTE: for input/outputs, slot is:
	 *   gl_vert_attrib  - for VS inputs
	 *   gl_varying_slot - for VS output / FS input
	 *   gl_frag_result  - for FS output
	 */

	/* varyings/outputs: */
	unsigned outputs_count;
	struct {
		uint8_t slot;
		uint8_t regid;
		bool    half : 1;
	} outputs[32 + 2];  /* +POSITION +PSIZE */
	bool writes_pos, writes_smask, writes_psize;

	/* Size in dwords of all outputs for VS, size of entire patch for HS. */
	uint32_t output_size;

	/* Map from driver_location to byte offset in per-primitive storage */
	unsigned output_loc[32];

	/* attributes (VS) / varyings (FS):
	 * Note that sysval's should come *after* normal inputs.
	 */
	unsigned inputs_count;
	struct {
		uint8_t slot;
		uint8_t regid;
		uint8_t compmask;
		/* location of input (ie. offset passed to bary.f, etc).  This
		 * matches the SP_VS_VPC_DST_REG.OUTLOCn value (a3xx and a4xx
		 * have the OUTLOCn value offset by 8, presumably to account
		 * for gl_Position/gl_PointSize)
		 */
		uint8_t inloc;
		/* vertex shader specific: */
		bool    sysval     : 1;   /* slot is a gl_system_value */
		/* fragment shader specific: */
		bool    bary       : 1;   /* fetched varying (vs one loaded into reg) */
		bool    rasterflat : 1;   /* special handling for emit->rasterflat */
		bool    use_ldlv   : 1;   /* internal to ir3_compiler_nir */
		bool    half       : 1;
		enum glsl_interp_mode interpolate;
	} inputs[32 + 2];  /* +POSITION +FACE */

	/* sum of input components (scalar).  For frag shaders, it only counts
	 * the varying inputs:
	 */
	unsigned total_in;

	/* For frag shaders, the total number of inputs (not scalar,
	 * ie. SP_VS_PARAM_REG.TOTALVSOUTVAR)
	 */
	unsigned varying_in;

	/* Remapping table to map Image and SSBO to hw state: */
	struct ir3_ibo_mapping image_mapping;

	/* number of samplers/textures (which are currently 1:1): */
	int num_samp;

	/* is there an implicit sampler to read framebuffer (FS only).. if
	 * so the sampler-idx is 'num_samp - 1' (ie. it is appended after
	 * the last "real" texture)
	 */
	bool fb_read;

	/* do we have one or more SSBO instructions: */
	bool has_ssbo;

	/* Which bindless resources are used, for filling out sp_xs_config */
	bool bindless_tex;
	bool bindless_samp;
	bool bindless_ibo;
	bool bindless_ubo;

	/* do we need derivatives: */
	bool need_pixlod;

	bool need_fine_derivatives;

	/* do we have image write, etc (which prevents early-z): */
	bool no_earlyz;

	/* do we have kill, which also prevents early-z, but not necessarily
	 * early-lrz (as long as lrz-write is disabled, which must be handled
	 * outside of ir3.  Unlike other no_earlyz cases, kill doesn't have
	 * side effects that prevent early-lrz discard.
	 */
	bool has_kill;

	bool per_samp;

	/* Are we using split or merged register file? */
	bool mergedregs;

	/* for astc srgb workaround, the number/base of additional
	 * alpha tex states we need, and index of original tex states
	 */
	struct {
		unsigned base, count;
		unsigned orig_idx[16];
	} astc_srgb;

	/* texture sampler pre-dispatches */
	uint32_t num_sampler_prefetch;
	struct ir3_sampler_prefetch sampler_prefetch[IR3_MAX_SAMPLER_PREFETCH];
};
644
645 static inline const char *
646 ir3_shader_stage(struct ir3_shader_variant *v)
647 {
648 switch (v->type) {
649 case MESA_SHADER_VERTEX: return v->binning_pass ? "BVERT" : "VERT";
650 case MESA_SHADER_TESS_CTRL: return "TCS";
651 case MESA_SHADER_TESS_EVAL: return "TES";
652 case MESA_SHADER_GEOMETRY: return "GEOM";
653 case MESA_SHADER_FRAGMENT: return "FRAG";
654 case MESA_SHADER_COMPUTE: return "CL";
655 default:
656 unreachable("invalid type");
657 return NULL;
658 }
659 }
660
661 /* Currently we do not do binning for tess. And for GS there is no
662 * cross-stage VS+GS optimization, so the full VS+GS is used in
663 * the binning pass.
664 */
665 static inline bool
666 ir3_has_binning_vs(const struct ir3_shader_key *key)
667 {
668 if (key->tessellation || key->has_gs)
669 return false;
670 return true;
671 }
672
/**
 * Represents a shader at the API level, before state-specific variants are
 * generated.
 */
struct ir3_shader {
	gl_shader_stage type;

	/* shader id (for debug): */
	uint32_t id;
	uint32_t variant_count;

	/* Set by freedreno after shader_state_create, so we can emit debug info
	 * when recompiling a shader at draw time.
	 */
	bool initial_variants_done;

	struct ir3_compiler *compiler;

	unsigned num_reserved_user_consts;

	/* NOTE(review): presumably tracks whether final nir lowering has run
	 * (deferred until after a disk-cache miss) -- confirm in ir3_shader.c
	 */
	bool nir_finalized;
	struct nir_shader *nir;
	struct ir3_stream_output_info stream_output;

	/* linked list of compiled variants (see ir3_shader_variant::next),
	 * guarded by variants_lock:
	 */
	struct ir3_shader_variant *variants;
	mtx_t variants_lock;

	cache_key cache_key;     /* shader disk-cache key */

	/* Bitmask of bits of the shader key used by this shader.  Used to avoid
	 * recompiles for GL NOS that doesn't actually apply to the shader.
	 */
	struct ir3_shader_key key_mask;
};
707
708 /**
709 * In order to use the same cmdstream, in particular constlen setup and const
710 * emit, for both binning and draw pass (a6xx+), the binning pass re-uses it's
711 * corresponding draw pass shaders const_state.
712 */
713 static inline struct ir3_const_state *
714 ir3_const_state(const struct ir3_shader_variant *v)
715 {
716 if (v->binning_pass)
717 return v->nonbinning->const_state;
718 return v->const_state;
719 }
720
721 /* Given a variant, calculate the maximum constlen it can have.
722 */
723
724 static inline unsigned
725 ir3_max_const(const struct ir3_shader_variant *v)
726 {
727 const struct ir3_compiler *compiler = v->shader->compiler;
728
729 if (v->shader->type == MESA_SHADER_COMPUTE) {
730 return compiler->max_const_compute;
731 } else if (v->key.safe_constlen) {
732 return compiler->max_const_safe;
733 } else if (v->shader->type == MESA_SHADER_FRAGMENT) {
734 return compiler->max_const_frag;
735 } else {
736 return compiler->max_const_geom;
737 }
738 }
739
740 void * ir3_shader_assemble(struct ir3_shader_variant *v);
741 struct ir3_shader_variant * ir3_shader_get_variant(struct ir3_shader *shader,
742 const struct ir3_shader_key *key, bool binning_pass, bool *created);
743 struct ir3_shader * ir3_shader_from_nir(struct ir3_compiler *compiler, nir_shader *nir,
744 unsigned reserved_user_consts, struct ir3_stream_output_info *stream_output);
745 uint32_t ir3_trim_constlen(struct ir3_shader_variant **variants,
746 const struct ir3_compiler *compiler);
747 void ir3_shader_destroy(struct ir3_shader *shader);
748 void ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin, FILE *out);
749 uint64_t ir3_shader_outputs(const struct ir3_shader *so);
750
751 int
752 ir3_glsl_type_size(const struct glsl_type *type, bool bindless);
753
754 /*
755 * Helper/util:
756 */
757
758 /* clears shader-key flags which don't apply to the given shader.
759 */
760 static inline void
761 ir3_key_clear_unused(struct ir3_shader_key *key, struct ir3_shader *shader)
762 {
763 uint32_t *key_bits = (uint32_t *)key;
764 uint32_t *key_mask = (uint32_t *)&shader->key_mask;
765 STATIC_ASSERT(sizeof(*key) % 4 == 0);
766 for (int i = 0; i < sizeof(*key) >> 2; i++)
767 key_bits[i] &= key_mask[i];
768 }
769
770 static inline int
771 ir3_find_output(const struct ir3_shader_variant *so, gl_varying_slot slot)
772 {
773 int j;
774
775 for (j = 0; j < so->outputs_count; j++)
776 if (so->outputs[j].slot == slot)
777 return j;
778
779 /* it seems optional to have a OUT.BCOLOR[n] for each OUT.COLOR[n]
780 * in the vertex shader.. but the fragment shader doesn't know this
781 * so it will always have both IN.COLOR[n] and IN.BCOLOR[n]. So
782 * at link time if there is no matching OUT.BCOLOR[n], we must map
783 * OUT.COLOR[n] to IN.BCOLOR[n]. And visa versa if there is only
784 * a OUT.BCOLOR[n] but no matching OUT.COLOR[n]
785 */
786 if (slot == VARYING_SLOT_BFC0) {
787 slot = VARYING_SLOT_COL0;
788 } else if (slot == VARYING_SLOT_BFC1) {
789 slot = VARYING_SLOT_COL1;
790 } else if (slot == VARYING_SLOT_COL0) {
791 slot = VARYING_SLOT_BFC0;
792 } else if (slot == VARYING_SLOT_COL1) {
793 slot = VARYING_SLOT_BFC1;
794 } else {
795 return -1;
796 }
797
798 for (j = 0; j < so->outputs_count; j++)
799 if (so->outputs[j].slot == slot)
800 return j;
801
802 debug_assert(0);
803
804 return -1;
805 }
806
807 static inline int
808 ir3_next_varying(const struct ir3_shader_variant *so, int i)
809 {
810 while (++i < so->inputs_count)
811 if (so->inputs[i].compmask && so->inputs[i].bary)
812 break;
813 return i;
814 }
815
/* Accumulated VS-output to VPC-location map built by ir3_link_shaders(). */
struct ir3_shader_linkage {
	/* Maximum location either consumed by the fragment shader or produced by
	 * the last geometry stage, i.e. the size required for each vertex in the
	 * VPC in DWORD's.
	 */
	uint8_t max_loc;

	/* Number of entries in var. */
	uint8_t cnt;

	/* Bitset of locations used, including ones which are only used by the FS.
	 */
	uint32_t varmask[4];

	/* Map from VS output to location. */
	struct {
		uint8_t regid;
		uint8_t compmask;
		uint8_t loc;
	} var[32];

	/* location for fixed-function gl_PrimitiveID passthrough */
	uint8_t primid_loc;
};
840
841 static inline void
842 ir3_link_add(struct ir3_shader_linkage *l, uint8_t regid_, uint8_t compmask, uint8_t loc)
843 {
844 for (int j = 0; j < util_last_bit(compmask); j++) {
845 uint8_t comploc = loc + j;
846 l->varmask[comploc / 32] |= 1 << (comploc % 32);
847 }
848
849 l->max_loc = MAX2(l->max_loc, loc + util_last_bit(compmask));
850
851 if (regid_ != regid(63, 0)) {
852 int i = l->cnt++;
853 debug_assert(i < ARRAY_SIZE(l->var));
854
855 l->var[i].regid = regid_;
856 l->var[i].compmask = compmask;
857 l->var[i].loc = loc;
858 }
859 }
860
/* Walk the FS's fetched varyings and link each one to the matching VS
 * output (or a dummy register), filling in *l.  Also records the location
 * of gl_PrimitiveID passthrough, if present.
 */
static inline void
ir3_link_shaders(struct ir3_shader_linkage *l,
		const struct ir3_shader_variant *vs,
		const struct ir3_shader_variant *fs,
		bool pack_vs_out)
{
	/* On older platforms, varmask isn't programmed at all, and it appears
	 * that the hardware generates a mask of used VPC locations using the VS
	 * output map, and hangs if a FS bary instruction references a location
	 * not in the list.  This means that we need to have a dummy entry in the
	 * VS out map for things like gl_PointCoord which aren't written by the
	 * VS.  Furthermore we can't use r63.x, so just pick a random register to
	 * use if there is no VS output.
	 */
	const unsigned default_regid = pack_vs_out ? regid(63, 0) : regid(0, 0);
	int j = -1, k;

	l->primid_loc = 0xff;

	while (l->cnt < ARRAY_SIZE(l->var)) {
		j = ir3_next_varying(fs, j);

		if (j >= fs->inputs_count)
			break;

		/* skip FS inputs beyond the varying space actually consumed: */
		if (fs->inputs[j].inloc >= fs->total_in)
			continue;

		k = ir3_find_output(vs, fs->inputs[j].slot);

		if (k < 0 && fs->inputs[j].slot == VARYING_SLOT_PRIMITIVE_ID) {
			l->primid_loc = fs->inputs[j].inloc;
		}

		ir3_link_add(l, k >= 0 ? vs->outputs[k].regid : default_regid,
			fs->inputs[j].compmask, fs->inputs[j].inloc);
	}
}
899
900 static inline uint32_t
901 ir3_find_output_regid(const struct ir3_shader_variant *so, unsigned slot)
902 {
903 int j;
904 for (j = 0; j < so->outputs_count; j++)
905 if (so->outputs[j].slot == slot) {
906 uint32_t regid = so->outputs[j].regid;
907 if (so->outputs[j].half)
908 regid |= HALF_REG_ID;
909 return regid;
910 }
911 return regid(63, 0);
912 }
913
914 #define VARYING_SLOT_GS_HEADER_IR3 (VARYING_SLOT_MAX + 0)
915 #define VARYING_SLOT_GS_VERTEX_FLAGS_IR3 (VARYING_SLOT_MAX + 1)
916 #define VARYING_SLOT_TCS_HEADER_IR3 (VARYING_SLOT_MAX + 2)
917
918
919 static inline uint32_t
920 ir3_find_sysval_regid(const struct ir3_shader_variant *so, unsigned slot)
921 {
922 int j;
923 for (j = 0; j < so->inputs_count; j++)
924 if (so->inputs[j].sysval && (so->inputs[j].slot == slot))
925 return so->inputs[j].regid;
926 return regid(63, 0);
927 }
928
929 /* calculate register footprint in terms of half-regs (ie. one full
930 * reg counts as two half-regs).
931 */
932 static inline uint32_t
933 ir3_shader_halfregs(const struct ir3_shader_variant *v)
934 {
935 return (2 * (v->info.max_reg + 1)) + (v->info.max_half_reg + 1);
936 }
937
938 static inline uint32_t
939 ir3_shader_nibo(const struct ir3_shader_variant *v)
940 {
941 /* The dummy variant used in binning mode won't have an actual shader. */
942 if (!v->shader)
943 return 0;
944
945 return v->shader->nir->info.num_ssbos + v->shader->nir->info.num_images;
946 }
947
948 #endif /* IR3_SHADER_H_ */