freedreno/ir3: rework setup_{input,output} to make struct varyings work
[mesa.git] / src / freedreno / ir3 / ir3_shader.h
1 /*
2 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #ifndef IR3_SHADER_H_
28 #define IR3_SHADER_H_
29
30 #include <stdio.h>
31
32 #include "c11/threads.h"
33 #include "compiler/shader_enums.h"
34 #include "compiler/nir/nir.h"
35 #include "util/bitscan.h"
36 #include "util/disk_cache.h"
37
38 #include "ir3_compiler.h"
39
40 struct glsl_type;
41
/* driver param indices:
 *
 * NOTE: the CS and VS lists below intentionally share index space,
 * since a given shader stage only uses one of the two sets.
 */
enum ir3_driver_param {
	/* compute shader driver params: */
	IR3_DP_NUM_WORK_GROUPS_X = 0,
	IR3_DP_NUM_WORK_GROUPS_Y = 1,
	IR3_DP_NUM_WORK_GROUPS_Z = 2,
	IR3_DP_LOCAL_GROUP_SIZE_X = 4,
	IR3_DP_LOCAL_GROUP_SIZE_Y = 5,
	IR3_DP_LOCAL_GROUP_SIZE_Z = 6,
	/* NOTE: gl_NumWorkGroups should be vec4 aligned because
	 * glDispatchComputeIndirect() needs to load these from
	 * the info->indirect buffer.  Keep that in mind when/if
	 * adding any additional CS driver params.
	 */
	IR3_DP_CS_COUNT   = 8,   /* must be aligned to vec4 */

	/* vertex shader driver params: */
	IR3_DP_DRAWID = 0,
	IR3_DP_VTXID_BASE = 1,
	IR3_DP_INSTID_BASE = 2,
	IR3_DP_VTXCNT_MAX = 3,
	/* user-clip-plane components, up to 8x vec4's: */
	IR3_DP_UCP0_X     = 4,
	/* .... */
	IR3_DP_UCP7_W     = 35,
	IR3_DP_VS_COUNT   = 36   /* must be aligned to vec4 */
};
69
70 #define IR3_MAX_SHADER_BUFFERS 32
71 #define IR3_MAX_SHADER_IMAGES 32
72 #define IR3_MAX_SO_BUFFERS 4
73 #define IR3_MAX_SO_STREAMS 4
74 #define IR3_MAX_SO_OUTPUTS 64
75 #define IR3_MAX_UBO_PUSH_RANGES 32
76
/* mirrors SYSTEM_VALUE_BARYCENTRIC_ but starting from 0 */
enum ir3_bary {
	IJ_PERSP_PIXEL,
	IJ_PERSP_SAMPLE,
	IJ_PERSP_CENTROID,
	IJ_PERSP_SIZE,
	IJ_LINEAR_PIXEL,
	IJ_LINEAR_CENTROID,
	IJ_LINEAR_SAMPLE,
	IJ_COUNT,   /* number of distinct i,j coordinate variants */
};
88
/**
 * Description of a lowered UBO.
 */
struct ir3_ubo_info {
	uint32_t block;         /* Which constant block */
	uint16_t bindless_base; /* For bindless, which base register is used */
	bool bindless;          /* whether bindless_base applies (vs. block) */
};
97
/**
 * Description of a range of a lowered UBO access.
 *
 * Drivers should not assume that there are not multiple disjoint
 * lowered ranges of a single UBO.
 */
struct ir3_ubo_range {
	struct ir3_ubo_info ubo; /* which UBO this range comes from */
	uint32_t offset;         /* start offset to push in the const register file */
	uint32_t start, end;     /* range of block that's actually used */
};
109
struct ir3_ubo_analysis_state {
	struct ir3_ubo_range range[IR3_MAX_UBO_PUSH_RANGES];
	uint32_t num_enabled;    /* number of valid entries in range[] */
	/* total size of the pushed ranges (NOTE(review): confirm units) */
	uint32_t size;
	uint32_t cmdstream_size; /* for per-gen backend to stash required cmdstream size */
};
116
117 /**
118 * Describes the layout of shader consts. This includes:
119 * + User consts + driver lowered UBO ranges
120 * + SSBO sizes
121 * + Image sizes/dimensions
122 * + Driver params (ie. IR3_DP_*)
123 * + TFBO addresses (for generations that do not have hardware streamout)
124 * + Lowered immediates
125 *
126 * For consts needed to pass internal values to shader which may or may not
127 * be required, rather than allocating worst-case const space, we scan the
128 * shader and allocate consts as-needed:
129 *
130 * + SSBO sizes: only needed if shader has a get_buffer_size intrinsic
131 * for a given SSBO
132 *
133 * + Image dimensions: needed to calculate pixel offset, but only for
134 * images that have a image_store intrinsic
135 *
136 * Layout of constant registers, each section aligned to vec4. Note
137 * that pointer size (ubo, etc) changes depending on generation.
138 *
139 * user consts
140 * UBO addresses
141 * SSBO sizes
142 * if (vertex shader) {
143 * driver params (IR3_DP_*)
144 * if (stream_output.num_outputs > 0)
145 * stream-out addresses
146 * } else if (compute_shader) {
147 * driver params (IR3_DP_*)
148 * }
149 * immediates
150 *
151 * Immediates go last mostly because they are inserted in the CP pass
152 * after the nir -> ir3 frontend.
153 *
154 * Note UBO size in bytes should be aligned to vec4
155 */
struct ir3_const_state {
	unsigned num_ubos;
	unsigned num_driver_params;   /* scalar */

	/* starting offset of each const section; per the layout comment
	 * above, each section is aligned to vec4:
	 */
	struct {
		/* user const start at zero */
		unsigned ubo;
		/* NOTE that a3xx might need a section for SSBO addresses too */
		unsigned ssbo_sizes;
		unsigned image_dims;
		unsigned driver_param;
		unsigned tfbo;
		unsigned primitive_param;
		unsigned primitive_map;
		unsigned immediate;
	} offsets;

	struct {
		uint32_t mask;  /* bitmask of SSBOs that have get_buffer_size */
		uint32_t count; /* number of consts allocated */
		/* one const allocated per SSBO which has get_buffer_size,
		 * ssbo_sizes.off[ssbo_id] is offset from start of ssbo_sizes
		 * consts:
		 */
		uint32_t off[IR3_MAX_SHADER_BUFFERS];
	} ssbo_size;

	struct {
		uint32_t mask;  /* bitmask of images that have image_store */
		uint32_t count; /* number of consts allocated */
		/* three const allocated per image which has image_store:
		 *  + cpp         (bytes per pixel)
		 *  + pitch       (y pitch)
		 *  + array_pitch (z pitch)
		 */
		uint32_t off[IR3_MAX_SHADER_IMAGES];
	} image_dims;

	unsigned immediates_count;  /* number of valid entries in immediates[] */
	unsigned immediates_size;   /* allocated size of immediates[] (NOTE(review): confirm units) */
	uint32_t *immediates;

	/* State of ubo access lowered to push consts: */
	struct ir3_ubo_analysis_state ubo_state;
};
201
/**
 * A single output for vertex transform feedback.
 */
struct ir3_stream_output {
	unsigned register_index:6;  /**< 0 to 63 (OUT index) */
	unsigned start_component:2; /**< 0 to 3 */
	unsigned num_components:3;  /**< 1 to 4 */
	unsigned output_buffer:3;   /**< 0 to PIPE_MAX_SO_BUFFERS */
	unsigned dst_offset:16;     /**< offset into the buffer in dwords */
	unsigned stream:2;          /**< 0 to 3 */
};
213
/**
 * Stream output for vertex transform feedback.
 */
struct ir3_stream_output_info {
	unsigned num_outputs;   /**< number of valid entries in output[] */
	/** stride for an entire vertex for each buffer in dwords */
	uint16_t stride[IR3_MAX_SO_BUFFERS];

	/**
	 * Array of stream outputs, in the order they are to be written in.
	 * Selected components are tightly packed into the output buffer.
	 */
	struct ir3_stream_output output[IR3_MAX_SO_OUTPUTS];
};
228
229
230 /**
231 * Starting from a4xx, HW supports pre-dispatching texture sampling
232 * instructions prior to scheduling a shader stage, when the
233 * coordinate maps exactly to an output of the previous stage.
234 */
235
236 /**
237 * There is a limit in the number of pre-dispatches allowed for any
238 * given stage.
239 */
240 #define IR3_MAX_SAMPLER_PREFETCH 4
241
242 /**
243 * This is the output stream value for 'cmd', as used by blob. It may
244 * encode the return type (in 3 bits) but it hasn't been verified yet.
245 */
246 #define IR3_SAMPLER_PREFETCH_CMD 0x4
247 #define IR3_SAMPLER_BINDLESS_PREFETCH_CMD 0x6
248
/**
 * Stream output for texture sampling pre-dispatches.
 */
struct ir3_sampler_prefetch {
	/* NOTE(review): src is presumably the input location used as the
	 * tex coordinate — confirm against the prefetch emit path.
	 */
	uint8_t src;
	uint8_t samp_id;           /* sampler index (non-bindless) */
	uint8_t tex_id;            /* texture index (non-bindless) */
	uint16_t samp_bindless_id; /* sampler index (bindless) */
	uint16_t tex_bindless_id;  /* texture index (bindless) */
	uint8_t dst;
	uint8_t wrmask;
	uint8_t half_precision;
	uint8_t cmd;               /* IR3_SAMPLER_PREFETCH_CMD or _BINDLESS_ variant */
};
263
264
265 /* Configuration key used to identify a shader variant.. different
266 * shader variants can be used to implement features not supported
267 * in hw (two sided color), binning-pass vertex shader, etc.
268 *
269 * When adding to this struct, please update ir3_shader_variant()'s debug
270 * output.
271 */
struct ir3_shader_key {
	union {
		struct {
			/*
			 * Combined Vertex/Fragment shader parameters:
			 */
			unsigned ucp_enables : 8;

			/* do we need to check {v,f}saturate_{s,t,r}? */
			unsigned has_per_samp : 1;

			/*
			 * Vertex shader variant parameters:
			 */
			unsigned vclamp_color : 1;

			/*
			 * Fragment shader variant parameters:
			 */
			unsigned sample_shading : 1;
			unsigned msaa : 1;
			unsigned color_two_side : 1;
			/* used when shader needs to handle flat varyings (a4xx)
			 * for front/back color inputs to frag shader:
			 */
			unsigned rasterflat : 1;
			unsigned fclamp_color : 1;

			/* Indicates that this is a tessellation pipeline which requires a
			 * whole different kind of vertex shader.  In case of
			 * tessellation, this field also tells us which kind of output
			 * topology the TES uses, which the TCS needs to know.
			 */
#define IR3_TESS_NONE		0
#define IR3_TESS_TRIANGLES	1
#define IR3_TESS_QUADS		2
#define IR3_TESS_ISOLINES	3
			unsigned tessellation : 2;

			unsigned has_gs : 1;

			/* Whether this variant sticks to the "safe" maximum constlen,
			 * which guarantees that the combined stages will never go over
			 * the limit:
			 */
			unsigned safe_constlen : 1;

			/* Whether gl_Layer must be forced to 0 because it isn't written. */
			unsigned layer_zero : 1;
		};
		/* all of the above bitfields viewed as a single word, used by
		 * ir3_shader_key_equal() for the fast-path comparison:
		 */
		uint32_t global;
	};

	/* bitmask of sampler which needs coords clamped for vertex
	 * shader:
	 */
	uint16_t vsaturate_s, vsaturate_t, vsaturate_r;

	/* bitmask of sampler which needs coords clamped for frag
	 * shader:
	 */
	uint16_t fsaturate_s, fsaturate_t, fsaturate_r;

	/* bitmask of ms shifts */
	uint32_t vsamples, fsamples;

	/* bitmask of samplers which need astc srgb workaround: */
	uint16_t vastc_srgb, fastc_srgb;
};
341
342 static inline unsigned
343 ir3_tess_mode(unsigned gl_tess_mode)
344 {
345 switch (gl_tess_mode) {
346 case GL_ISOLINES:
347 return IR3_TESS_ISOLINES;
348 case GL_TRIANGLES:
349 return IR3_TESS_TRIANGLES;
350 case GL_QUADS:
351 return IR3_TESS_QUADS;
352 default:
353 unreachable("bad tessmode");
354 }
355 }
356
357 static inline bool
358 ir3_shader_key_equal(const struct ir3_shader_key *a, const struct ir3_shader_key *b)
359 {
360 /* slow-path if we need to check {v,f}saturate_{s,t,r} */
361 if (a->has_per_samp || b->has_per_samp)
362 return memcmp(a, b, sizeof(struct ir3_shader_key)) == 0;
363 return a->global == b->global;
364 }
365
366 /* will the two keys produce different lowering for a fragment shader? */
367 static inline bool
368 ir3_shader_key_changes_fs(struct ir3_shader_key *key, struct ir3_shader_key *last_key)
369 {
370 if (last_key->has_per_samp || key->has_per_samp) {
371 if ((last_key->fsaturate_s != key->fsaturate_s) ||
372 (last_key->fsaturate_t != key->fsaturate_t) ||
373 (last_key->fsaturate_r != key->fsaturate_r) ||
374 (last_key->fsamples != key->fsamples) ||
375 (last_key->fastc_srgb != key->fastc_srgb))
376 return true;
377 }
378
379 if (last_key->fclamp_color != key->fclamp_color)
380 return true;
381
382 if (last_key->color_two_side != key->color_two_side)
383 return true;
384
385 if (last_key->rasterflat != key->rasterflat)
386 return true;
387
388 if (last_key->layer_zero != key->layer_zero)
389 return true;
390
391 if (last_key->ucp_enables != key->ucp_enables)
392 return true;
393
394 if (last_key->safe_constlen != key->safe_constlen)
395 return true;
396
397 return false;
398 }
399
400 /* will the two keys produce different lowering for a vertex shader? */
401 static inline bool
402 ir3_shader_key_changes_vs(struct ir3_shader_key *key, struct ir3_shader_key *last_key)
403 {
404 if (last_key->has_per_samp || key->has_per_samp) {
405 if ((last_key->vsaturate_s != key->vsaturate_s) ||
406 (last_key->vsaturate_t != key->vsaturate_t) ||
407 (last_key->vsaturate_r != key->vsaturate_r) ||
408 (last_key->vsamples != key->vsamples) ||
409 (last_key->vastc_srgb != key->vastc_srgb))
410 return true;
411 }
412
413 if (last_key->vclamp_color != key->vclamp_color)
414 return true;
415
416 if (last_key->ucp_enables != key->ucp_enables)
417 return true;
418
419 if (last_key->safe_constlen != key->safe_constlen)
420 return true;
421
422 return false;
423 }
424
425 /**
426 * On a4xx+a5xx, Images share state with textures and SSBOs:
427 *
428 * + Uses texture (cat5) state/instruction (isam) to read
429 * + Uses SSBO state and instructions (cat6) to write and for atomics
430 *
431 * Starting with a6xx, Images and SSBOs are basically the same thing,
432 * with texture state and isam also used for SSBO reads.
433 *
434 * On top of that, gallium makes the SSBO (shader_buffers) state semi
435 * sparse, with the first half of the state space used for atomic
436 * counters lowered to atomic buffers. We could ignore this, but I
437 * don't think we could *really* handle the case of a single shader
438 * that used the max # of textures + images + SSBOs. And once we are
439 * offsetting images by num_ssbos (or visa versa) to map them into
440 * the same hardware state, the hardware state has become coupled to
441 * the shader state, so at this point we might as well just use a
442 * mapping table to remap things from image/SSBO idx to hw idx.
443 *
444 * To make things less (more?) confusing, for the hw "SSBO" state
445 * (since it is really both SSBO and Image) I'll use the name "IBO"
446 */
struct ir3_ibo_mapping {
#define IBO_INVALID 0xff   /* marks an unused slot in the maps below */
	/* Maps logical SSBO state to hw tex state: */
	uint8_t ssbo_to_tex[IR3_MAX_SHADER_BUFFERS];

	/* Maps logical Image state to hw tex state: */
	uint8_t image_to_tex[IR3_MAX_SHADER_IMAGES];

	/* Maps hw state back to logical SSBO or Image state:
	 *
	 * note IBO_SSBO ORd into values to indicate that the
	 * hw slot is used for SSBO state vs Image state.
	 */
#define IBO_SSBO    0x80
	uint8_t tex_to_image[32];

	uint8_t num_tex;    /* including real textures */
	uint8_t tex_base;   /* the number of real textures, ie. image/ssbo start here */
};
466
467 /* Represents half register in regid */
468 #define HALF_REG_ID 0x100
469
470 /**
471 * Shader variant which contains the actual hw shader instructions,
472 * and necessary info for shader state setup.
473 */
struct ir3_shader_variant {
	struct fd_bo *bo;

	/* variant id (for debug) */
	uint32_t id;

	struct ir3_shader_key key;

	/* vertex shaders can have an extra version for hwbinning pass,
	 * which is pointed to by so->binning:
	 */
	bool binning_pass;
	/* effectively a union: a binning variant points back at the draw-pass
	 * variant via nonbinning, and vice versa via binning:
	 */
//	union {
		struct ir3_shader_variant *binning;
		struct ir3_shader_variant *nonbinning;
//	};

	struct ir3 *ir;     /* freed after assembling machine instructions */

	/* shader variants form a linked list: */
	struct ir3_shader_variant *next;

	/* replicated here to avoid passing extra ptrs everywhere: */
	gl_shader_stage type;
	struct ir3_shader *shader;

	/*
	 * Below here is serialized when written to disk cache:
	 */

	/* The actual binary shader instructions, size given by info.sizedwords: */
	uint32_t *bin;

	struct ir3_const_state *const_state;

	/*
	 * The following macros are used by the shader disk cache save/
	 * restore paths to serialize/deserialize the variant.  Any
	 * pointers that require special handling in store_variant()
	 * and retrieve_variant() should go above here.
	 */
#define VARIANT_CACHE_START    offsetof(struct ir3_shader_variant, info)
#define VARIANT_CACHE_PTR(v)   (((char *)v) + VARIANT_CACHE_START)
#define VARIANT_CACHE_SIZE     (sizeof(struct ir3_shader_variant) - VARIANT_CACHE_START)

	struct ir3_info info;

	/* Levels of nesting of flow control:
	 */
	unsigned branchstack;

	/* NOTE(review): max_sun presumably "max sethi-ullman number"
	 * (register-pressure estimate) — confirm against the scheduler.
	 */
	unsigned max_sun;
	unsigned loops;

	/* the instructions length is in units of instruction groups
	 * (4 instructions for a3xx, 16 instructions for a4xx.. each
	 * instruction is 2 dwords):
	 */
	unsigned instrlen;

	/* the constants length is in units of vec4's, and is the sum of
	 * the uniforms and the built-in compiler constants
	 */
	unsigned constlen;

	/* About Linkage:
	 *   + Let the frag shader determine the position/compmask for the
	 *     varyings, since it is the place where we know if the varying
	 *     is actually used, and if so, which components are used.  So
	 *     what the hw calls "outloc" is taken from the "inloc" of the
	 *     frag shader.
	 *   + From the vert shader, we only need the output regid
	 */

	bool frag_face, color0_mrt;
	uint8_t fragcoord_compmask;

	/* NOTE: for input/outputs, slot is:
	 *   gl_vert_attrib  - for VS inputs
	 *   gl_varying_slot - for VS output / FS input
	 *   gl_frag_result  - for FS output
	 */

	/* varyings/outputs: */
	unsigned outputs_count;
	struct {
		uint8_t slot;
		uint8_t regid;
		bool    half : 1;
	} outputs[32 + 2];  /* +POSITION +PSIZE */
	bool writes_pos, writes_smask, writes_psize, writes_stencilref;

	/* Size in dwords of all outputs for VS, size of entire patch for HS. */
	uint32_t output_size;

	/* Map from driver_location to byte offset in per-primitive storage */
	unsigned output_loc[32];

	/* attributes (VS) / varyings (FS):
	 * Note that sysval's should come *after* normal inputs.
	 */
	unsigned inputs_count;
	struct {
		uint8_t slot;
		uint8_t regid;
		uint8_t compmask;
		/* location of input (ie. offset passed to bary.f, etc).  This
		 * matches the SP_VS_VPC_DST_REG.OUTLOCn value (a3xx and a4xx
		 * have the OUTLOCn value offset by 8, presumably to account
		 * for gl_Position/gl_PointSize)
		 */
		uint8_t inloc;
		/* vertex shader specific: */
		bool    sysval     : 1;   /* slot is a gl_system_value */
		/* fragment shader specific: */
		bool    bary       : 1;   /* fetched varying (vs one loaded into reg) */
		bool    rasterflat : 1;   /* special handling for emit->rasterflat */
		bool    half       : 1;
		bool    flat       : 1;
	} inputs[32 + 2];  /* +POSITION +FACE */

	/* sum of input components (scalar).  For frag shaders, it only counts
	 * the varying inputs:
	 */
	unsigned total_in;

	/* For frag shaders, the total number of inputs (not scalar,
	 * ie. SP_VS_PARAM_REG.TOTALVSOUTVAR)
	 */
	unsigned varying_in;

	/* Remapping table to map Image and SSBO to hw state: */
	struct ir3_ibo_mapping image_mapping;

	/* number of samplers/textures (which are currently 1:1): */
	int num_samp;

	/* is there an implicit sampler to read framebuffer (FS only).. if
	 * so the sampler-idx is 'num_samp - 1' (ie. it is appended after
	 * the last "real" texture)
	 */
	bool fb_read;

	/* do we have one or more SSBO instructions: */
	bool has_ssbo;

	/* Which bindless resources are used, for filling out sp_xs_config */
	bool bindless_tex;
	bool bindless_samp;
	bool bindless_ibo;
	bool bindless_ubo;

	/* do we need derivatives: */
	bool need_pixlod;

	bool need_fine_derivatives;

	/* do we have image write, etc (which prevents early-z): */
	bool no_earlyz;

	/* do we have kill, which also prevents early-z, but not necessarily
	 * early-lrz (as long as lrz-write is disabled, which must be handled
	 * outside of ir3.  Unlike other no_earlyz cases, kill doesn't have
	 * side effects that prevent early-lrz discard.
	 */
	bool has_kill;

	bool per_samp;

	/* Are we using split or merged register file? */
	bool mergedregs;

	/* for astc srgb workaround, the number/base of additional
	 * alpha tex states we need, and index of original tex states
	 */
	struct {
		unsigned base, count;
		unsigned orig_idx[16];
	} astc_srgb;

	/* texture sampler pre-dispatches */
	uint32_t num_sampler_prefetch;
	struct ir3_sampler_prefetch sampler_prefetch[IR3_MAX_SAMPLER_PREFETCH];
};
658
659 static inline const char *
660 ir3_shader_stage(struct ir3_shader_variant *v)
661 {
662 switch (v->type) {
663 case MESA_SHADER_VERTEX: return v->binning_pass ? "BVERT" : "VERT";
664 case MESA_SHADER_TESS_CTRL: return "TCS";
665 case MESA_SHADER_TESS_EVAL: return "TES";
666 case MESA_SHADER_GEOMETRY: return "GEOM";
667 case MESA_SHADER_FRAGMENT: return "FRAG";
668 case MESA_SHADER_COMPUTE: return "CL";
669 default:
670 unreachable("invalid type");
671 return NULL;
672 }
673 }
674
675 /* Currently we do not do binning for tess. And for GS there is no
676 * cross-stage VS+GS optimization, so the full VS+GS is used in
677 * the binning pass.
678 */
679 static inline bool
680 ir3_has_binning_vs(const struct ir3_shader_key *key)
681 {
682 if (key->tessellation || key->has_gs)
683 return false;
684 return true;
685 }
686
687 /**
688 * Represents a shader at the API level, before state-specific variants are
689 * generated.
690 */
struct ir3_shader {
	gl_shader_stage type;

	/* shader id (for debug): */
	uint32_t id;
	uint32_t variant_count;

	/* Set by freedreno after shader_state_create, so we can emit debug info
	 * when recompiling a shader at draw time.
	 */
	bool initial_variants_done;

	struct ir3_compiler *compiler;

	unsigned num_reserved_user_consts;

	bool nir_finalized;
	struct nir_shader *nir;
	struct ir3_stream_output_info stream_output;

	/* linked list of compiled variants (see variants_lock): */
	struct ir3_shader_variant *variants;
	mtx_t variants_lock;

	cache_key cache_key;     /* shader disk-cache key */

	/* Bitmask of bits of the shader key used by this shader.  Used to avoid
	 * recompiles for GL NOS that doesn't actually apply to the shader.
	 */
	struct ir3_shader_key key_mask;
};
721
722 /**
723 * In order to use the same cmdstream, in particular constlen setup and const
724 * emit, for both binning and draw pass (a6xx+), the binning pass re-uses it's
725 * corresponding draw pass shaders const_state.
726 */
727 static inline struct ir3_const_state *
728 ir3_const_state(const struct ir3_shader_variant *v)
729 {
730 if (v->binning_pass)
731 return v->nonbinning->const_state;
732 return v->const_state;
733 }
734
735 /* Given a variant, calculate the maximum constlen it can have.
736 */
737
738 static inline unsigned
739 ir3_max_const(const struct ir3_shader_variant *v)
740 {
741 const struct ir3_compiler *compiler = v->shader->compiler;
742
743 if (v->shader->type == MESA_SHADER_COMPUTE) {
744 return compiler->max_const_compute;
745 } else if (v->key.safe_constlen) {
746 return compiler->max_const_safe;
747 } else if (v->shader->type == MESA_SHADER_FRAGMENT) {
748 return compiler->max_const_frag;
749 } else {
750 return compiler->max_const_geom;
751 }
752 }
753
/*
 * API entrypoints (implemented in ir3_shader.c):
 */
void * ir3_shader_assemble(struct ir3_shader_variant *v);
/* NOTE(review): *created presumably set to true when a new variant is
 * compiled (vs. a cached one returned) — confirm at definition.
 */
struct ir3_shader_variant * ir3_shader_get_variant(struct ir3_shader *shader,
		const struct ir3_shader_key *key, bool binning_pass, bool *created);
struct ir3_shader * ir3_shader_from_nir(struct ir3_compiler *compiler, nir_shader *nir,
		unsigned reserved_user_consts, struct ir3_stream_output_info *stream_output);
uint32_t ir3_trim_constlen(struct ir3_shader_variant **variants,
		const struct ir3_compiler *compiler);
void ir3_shader_destroy(struct ir3_shader *shader);
void ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin, FILE *out);
uint64_t ir3_shader_outputs(const struct ir3_shader *so);

/* NIR type-size callback (counts vec4 slots), see ir3_glsl_type_size(): */
int
ir3_glsl_type_size(const struct glsl_type *type, bool bindless);
767
768 /*
769 * Helper/util:
770 */
771
772 /* clears shader-key flags which don't apply to the given shader.
773 */
774 static inline void
775 ir3_key_clear_unused(struct ir3_shader_key *key, struct ir3_shader *shader)
776 {
777 uint32_t *key_bits = (uint32_t *)key;
778 uint32_t *key_mask = (uint32_t *)&shader->key_mask;
779 STATIC_ASSERT(sizeof(*key) % 4 == 0);
780 for (int i = 0; i < sizeof(*key) >> 2; i++)
781 key_bits[i] &= key_mask[i];
782 }
783
784 static inline int
785 ir3_find_output(const struct ir3_shader_variant *so, gl_varying_slot slot)
786 {
787 int j;
788
789 for (j = 0; j < so->outputs_count; j++)
790 if (so->outputs[j].slot == slot)
791 return j;
792
793 /* it seems optional to have a OUT.BCOLOR[n] for each OUT.COLOR[n]
794 * in the vertex shader.. but the fragment shader doesn't know this
795 * so it will always have both IN.COLOR[n] and IN.BCOLOR[n]. So
796 * at link time if there is no matching OUT.BCOLOR[n], we must map
797 * OUT.COLOR[n] to IN.BCOLOR[n]. And visa versa if there is only
798 * a OUT.BCOLOR[n] but no matching OUT.COLOR[n]
799 */
800 if (slot == VARYING_SLOT_BFC0) {
801 slot = VARYING_SLOT_COL0;
802 } else if (slot == VARYING_SLOT_BFC1) {
803 slot = VARYING_SLOT_COL1;
804 } else if (slot == VARYING_SLOT_COL0) {
805 slot = VARYING_SLOT_BFC0;
806 } else if (slot == VARYING_SLOT_COL1) {
807 slot = VARYING_SLOT_BFC1;
808 } else {
809 return -1;
810 }
811
812 for (j = 0; j < so->outputs_count; j++)
813 if (so->outputs[j].slot == slot)
814 return j;
815
816 debug_assert(0);
817
818 return -1;
819 }
820
821 static inline int
822 ir3_next_varying(const struct ir3_shader_variant *so, int i)
823 {
824 while (++i < so->inputs_count)
825 if (so->inputs[i].compmask && so->inputs[i].bary)
826 break;
827 return i;
828 }
829
struct ir3_shader_linkage {
	/* Maximum location either consumed by the fragment shader or produced by
	 * the last geometry stage, i.e. the size required for each vertex in the
	 * VPC in DWORD's.
	 */
	uint8_t max_loc;

	/* Number of entries in var. */
	uint8_t cnt;

	/* Bitset of locations used, including ones which are only used by the FS.
	 */
	uint32_t varmask[4];

	/* Map from VS output to location. */
	struct {
		uint8_t regid;
		uint8_t compmask;
		uint8_t loc;
	} var[32];

	/* location for fixed-function gl_PrimitiveID passthrough, or 0xff if
	 * unused (initialized by ir3_link_shaders()):
	 */
	uint8_t primid_loc;

	/* location for fixed-function gl_ViewIndex passthrough, or 0xff if
	 * unused (initialized by ir3_link_shaders()):
	 */
	uint8_t viewid_loc;
};
857
858 static inline void
859 ir3_link_add(struct ir3_shader_linkage *l, uint8_t regid_, uint8_t compmask, uint8_t loc)
860 {
861 for (int j = 0; j < util_last_bit(compmask); j++) {
862 uint8_t comploc = loc + j;
863 l->varmask[comploc / 32] |= 1 << (comploc % 32);
864 }
865
866 l->max_loc = MAX2(l->max_loc, loc + util_last_bit(compmask));
867
868 if (regid_ != regid(63, 0)) {
869 int i = l->cnt++;
870 debug_assert(i < ARRAY_SIZE(l->var));
871
872 l->var[i].regid = regid_;
873 l->var[i].compmask = compmask;
874 l->var[i].loc = loc;
875 }
876 }
877
/* Populate the linkage map from the FS's consumed varyings, pairing each
 * with the corresponding VS output register (or a dummy/fixed-function
 * source when the VS doesn't write it).
 */
static inline void
ir3_link_shaders(struct ir3_shader_linkage *l,
		const struct ir3_shader_variant *vs,
		const struct ir3_shader_variant *fs,
		bool pack_vs_out)
{
	/* On older platforms, varmask isn't programmed at all, and it appears
	 * that the hardware generates a mask of used VPC locations using the VS
	 * output map, and hangs if a FS bary instruction references a location
	 * not in the list. This means that we need to have a dummy entry in the
	 * VS out map for things like gl_PointCoord which aren't written by the
	 * VS. Furthermore we can't use r63.x, so just pick a random register to
	 * use if there is no VS output.
	 */
	const unsigned default_regid = pack_vs_out ? regid(63, 0) : regid(0, 0);
	int j = -1, k;

	/* 0xff == passthrough location not used: */
	l->primid_loc = 0xff;
	l->viewid_loc = 0xff;

	while (l->cnt < ARRAY_SIZE(l->var)) {
		j = ir3_next_varying(fs, j);

		if (j >= fs->inputs_count)
			break;

		if (fs->inputs[j].inloc >= fs->total_in)
			continue;

		/* find the VS output feeding this FS input, if any: */
		k = ir3_find_output(vs, fs->inputs[j].slot);

		/* gl_PrimitiveID not written by VS: use fixed-function passthrough */
		if (k < 0 && fs->inputs[j].slot == VARYING_SLOT_PRIMITIVE_ID) {
			l->primid_loc = fs->inputs[j].inloc;
		}

		if (fs->inputs[j].slot == VARYING_SLOT_VIEW_INDEX) {
			assert(k < 0);
			l->viewid_loc = fs->inputs[j].inloc;
		}

		ir3_link_add(l, k >= 0 ? vs->outputs[k].regid : default_regid,
			fs->inputs[j].compmask, fs->inputs[j].inloc);
	}
}
922
923 static inline uint32_t
924 ir3_find_output_regid(const struct ir3_shader_variant *so, unsigned slot)
925 {
926 int j;
927 for (j = 0; j < so->outputs_count; j++)
928 if (so->outputs[j].slot == slot) {
929 uint32_t regid = so->outputs[j].regid;
930 if (so->outputs[j].half)
931 regid |= HALF_REG_ID;
932 return regid;
933 }
934 return regid(63, 0);
935 }
936
937 #define VARYING_SLOT_GS_HEADER_IR3 (VARYING_SLOT_MAX + 0)
938 #define VARYING_SLOT_GS_VERTEX_FLAGS_IR3 (VARYING_SLOT_MAX + 1)
939 #define VARYING_SLOT_TCS_HEADER_IR3 (VARYING_SLOT_MAX + 2)
940
941
942 static inline uint32_t
943 ir3_find_sysval_regid(const struct ir3_shader_variant *so, unsigned slot)
944 {
945 int j;
946 for (j = 0; j < so->inputs_count; j++)
947 if (so->inputs[j].sysval && (so->inputs[j].slot == slot))
948 return so->inputs[j].regid;
949 return regid(63, 0);
950 }
951
952 /* calculate register footprint in terms of half-regs (ie. one full
953 * reg counts as two half-regs).
954 */
955 static inline uint32_t
956 ir3_shader_halfregs(const struct ir3_shader_variant *v)
957 {
958 return (2 * (v->info.max_reg + 1)) + (v->info.max_half_reg + 1);
959 }
960
961 static inline uint32_t
962 ir3_shader_nibo(const struct ir3_shader_variant *v)
963 {
964 /* The dummy variant used in binning mode won't have an actual shader. */
965 if (!v->shader)
966 return 0;
967
968 return v->shader->nir->info.num_ssbos + v->shader->nir->info.num_images;
969 }
970
971 #endif /* IR3_SHADER_H_ */