freedreno/ir3: split ubo analysis/lowering passes
[mesa.git] / src / freedreno / ir3 / ir3_shader.h
1 /*
2 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robclark@freedesktop.org>
25 */
26
27 #ifndef IR3_SHADER_H_
28 #define IR3_SHADER_H_
29
30 #include <stdio.h>
31
32 #include "c11/threads.h"
33 #include "compiler/shader_enums.h"
34 #include "compiler/nir/nir.h"
35 #include "util/bitscan.h"
36
37 #include "ir3.h"
38
39 struct glsl_type;
40
41 /* driver param indices: */
42 enum ir3_driver_param {
43 /* compute shader driver params: */
44 IR3_DP_NUM_WORK_GROUPS_X = 0,
45 IR3_DP_NUM_WORK_GROUPS_Y = 1,
46 IR3_DP_NUM_WORK_GROUPS_Z = 2,
/* note: index 3 is intentionally skipped so the local-group-size
 * params start on the next vec4 boundary:
 */
47 IR3_DP_LOCAL_GROUP_SIZE_X = 4,
48 IR3_DP_LOCAL_GROUP_SIZE_Y = 5,
49 IR3_DP_LOCAL_GROUP_SIZE_Z = 6,
50 /* NOTE: gl_NumWorkGroups should be vec4 aligned because
51 * glDispatchComputeIndirect() needs to load these from
52 * the info->indirect buffer. Keep that in mind when/if
53 * adding any additional CS driver params.
54 */
55 IR3_DP_CS_COUNT = 8, /* must be aligned to vec4 */
56
57 /* vertex shader driver params: */
58 IR3_DP_VTXID_BASE = 0,
59 IR3_DP_VTXCNT_MAX = 1,
60 IR3_DP_INSTID_BASE = 2,
61 /* user-clip-plane components, up to 8x vec4's: */
62 IR3_DP_UCP0_X = 4,
63 /* .... */
64 IR3_DP_UCP7_W = 35,
65 IR3_DP_VS_COUNT = 36 /* must be aligned to vec4 */
66 };
67
/* Global per-shader resource-count limits used to size the tables below: */
68 #define IR3_MAX_SHADER_BUFFERS 32
69 #define IR3_MAX_SHADER_IMAGES 32
70 #define IR3_MAX_SO_BUFFERS 4
71 #define IR3_MAX_SO_STREAMS 4
72 #define IR3_MAX_SO_OUTPUTS 64
73 #define IR3_MAX_UBO_PUSH_RANGES 32
74
75 /**
76 * Description of a lowered UBO.
77 */
78 struct ir3_ubo_info {
79 uint32_t block; /* Which constant block */
80 uint16_t bindless_base; /* For bindless, which base register is used */
81 bool bindless; /* whether this UBO is accessed via the bindless path */
82 };
83
84 /**
85 * Description of a range of a lowered UBO access.
86 *
87 * Note that a single UBO may be split into multiple disjoint lowered
88 * ranges; drivers must not assume there is only one range per UBO.
89 */
90 struct ir3_ubo_range {
91 struct ir3_ubo_info ubo;
92 uint32_t offset; /* start offset to push in the const register file */
93 uint32_t start, end; /* range of block that's actually used */
94 };
95
/**
 * Result of the UBO-range analysis pass: the set of UBO ranges that get
 * lowered to push constants (see ir3_const_state::ubo_state).
 */
96 struct ir3_ubo_analysis_state {
97 struct ir3_ubo_range range[IR3_MAX_UBO_PUSH_RANGES];
98 uint32_t num_enabled; /* number of valid entries in range[] */
99 uint32_t size;
100 uint32_t cmdstream_size; /* for per-gen backend to stash required cmdstream size */
101 };
102
103 /**
104 * Describes the layout of shader consts. This includes:
105 * + User consts + driver lowered UBO ranges
106 * + SSBO sizes
107 * + Image sizes/dimensions
108 * + Driver params (ie. IR3_DP_*)
109 * + TFBO addresses (for generations that do not have hardware streamout)
110 * + Lowered immediates
111 *
112 * For consts needed to pass internal values to shader which may or may not
113 * be required, rather than allocating worst-case const space, we scan the
114 * shader and allocate consts as-needed:
115 *
116 * + SSBO sizes: only needed if shader has a get_buffer_size intrinsic
117 * for a given SSBO
118 *
119 * + Image dimensions: needed to calculate pixel offset, but only for
120 * images that have a image_store intrinsic
121 *
122 * Layout of constant registers, each section aligned to vec4. Note
123 * that pointer size (ubo, etc) changes depending on generation.
124 *
125 * user consts
126 * UBO addresses
127 * SSBO sizes
128 * if (vertex shader) {
129 * driver params (IR3_DP_*)
130 * if (stream_output.num_outputs > 0)
131 * stream-out addresses
132 * } else if (compute_shader) {
133 * driver params (IR3_DP_*)
134 * }
135 * immediates
136 *
137 * Immediates go last mostly because they are inserted in the CP pass
138 * after the nir -> ir3 frontend.
139 *
140 * Note UBO size in bytes should be aligned to vec4
141 */
142 struct ir3_const_state {
143 unsigned num_ubos;
144 unsigned num_driver_params; /* scalar */
145
/* Start offsets of each const section described in the layout comment
 * above (presumably in vec4 units, since each section is vec4 aligned
 * -- confirm against the const-emit code):
 */
146 struct {
147 /* user const start at zero */
148 unsigned ubo;
149 /* NOTE that a3xx might need a section for SSBO addresses too */
150 unsigned ssbo_sizes;
151 unsigned image_dims;
152 unsigned driver_param;
153 unsigned tfbo;
154 unsigned primitive_param;
155 unsigned primitive_map;
156 unsigned immediate;
157 } offsets;
158
159 struct {
160 uint32_t mask; /* bitmask of SSBOs that have get_buffer_size */
161 uint32_t count; /* number of consts allocated */
162 /* one const allocated per SSBO which has get_buffer_size,
163 * ssbo_sizes.off[ssbo_id] is offset from start of ssbo_sizes
164 * consts:
165 */
166 uint32_t off[IR3_MAX_SHADER_BUFFERS];
167 } ssbo_size;
168
169 struct {
170 uint32_t mask; /* bitmask of images that have image_store */
171 uint32_t count; /* number of consts allocated */
172 /* three const allocated per image which has image_store:
173 * + cpp (bytes per pixel)
174 * + pitch (y pitch)
175 * + array_pitch (z pitch)
176 */
177 uint32_t off[IR3_MAX_SHADER_IMAGES];
178 } image_dims;
179
/* Lowered immediate values; each entry holds up to a vec4 of dwords: */
180 unsigned immediate_idx;
181 unsigned immediates_count;
182 unsigned immediates_size;
183 struct {
184 uint32_t val[4];
185 } *immediates;
186
187 /* State of ubo access lowered to push consts: */
188 struct ir3_ubo_analysis_state ubo_state;
189 };
190
191 /**
192 * A single output for vertex transform feedback.
193 */
194 struct ir3_stream_output {
195 unsigned register_index:6; /**< 0 to 63 (OUT index) */
196 unsigned start_component:2; /**< 0 to 3 */
197 unsigned num_components:3; /**< 1 to 4 */
198 unsigned output_buffer:3; /**< 0 to PIPE_MAX_SO_BUFFERS */
199 unsigned dst_offset:16; /**< offset into the buffer in dwords */
200 unsigned stream:2; /**< 0 to 3 */
201 };
202
203 /**
204 * Stream output for vertex transform feedback.
205 */
206 struct ir3_stream_output_info {
/** number of valid entries in output[]: */
207 unsigned num_outputs;
208 /** stride for an entire vertex for each buffer in dwords */
209 uint16_t stride[IR3_MAX_SO_BUFFERS];
210
211 /**
212 * Array of stream outputs, in the order they are to be written in.
213 * Selected components are tightly packed into the output buffer.
214 */
215 struct ir3_stream_output output[IR3_MAX_SO_OUTPUTS];
216 };
217
218
219 /**
220 * Starting from a4xx, HW supports pre-dispatching texture sampling
221 * instructions prior to scheduling a shader stage, when the
222 * coordinate maps exactly to an output of the previous stage.
223 */
224
225 /**
226 * There is a limit on the number of pre-dispatches allowed for any
227 * given stage.
228 */
229 #define IR3_MAX_SAMPLER_PREFETCH 4
230
231 /**
232 * This is the output stream value for 'cmd', as used by blob. It may
233 * encode the return type (in 3 bits) but it hasn't been verified yet.
234 */
235 #define IR3_SAMPLER_PREFETCH_CMD 0x4
236 #define IR3_SAMPLER_BINDLESS_PREFETCH_CMD 0x6
237
238 /**
239 * Stream output for texture sampling pre-dispatches.
240 */
241 struct ir3_sampler_prefetch {
242 uint8_t src; /* NOTE(review): presumably the prior-stage output consumed as coord -- confirm */
243 uint8_t samp_id;
244 uint8_t tex_id;
245 uint16_t samp_bindless_id; /* used instead of samp_id for bindless */
246 uint16_t tex_bindless_id; /* used instead of tex_id for bindless */
247 uint8_t dst;
248 uint8_t wrmask;
249 uint8_t half_precision;
250 uint8_t cmd; /* IR3_SAMPLER_PREFETCH_CMD or IR3_SAMPLER_BINDLESS_PREFETCH_CMD */
251 };
252
253
254 /* Configuration key used to identify a shader variant.. different
255 * shader variants can be used to implement features not supported
256 * in hw (two sided color), binning-pass vertex shader, etc.
257 *
258 * When adding to this struct, please update ir3_shader_variant()'s debug
259 * output.
260 */
261 struct ir3_shader_key {
262 union {
263 struct {
264 /*
265 * Combined Vertex/Fragment shader parameters:
266 */
267 unsigned ucp_enables : 8;
268
269 /* do we need to check {v,f}saturate_{s,t,r}? */
270 unsigned has_per_samp : 1;
271
272 /*
273 * Vertex shader variant parameters:
274 */
275 unsigned vclamp_color : 1;
276
277 /*
278 * Fragment shader variant parameters:
279 */
280 unsigned sample_shading : 1;
281 unsigned msaa : 1;
282 unsigned color_two_side : 1;
283 /* used when shader needs to handle flat varyings (a4xx)
284 * for front/back color inputs to frag shader:
285 */
286 unsigned rasterflat : 1;
287 unsigned fclamp_color : 1;
288
289 /* Indicates that this is a tessellation pipeline which requires a
290 * whole different kind of vertex shader. In case of
291 * tessellation, this field also tells us which kind of output
292 * topology the TES uses, which the TCS needs to know.
293 */
294 #define IR3_TESS_NONE 0
295 #define IR3_TESS_TRIANGLES 1
296 #define IR3_TESS_QUADS 2
297 #define IR3_TESS_ISOLINES 3
298 unsigned tessellation : 2;
299
300 unsigned has_gs : 1;
301 };
/* overlays all the bitfields above for a fast whole-word compare
 * (see ir3_shader_key_equal()):
 */
302 uint32_t global;
303 };
304
305 /* bitmask of sampler which needs coords clamped for vertex
306 * shader:
307 */
308 uint16_t vsaturate_s, vsaturate_t, vsaturate_r;
309
310 /* bitmask of sampler which needs coords clamped for frag
311 * shader:
312 */
313 uint16_t fsaturate_s, fsaturate_t, fsaturate_r;
314
315 /* bitmask of ms shifts */
316 uint32_t vsamples, fsamples;
317
318 /* bitmask of samplers which need astc srgb workaround: */
319 uint16_t vastc_srgb, fastc_srgb;
320 };
321
322 static inline unsigned
323 ir3_tess_mode(unsigned gl_tess_mode)
324 {
325 switch (gl_tess_mode) {
326 case GL_ISOLINES:
327 return IR3_TESS_ISOLINES;
328 case GL_TRIANGLES:
329 return IR3_TESS_TRIANGLES;
330 case GL_QUADS:
331 return IR3_TESS_QUADS;
332 default:
333 unreachable("bad tessmode");
334 }
335 }
336
337 static inline bool
338 ir3_shader_key_equal(const struct ir3_shader_key *a, const struct ir3_shader_key *b)
339 {
340 /* slow-path if we need to check {v,f}saturate_{s,t,r} */
341 if (a->has_per_samp || b->has_per_samp)
342 return memcmp(a, b, sizeof(struct ir3_shader_key)) == 0;
343 return a->global == b->global;
344 }
345
346 /* will the two keys produce different lowering for a fragment shader? */
347 static inline bool
348 ir3_shader_key_changes_fs(struct ir3_shader_key *key, struct ir3_shader_key *last_key)
349 {
350 if (last_key->has_per_samp || key->has_per_samp) {
351 if ((last_key->fsaturate_s != key->fsaturate_s) ||
352 (last_key->fsaturate_t != key->fsaturate_t) ||
353 (last_key->fsaturate_r != key->fsaturate_r) ||
354 (last_key->fsamples != key->fsamples) ||
355 (last_key->fastc_srgb != key->fastc_srgb))
356 return true;
357 }
358
359 if (last_key->fclamp_color != key->fclamp_color)
360 return true;
361
362 if (last_key->color_two_side != key->color_two_side)
363 return true;
364
365 if (last_key->rasterflat != key->rasterflat)
366 return true;
367
368 if (last_key->ucp_enables != key->ucp_enables)
369 return true;
370
371 return false;
372 }
373
374 /* will the two keys produce different lowering for a vertex shader? */
375 static inline bool
376 ir3_shader_key_changes_vs(struct ir3_shader_key *key, struct ir3_shader_key *last_key)
377 {
378 if (last_key->has_per_samp || key->has_per_samp) {
379 if ((last_key->vsaturate_s != key->vsaturate_s) ||
380 (last_key->vsaturate_t != key->vsaturate_t) ||
381 (last_key->vsaturate_r != key->vsaturate_r) ||
382 (last_key->vsamples != key->vsamples) ||
383 (last_key->vastc_srgb != key->vastc_srgb))
384 return true;
385 }
386
387 if (last_key->vclamp_color != key->vclamp_color)
388 return true;
389
390 if (last_key->ucp_enables != key->ucp_enables)
391 return true;
392
393 return false;
394 }
395
396 /**
397 * On a4xx+a5xx, Images share state with textures and SSBOs:
398 *
399 * + Uses texture (cat5) state/instruction (isam) to read
400 * + Uses SSBO state and instructions (cat6) to write and for atomics
401 *
402 * Starting with a6xx, Images and SSBOs are basically the same thing,
403 * with texture state and isam also used for SSBO reads.
404 *
405 * On top of that, gallium makes the SSBO (shader_buffers) state semi
406 * sparse, with the first half of the state space used for atomic
407 * counters lowered to atomic buffers. We could ignore this, but I
408 * don't think we could *really* handle the case of a single shader
409 * that used the max # of textures + images + SSBOs. And once we are
410 * offsetting images by num_ssbos (or vice versa) to map them into
411 * the same hardware state, the hardware state has become coupled to
412 * the shader state, so at this point we might as well just use a
413 * mapping table to remap things from image/SSBO idx to hw idx.
414 *
415 * To make things less (more?) confusing, for the hw "SSBO" state
416 * (since it is really both SSBO and Image) I'll use the name "IBO"
417 */
418 struct ir3_ibo_mapping {
/* sentinel for "no mapping" in the tables below: */
419 #define IBO_INVALID 0xff
420 /* Maps logical SSBO state to hw tex state: */
421 uint8_t ssbo_to_tex[IR3_MAX_SHADER_BUFFERS];
422
423 /* Maps logical Image state to hw tex state: */
424 uint8_t image_to_tex[IR3_MAX_SHADER_IMAGES];
425
426 /* Maps hw state back to logical SSBO or Image state:
427 *
428 * note IBO_SSBO ORd into values to indicate that the
429 * hw slot is used for SSBO state vs Image state.
430 */
431 #define IBO_SSBO 0x80
432 uint8_t tex_to_image[32];
433
434 uint8_t num_tex; /* including real textures */
435 uint8_t tex_base; /* the number of real textures, ie. image/ssbo start here */
436 };
437
438 /* Represents half register in regid */
439 #define HALF_REG_ID 0x100
440
/**
 * A single compiled variant of a shader, produced for a specific
 * ir3_shader_key.  Variants of the same ir3_shader form a linked list
 * (see 'next' below).
 */
441 struct ir3_shader_variant {
/* NOTE(review): presumably the GPU buffer object holding the assembled
 * shader binary -- confirm against the driver's assemble/upload path.
 */
442 struct fd_bo *bo;
443
444 /* variant id (for debug) */
445 uint32_t id;
446
/* the key this variant was compiled for: */
447 struct ir3_shader_key key;
448
449 /* vertex shaders can have an extra version for hwbinning pass,
450 * which is pointed to by so->binning:
451 */
452 bool binning_pass;
453 //	union {
454 struct ir3_shader_variant *binning;
455 struct ir3_shader_variant *nonbinning;
456 //	};
457
458 struct ir3_info info;
459 struct ir3 *ir;
460
461 /* The actual binary shader instructions, size given by info.sizedwords: */
462 uint32_t *bin;
463
464 /* Levels of nesting of flow control:
465 */
466 unsigned branchstack;
467
468 unsigned max_sun;
469 unsigned loops;
470
471 /* the instructions length is in units of instruction groups
472 * (4 instructions for a3xx, 16 instructions for a4xx.. each
473 * instruction is 2 dwords):
474 */
475 unsigned instrlen;
476
477 /* the constants length is in units of vec4's, and is the sum of
478 * the uniforms and the built-in compiler constants
479 */
480 unsigned constlen;
481
/* const layout; for binning variants this is shared with the
 * corresponding draw-pass variant, see ir3_const_state():
 */
482 struct ir3_const_state *const_state;
483
484 /* About Linkage:
485 * + Let the frag shader determine the position/compmask for the
486 * varyings, since it is the place where we know if the varying
487 * is actually used, and if so, which components are used. So
488 * what the hw calls "outloc" is taken from the "inloc" of the
489 * frag shader.
490 * + From the vert shader, we only need the output regid
491 */
492
493 bool frag_face, color0_mrt;
494 uint8_t fragcoord_compmask;
495
496 /* NOTE: for input/outputs, slot is:
497 * gl_vert_attrib - for VS inputs
498 * gl_varying_slot - for VS output / FS input
499 * gl_frag_result - for FS output
500 */
501
502 /* varyings/outputs: */
503 unsigned outputs_count;
504 struct {
505 uint8_t slot;
506 uint8_t regid;
507 bool half : 1;
508 } outputs[32 + 2]; /* +POSITION +PSIZE */
509 bool writes_pos, writes_smask, writes_psize;
510
511 /* Size in dwords of all outputs for VS, size of entire patch for HS. */
512 uint32_t output_size;
513
514 /* Map from driver_location to byte offset in per-primitive storage */
515 unsigned output_loc[32];
516
517 /* attributes (VS) / varyings (FS):
518 * Note that sysval's should come *after* normal inputs.
519 */
520 unsigned inputs_count;
521 struct {
522 uint8_t slot;
523 uint8_t regid;
524 uint8_t compmask;
525 /* location of input (ie. offset passed to bary.f, etc). This
526 * matches the SP_VS_VPC_DST_REG.OUTLOCn value (a3xx and a4xx
527 * have the OUTLOCn value offset by 8, presumably to account
528 * for gl_Position/gl_PointSize)
529 */
530 uint8_t inloc;
531 /* vertex shader specific: */
532 bool sysval : 1; /* slot is a gl_system_value */
533 /* fragment shader specific: */
534 bool bary : 1; /* fetched varying (vs one loaded into reg) */
535 bool rasterflat : 1; /* special handling for emit->rasterflat */
536 bool use_ldlv : 1; /* internal to ir3_compiler_nir */
537 bool half : 1;
538 enum glsl_interp_mode interpolate;
539 } inputs[32 + 2]; /* +POSITION +FACE */
540
541 /* sum of input components (scalar). For frag shaders, it only counts
542 * the varying inputs:
543 */
544 unsigned total_in;
545
546 /* For frag shaders, the total number of inputs (not scalar,
547 * ie. SP_VS_PARAM_REG.TOTALVSOUTVAR)
548 */
549 unsigned varying_in;
550
551 /* Remapping table to map Image and SSBO to hw state: */
552 struct ir3_ibo_mapping image_mapping;
553
554 /* number of samplers/textures (which are currently 1:1): */
555 int num_samp;
556
557 /* is there an implicit sampler to read framebuffer (FS only).. if
558 * so the sampler-idx is 'num_samp - 1' (ie. it is appended after
559 * the last "real" texture)
560 */
561 bool fb_read;
562
563 /* do we have one or more SSBO instructions: */
564 bool has_ssbo;
565
566 /* Which bindless resources are used, for filling out sp_xs_config */
567 bool bindless_tex;
568 bool bindless_samp;
569 bool bindless_ibo;
570 bool bindless_ubo;
571
572 /* do we need derivatives: */
573 bool need_pixlod;
574
575 bool need_fine_derivatives;
576
577 /* do we have image write, etc (which prevents early-z): */
578 bool no_earlyz;
579
580 /* do we have kill, which also prevents early-z, but not necessarily
581 * early-lrz (as long as lrz-write is disabled, which must be handled
582 * outside of ir3. Unlike other no_earlyz cases, kill doesn't have
583 * side effects that prevent early-lrz discard.
584 */
585 bool has_kill;
586
587 bool per_samp;
588
589 /* Are we using split or merged register file? */
590 bool mergedregs;
591
592 /* for astc srgb workaround, the number/base of additional
593 * alpha tex states we need, and index of original tex states
594 */
595 struct {
596 unsigned base, count;
597 unsigned orig_idx[16];
598 } astc_srgb;
599
600 /* shader variants form a linked list: */
601 struct ir3_shader_variant *next;
602
603 /* replicated here to avoid passing extra ptrs everywhere: */
604 gl_shader_stage type;
605 struct ir3_shader *shader;
606
607 /* texture sampler pre-dispatches */
608 uint32_t num_sampler_prefetch;
609 struct ir3_sampler_prefetch sampler_prefetch[IR3_MAX_SAMPLER_PREFETCH];
610 };
611
612 static inline const char *
613 ir3_shader_stage(struct ir3_shader_variant *v)
614 {
615 switch (v->type) {
616 case MESA_SHADER_VERTEX: return v->binning_pass ? "BVERT" : "VERT";
617 case MESA_SHADER_TESS_CTRL: return "TCS";
618 case MESA_SHADER_TESS_EVAL: return "TES";
619 case MESA_SHADER_GEOMETRY: return "GEOM";
620 case MESA_SHADER_FRAGMENT: return "FRAG";
621 case MESA_SHADER_COMPUTE: return "CL";
622 default:
623 unreachable("invalid type");
624 return NULL;
625 }
626 }
627
628
/**
 * A shader as handed to the driver, from which per-key variants are
 * compiled on demand (see ir3_shader_get_variant()).
 */
629 struct ir3_shader {
630 gl_shader_stage type;
631
632 /* shader id (for debug): */
633 uint32_t id;
634 uint32_t variant_count;
635
636 /* Set by freedreno after shader_state_create, so we can emit debug info
637 * when recompiling a shader at draw time.
638 */
639 bool initial_variants_done;
640
641 struct ir3_compiler *compiler;
642
643 unsigned num_reserved_user_consts;
644
645 struct nir_shader *nir;
646 struct ir3_stream_output_info stream_output;
647
/* linked list of compiled variants (chained via ir3_shader_variant::next),
 * guarded by variants_lock:
 */
648 struct ir3_shader_variant *variants;
649 mtx_t variants_lock;
650
651 /* Bitmask of bits of the shader key used by this shader. Used to avoid
652 * recompiles for GL NOS that doesn't actually apply to the shader.
653 */
654 struct ir3_shader_key key_mask;
655 };
656
657 /**
658 * In order to use the same cmdstream, in particular constlen setup and const
659 * emit, for both binning and draw pass (a6xx+), the binning pass re-uses it's
660 * corresponding draw pass shaders const_state.
661 */
662 static inline struct ir3_const_state *
663 ir3_const_state(const struct ir3_shader_variant *v)
664 {
665 if (v->binning_pass)
666 return v->nonbinning->const_state;
667 return v->const_state;
668 }
669
/* assemble the variant's ir into the final binary: */
670 void * ir3_shader_assemble(struct ir3_shader_variant *v);
/* look up (or compile) the variant matching 'key'; '*created' reports
 * whether a new variant had to be compiled:
 */
671 struct ir3_shader_variant * ir3_shader_get_variant(struct ir3_shader *shader,
672 const struct ir3_shader_key *key, bool binning_pass, bool *created);
/* wrap a nir shader as an ir3_shader: */
673 struct ir3_shader * ir3_shader_from_nir(struct ir3_compiler *compiler, nir_shader *nir,
674 unsigned reserved_user_consts, struct ir3_stream_output_info *stream_output);
675 void ir3_shader_destroy(struct ir3_shader *shader);
676 void ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin, FILE *out);
/* bitmask of output slots written by the shader: */
677 uint64_t ir3_shader_outputs(const struct ir3_shader *so);
678
679 int
680 ir3_glsl_type_size(const struct glsl_type *type, bool bindless);
681
682 /*
683 * Helper/util:
684 */
685
686 /* clears shader-key flags which don't apply to the given shader.
687 */
688 static inline void
689 ir3_key_clear_unused(struct ir3_shader_key *key, struct ir3_shader *shader)
690 {
691 uint32_t *key_bits = (uint32_t *)key;
692 uint32_t *key_mask = (uint32_t *)&shader->key_mask;
693 STATIC_ASSERT(sizeof(*key) % 4 == 0);
694 for (int i = 0; i < sizeof(*key) >> 2; i++)
695 key_bits[i] &= key_mask[i];
696 }
697
698 static inline int
699 ir3_find_output(const struct ir3_shader_variant *so, gl_varying_slot slot)
700 {
701 int j;
702
703 for (j = 0; j < so->outputs_count; j++)
704 if (so->outputs[j].slot == slot)
705 return j;
706
707 /* it seems optional to have a OUT.BCOLOR[n] for each OUT.COLOR[n]
708 * in the vertex shader.. but the fragment shader doesn't know this
709 * so it will always have both IN.COLOR[n] and IN.BCOLOR[n]. So
710 * at link time if there is no matching OUT.BCOLOR[n], we must map
711 * OUT.COLOR[n] to IN.BCOLOR[n]. And visa versa if there is only
712 * a OUT.BCOLOR[n] but no matching OUT.COLOR[n]
713 */
714 if (slot == VARYING_SLOT_BFC0) {
715 slot = VARYING_SLOT_COL0;
716 } else if (slot == VARYING_SLOT_BFC1) {
717 slot = VARYING_SLOT_COL1;
718 } else if (slot == VARYING_SLOT_COL0) {
719 slot = VARYING_SLOT_BFC0;
720 } else if (slot == VARYING_SLOT_COL1) {
721 slot = VARYING_SLOT_BFC1;
722 } else {
723 return -1;
724 }
725
726 for (j = 0; j < so->outputs_count; j++)
727 if (so->outputs[j].slot == slot)
728 return j;
729
730 debug_assert(0);
731
732 return -1;
733 }
734
735 static inline int
736 ir3_next_varying(const struct ir3_shader_variant *so, int i)
737 {
738 while (++i < so->inputs_count)
739 if (so->inputs[i].compmask && so->inputs[i].bary)
740 break;
741 return i;
742 }
743
/**
 * Accumulated VS->FS linkage state, built up via ir3_link_add() /
 * ir3_link_shaders().
 */
744 struct ir3_shader_linkage {
745 /* Maximum location either consumed by the fragment shader or produced by
746 * the last geometry stage, i.e. the size required for each vertex in the
747 * VPC in DWORD's.
748 */
749 uint8_t max_loc;
750
751 /* Number of entries in var. */
752 uint8_t cnt;
753
754 /* Bitset of locations used, including ones which are only used by the FS.
755 */
756 uint32_t varmask[4];
757
758 /* Map from VS output to location. */
759 struct {
760 uint8_t regid;
761 uint8_t compmask;
762 uint8_t loc;
763 } var[32];
764
765 /* location for fixed-function gl_PrimitiveID passthrough */
766 uint8_t primid_loc;
767 };
768
769 static inline void
770 ir3_link_add(struct ir3_shader_linkage *l, uint8_t regid_, uint8_t compmask, uint8_t loc)
771 {
772
773
774 for (int j = 0; j < util_last_bit(compmask); j++) {
775 uint8_t comploc = loc + j;
776 l->varmask[comploc / 32] |= 1 << (comploc % 32);
777 }
778
779 l->max_loc = MAX2(l->max_loc, loc + util_last_bit(compmask));
780
781 if (regid_ != regid(63, 0)) {
782 int i = l->cnt++;
783 debug_assert(i < ARRAY_SIZE(l->var));
784
785 l->var[i].regid = regid_;
786 l->var[i].compmask = compmask;
787 l->var[i].loc = loc;
788 }
789 }
790
/**
 * Build the VS->FS linkage map: walk the FS's fetched varyings in order,
 * find the matching VS output for each, and record the pairing via
 * ir3_link_add().  Also notes the inloc used for fixed-function
 * gl_PrimitiveID passthrough (primid_loc, 0xff if unused).
 */
791 static inline void
792 ir3_link_shaders(struct ir3_shader_linkage *l,
793 const struct ir3_shader_variant *vs,
794 const struct ir3_shader_variant *fs,
795 bool pack_vs_out)
796 {
797 /* On older platforms, varmask isn't programmed at all, and it appears
798 * that the hardware generates a mask of used VPC locations using the VS
799 * output map, and hangs if a FS bary instruction references a location
800 * not in the list. This means that we need to have a dummy entry in the
801 * VS out map for things like gl_PointCoord which aren't written by the
802 * VS. Furthermore we can't use r63.x, so just pick a random register to
803 * use if there is no VS output.
804 */
805 const unsigned default_regid = pack_vs_out ? regid(63, 0) : regid(0, 0);
806 int j = -1, k;
807
808 l->primid_loc = 0xff;
809
810 while (l->cnt < ARRAY_SIZE(l->var)) {
811 j = ir3_next_varying(fs, j);
812
813 if (j >= fs->inputs_count)
814 break;
815
/* skip FS inputs that don't occupy varying space: */
816 if (fs->inputs[j].inloc >= fs->total_in)
817 continue;
818
819 k = ir3_find_output(vs, fs->inputs[j].slot);
820
821 if (k < 0 && fs->inputs[j].slot == VARYING_SLOT_PRIMITIVE_ID) {
822 l->primid_loc = fs->inputs[j].inloc;
823 }
824
825 ir3_link_add(l, k >= 0 ? vs->outputs[k].regid : default_regid,
826 fs->inputs[j].compmask, fs->inputs[j].inloc);
827 }
828 }
829
830 static inline uint32_t
831 ir3_find_output_regid(const struct ir3_shader_variant *so, unsigned slot)
832 {
833 int j;
834 for (j = 0; j < so->outputs_count; j++)
835 if (so->outputs[j].slot == slot) {
836 uint32_t regid = so->outputs[j].regid;
837 if (so->outputs[j].half)
838 regid |= HALF_REG_ID;
839 return regid;
840 }
841 return regid(63, 0);
842 }
843
/* ir3-internal varying slots, allocated just past the last standard slot: */
844 #define VARYING_SLOT_GS_HEADER_IR3 (VARYING_SLOT_MAX + 0)
845 #define VARYING_SLOT_GS_VERTEX_FLAGS_IR3 (VARYING_SLOT_MAX + 1)
846 #define VARYING_SLOT_TCS_HEADER_IR3 (VARYING_SLOT_MAX + 2)
847
848
849 static inline uint32_t
850 ir3_find_sysval_regid(const struct ir3_shader_variant *so, unsigned slot)
851 {
852 int j;
853 for (j = 0; j < so->inputs_count; j++)
854 if (so->inputs[j].sysval && (so->inputs[j].slot == slot))
855 return so->inputs[j].regid;
856 return regid(63, 0);
857 }
858
859 /* calculate register footprint in terms of half-regs (ie. one full
860 * reg counts as two half-regs).
861 */
862 static inline uint32_t
863 ir3_shader_halfregs(const struct ir3_shader_variant *v)
864 {
865 return (2 * (v->info.max_reg + 1)) + (v->info.max_half_reg + 1);
866 }
867
868 static inline uint32_t
869 ir3_shader_nibo(const struct ir3_shader_variant *v)
870 {
871 /* The dummy variant used in binning mode won't have an actual shader. */
872 if (!v->shader)
873 return 0;
874
875 return v->shader->nir->info.num_ssbos + v->shader->nir->info.num_images;
876 }
877
878 #endif /* IR3_SHADER_H_ */