v3d: Move constant offsets to UBO addresses into the main uniform stream.
[mesa.git] / src / broadcom / compiler / v3d_compiler.h
/*
 * Copyright © 2016 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef V3D_COMPILER_H
#define V3D_COMPILER_H

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include "util/macros.h"
#include "common/v3d_debug.h"
#include "common/v3d_device_info.h"
#include "common/v3d_limits.h"
#include "compiler/nir/nir.h"
#include "util/list.h"
#include "util/u_math.h"

#include "qpu/qpu_instr.h"
#include "pipe/p_state.h"

struct nir_builder;

struct v3d_fs_inputs {
        /**
         * Array of the meanings of the VPM inputs this shader needs.
         *
         * It doesn't include those that aren't part of the VPM, like
         * point/line coordinates.
         */
        struct v3d_varying_slot *input_slots;
        uint32_t num_inputs;
};

enum qfile {
        /** An unused source or destination register. */
        QFILE_NULL,

        /** A physical register, such as the W coordinate payload. */
        QFILE_REG,
        /** One of the registers for fixed function interactions. */
        QFILE_MAGIC,

        /**
         * A virtual register, that will be allocated to actual accumulator
         * or physical registers later.
         */
        QFILE_TEMP,

        /**
         * VPM reads use this with an index value to say what part of the VPM
         * is being read.
         */
        QFILE_VPM,

        /**
         * Stores an immediate value in the index field that will be used
         * directly by qpu_load_imm().
         */
        QFILE_LOAD_IMM,

        /**
         * Stores an immediate value in the index field that can be turned
         * into a small immediate field by qpu_encode_small_immediate().
         */
        QFILE_SMALL_IMM,
};

/**
 * A reference to a QPU register or a virtual temp register.
 */
struct qreg {
        enum qfile file;
        uint32_t index;
};

static inline struct qreg vir_reg(enum qfile file, uint32_t index)
{
        return (struct qreg){file, index};
}

static inline struct qreg vir_magic_reg(uint32_t index)
{
        return (struct qreg){QFILE_MAGIC, index};
}

static inline struct qreg vir_nop_reg(void)
{
        return (struct qreg){QFILE_NULL, 0};
}

/**
 * A reference to an actual register at the QPU level, for register
 * allocation.
 */
struct qpu_reg {
        bool magic;
        bool smimm;
        int index;
};

struct qinst {
        /** Entry in qblock->instructions */
        struct list_head link;

        /**
         * The instruction being wrapped. Its condition codes, pack flags,
         * signals, etc. will all be used, with just the register references
         * being replaced by the contents of qinst->dst and qinst->src[].
         */
        struct v3d_qpu_instr qpu;

        /* Pre-register-allocation references to src/dst registers */
        struct qreg dst;
        struct qreg src[3];
        bool is_last_thrsw;

        /* If the instruction reads a uniform (other than through src[i].file
         * == QFILE_UNIF), that uniform's index in c->uniform_contents. ~0
         * otherwise.
         */
        int uniform;
};

enum quniform_contents {
        /**
         * Indicates that a constant 32-bit value is copied from the program's
         * uniform contents.
         */
        QUNIFORM_CONSTANT,
        /**
         * Indicates that the program's uniform contents are used as an index
         * into the GL uniform storage.
         */
        QUNIFORM_UNIFORM,

        /** @{
         * Scaling factors from clip coordinates to coordinates relative to
         * the viewport center.
         *
         * This is used by the coordinate and vertex shaders to produce the
         * 32-bit entry consisting of 2 16-bit fields with 12.4 signed fixed
         * point offsets from the viewport center.
         */
        QUNIFORM_VIEWPORT_X_SCALE,
        QUNIFORM_VIEWPORT_Y_SCALE,
        /** @} */

        QUNIFORM_VIEWPORT_Z_OFFSET,
        QUNIFORM_VIEWPORT_Z_SCALE,

        QUNIFORM_USER_CLIP_PLANE,

        /**
         * A reference to a V3D 3.x texture config parameter 0 uniform.
         *
         * This is a uniform implicitly loaded with a QPU_W_TMU* write, which
         * defines texture type, miplevels, and such. It will be found as a
         * parameter to the first QOP_TEX_[STRB] instruction in a sequence.
         */
        QUNIFORM_TEXTURE_CONFIG_P0_0,
        QUNIFORM_TEXTURE_CONFIG_P0_1,
        QUNIFORM_TEXTURE_CONFIG_P0_2,
        QUNIFORM_TEXTURE_CONFIG_P0_3,
        QUNIFORM_TEXTURE_CONFIG_P0_4,
        QUNIFORM_TEXTURE_CONFIG_P0_5,
        QUNIFORM_TEXTURE_CONFIG_P0_6,
        QUNIFORM_TEXTURE_CONFIG_P0_7,
        QUNIFORM_TEXTURE_CONFIG_P0_8,
        QUNIFORM_TEXTURE_CONFIG_P0_9,
        QUNIFORM_TEXTURE_CONFIG_P0_10,
        QUNIFORM_TEXTURE_CONFIG_P0_11,
        QUNIFORM_TEXTURE_CONFIG_P0_12,
        QUNIFORM_TEXTURE_CONFIG_P0_13,
        QUNIFORM_TEXTURE_CONFIG_P0_14,
        QUNIFORM_TEXTURE_CONFIG_P0_15,
        QUNIFORM_TEXTURE_CONFIG_P0_16,
        QUNIFORM_TEXTURE_CONFIG_P0_17,
        QUNIFORM_TEXTURE_CONFIG_P0_18,
        QUNIFORM_TEXTURE_CONFIG_P0_19,
        QUNIFORM_TEXTURE_CONFIG_P0_20,
        QUNIFORM_TEXTURE_CONFIG_P0_21,
        QUNIFORM_TEXTURE_CONFIG_P0_22,
        QUNIFORM_TEXTURE_CONFIG_P0_23,
        QUNIFORM_TEXTURE_CONFIG_P0_24,
        QUNIFORM_TEXTURE_CONFIG_P0_25,
        QUNIFORM_TEXTURE_CONFIG_P0_26,
        QUNIFORM_TEXTURE_CONFIG_P0_27,
        QUNIFORM_TEXTURE_CONFIG_P0_28,
        QUNIFORM_TEXTURE_CONFIG_P0_29,
        QUNIFORM_TEXTURE_CONFIG_P0_30,
        QUNIFORM_TEXTURE_CONFIG_P0_31,
        QUNIFORM_TEXTURE_CONFIG_P0_32,

        /**
         * A reference to a V3D 3.x texture config parameter 1 uniform.
         *
         * This is a uniform implicitly loaded with a QPU_W_TMU* write, which
         * has the pointer to the indirect texture state. Our data[] field
         * will have a packed p1 value, but the address field will be just
         * which texture unit's texture should be referenced.
         */
        QUNIFORM_TEXTURE_CONFIG_P1,

        /* A V3D 4.x texture config parameter. The high 8 bits will be
         * which texture or sampler is being sampled, and the driver must
         * replace the address field with the appropriate address.
         */
        QUNIFORM_TMU_CONFIG_P0,
        QUNIFORM_TMU_CONFIG_P1,

        QUNIFORM_IMAGE_TMU_CONFIG_P0,

        QUNIFORM_TEXTURE_FIRST_LEVEL,

        QUNIFORM_TEXTURE_WIDTH,
        QUNIFORM_TEXTURE_HEIGHT,
        QUNIFORM_TEXTURE_DEPTH,
        QUNIFORM_TEXTURE_ARRAY_SIZE,
        QUNIFORM_TEXTURE_LEVELS,

        QUNIFORM_UBO_ADDR,

        QUNIFORM_TEXRECT_SCALE_X,
        QUNIFORM_TEXRECT_SCALE_Y,

        /* Returns the base offset of the SSBO given by the data value. */
        QUNIFORM_SSBO_OFFSET,

        /* Returns the size of the SSBO given by the data value. */
        QUNIFORM_GET_BUFFER_SIZE,

        /* Sizes (in pixels) of a shader image given by the data value. */
        QUNIFORM_IMAGE_WIDTH,
        QUNIFORM_IMAGE_HEIGHT,
        QUNIFORM_IMAGE_DEPTH,
        QUNIFORM_IMAGE_ARRAY_SIZE,

        QUNIFORM_ALPHA_REF,

        /* Number of workgroups passed to glDispatchCompute in the dimension
         * selected by the data value.
         */
        QUNIFORM_NUM_WORK_GROUPS,

        /**
         * Returns the offset of the scratch buffer for register spilling.
         */
        QUNIFORM_SPILL_OFFSET,
        QUNIFORM_SPILL_SIZE_PER_THREAD,

        /**
         * Returns the offset of the shared memory for compute shaders.
         *
         * This will be accessed using TMU general memory operations, so the
         * L2T cache will effectively be the shared memory area.
         */
        QUNIFORM_SHARED_OFFSET,
};
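
/* Big-picture sketch of how this enum is consumed (a summary; the per-entry
 * handling lives in the driver, not this header): at draw time the driver
 * walks the compiled shader's uniform list and writes one 32-bit value into
 * the uniform stream per (contents, data) entry. QUNIFORM_CONSTANT data is
 * copied through as-is, while the other kinds are resolved against current
 * state.
 */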

static inline uint32_t v3d_unit_data_create(uint32_t unit, uint32_t value)
{
        assert(value < (1 << 24));
        return unit << 24 | value;
}

static inline uint32_t v3d_unit_data_get_unit(uint32_t data)
{
        return data >> 24;
}

static inline uint32_t v3d_unit_data_get_offset(uint32_t data)
{
        return data & 0xffffff;
}
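
/* Worked example with illustrative values: packing unit 2 with byte offset
 * 12 gives v3d_unit_data_create(2, 12) == 0x0200000c, from which
 * v3d_unit_data_get_unit() recovers 2 and v3d_unit_data_get_offset()
 * recovers 12. This lets a single uniform stream entry (for example, the
 * data slot of a QUNIFORM_UBO_ADDR entry) carry both a unit index and a
 * constant offset.
 */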

struct v3d_varying_slot {
        uint8_t slot_and_component;
};

static inline struct v3d_varying_slot
v3d_slot_from_slot_and_component(uint8_t slot, uint8_t component)
{
        assert(slot < 255 / 4);
        return (struct v3d_varying_slot){ (slot << 2) + component };
}

static inline uint8_t v3d_slot_get_slot(struct v3d_varying_slot slot)
{
        return slot.slot_and_component >> 2;
}

static inline uint8_t v3d_slot_get_component(struct v3d_varying_slot slot)
{
        return slot.slot_and_component & 3;
}
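
/* Worked example with illustrative values: slot 3, component 2 packs as
 * (3 << 2) + 2 == 14; v3d_slot_get_slot() returns 14 >> 2 == 3 and
 * v3d_slot_get_component() returns 14 & 3 == 2. The assert above keeps the
 * packed value within the single byte of storage.
 */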

struct v3d_ubo_range {
        /**
         * offset in bytes from the start of the ubo where this range is
         * uploaded.
         *
         * Only set once used is set.
         */
        uint32_t dst_offset;

        /**
         * offset in bytes from the start of the gallium uniforms where the
         * data comes from.
         */
        uint32_t src_offset;

        /** size in bytes of this ubo range */
        uint32_t size;
};
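
/* Reading of the fields above, with illustrative values: a range with
 * src_offset == 64, dst_offset == 16, size == 32 would mean bytes 64..95 of
 * the gallium uniform storage get uploaded at bytes 16..47 of the UBO used
 * for indirectly-addressed uniform access.
 */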

struct v3d_key {
        void *shader_state;
        struct {
                uint8_t swizzle[4];
                uint8_t return_size;
                uint8_t return_channels;
                bool clamp_s:1;
                bool clamp_t:1;
                bool clamp_r:1;
        } tex[V3D_MAX_TEXTURE_SAMPLERS];
        uint8_t ucp_enables;
};

struct v3d_fs_key {
        struct v3d_key base;
        bool depth_enabled;
        bool is_points;
        bool is_lines;
        bool alpha_test;
        bool point_coord_upper_left;
        bool light_twoside;
        bool msaa;
        bool sample_coverage;
        bool sample_alpha_to_coverage;
        bool sample_alpha_to_one;
        bool clamp_color;
        bool shade_model_flat;
        /* Mask of which color render targets are present. */
        uint8_t cbufs;
        uint8_t swap_color_rb;
        /* Mask of which render targets need to be written as 32-bit floats */
        uint8_t f32_color_rb;
        /* Masks of which render targets need to be written as ints/uints.
         * Used by gallium to work around lost information in TGSI.
         */
        uint8_t int_color_rb;
        uint8_t uint_color_rb;
        uint8_t alpha_test_func;
        uint8_t logicop_func;
        uint32_t point_sprite_mask;

        struct pipe_rt_blend_state blend;
};

struct v3d_vs_key {
        struct v3d_key base;

        struct v3d_varying_slot fs_inputs[V3D_MAX_FS_INPUTS];
        uint8_t num_fs_inputs;

        bool is_coord;
        bool per_vertex_point_size;
        bool clamp_color;
};

/** A basic block of VIR instructions. */
struct qblock {
        struct list_head link;

        struct list_head instructions;

        struct set *predecessors;
        struct qblock *successors[2];

        int index;

        /* Instruction IPs for the first and last instruction of the block.
         * Set by qpu_schedule.c.
         */
        uint32_t start_qpu_ip;
        uint32_t end_qpu_ip;

        /* Instruction IP for the branch instruction of the block. Set by
         * qpu_schedule.c.
         */
        uint32_t branch_qpu_ip;

        /** Offset within the uniform stream at the start of the block. */
        uint32_t start_uniform;
        /** Offset within the uniform stream of the branch instruction */
        uint32_t branch_uniform;

        /** @{ used by v3d_vir_live_variables.c */
        BITSET_WORD *def;
        BITSET_WORD *defin;
        BITSET_WORD *defout;
        BITSET_WORD *use;
        BITSET_WORD *live_in;
        BITSET_WORD *live_out;
        int start_ip, end_ip;
        /** @} */
};

/** Which util/list.h add mode we should use when inserting an instruction. */
enum vir_cursor_mode {
        vir_cursor_add,
        vir_cursor_addtail,
};

/**
 * Tracking structure for where new instructions should be inserted. Create
 * with one of the vir_after_inst()-style helper functions.
 *
 * This does not protect against removal of the block or instruction, so we
 * have an assert in instruction removal to try to catch it.
 */
struct vir_cursor {
        enum vir_cursor_mode mode;
        struct list_head *link;
};

static inline struct vir_cursor
vir_before_inst(struct qinst *inst)
{
        return (struct vir_cursor){ vir_cursor_addtail, &inst->link };
}

static inline struct vir_cursor
vir_after_inst(struct qinst *inst)
{
        return (struct vir_cursor){ vir_cursor_add, &inst->link };
}

static inline struct vir_cursor
vir_before_block(struct qblock *block)
{
        return (struct vir_cursor){ vir_cursor_add, &block->instructions };
}

static inline struct vir_cursor
vir_after_block(struct qblock *block)
{
        return (struct vir_cursor){ vir_cursor_addtail, &block->instructions };
}
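
/* Usage sketch (hypothetical surrounding code): to emit an instruction
 * immediately before an existing one, repoint the compile's cursor, emit,
 * and restore:
 *
 *    struct vir_cursor saved = c->cursor;
 *    c->cursor = vir_before_inst(inst);
 *    vir_MOV_dest(c, dst, src);
 *    c->cursor = saved;
 *
 * vir_before_inst() uses vir_cursor_addtail because list_addtail() on
 * inst->link places the new entry just before inst.
 */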

/**
 * Compiler state saved across compiler invocations, for any expensive global
 * setup.
 */
struct v3d_compiler {
        const struct v3d_device_info *devinfo;
        struct ra_regs *regs;
        unsigned int reg_class_any[3];
        unsigned int reg_class_r5[3];
        unsigned int reg_class_phys[3];
        unsigned int reg_class_phys_or_acc[3];
};

struct v3d_compile {
        const struct v3d_device_info *devinfo;
        nir_shader *s;
        nir_function_impl *impl;
        struct exec_list *cf_node_list;
        const struct v3d_compiler *compiler;

        void (*debug_output)(const char *msg,
                             void *debug_output_data);
        void *debug_output_data;

        /**
         * Mapping from nir_register * or nir_ssa_def * to array of struct
         * qreg for the values.
         */
        struct hash_table *def_ht;

        /* For each temp, the instruction generating its value. */
        struct qinst **defs;
        uint32_t defs_array_size;

        /**
         * Inputs to the shader, arranged by TGSI declaration order.
         *
         * Not all fragment shader QFILE_VARY reads are present in this array.
         */
        struct qreg *inputs;
        struct qreg *outputs;
        bool msaa_per_sample_output;
        struct qreg color_reads[V3D_MAX_SAMPLES];
        struct qreg sample_colors[V3D_MAX_SAMPLES];
        uint32_t inputs_array_size;
        uint32_t outputs_array_size;
        uint32_t uniforms_array_size;

        /* Booleans for whether the corresponding QFILE_VARY[i] is
         * flat-shaded. This includes gl_FragColor flat-shading, which is
         * customized based on the shademodel_flat shader key.
         */
        uint32_t flat_shade_flags[BITSET_WORDS(V3D_MAX_FS_INPUTS)];

        uint32_t noperspective_flags[BITSET_WORDS(V3D_MAX_FS_INPUTS)];

        uint32_t centroid_flags[BITSET_WORDS(V3D_MAX_FS_INPUTS)];

        bool uses_center_w;
        bool writes_z;

        struct v3d_ubo_range *ubo_ranges;
        bool *ubo_range_used;
        uint32_t ubo_ranges_array_size;
        /** Number of uniform areas tracked in ubo_ranges. */
        uint32_t num_ubo_ranges;
        uint32_t next_ubo_dst_offset;

        /* State for whether we're executing on each channel currently. 0 if
         * yes, otherwise a block number + 1 that the channel jumped to.
         */
        struct qreg execute;
        bool in_control_flow;

        struct qreg line_x, point_x, point_y;

        /**
         * Instance ID, which comes in before the vertex attribute payload if
         * the shader record requests it.
         */
        struct qreg iid;

        /**
         * Vertex ID, which comes in before the vertex attribute payload
         * (after Instance ID) if the shader record requests it.
         */
        struct qreg vid;

        /* Fragment shader payload regs. */
        struct qreg payload_w, payload_w_centroid, payload_z;

        struct qreg cs_payload[2];
        struct qreg cs_shared_offset;
        int local_invocation_index_bits;

        uint8_t vattr_sizes[V3D_MAX_VS_INPUTS / 4];
        uint32_t vpm_output_size;

        /* Size in bytes of registers that have been spilled. This is how much
         * space needs to be available in the spill BO per thread per QPU.
         */
        uint32_t spill_size;
        /* Shader-db stats */
        uint32_t spills, fills, loops;
        /**
         * Register spilling's per-thread base address, shared between each
         * spill/fill's addressing calculations.
         */
        struct qreg spill_base;
        /* Bit vector of which temps may be spilled */
        BITSET_WORD *spillable;

        /**
         * Array of the VARYING_SLOT_* of all FS QFILE_VARY reads.
         *
         * This includes those that aren't part of the VPM varyings, like
         * point/line coordinates.
         */
        struct v3d_varying_slot input_slots[V3D_MAX_FS_INPUTS];

        /**
         * An entry per outputs[] in the VS indicating what the VARYING_SLOT_*
         * of the output is. Used to emit from the VS in the order that the
         * FS needs.
         */
        struct v3d_varying_slot *output_slots;

        struct pipe_shader_state *shader_state;
        struct v3d_key *key;
        struct v3d_fs_key *fs_key;
        struct v3d_vs_key *vs_key;

        /* Live ranges of temps. */
        int *temp_start, *temp_end;
        bool live_intervals_valid;

        uint32_t *uniform_data;
        enum quniform_contents *uniform_contents;
        uint32_t uniform_array_size;
        uint32_t num_uniforms;
        uint32_t output_position_index;
        nir_variable *output_color_var[4];
        uint32_t output_sample_mask_index;

        struct qreg undef;
        uint32_t num_temps;

        struct vir_cursor cursor;
        struct list_head blocks;
        int next_block_index;
        struct qblock *cur_block;
        struct qblock *loop_cont_block;
        struct qblock *loop_break_block;

        uint64_t *qpu_insts;
        uint32_t qpu_inst_count;
        uint32_t qpu_inst_size;

        /* For the FS, the number of varying inputs not counting the
         * point/line varyings payload
         */
        uint32_t num_inputs;

        /**
         * Number of inputs from num_inputs remaining to be queued to the read
         * FIFO in the VS/CS.
         */
        uint32_t num_inputs_remaining;

        /* Number of inputs currently in the read FIFO for the VS/CS */
        uint32_t num_inputs_in_fifo;

        /** Next offset in the VPM to read from in the VS/CS */
        uint32_t vpm_read_offset;

        uint32_t program_id;
        uint32_t variant_id;

        /* Set to compile the program in 1x, 2x, or 4x threaded mode, where
         * SIG_THREAD_SWITCH is used to hide texturing latency at the cost of
         * limiting ourselves to only a part of the physical reg space.
         *
         * On V3D 3.x, 2x or 4x threading divides the physical reg space by
         * 2x or 4x. On V3D 4.x, all shaders are 2x threaded, and 4x only
         * divides the physical reg space in half.
         */
        uint8_t threads;
        struct qinst *last_thrsw;
        bool last_thrsw_at_top_level;

        bool failed;
};

struct v3d_uniform_list {
        enum quniform_contents *contents;
        uint32_t *data;
        uint32_t count;
};

struct v3d_prog_data {
        struct v3d_uniform_list uniforms;

        struct v3d_ubo_range *ubo_ranges;
        uint32_t num_ubo_ranges;
        uint32_t ubo_size;
        uint32_t spill_size;

        uint8_t threads;

        /* For threads > 1, whether the program should be dispatched in the
         * after-final-THRSW state.
         */
        bool single_seg;
};

struct v3d_vs_prog_data {
        struct v3d_prog_data base;

        bool uses_iid, uses_vid;

        /* Number of components read from each vertex attribute. */
        uint8_t vattr_sizes[V3D_MAX_VS_INPUTS / 4];

        /* Total number of components read, for the shader state record. */
        uint32_t vpm_input_size;

        /* Total number of components written, for the shader state record. */
        uint32_t vpm_output_size;

        /* Set if there should be separate VPM segments for input and output.
         * If unset, vpm_input_size will be 0.
         */
        bool separate_segments;

        /* Value to be programmed in VCM_CACHE_SIZE. */
        uint8_t vcm_cache_size;
};

struct v3d_fs_prog_data {
        struct v3d_prog_data base;

        struct v3d_varying_slot input_slots[V3D_MAX_FS_INPUTS];

        /* Array of flat shade flags.
         *
         * Each entry is only 24 bits (high 8 bits 0), to match the hardware
         * packet layout.
         */
        uint32_t flat_shade_flags[((V3D_MAX_FS_INPUTS - 1) / 24) + 1];

        uint32_t noperspective_flags[((V3D_MAX_FS_INPUTS - 1) / 24) + 1];

        uint32_t centroid_flags[((V3D_MAX_FS_INPUTS - 1) / 24) + 1];

        uint8_t num_inputs;
        bool writes_z;
        bool disable_ez;
        bool uses_center_w;
};

static inline bool
vir_has_uniform(struct qinst *inst)
{
        return inst->uniform != ~0;
}

/* Special nir_load_input intrinsic index for loading the current TLB
 * destination color.
 */
#define V3D_NIR_TLB_COLOR_READ_INPUT 2000000000

#define V3D_NIR_MS_MASK_OUTPUT 3000000000

extern const nir_shader_compiler_options v3d_nir_options;

const struct v3d_compiler *v3d_compiler_init(const struct v3d_device_info *devinfo);
void v3d_compiler_free(const struct v3d_compiler *compiler);
void v3d_optimize_nir(struct nir_shader *s);

uint64_t *v3d_compile(const struct v3d_compiler *compiler,
                      struct v3d_key *key,
                      struct v3d_prog_data **prog_data,
                      nir_shader *s,
                      void (*debug_output)(const char *msg,
                                           void *debug_output_data),
                      void *debug_output_data,
                      int program_id, int variant_id,
                      uint32_t *final_assembly_size);

void v3d_nir_to_vir(struct v3d_compile *c);

void vir_compile_destroy(struct v3d_compile *c);
const char *vir_get_stage_name(struct v3d_compile *c);
struct qblock *vir_new_block(struct v3d_compile *c);
void vir_set_emit_block(struct v3d_compile *c, struct qblock *block);
void vir_link_blocks(struct qblock *predecessor, struct qblock *successor);
struct qblock *vir_entry_block(struct v3d_compile *c);
struct qblock *vir_exit_block(struct v3d_compile *c);
struct qinst *vir_add_inst(enum v3d_qpu_add_op op, struct qreg dst,
                           struct qreg src0, struct qreg src1);
struct qinst *vir_mul_inst(enum v3d_qpu_mul_op op, struct qreg dst,
                           struct qreg src0, struct qreg src1);
struct qinst *vir_branch_inst(struct v3d_compile *c,
                              enum v3d_qpu_branch_cond cond);
void vir_remove_instruction(struct v3d_compile *c, struct qinst *qinst);
uint32_t vir_get_uniform_index(struct v3d_compile *c,
                               enum quniform_contents contents,
                               uint32_t data);
struct qreg vir_uniform(struct v3d_compile *c,
                        enum quniform_contents contents,
                        uint32_t data);
void vir_schedule_instructions(struct v3d_compile *c);
struct v3d_qpu_instr v3d_qpu_nop(void);

struct qreg vir_emit_def(struct v3d_compile *c, struct qinst *inst);
struct qinst *vir_emit_nondef(struct v3d_compile *c, struct qinst *inst);
void vir_set_cond(struct qinst *inst, enum v3d_qpu_cond cond);
void vir_set_pf(struct qinst *inst, enum v3d_qpu_pf pf);
void vir_set_uf(struct qinst *inst, enum v3d_qpu_uf uf);
void vir_set_unpack(struct qinst *inst, int src,
                    enum v3d_qpu_input_unpack unpack);

struct qreg vir_get_temp(struct v3d_compile *c);
void vir_emit_last_thrsw(struct v3d_compile *c);
void vir_calculate_live_intervals(struct v3d_compile *c);
int vir_get_nsrc(struct qinst *inst);
bool vir_has_side_effects(struct v3d_compile *c, struct qinst *inst);
bool vir_get_add_op(struct qinst *inst, enum v3d_qpu_add_op *op);
bool vir_get_mul_op(struct qinst *inst, enum v3d_qpu_mul_op *op);
bool vir_is_raw_mov(struct qinst *inst);
bool vir_is_tex(struct qinst *inst);
bool vir_is_add(struct qinst *inst);
bool vir_is_mul(struct qinst *inst);
bool vir_writes_r3(const struct v3d_device_info *devinfo, struct qinst *inst);
bool vir_writes_r4(const struct v3d_device_info *devinfo, struct qinst *inst);
struct qreg vir_follow_movs(struct v3d_compile *c, struct qreg reg);
uint8_t vir_channels_written(struct qinst *inst);
struct qreg ntq_get_src(struct v3d_compile *c, nir_src src, int i);
void ntq_store_dest(struct v3d_compile *c, nir_dest *dest, int chan,
                    struct qreg result);
void vir_emit_thrsw(struct v3d_compile *c);

void vir_dump(struct v3d_compile *c);
void vir_dump_inst(struct v3d_compile *c, struct qinst *inst);
void vir_dump_uniform(enum quniform_contents contents, uint32_t data);

void vir_validate(struct v3d_compile *c);

void vir_optimize(struct v3d_compile *c);
bool vir_opt_algebraic(struct v3d_compile *c);
bool vir_opt_constant_folding(struct v3d_compile *c);
bool vir_opt_copy_propagate(struct v3d_compile *c);
bool vir_opt_dead_code(struct v3d_compile *c);
bool vir_opt_peephole_sf(struct v3d_compile *c);
bool vir_opt_small_immediates(struct v3d_compile *c);
bool vir_opt_vpm(struct v3d_compile *c);
void v3d_nir_lower_blend(nir_shader *s, struct v3d_compile *c);
void v3d_nir_lower_io(nir_shader *s, struct v3d_compile *c);
void v3d_nir_lower_txf_ms(nir_shader *s, struct v3d_compile *c);
void v3d_nir_lower_image_load_store(nir_shader *s);
void vir_lower_uniforms(struct v3d_compile *c);

void v3d33_vir_vpm_read_setup(struct v3d_compile *c, int num_components);
void v3d33_vir_vpm_write_setup(struct v3d_compile *c);
void v3d33_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr);
void v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr);
void v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                                     nir_intrinsic_instr *instr);

void v3d_vir_to_qpu(struct v3d_compile *c, struct qpu_reg *temp_registers);
uint32_t v3d_qpu_schedule_instructions(struct v3d_compile *c);
void qpu_validate(struct v3d_compile *c);
struct qpu_reg *v3d_register_allocate(struct v3d_compile *c, bool *spilled);
bool vir_init_reg_sets(struct v3d_compiler *compiler);

bool v3d_gl_format_is_return_32(GLenum format);

static inline bool
quniform_contents_is_texture_p0(enum quniform_contents contents)
{
        return (contents >= QUNIFORM_TEXTURE_CONFIG_P0_0 &&
                contents < (QUNIFORM_TEXTURE_CONFIG_P0_0 +
                            V3D_MAX_TEXTURE_SAMPLERS));
}

static inline bool
vir_in_nonuniform_control_flow(struct v3d_compile *c)
{
        return c->execute.file != QFILE_NULL;
}

static inline struct qreg
vir_uniform_ui(struct v3d_compile *c, uint32_t ui)
{
        return vir_uniform(c, QUNIFORM_CONSTANT, ui);
}

static inline struct qreg
vir_uniform_f(struct v3d_compile *c, float f)
{
        return vir_uniform(c, QUNIFORM_CONSTANT, fui(f));
}
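
/* Usage sketch: each call returns a qreg that reads a uniform stream entry
 * for the given (contents, data) pair, e.g.:
 *
 *    struct qreg half = vir_uniform_f(c, 0.5f);
 *    struct qreg vpx = vir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE, 0);
 *
 * vir_get_uniform_index() is assumed to reuse an existing entry when an
 * identical pair has already been emitted.
 */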

#define VIR_ALU0(name, vir_inst, op) \
static inline struct qreg \
vir_##name(struct v3d_compile *c) \
{ \
        return vir_emit_def(c, vir_inst(op, c->undef, \
                                        c->undef, c->undef)); \
} \
static inline struct qinst * \
vir_##name##_dest(struct v3d_compile *c, struct qreg dest) \
{ \
        return vir_emit_nondef(c, vir_inst(op, dest, \
                                           c->undef, c->undef)); \
}

#define VIR_ALU1(name, vir_inst, op) \
static inline struct qreg \
vir_##name(struct v3d_compile *c, struct qreg a) \
{ \
        return vir_emit_def(c, vir_inst(op, c->undef, \
                                        a, c->undef)); \
} \
static inline struct qinst * \
vir_##name##_dest(struct v3d_compile *c, struct qreg dest, \
                  struct qreg a) \
{ \
        return vir_emit_nondef(c, vir_inst(op, dest, a, \
                                           c->undef)); \
}

#define VIR_ALU2(name, vir_inst, op) \
static inline struct qreg \
vir_##name(struct v3d_compile *c, struct qreg a, struct qreg b) \
{ \
        return vir_emit_def(c, vir_inst(op, c->undef, a, b)); \
} \
static inline struct qinst * \
vir_##name##_dest(struct v3d_compile *c, struct qreg dest, \
                  struct qreg a, struct qreg b) \
{ \
        return vir_emit_nondef(c, vir_inst(op, dest, a, b)); \
}

#define VIR_NODST_0(name, vir_inst, op) \
static inline struct qinst * \
vir_##name(struct v3d_compile *c) \
{ \
        return vir_emit_nondef(c, vir_inst(op, c->undef, \
                                           c->undef, c->undef)); \
}

#define VIR_NODST_1(name, vir_inst, op) \
static inline struct qinst * \
vir_##name(struct v3d_compile *c, struct qreg a) \
{ \
        return vir_emit_nondef(c, vir_inst(op, c->undef, \
                                           a, c->undef)); \
}

#define VIR_NODST_2(name, vir_inst, op) \
static inline struct qinst * \
vir_##name(struct v3d_compile *c, struct qreg a, struct qreg b) \
{ \
        return vir_emit_nondef(c, vir_inst(op, c->undef, \
                                           a, b)); \
}

#define VIR_SFU(name) \
static inline struct qreg \
vir_##name(struct v3d_compile *c, struct qreg a) \
{ \
        if (c->devinfo->ver >= 41) { \
                return vir_emit_def(c, vir_add_inst(V3D_QPU_A_##name, \
                                                    c->undef, \
                                                    a, c->undef)); \
        } else { \
                vir_FMOV_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_##name), a); \
                return vir_FMOV(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R4)); \
        } \
} \
static inline struct qinst * \
vir_##name##_dest(struct v3d_compile *c, struct qreg dest, \
                  struct qreg a) \
{ \
        if (c->devinfo->ver >= 41) { \
                return vir_emit_nondef(c, vir_add_inst(V3D_QPU_A_##name, \
                                                       dest, \
                                                       a, c->undef)); \
        } else { \
                vir_FMOV_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_##name), a); \
                return vir_FMOV_dest(c, dest, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R4)); \
        } \
}

#define VIR_A_ALU2(name) VIR_ALU2(name, vir_add_inst, V3D_QPU_A_##name)
#define VIR_M_ALU2(name) VIR_ALU2(name, vir_mul_inst, V3D_QPU_M_##name)
#define VIR_A_ALU1(name) VIR_ALU1(name, vir_add_inst, V3D_QPU_A_##name)
#define VIR_M_ALU1(name) VIR_ALU1(name, vir_mul_inst, V3D_QPU_M_##name)
#define VIR_A_ALU0(name) VIR_ALU0(name, vir_add_inst, V3D_QPU_A_##name)
#define VIR_M_ALU0(name) VIR_ALU0(name, vir_mul_inst, V3D_QPU_M_##name)
#define VIR_A_NODST_2(name) VIR_NODST_2(name, vir_add_inst, V3D_QPU_A_##name)
#define VIR_M_NODST_2(name) VIR_NODST_2(name, vir_mul_inst, V3D_QPU_M_##name)
#define VIR_A_NODST_1(name) VIR_NODST_1(name, vir_add_inst, V3D_QPU_A_##name)
#define VIR_M_NODST_1(name) VIR_NODST_1(name, vir_mul_inst, V3D_QPU_M_##name)
#define VIR_A_NODST_0(name) VIR_NODST_0(name, vir_add_inst, V3D_QPU_A_##name)

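/* For example, VIR_A_ALU2(FADD) below defines vir_FADD(c, a, b), which
 * emits an add-ALU V3D_QPU_A_FADD whose result lands in a fresh temp, and
 * vir_FADD_dest(c, dest, a, b), which writes an explicit destination
 * instead.
 */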
VIR_A_ALU2(FADD)
VIR_A_ALU2(VFPACK)
VIR_A_ALU2(FSUB)
VIR_A_ALU2(FMIN)
VIR_A_ALU2(FMAX)

VIR_A_ALU2(ADD)
VIR_A_ALU2(SUB)
VIR_A_ALU2(SHL)
VIR_A_ALU2(SHR)
VIR_A_ALU2(ASR)
VIR_A_ALU2(ROR)
VIR_A_ALU2(MIN)
VIR_A_ALU2(MAX)
VIR_A_ALU2(UMIN)
VIR_A_ALU2(UMAX)
VIR_A_ALU2(AND)
VIR_A_ALU2(OR)
VIR_A_ALU2(XOR)
VIR_A_ALU2(VADD)
VIR_A_ALU2(VSUB)
VIR_A_NODST_2(STVPMV)
VIR_A_ALU1(NOT)
VIR_A_ALU1(NEG)
VIR_A_ALU1(FLAPUSH)
VIR_A_ALU1(FLBPUSH)
VIR_A_ALU1(FLPOP)
VIR_A_ALU1(SETMSF)
VIR_A_ALU1(SETREVF)
VIR_A_ALU0(TIDX)
VIR_A_ALU0(EIDX)
VIR_A_ALU1(LDVPMV_IN)
VIR_A_ALU1(LDVPMV_OUT)
VIR_A_ALU0(TMUWT)

VIR_A_ALU0(FXCD)
VIR_A_ALU0(XCD)
VIR_A_ALU0(FYCD)
VIR_A_ALU0(YCD)
VIR_A_ALU0(MSF)
VIR_A_ALU0(REVF)
VIR_A_ALU0(BARRIERID)
VIR_A_NODST_1(VPMSETUP)
VIR_A_NODST_0(VPMWT)
VIR_A_ALU2(FCMP)
VIR_A_ALU2(VFMAX)

VIR_A_ALU1(FROUND)
VIR_A_ALU1(FTOIN)
VIR_A_ALU1(FTRUNC)
VIR_A_ALU1(FTOIZ)
VIR_A_ALU1(FFLOOR)
VIR_A_ALU1(FTOUZ)
VIR_A_ALU1(FCEIL)
VIR_A_ALU1(FTOC)

VIR_A_ALU1(FDX)
VIR_A_ALU1(FDY)

VIR_A_ALU1(ITOF)
VIR_A_ALU1(CLZ)
VIR_A_ALU1(UTOF)

VIR_M_ALU2(UMUL24)
VIR_M_ALU2(FMUL)
VIR_M_ALU2(SMUL24)
VIR_M_NODST_2(MULTOP)

VIR_M_ALU1(MOV)
VIR_M_ALU1(FMOV)

VIR_SFU(RECIP)
VIR_SFU(RSQRT)
VIR_SFU(EXP)
VIR_SFU(LOG)
VIR_SFU(SIN)
VIR_SFU(RSQRT2)

static inline struct qinst *
vir_MOV_cond(struct v3d_compile *c, enum v3d_qpu_cond cond,
             struct qreg dest, struct qreg src)
{
        struct qinst *mov = vir_MOV_dest(c, dest, src);
        vir_set_cond(mov, cond);
        return mov;
}

static inline struct qreg
vir_SEL(struct v3d_compile *c, enum v3d_qpu_cond cond,
        struct qreg src0, struct qreg src1)
{
        struct qreg t = vir_get_temp(c);
        /* Unconditionally write the "else" value, then conditionally
         * overwrite it, so channels where cond holds end up with src0.
         */
        vir_MOV_dest(c, t, src1);
        vir_MOV_cond(c, cond, t, src0);
        return t;
}

static inline struct qinst *
vir_NOP(struct v3d_compile *c)
{
        return vir_emit_nondef(c, vir_add_inst(V3D_QPU_A_NOP,
                                               c->undef, c->undef, c->undef));
}

/* Emits a TMU result load. On V3D 4.1+ the ldtmu signal writes the
 * instruction's own destination; on 3.x the signal lands the result in r4,
 * which we then copy out.
 */
static inline struct qreg
vir_LDTMU(struct v3d_compile *c)
{
        if (c->devinfo->ver >= 41) {
                struct qinst *ldtmu = vir_add_inst(V3D_QPU_A_NOP, c->undef,
                                                   c->undef, c->undef);
                ldtmu->qpu.sig.ldtmu = true;

                return vir_emit_def(c, ldtmu);
        } else {
                vir_NOP(c)->qpu.sig.ldtmu = true;
                return vir_MOV(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R4));
        }
}

/* Full 32-bit multiply, built from MULTOP (which primes the multiplier's
 * internal rtop state) followed by UMUL24 on the same operands, yielding
 * the low 32 bits of src0 * src1.
 */
static inline struct qreg
vir_UMUL(struct v3d_compile *c, struct qreg src0, struct qreg src1)
{
        vir_MULTOP(c, src0, src1);
        return vir_UMUL24(c, src0, src1);
}

/*
static inline struct qreg
vir_LOAD_IMM(struct v3d_compile *c, uint32_t val)
{
        return vir_emit_def(c, vir_inst(QOP_LOAD_IMM, c->undef,
                                        vir_reg(QFILE_LOAD_IMM, val), c->undef));
}

static inline struct qreg
vir_LOAD_IMM_U2(struct v3d_compile *c, uint32_t val)
{
        return vir_emit_def(c, vir_inst(QOP_LOAD_IMM_U2, c->undef,
                                        vir_reg(QFILE_LOAD_IMM, val),
                                        c->undef));
}
static inline struct qreg
vir_LOAD_IMM_I2(struct v3d_compile *c, uint32_t val)
{
        return vir_emit_def(c, vir_inst(QOP_LOAD_IMM_I2, c->undef,
                                        vir_reg(QFILE_LOAD_IMM, val),
                                        c->undef));
}
*/

static inline struct qinst *
vir_BRANCH(struct v3d_compile *c, enum v3d_qpu_branch_cond cond)
{
        /* The actual uniform_data value will be set at scheduling time */
        return vir_emit_nondef(c, vir_branch_inst(c, cond));
}

#define vir_for_each_block(block, c) \
        list_for_each_entry(struct qblock, block, &c->blocks, link)

#define vir_for_each_block_rev(block, c) \
        list_for_each_entry_rev(struct qblock, block, &c->blocks, link)

/* Loop over the non-NULL members of the successors array. */
#define vir_for_each_successor(succ, block) \
        for (struct qblock *succ = block->successors[0]; \
             succ != NULL; \
             succ = (succ == block->successors[1] ? NULL : \
                     block->successors[1]))

#define vir_for_each_inst(inst, block) \
        list_for_each_entry(struct qinst, inst, &block->instructions, link)

#define vir_for_each_inst_rev(inst, block) \
        list_for_each_entry_rev(struct qinst, inst, &block->instructions, link)

#define vir_for_each_inst_safe(inst, block) \
        list_for_each_entry_safe(struct qinst, inst, &block->instructions, link)

#define vir_for_each_inst_inorder(inst, c) \
        vir_for_each_block(_block, c) \
                vir_for_each_inst(inst, _block)

#define vir_for_each_inst_inorder_safe(inst, c) \
        vir_for_each_block(_block, c) \
                vir_for_each_inst_safe(inst, _block)

#endif /* V3D_COMPILER_H */