/*
 * Copyright © 2016 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#ifndef V3D_COMPILER_H
#define V3D_COMPILER_H

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#include "util/macros.h"
#include "common/v3d_debug.h"
#include "common/v3d_device_info.h"
#include "common/v3d_limits.h"
#include "compiler/nir/nir.h"
#include "util/list.h"
#include "util/u_math.h"

#include "qpu/qpu_instr.h"
#include "pipe/p_state.h"
struct v3d_fs_inputs {
        /**
         * Array of the meanings of the VPM inputs this shader needs.
         *
         * It doesn't include those that aren't part of the VPM, like
         * point/line coordinates.
         */
        struct v3d_varying_slot *input_slots;
        uint32_t num_inputs;
};
enum qfile {
        /** An unused source or destination register. */
        QFILE_NULL,

        /** A physical register, such as the W coordinate payload. */
        QFILE_REG,
        /** One of the registers for fixed function interactions. */
        QFILE_MAGIC,

        /**
         * A virtual register, that will be allocated to actual accumulator
         * or physical registers later.
         */
        QFILE_TEMP,

        /**
         * VPM reads use this with an index value to say what part of the VPM
         * is being read.
         */
        QFILE_VPM,

        /**
         * Stores an immediate value in the index field that will be used
         * directly by qpu_load_imm().
         */
        QFILE_LOAD_IMM,

        /**
         * Stores an immediate value in the index field that can be turned
         * into a small immediate field by qpu_encode_small_immediate().
         */
        QFILE_SMALL_IMM,
};

/**
 * A reference to a QPU register or a virtual temp register.
 */
struct qreg {
        enum qfile file;
        uint32_t index;
};
static inline struct qreg vir_reg(enum qfile file, uint32_t index)
{
        return (struct qreg){file, index};
}

static inline struct qreg vir_magic_reg(uint32_t index)
{
        return (struct qreg){QFILE_MAGIC, index};
}

static inline struct qreg vir_nop_reg(void)
{
        return (struct qreg){QFILE_NULL, 0};
}
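
/*
 * Usage sketch (values are illustrative only): vir_reg() builds a reference
 * into any register file, vir_magic_reg() picks a fixed-function "magic"
 * write address, and vir_nop_reg() is the don't-care destination:
 *
 *     struct qreg t   = vir_reg(QFILE_TEMP, 12);   // virtual temp #12
 *     struct qreg r4  = vir_magic_reg(V3D_QPU_WADDR_R4);
 *     struct qreg nop = vir_nop_reg();             // discard the result
 */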
/**
 * A reference to an actual register at the QPU level, for register
 * allocation.
 */
struct qpu_reg {
        bool magic;
        int index;
};
struct qinst {
        /** Entry in qblock->instructions */
        struct list_head link;

        /**
         * The instruction being wrapped.  Its condition codes, pack flags,
         * signals, etc. will all be used, with just the register references
         * being replaced by the contents of qinst->dst and qinst->src[].
         */
        struct v3d_qpu_instr qpu;

        /* Pre-register-allocation references to src/dst registers */
        struct qreg dst;
        struct qreg src[3];

        /* If the instruction reads a uniform (other than through src[i].file
         * == QFILE_UNIF), that uniform's index in c->uniform_contents.  ~0
         * otherwise.
         */
        int uniform;
};
enum quniform_contents {
        /**
         * Indicates that a constant 32-bit value is copied from the program's
         * implicit uniform stream at the point of the reference.
         */
        QUNIFORM_CONSTANT,
        /**
         * Indicates that the program's uniform contents are used as an index
         * into the GL uniform storage.
         */
        QUNIFORM_UNIFORM,

        /** @{
         * Scaling factors from clip coordinates to relative to the viewport
         * center.
         *
         * This is used by the coordinate and vertex shaders to produce the
         * 32-bit entry consisting of 2 16-bit fields with 12.4 signed fixed
         * point offsets from the viewport center.
         */
        QUNIFORM_VIEWPORT_X_SCALE,
        QUNIFORM_VIEWPORT_Y_SCALE,
        /** @} */

        QUNIFORM_VIEWPORT_Z_OFFSET,
        QUNIFORM_VIEWPORT_Z_SCALE,

        QUNIFORM_USER_CLIP_PLANE,

        /**
         * A reference to a V3D 3.x texture config parameter 0 uniform.
         *
         * This is a uniform implicitly loaded with a QPU_W_TMU* write, which
         * defines texture type, miplevels, and such.  It will be found as a
         * parameter to the first QOP_TEX_[STRB] instruction in a sequence.
         */
        QUNIFORM_TEXTURE_CONFIG_P0_0,
        QUNIFORM_TEXTURE_CONFIG_P0_1,
        QUNIFORM_TEXTURE_CONFIG_P0_2,
        QUNIFORM_TEXTURE_CONFIG_P0_3,
        QUNIFORM_TEXTURE_CONFIG_P0_4,
        QUNIFORM_TEXTURE_CONFIG_P0_5,
        QUNIFORM_TEXTURE_CONFIG_P0_6,
        QUNIFORM_TEXTURE_CONFIG_P0_7,
        QUNIFORM_TEXTURE_CONFIG_P0_8,
        QUNIFORM_TEXTURE_CONFIG_P0_9,
        QUNIFORM_TEXTURE_CONFIG_P0_10,
        QUNIFORM_TEXTURE_CONFIG_P0_11,
        QUNIFORM_TEXTURE_CONFIG_P0_12,
        QUNIFORM_TEXTURE_CONFIG_P0_13,
        QUNIFORM_TEXTURE_CONFIG_P0_14,
        QUNIFORM_TEXTURE_CONFIG_P0_15,
        QUNIFORM_TEXTURE_CONFIG_P0_16,
        QUNIFORM_TEXTURE_CONFIG_P0_17,
        QUNIFORM_TEXTURE_CONFIG_P0_18,
        QUNIFORM_TEXTURE_CONFIG_P0_19,
        QUNIFORM_TEXTURE_CONFIG_P0_20,
        QUNIFORM_TEXTURE_CONFIG_P0_21,
        QUNIFORM_TEXTURE_CONFIG_P0_22,
        QUNIFORM_TEXTURE_CONFIG_P0_23,
        QUNIFORM_TEXTURE_CONFIG_P0_24,
        QUNIFORM_TEXTURE_CONFIG_P0_25,
        QUNIFORM_TEXTURE_CONFIG_P0_26,
        QUNIFORM_TEXTURE_CONFIG_P0_27,
        QUNIFORM_TEXTURE_CONFIG_P0_28,
        QUNIFORM_TEXTURE_CONFIG_P0_29,
        QUNIFORM_TEXTURE_CONFIG_P0_30,
        QUNIFORM_TEXTURE_CONFIG_P0_31,
        QUNIFORM_TEXTURE_CONFIG_P0_32,

        /**
         * A reference to a V3D 3.x texture config parameter 1 uniform.
         *
         * This is a uniform implicitly loaded with a QPU_W_TMU* write, which
         * has the pointer to the indirect texture state.  Our data[] field
         * will have a packed p1 value, but the address field will be just
         * which texture unit's texture should be referenced.
         */
        QUNIFORM_TEXTURE_CONFIG_P1,

        /* A V3D 4.x texture config parameter.  The high 8 bits will be
         * which texture or sampler is being sampled, and the driver must
         * replace the address field with the appropriate address.
         */
        QUNIFORM_TMU_CONFIG_P0,
        QUNIFORM_TMU_CONFIG_P1,

        QUNIFORM_IMAGE_TMU_CONFIG_P0,

        QUNIFORM_TEXTURE_FIRST_LEVEL,

        QUNIFORM_TEXTURE_WIDTH,
        QUNIFORM_TEXTURE_HEIGHT,
        QUNIFORM_TEXTURE_DEPTH,
        QUNIFORM_TEXTURE_ARRAY_SIZE,
        QUNIFORM_TEXTURE_LEVELS,

        QUNIFORM_TEXRECT_SCALE_X,
        QUNIFORM_TEXRECT_SCALE_Y,

        /* Returns the base offset of the SSBO given by the data value. */
        QUNIFORM_SSBO_OFFSET,

        /* Returns the size of the SSBO given by the data value. */
        QUNIFORM_GET_BUFFER_SIZE,

        /* Sizes (in pixels) of a shader image given by the data value. */
        QUNIFORM_IMAGE_WIDTH,
        QUNIFORM_IMAGE_HEIGHT,
        QUNIFORM_IMAGE_DEPTH,
        QUNIFORM_IMAGE_ARRAY_SIZE,

        /* Number of workgroups passed to glDispatchCompute in the dimension
         * selected by the data value.
         */
        QUNIFORM_NUM_WORK_GROUPS,

        /**
         * Returns the offset of the scratch buffer for register spilling.
         */
        QUNIFORM_SPILL_OFFSET,
        QUNIFORM_SPILL_SIZE_PER_THREAD,

        /**
         * Returns the offset of the shared memory for compute shaders.
         *
         * This will be accessed using TMU general memory operations, so the
         * L2T cache will effectively be the shared memory area.
         */
        QUNIFORM_SHARED_OFFSET,

        /**
         * Returns the number of layers in the framebuffer.
         *
         * This is used to cap gl_Layer in geometry shaders to avoid
         * out-of-bounds accesses into the tile state during binning.
         */
        QUNIFORM_FB_LAYERS,
};
static inline uint32_t v3d_unit_data_create(uint32_t unit, uint32_t value)
{
        assert(value < (1 << 24));
        return unit << 24 | value;
}

static inline uint32_t v3d_unit_data_get_unit(uint32_t data)
{
        return data >> 24;
}

static inline uint32_t v3d_unit_data_get_offset(uint32_t data)
{
        return data & 0xffffff;
}
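
/*
 * Usage sketch (the unit and offset values are illustrative only): the data
 * word packs a unit number in the top 8 bits and a 24-bit offset in the rest,
 * and the two getters undo exactly what the create helper packs:
 *
 *     uint32_t data = v3d_unit_data_create(2, 0x100);
 *     assert(v3d_unit_data_get_unit(data) == 2);
 *     assert(v3d_unit_data_get_offset(data) == 0x100);
 */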
struct v3d_varying_slot {
        uint8_t slot_and_component;
};

static inline struct v3d_varying_slot
v3d_slot_from_slot_and_component(uint8_t slot, uint8_t component)
{
        assert(slot < 255 / 4);
        return (struct v3d_varying_slot){ (slot << 2) + component };
}

static inline uint8_t v3d_slot_get_slot(struct v3d_varying_slot slot)
{
        return slot.slot_and_component >> 2;
}

static inline uint8_t v3d_slot_get_component(struct v3d_varying_slot slot)
{
        return slot.slot_and_component & 3;
}
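
/*
 * Usage sketch (VARYING_SLOT_COL0 and the component index are illustrative):
 * a varying is addressed as a (slot, component) pair packed into one byte,
 * with the slot in the high 6 bits and the component in the low 2:
 *
 *     struct v3d_varying_slot v =
 *             v3d_slot_from_slot_and_component(VARYING_SLOT_COL0, 2);
 *     assert(v3d_slot_get_slot(v) == VARYING_SLOT_COL0);
 *     assert(v3d_slot_get_component(v) == 2);
 */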
struct v3d_key {
        void *shader_state;
        struct {
                uint8_t return_size;
                uint8_t return_channels;
        } tex[V3D_MAX_TEXTURE_SAMPLERS];
        bool is_last_geometry_stage;
};
struct v3d_fs_key {
        struct v3d_key base;
        bool point_coord_upper_left;
        bool sample_coverage;
        bool sample_alpha_to_coverage;
        bool sample_alpha_to_one;
        bool shade_model_flat;
        /* Mask of which color render targets are present. */
        uint8_t cbufs;
        uint8_t swap_color_rb;
        /* Mask of which render targets need to be written as 32-bit floats */
        uint8_t f32_color_rb;
        /* Masks of which render targets need to be written as ints/uints.
         * Used by gallium to work around lost information in TGSI.
         */
        uint8_t int_color_rb;
        uint8_t uint_color_rb;

        /* Color format information per render target. Only set when logic
         * operations are enabled.
         */
        struct {
                enum pipe_format format;
                const uint8_t *swizzle;
        } color_fmt[V3D_MAX_DRAW_BUFFERS];

        uint8_t alpha_test_func;
        uint8_t logicop_func;
        uint32_t point_sprite_mask;

        struct pipe_rt_blend_state blend;
};
struct v3d_gs_key {
        struct v3d_key base;

        struct v3d_varying_slot used_outputs[V3D_MAX_FS_INPUTS];
        uint8_t num_used_outputs;

        bool per_vertex_point_size;
};
struct v3d_vs_key {
        struct v3d_key base;

        struct v3d_varying_slot used_outputs[V3D_MAX_ANY_STAGE_INPUTS];
        uint8_t num_used_outputs;

        bool per_vertex_point_size;
};
/** A basic block of VIR instructions. */
struct qblock {
        struct list_head link;

        struct list_head instructions;

        struct set *predecessors;
        struct qblock *successors[2];

        int index;

        /* Instruction IPs for the first and last instruction of the block.
         * Set by qpu_schedule.c.
         */
        uint32_t start_qpu_ip;
        uint32_t end_qpu_ip;

        /* Instruction IP for the branch instruction of the block.  Set by
         * qpu_schedule.c.
         */
        uint32_t branch_qpu_ip;

        /** Offset within the uniform stream at the start of the block. */
        uint32_t start_uniform;
        /** Offset within the uniform stream of the branch instruction */
        uint32_t branch_uniform;

        /** @{ used by v3d_vir_live_variables.c */
        BITSET_WORD *live_in;
        BITSET_WORD *live_out;
        int start_ip, end_ip;
        /** @} */
};
/** Which util/list.h add mode we should use when inserting an instruction. */
enum vir_cursor_mode {
        vir_cursor_add,
        vir_cursor_addtail,
};

/**
 * Tracking structure for where new instructions should be inserted.  Create
 * with one of the vir_after_inst()-style helper functions.
 *
 * This does not protect against removal of the block or instruction, so we
 * have an assert in instruction removal to try to catch it.
 */
struct vir_cursor {
        enum vir_cursor_mode mode;
        struct list_head *link;
};
static inline struct vir_cursor
vir_before_inst(struct qinst *inst)
{
        return (struct vir_cursor){ vir_cursor_addtail, &inst->link };
}

static inline struct vir_cursor
vir_after_inst(struct qinst *inst)
{
        return (struct vir_cursor){ vir_cursor_add, &inst->link };
}

static inline struct vir_cursor
vir_before_block(struct qblock *block)
{
        return (struct vir_cursor){ vir_cursor_add, &block->instructions };
}

static inline struct vir_cursor
vir_after_block(struct qblock *block)
{
        return (struct vir_cursor){ vir_cursor_addtail, &block->instructions };
}
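
/*
 * A note on the add modes (a reading of the helpers above, not new behavior):
 * the cursor stores the list node to insert relative to, so "before an
 * instruction" means list_addtail() onto that instruction's link, while
 * "before a block" means list_add() at the head of the block's instruction
 * list.  A typical (illustrative) use from a pass would be:
 *
 *     c->cursor = vir_before_inst(inst);
 *     struct qreg tmp = vir_MOV(c, some_src);   // emitted right before inst
 */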
/**
 * Compiler state saved across compiler invocations, for any expensive global
 * setup, such as the compiler's register sets.
 */
struct v3d_compiler {
        const struct v3d_device_info *devinfo;
        struct ra_regs *regs;
        unsigned int reg_class_any[3];
        unsigned int reg_class_r5[3];
        unsigned int reg_class_phys[3];
        unsigned int reg_class_phys_or_acc[3];
};
struct v3d_compile {
        const struct v3d_device_info *devinfo;
        nir_shader *s;
        nir_function_impl *impl;
        struct exec_list *cf_node_list;
        const struct v3d_compiler *compiler;

        void (*debug_output)(const char *msg,
                             void *debug_output_data);
        void *debug_output_data;
        /**
         * Mapping from nir_register * or nir_ssa_def * to array of struct
         * qreg for the values.
         */
        struct hash_table *def_ht;

        /* For each temp, the instruction generating its value. */
        struct qinst **defs;
        uint32_t defs_array_size;

        /**
         * Inputs to the shader, arranged by TGSI declaration order.
         *
         * Not all fragment shader QFILE_VARY reads are present in this array.
         */
        struct qreg *inputs;
        struct qreg *outputs;
        bool msaa_per_sample_output;
        struct qreg color_reads[V3D_MAX_DRAW_BUFFERS * V3D_MAX_SAMPLES * 4];
        struct qreg sample_colors[V3D_MAX_DRAW_BUFFERS * V3D_MAX_SAMPLES * 4];
        uint32_t inputs_array_size;
        uint32_t outputs_array_size;
        uint32_t uniforms_array_size;
        /* Booleans for whether the corresponding QFILE_VARY[i] is
         * flat-shaded.  This includes gl_FragColor flat-shading, which is
         * customized based on the shademodel_flat shader key.
         */
        uint32_t flat_shade_flags[BITSET_WORDS(V3D_MAX_FS_INPUTS)];

        uint32_t noperspective_flags[BITSET_WORDS(V3D_MAX_FS_INPUTS)];

        uint32_t centroid_flags[BITSET_WORDS(V3D_MAX_FS_INPUTS)];

        bool uses_implicit_point_line_varyings;

        /* State for whether we're executing on each channel currently.  0 if
         * yes, otherwise a block number + 1 that the channel jumped to.
         */
        struct qreg execute;
        bool in_control_flow;

        struct qreg line_x, point_x, point_y;
        /**
         * Instance ID, which comes in before the vertex attribute payload if
         * the shader record requests it.
         */
        struct qreg iid;

        /**
         * Vertex ID, which comes in before the vertex attribute payload
         * (after Instance ID) if the shader record requests it.
         */
        struct qreg vid;

        /* Fragment shader payload regs. */
        struct qreg payload_w, payload_w_centroid, payload_z;

        struct qreg cs_payload[2];
        struct qreg cs_shared_offset;
        int local_invocation_index_bits;

        uint8_t vattr_sizes[V3D_MAX_VS_INPUTS / 4];
        uint32_t vpm_output_size;

        /* Size in bytes of registers that have been spilled.  This is how much
         * space needs to be available in the spill BO per thread per QPU.
         */
        uint32_t spill_size;
        /* Shader-db stats */
        uint32_t spills, fills, loops;
        /**
         * Register spilling's per-thread base address, shared between each
         * spill/fill's addressing calculations.
         */
        struct qreg spill_base;
        /* Bit vector of which temps may be spilled */
        BITSET_WORD *spillable;

        /**
         * Array of the VARYING_SLOT_* of all FS QFILE_VARY reads.
         *
         * This includes those that aren't part of the VPM varyings, like
         * point/line coordinates.
         */
        struct v3d_varying_slot input_slots[V3D_MAX_FS_INPUTS];

        /**
         * An entry per outputs[] in the VS indicating what the VARYING_SLOT_*
         * of the output is.  Used to emit from the VS in the order that the
         * FS needs them.
         */
        struct v3d_varying_slot *output_slots;

        struct pipe_shader_state *shader_state;
        struct v3d_fs_key *fs_key;
        struct v3d_gs_key *gs_key;
        struct v3d_vs_key *vs_key;
        /* Live ranges of temps. */
        int *temp_start, *temp_end;
        bool live_intervals_valid;

        uint32_t *uniform_data;
        enum quniform_contents *uniform_contents;
        uint32_t uniform_array_size;
        uint32_t num_uniforms;
        uint32_t output_position_index;
        nir_variable *output_color_var[4];
        uint32_t output_sample_mask_index;

        struct qreg undef;

        struct vir_cursor cursor;
        struct list_head blocks;
        int next_block_index;
        struct qblock *cur_block;
        struct qblock *loop_cont_block;
        struct qblock *loop_break_block;

        uint64_t *qpu_insts;
        uint32_t qpu_inst_count;
        uint32_t qpu_inst_size;
        uint32_t qpu_inst_stalled_count;

        /* For the FS, the number of varying inputs not counting the
         * point/line varyings payload
         */
        uint32_t num_inputs;

        /* Set to compile program in 1x, 2x, or 4x threaded mode, where
         * SIG_THREAD_SWITCH is used to hide texturing latency at the cost of
         * limiting ourselves to the part of the physical reg space.
         *
         * On V3D 3.x, 2x or 4x divide the physical reg space by 2x or 4x. On
         * V3D 4.x, all shaders are 2x threaded, and 4x only divides the
         * physical reg space in half.
         */
        uint8_t threads;
        struct qinst *last_thrsw;
        bool last_thrsw_at_top_level;

        bool emitted_tlb_load;
        bool lock_scoreboard_on_first_thrsw;
};
struct v3d_uniform_list {
        enum quniform_contents *contents;
        uint32_t *data;
        uint32_t count;
};
struct v3d_prog_data {
        struct v3d_uniform_list uniforms;

        uint32_t spill_size;

        uint8_t threads;

        /* For threads > 1, whether the program should be dispatched in the
         * after-final-THRSW state.
         */
        bool single_seg;
};
struct v3d_vs_prog_data {
        struct v3d_prog_data base;

        bool uses_iid, uses_vid;

        /* Number of components read from each vertex attribute. */
        uint8_t vattr_sizes[V3D_MAX_VS_INPUTS / 4];

        /* Total number of components read, for the shader state record. */
        uint32_t vpm_input_size;

        /* Total number of components written, for the shader state record. */
        uint32_t vpm_output_size;

        /* Set if there should be separate VPM segments for input and output.
         * If unset, vpm_input_size will be 0.
         */
        bool separate_segments;

        /* Value to be programmed in VCM_CACHE_SIZE. */
        uint8_t vcm_cache_size;
};
struct v3d_gs_prog_data {
        struct v3d_prog_data base;

        /* Whether the program reads gl_PrimitiveIDIn */
        bool uses_pid;

        /* Number of components read from each input varying. */
        uint8_t input_sizes[V3D_MAX_GS_INPUTS / 4];

        /* Number of inputs */
        uint8_t num_inputs;
        struct v3d_varying_slot input_slots[V3D_MAX_GS_INPUTS];

        /* Total number of components written, for the shader state record. */
        uint32_t vpm_output_size;

        /* Maximum SIMD dispatch width to not exceed VPM output size limits
         * in the geometry shader.  Notice that the final dispatch width has to
         * be decided at draw time and could be lower based on the VPM pressure
         * added by other shader stages.
         */
        uint8_t simd_width;

        /* Output primitive type */
        uint8_t out_prim_type;

        /* Number of GS invocations */
        uint8_t num_invocations;
};
struct v3d_fs_prog_data {
        struct v3d_prog_data base;

        struct v3d_varying_slot input_slots[V3D_MAX_FS_INPUTS];

        /* Array of flat shade flags.
         *
         * Each entry is only 24 bits (high 8 bits 0), to match the hardware
         * packet that enables the flags.
         */
        uint32_t flat_shade_flags[((V3D_MAX_FS_INPUTS - 1) / 24) + 1];

        uint32_t noperspective_flags[((V3D_MAX_FS_INPUTS - 1) / 24) + 1];

        uint32_t centroid_flags[((V3D_MAX_FS_INPUTS - 1) / 24) + 1];

        bool uses_implicit_point_line_varyings;
        bool lock_scoreboard_on_first_thrsw;
};
struct v3d_compute_prog_data {
        struct v3d_prog_data base;
        /* Size in bytes of the workgroup's shared space. */
        uint32_t shared_size;
};
static inline bool
vir_has_uniform(struct qinst *inst)
{
        return inst->uniform != ~0;
}
extern const nir_shader_compiler_options v3d_nir_options;

const struct v3d_compiler *v3d_compiler_init(const struct v3d_device_info *devinfo);
void v3d_compiler_free(const struct v3d_compiler *compiler);
void v3d_optimize_nir(struct nir_shader *s);

uint64_t *v3d_compile(const struct v3d_compiler *compiler,
                      struct v3d_key *key,
                      struct v3d_prog_data **prog_data,
                      nir_shader *s,
                      void (*debug_output)(const char *msg,
                                           void *debug_output_data),
                      void *debug_output_data,
                      int program_id, int variant_id,
                      uint32_t *final_assembly_size);
void v3d_nir_to_vir(struct v3d_compile *c);

void vir_compile_destroy(struct v3d_compile *c);
const char *vir_get_stage_name(struct v3d_compile *c);
struct qblock *vir_new_block(struct v3d_compile *c);
void vir_set_emit_block(struct v3d_compile *c, struct qblock *block);
void vir_link_blocks(struct qblock *predecessor, struct qblock *successor);
struct qblock *vir_entry_block(struct v3d_compile *c);
struct qblock *vir_exit_block(struct v3d_compile *c);
struct qinst *vir_add_inst(enum v3d_qpu_add_op op, struct qreg dst,
                           struct qreg src0, struct qreg src1);
struct qinst *vir_mul_inst(enum v3d_qpu_mul_op op, struct qreg dst,
                           struct qreg src0, struct qreg src1);
struct qinst *vir_branch_inst(struct v3d_compile *c,
                              enum v3d_qpu_branch_cond cond);
void vir_remove_instruction(struct v3d_compile *c, struct qinst *qinst);
uint32_t vir_get_uniform_index(struct v3d_compile *c,
                               enum quniform_contents contents,
                               uint32_t data);
struct qreg vir_uniform(struct v3d_compile *c,
                        enum quniform_contents contents,
                        uint32_t data);
void vir_schedule_instructions(struct v3d_compile *c);
void v3d_setup_spill_base(struct v3d_compile *c);
struct v3d_qpu_instr v3d_qpu_nop(void);

struct qreg vir_emit_def(struct v3d_compile *c, struct qinst *inst);
struct qinst *vir_emit_nondef(struct v3d_compile *c, struct qinst *inst);
void vir_set_cond(struct qinst *inst, enum v3d_qpu_cond cond);
void vir_set_pf(struct qinst *inst, enum v3d_qpu_pf pf);
void vir_set_uf(struct qinst *inst, enum v3d_qpu_uf uf);
void vir_set_unpack(struct qinst *inst, int src,
                    enum v3d_qpu_input_unpack unpack);

struct qreg vir_get_temp(struct v3d_compile *c);
void vir_emit_last_thrsw(struct v3d_compile *c);
void vir_calculate_live_intervals(struct v3d_compile *c);
int vir_get_nsrc(struct qinst *inst);
bool vir_has_side_effects(struct v3d_compile *c, struct qinst *inst);
bool vir_get_add_op(struct qinst *inst, enum v3d_qpu_add_op *op);
bool vir_get_mul_op(struct qinst *inst, enum v3d_qpu_mul_op *op);
bool vir_is_raw_mov(struct qinst *inst);
bool vir_is_tex(struct qinst *inst);
bool vir_is_add(struct qinst *inst);
bool vir_is_mul(struct qinst *inst);
bool vir_writes_r3(const struct v3d_device_info *devinfo, struct qinst *inst);
bool vir_writes_r4(const struct v3d_device_info *devinfo, struct qinst *inst);
struct qreg vir_follow_movs(struct v3d_compile *c, struct qreg reg);
uint8_t vir_channels_written(struct qinst *inst);
struct qreg ntq_get_src(struct v3d_compile *c, nir_src src, int i);
void ntq_store_dest(struct v3d_compile *c, nir_dest *dest, int chan,
                    struct qreg result);

void vir_emit_thrsw(struct v3d_compile *c);
void vir_dump(struct v3d_compile *c);
void vir_dump_inst(struct v3d_compile *c, struct qinst *inst);
void vir_dump_uniform(enum quniform_contents contents, uint32_t data);

void vir_validate(struct v3d_compile *c);

void vir_optimize(struct v3d_compile *c);
bool vir_opt_algebraic(struct v3d_compile *c);
bool vir_opt_constant_folding(struct v3d_compile *c);
bool vir_opt_copy_propagate(struct v3d_compile *c);
bool vir_opt_dead_code(struct v3d_compile *c);
bool vir_opt_peephole_sf(struct v3d_compile *c);
bool vir_opt_redundant_flags(struct v3d_compile *c);
bool vir_opt_small_immediates(struct v3d_compile *c);
bool vir_opt_vpm(struct v3d_compile *c);
void v3d_nir_lower_blend(nir_shader *s, struct v3d_compile *c);
void v3d_nir_lower_io(nir_shader *s, struct v3d_compile *c);
void v3d_nir_lower_logic_ops(nir_shader *s, struct v3d_compile *c);
void v3d_nir_lower_scratch(nir_shader *s);
void v3d_nir_lower_txf_ms(nir_shader *s, struct v3d_compile *c);
void v3d_nir_lower_image_load_store(nir_shader *s);
void vir_lower_uniforms(struct v3d_compile *c);

void v3d33_vir_vpm_read_setup(struct v3d_compile *c, int num_components);
void v3d33_vir_vpm_write_setup(struct v3d_compile *c);
void v3d33_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr);
void v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr);
void v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                                     nir_intrinsic_instr *instr);

void v3d_vir_to_qpu(struct v3d_compile *c, struct qpu_reg *temp_registers);
uint32_t v3d_qpu_schedule_instructions(struct v3d_compile *c);
void qpu_validate(struct v3d_compile *c);
struct qpu_reg *v3d_register_allocate(struct v3d_compile *c, bool *spilled);
bool vir_init_reg_sets(struct v3d_compiler *compiler);

bool v3d_gl_format_is_return_32(GLenum format);
uint32_t
v3d_get_op_for_atomic_add(nir_intrinsic_instr *instr, unsigned src);
static inline bool
quniform_contents_is_texture_p0(enum quniform_contents contents)
{
        return (contents >= QUNIFORM_TEXTURE_CONFIG_P0_0 &&
                contents < (QUNIFORM_TEXTURE_CONFIG_P0_0 +
                            V3D_MAX_TEXTURE_SAMPLERS));
}
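
/*
 * Reading of the helper above (the unit value is illustrative only): the
 * per-unit P0 enumerators are contiguous, so texture unit N's parameter-0
 * uniform is just QUNIFORM_TEXTURE_CONFIG_P0_0 + N, and the helper checks
 * membership in that range:
 *
 *     enum quniform_contents p0 =
 *             (enum quniform_contents)(QUNIFORM_TEXTURE_CONFIG_P0_0 + 3);
 *     assert(quniform_contents_is_texture_p0(p0));
 */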
static inline bool
vir_in_nonuniform_control_flow(struct v3d_compile *c)
{
        return c->execute.file != QFILE_NULL;
}
static inline struct qreg
vir_uniform_ui(struct v3d_compile *c, uint32_t ui)
{
        return vir_uniform(c, QUNIFORM_CONSTANT, ui);
}

static inline struct qreg
vir_uniform_f(struct v3d_compile *c, float f)
{
        return vir_uniform(c, QUNIFORM_CONSTANT, fui(f));
}
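
/*
 * Usage sketch (the constants are illustrative only): both helpers emit a
 * QUNIFORM_CONSTANT entry in the uniform stream, with vir_uniform_f() just
 * reinterpreting the float's bits via fui():
 *
 *     struct qreg half = vir_uniform_f(c, 0.5f);
 *     struct qreg mask = vir_uniform_ui(c, 0xff);
 */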
#define VIR_ALU0(name, vir_inst, op)                                     \
static inline struct qreg                                                \
vir_##name(struct v3d_compile *c)                                        \
{                                                                        \
        return vir_emit_def(c, vir_inst(op, c->undef,                    \
                                        c->undef, c->undef));            \
}                                                                        \
static inline struct qinst *                                             \
vir_##name##_dest(struct v3d_compile *c, struct qreg dest)               \
{                                                                        \
        return vir_emit_nondef(c, vir_inst(op, dest,                     \
                                           c->undef, c->undef));         \
}
#define VIR_ALU1(name, vir_inst, op)                                     \
static inline struct qreg                                                \
vir_##name(struct v3d_compile *c, struct qreg a)                         \
{                                                                        \
        return vir_emit_def(c, vir_inst(op, c->undef,                    \
                                        a, c->undef));                   \
}                                                                        \
static inline struct qinst *                                             \
vir_##name##_dest(struct v3d_compile *c, struct qreg dest,               \
                  struct qreg a)                                         \
{                                                                        \
        return vir_emit_nondef(c, vir_inst(op, dest, a,                  \
                                           c->undef));                   \
}
#define VIR_ALU2(name, vir_inst, op)                                     \
static inline struct qreg                                                \
vir_##name(struct v3d_compile *c, struct qreg a, struct qreg b)          \
{                                                                        \
        return vir_emit_def(c, vir_inst(op, c->undef, a, b));            \
}                                                                        \
static inline struct qinst *                                             \
vir_##name##_dest(struct v3d_compile *c, struct qreg dest,               \
                  struct qreg a, struct qreg b)                          \
{                                                                        \
        return vir_emit_nondef(c, vir_inst(op, dest, a, b));             \
}
#define VIR_NODST_0(name, vir_inst, op)                                  \
static inline struct qinst *                                             \
vir_##name(struct v3d_compile *c)                                        \
{                                                                        \
        return vir_emit_nondef(c, vir_inst(op, c->undef,                 \
                                           c->undef, c->undef));         \
}
#define VIR_NODST_1(name, vir_inst, op)                                  \
static inline struct qinst *                                             \
vir_##name(struct v3d_compile *c, struct qreg a)                         \
{                                                                        \
        return vir_emit_nondef(c, vir_inst(op, c->undef,                 \
                                           a, c->undef));                \
}
#define VIR_NODST_2(name, vir_inst, op)                                  \
static inline struct qinst *                                             \
vir_##name(struct v3d_compile *c, struct qreg a, struct qreg b)          \
{                                                                        \
        return vir_emit_nondef(c, vir_inst(op, c->undef,                 \
                                           a, b));                       \
}
#define VIR_SFU(name)                                                    \
static inline struct qreg                                                \
vir_##name(struct v3d_compile *c, struct qreg a)                         \
{                                                                        \
        if (c->devinfo->ver >= 41) {                                     \
                return vir_emit_def(c, vir_add_inst(V3D_QPU_A_##name,    \
                                                    c->undef,            \
                                                    a, c->undef));       \
        } else {                                                         \
                vir_FMOV_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_##name), a); \
                return vir_FMOV(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R4)); \
        }                                                                \
}                                                                        \
static inline struct qinst *                                             \
vir_##name##_dest(struct v3d_compile *c, struct qreg dest,               \
                  struct qreg a)                                         \
{                                                                        \
        if (c->devinfo->ver >= 41) {                                     \
                return vir_emit_nondef(c, vir_add_inst(V3D_QPU_A_##name, \
                                                       dest,             \
                                                       a, c->undef));    \
        } else {                                                         \
                vir_FMOV_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_##name), a); \
                return vir_FMOV_dest(c, dest, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R4)); \
        }                                                                \
}
#define VIR_A_ALU2(name) VIR_ALU2(name, vir_add_inst, V3D_QPU_A_##name)
#define VIR_M_ALU2(name) VIR_ALU2(name, vir_mul_inst, V3D_QPU_M_##name)
#define VIR_A_ALU1(name) VIR_ALU1(name, vir_add_inst, V3D_QPU_A_##name)
#define VIR_M_ALU1(name) VIR_ALU1(name, vir_mul_inst, V3D_QPU_M_##name)
#define VIR_A_ALU0(name) VIR_ALU0(name, vir_add_inst, V3D_QPU_A_##name)
#define VIR_M_ALU0(name) VIR_ALU0(name, vir_mul_inst, V3D_QPU_M_##name)
#define VIR_A_NODST_2(name) VIR_NODST_2(name, vir_add_inst, V3D_QPU_A_##name)
#define VIR_M_NODST_2(name) VIR_NODST_2(name, vir_mul_inst, V3D_QPU_M_##name)
#define VIR_A_NODST_1(name) VIR_NODST_1(name, vir_add_inst, V3D_QPU_A_##name)
#define VIR_M_NODST_1(name) VIR_NODST_1(name, vir_mul_inst, V3D_QPU_M_##name)
#define VIR_A_NODST_0(name) VIR_NODST_0(name, vir_add_inst, V3D_QPU_A_##name)
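
/*
 * Each instantiation below expands to a pair of inline emit helpers.  As an
 * illustration (FADD is used here purely as an example name), VIR_A_ALU2(FADD)
 * would define:
 *
 *     struct qreg vir_FADD(struct v3d_compile *c, struct qreg a, struct qreg b);
 *     struct qinst *vir_FADD_dest(struct v3d_compile *c, struct qreg dest,
 *                                 struct qreg a, struct qreg b);
 *
 * where the first allocates a fresh temp for the result (via vir_emit_def())
 * and the second writes to a caller-provided destination.
 */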
VIR_A_NODST_2(STVPMV)
VIR_A_NODST_2(STVPMD)
VIR_A_ALU1(LDVPMV_IN)
VIR_A_ALU1(LDVPMV_OUT)
VIR_A_ALU1(LDVPMD_IN)
VIR_A_ALU1(LDVPMD_OUT)
VIR_A_ALU2(LDVPMG_IN)
VIR_A_ALU2(LDVPMG_OUT)
VIR_A_ALU0(BARRIERID)
VIR_A_NODST_1(VPMSETUP)
VIR_A_NODST_0(VPMWT)
VIR_M_NODST_2(MULTOP)
static inline struct qinst *
vir_MOV_cond(struct v3d_compile *c, enum v3d_qpu_cond cond,
             struct qreg dest, struct qreg src)
{
        struct qinst *mov = vir_MOV_dest(c, dest, src);
        vir_set_cond(mov, cond);
        return mov;
}
static inline struct qreg
vir_SEL(struct v3d_compile *c, enum v3d_qpu_cond cond,
        struct qreg src0, struct qreg src1)
{
        struct qreg t = vir_get_temp(c);
        vir_MOV_dest(c, t, src1);
        vir_MOV_cond(c, cond, t, src0);
        return t;
}
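
/*
 * Usage sketch (hedged; vir_XXX_dest stands in for whichever flag-setting ALU
 * op the caller actually emits): some earlier instruction pushes a condition
 * onto the flags, and vir_SEL() then materializes "cond ? src0 : src1" into a
 * fresh temp:
 *
 *     struct qinst *cmp = vir_XXX_dest(c, vir_nop_reg(), a, b);
 *     vir_set_pf(cmp, V3D_QPU_PF_PUSHZ);
 *     struct qreg r = vir_SEL(c, V3D_QPU_COND_IFA, x, y);
 */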
static inline struct qinst *
vir_NOP(struct v3d_compile *c)
{
        return vir_emit_nondef(c, vir_add_inst(V3D_QPU_A_NOP,
                                               c->undef, c->undef, c->undef));
}
static inline struct qreg
vir_LDTMU(struct v3d_compile *c)
{
        if (c->devinfo->ver >= 41) {
                struct qinst *ldtmu = vir_add_inst(V3D_QPU_A_NOP, c->undef,
                                                   c->undef, c->undef);
                ldtmu->qpu.sig.ldtmu = true;

                return vir_emit_def(c, ldtmu);
        } else {
                vir_NOP(c)->qpu.sig.ldtmu = true;
                return vir_MOV(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_R4));
        }
}
static inline struct qreg
vir_UMUL(struct v3d_compile *c, struct qreg src0, struct qreg src1)
{
        vir_MULTOP(c, src0, src1);
        return vir_UMUL24(c, src0, src1);
}
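
/*
 * Note (a reading of the helper above, hedged): MULTOP and UMUL24 are emitted
 * as a pair on the same operands, which is the two-instruction sequence the
 * hardware needs to produce a full 32-bit (low word) unsigned multiply rather
 * than only a 24x24-bit one.
 */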
static inline struct qreg
vir_TLBU_COLOR_READ(struct v3d_compile *c, uint32_t config)
{
        assert(c->devinfo->ver >= 41); /* XXX */
        assert((config & 0xffffff00) == 0xffffff00);

        struct qinst *ldtlb = vir_add_inst(V3D_QPU_A_NOP, c->undef,
                                           c->undef, c->undef);
        ldtlb->qpu.sig.ldtlbu = true;
        ldtlb->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT, config);
        return vir_emit_def(c, ldtlb);
}
static inline struct qreg
vir_TLB_COLOR_READ(struct v3d_compile *c)
{
        assert(c->devinfo->ver >= 41); /* XXX */

        struct qinst *ldtlb = vir_add_inst(V3D_QPU_A_NOP, c->undef,
                                           c->undef, c->undef);
        ldtlb->qpu.sig.ldtlb = true;
        return vir_emit_def(c, ldtlb);
}
/*
static inline struct qreg
vir_LOAD_IMM(struct v3d_compile *c, uint32_t val)
{
        return vir_emit_def(c, vir_inst(QOP_LOAD_IMM, c->undef,
                                        vir_reg(QFILE_LOAD_IMM, val), c->undef));
}

static inline struct qreg
vir_LOAD_IMM_U2(struct v3d_compile *c, uint32_t val)
{
        return vir_emit_def(c, vir_inst(QOP_LOAD_IMM_U2, c->undef,
                                        vir_reg(QFILE_LOAD_IMM, val),
                                        c->undef));
}

static inline struct qreg
vir_LOAD_IMM_I2(struct v3d_compile *c, uint32_t val)
{
        return vir_emit_def(c, vir_inst(QOP_LOAD_IMM_I2, c->undef,
                                        vir_reg(QFILE_LOAD_IMM, val),
                                        c->undef));
}
*/
static inline struct qinst *
vir_BRANCH(struct v3d_compile *c, enum v3d_qpu_branch_cond cond)
{
        /* The actual uniform_data value will be set at scheduling time */
        return vir_emit_nondef(c, vir_branch_inst(c, cond));
}
#define vir_for_each_block(block, c)                                     \
        list_for_each_entry(struct qblock, block, &c->blocks, link)

#define vir_for_each_block_rev(block, c)                                 \
        list_for_each_entry_rev(struct qblock, block, &c->blocks, link)

/* Loop over the non-NULL members of the successors array. */
#define vir_for_each_successor(succ, block)                              \
        for (struct qblock *succ = block->successors[0];                 \
             succ != NULL;                                               \
             succ = (succ == block->successors[1] ? NULL :               \
                     block->successors[1]))

#define vir_for_each_inst(inst, block)                                   \
        list_for_each_entry(struct qinst, inst, &block->instructions, link)

#define vir_for_each_inst_rev(inst, block)                               \
        list_for_each_entry_rev(struct qinst, inst, &block->instructions, link)

#define vir_for_each_inst_safe(inst, block)                              \
        list_for_each_entry_safe(struct qinst, inst, &block->instructions, link)

#define vir_for_each_inst_inorder(inst, c)                               \
        vir_for_each_block(_block, c)                                    \
                vir_for_each_inst(inst, _block)

#define vir_for_each_inst_inorder_safe(inst, c)                          \
        vir_for_each_block(_block, c)                                    \
                vir_for_each_inst_safe(inst, _block)
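
/*
 * Usage sketch (some_condition() is a hypothetical predicate, shown only to
 * illustrate the iteration macros): walking every instruction in emission
 * order while removing some of them safely:
 *
 *     vir_for_each_block(block, c) {
 *             vir_for_each_inst_safe(inst, block) {
 *                     if (some_condition(inst))
 *                             vir_remove_instruction(c, inst);
 *             }
 *     }
 */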
#endif /* V3D_COMPILER_H */