/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef VC4_QIR_H
#define VC4_QIR_H

#include "util/macros.h"
#include "compiler/nir/nir.h"
#include "util/list.h"
#include "util/u_math.h"

#include "vc4_screen.h"
#include "vc4_qpu_defines.h"
#include "kernel/vc4_packet.h"
#include "pipe/p_state.h"

enum qfile {
        QFILE_NULL,
        QFILE_TEMP,
        QFILE_VARY,
        QFILE_UNIF,
        QFILE_VPM,
        QFILE_TLB_COLOR_WRITE,
        QFILE_TLB_COLOR_WRITE_MS,
        QFILE_TLB_Z_WRITE,
        QFILE_TLB_STENCIL_SETUP,

        /* If tex_s is written on its own without preceding t/r/b setup, it's
         * a direct memory access using the input value, without the sideband
         * uniform load.  We represent these in QIR as a separate write
         * destination so we can tell if the sideband uniform is present.
         */
        QFILE_TEX_S_DIRECT,
        QFILE_TEX_S,
        QFILE_TEX_T,
        QFILE_TEX_R,
        QFILE_TEX_B,

        /* Payload registers that aren't in the physical register file, so we
         * can just use the corresponding qpu_reg at qpu_emit time.
         */
        QFILE_FRAG_X,
        QFILE_FRAG_Y,
        QFILE_FRAG_REV_FLAG,
        QFILE_QPU_ELEMENT,

        /**
         * Stores an immediate value in the index field that will be used
         * directly by qpu_load_imm().
         */
        QFILE_LOAD_IMM,

        /**
         * Stores an immediate value in the index field that can be turned
         * into a small immediate field by qpu_encode_small_immediate().
         */
        QFILE_SMALL_IMM,
};

struct qreg {
        enum qfile file;
        uint32_t index;
        int pack;
};

static inline struct qreg qir_reg(enum qfile file, uint32_t index)
{
        return (struct qreg){file, index};
}

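/* Usage sketch (illustrative, not part of the original header): build a
 * register reference by hand, e.g. the first VPM slot that qir_VPM_WRITE()
 * below targets:
 *
 *     struct qreg vpm0 = qir_reg(QFILE_VPM, 0);
 */
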
enum qop {
        /**
         * Signal of texture read being necessary and then reading r4 into
         * the destination
         */
        QOP_TEX_RESULT,

        /* Insert the signal for switching threads in a threaded fragment
         * shader.  No value can be live in an accumulator across a thrsw.
         *
         * At the QPU level, this will have several delay slots before the
         * switch happens.  Those slots are the responsibility of the
         * scheduler.
         */
        QOP_THRSW,

        /* 32-bit immediate loaded to each SIMD channel */
        QOP_LOAD_IMM,

        /* 32-bit immediate divided into 16 2-bit unsigned int values and
         * loaded to each corresponding SIMD channel.
         */
        QOP_LOAD_IMM_U2,

        /* 32-bit immediate divided into 16 2-bit signed int values and
         * loaded to each corresponding SIMD channel.
         */
        QOP_LOAD_IMM_I2,

        /* Jumps to block->successors[0] if the qinst->cond (as a
         * QPU_COND_BRANCH_*) passes, or block->successors[1] if not.  Note
         * that block->successors[1] may be unset if the condition is ALWAYS.
         */
        QOP_BRANCH,

        /* Emits an ADD from src[0] to src[1], where src[0] must be a
         * QOP_LOAD_IMM result and src[1] is a QUNIFORM_UNIFORMS_ADDRESS,
         * required by the kernel as part of its branch validation.
         */
        QOP_UNIFORMS_RESET,
};

struct queued_qpu_inst {
        struct list_head link;
        uint64_t inst;
};

struct qinst {
        struct list_head link;

        enum qop op;
        struct qreg dst;
        struct qreg src[3];
        bool sf;
        bool cond_is_exec_mask;
        uint8_t cond;
};

enum qstage {
        /**
         * Coordinate shader, runs during binning, before the VS, and just
         * computes the position.
         */
        QSTAGE_COORD,
        QSTAGE_VERT,
        QSTAGE_FRAG,
};

enum quniform_contents {
        /**
         * Indicates that a constant 32-bit value is copied from the program's
         * uniform contents.
         */
        QUNIFORM_CONSTANT,

        /**
         * Indicates that the program's uniform contents are used as an index
         * into the GL uniform storage.
         */
        QUNIFORM_UNIFORM,

        /**
         * Scaling factors from clip coordinates to offsets relative to the
         * viewport center.
         *
         * This is used by the coordinate and vertex shaders to produce the
         * 32-bit entry consisting of 2 16-bit fields with 12.4 signed fixed
         * point offsets from the viewport center.
         */
        QUNIFORM_VIEWPORT_X_SCALE,
        QUNIFORM_VIEWPORT_Y_SCALE,

        QUNIFORM_VIEWPORT_Z_OFFSET,
        QUNIFORM_VIEWPORT_Z_SCALE,

        QUNIFORM_USER_CLIP_PLANE,

        /**
         * A reference to a texture config parameter 0 uniform.
         *
         * This is a uniform implicitly loaded with a QPU_W_TMU* write, which
         * defines texture type, miplevels, and such.  It will be found as a
         * parameter to the first QOP_TEX_[STRB] instruction in a sequence.
         */
        QUNIFORM_TEXTURE_CONFIG_P0,

        /**
         * A reference to a texture config parameter 1 uniform.
         *
         * This is a uniform implicitly loaded with a QPU_W_TMU* write, which
         * defines texture width, height, filters, and wrap modes.  It will be
         * found as a parameter to the second QOP_TEX_[STRB] instruction in a
         * sequence.
         */
        QUNIFORM_TEXTURE_CONFIG_P1,

        /** A reference to a texture config parameter 2 cubemap stride uniform */
        QUNIFORM_TEXTURE_CONFIG_P2,

        QUNIFORM_TEXTURE_FIRST_LEVEL,

        QUNIFORM_TEXTURE_MSAA_ADDR,

        QUNIFORM_TEXRECT_SCALE_X,
        QUNIFORM_TEXRECT_SCALE_Y,

        QUNIFORM_TEXTURE_BORDER_COLOR,

        QUNIFORM_BLEND_CONST_COLOR_X,
        QUNIFORM_BLEND_CONST_COLOR_Y,
        QUNIFORM_BLEND_CONST_COLOR_Z,
        QUNIFORM_BLEND_CONST_COLOR_W,
        QUNIFORM_BLEND_CONST_COLOR_RGBA,
        QUNIFORM_BLEND_CONST_COLOR_AAAA,

        QUNIFORM_STENCIL,

        QUNIFORM_ALPHA_REF,
        QUNIFORM_SAMPLE_MASK,

        /* Placeholder uniform that will be updated by the kernel when used by
         * an instruction writing to QPU_W_UNIFORMS_ADDRESS.
         */
        QUNIFORM_UNIFORMS_ADDRESS,
};

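/* Each shader uniform is recorded as a (contents, data) pair in the
 * c->uniform_contents[] / c->uniform_data[] arrays of struct vc4_compile
 * below, in stream order.  A sketch of how a consumer might walk the stream
 * ("emit_constant" is a hypothetical helper, not part of this driver):
 *
 *     for (uint32_t i = 0; i < c->num_uniforms; i++) {
 *             if (c->uniform_contents[i] == QUNIFORM_CONSTANT)
 *                     emit_constant(c->uniform_data[i]);
 *     }
 */
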
struct vc4_varying_slot {
        uint8_t slot;
        uint8_t swizzle;
};

struct vc4_compiler_ubo_range {
        /**
         * offset in bytes from the start of the ubo where this range is
         * uploaded.
         *
         * Only set once used is set.
         */
        uint32_t dst_offset;

        /**
         * offset in bytes from the start of the gallium uniforms where the
         * data comes from.
         */
        uint32_t src_offset;

        /** size in bytes of this ubo range */
        uint32_t size;

        /**
         * Set if this range is used by the shader for indirect uniforms
         * access.
         */
        bool used;
};

struct vc4_key {
        struct vc4_uncompiled_shader *shader_state;
        struct {
                enum pipe_format format;
                uint8_t swizzle[4];
                union {
                        struct {
                                unsigned compare_mode:1;
                                unsigned compare_func:3;
                                unsigned wrap_s:3;
                                unsigned wrap_t:3;
                                bool force_first_level:1;
                        };
                        struct {
                                uint16_t msaa_width, msaa_height;
                        };
                };
        } tex[VC4_MAX_TEXTURE_SAMPLERS];
};

struct vc4_fs_key {
        struct vc4_key base;
        enum pipe_format color_format;
        bool depth_enabled;
        bool stencil_enabled;
        bool stencil_twoside;
        bool stencil_full_writemasks;
        bool point_coord_upper_left;
        bool sample_coverage;
        bool sample_alpha_to_coverage;
        bool sample_alpha_to_one;
        uint8_t alpha_test_func;
        uint8_t logicop_func;
        uint32_t point_sprite_mask;

        struct pipe_rt_blend_state blend;
};

struct vc4_vs_key {
        struct vc4_key base;
        const struct vc4_fs_inputs *fs_inputs;
        enum pipe_format attr_formats[8];
        bool is_coord;
        bool per_vertex_point_size;
        bool clamp_color;
};

/** A basic block of QIR instructions. */
struct qblock {
        struct list_head link;

        struct list_head instructions;
        struct list_head qpu_inst_list;

        struct set *predecessors;
        struct qblock *successors[2];

        int index;

        /* Instruction IPs for the first and last instruction of the block.
         * Set by vc4_qpu_schedule.c.
         */
        uint32_t start_qpu_ip;
        uint32_t end_qpu_ip;

        /* Instruction IP for the branch instruction of the block.  Set by
         * vc4_qpu_schedule.c.
         */
        uint32_t branch_qpu_ip;

        /** @{ used by vc4_qir_live_variables.c */
        BITSET_WORD *def;
        BITSET_WORD *use;
        BITSET_WORD *live_in;
        BITSET_WORD *live_out;
        int start_ip, end_ip;
        /** @} */
};

struct vc4_compile {
        struct vc4_context *vc4;
        nir_shader *s;
        nir_function_impl *impl;
        struct exec_list *cf_node_list;

        /**
         * Mapping from nir_register * or nir_ssa_def * to array of struct
         * qreg for the values.
         */
        struct hash_table *def_ht;

        /* For each temp, the instruction generating its value. */
        struct qinst **defs;
        uint32_t defs_array_size;

        /**
         * Inputs to the shader, arranged by TGSI declaration order.
         *
         * Not all fragment shader QFILE_VARY reads are present in this array.
         */
        struct qreg *inputs;
        struct qreg *outputs;
        bool msaa_per_sample_output;
        struct qreg color_reads[VC4_MAX_SAMPLES];
        struct qreg sample_colors[VC4_MAX_SAMPLES];
        uint32_t inputs_array_size;
        uint32_t outputs_array_size;
        uint32_t uniforms_array_size;

        struct vc4_compiler_ubo_range *ubo_ranges;
        uint32_t ubo_ranges_array_size;
        /** Number of uniform areas declared in ubo_ranges. */
        uint32_t num_uniform_ranges;
        /** Number of uniform areas used for indirect addressed loads. */
        uint32_t num_ubo_ranges;
        uint32_t next_ubo_dst_offset;

        /* State for whether we're executing on each channel currently.  0 if
         * yes, otherwise a block number + 1 that the channel jumped to.
         */
        struct qreg execute;

        struct qreg line_x, point_x, point_y;
        /** boolean (~0 -> true) if the fragment has been discarded. */
        struct qreg discard;
        struct qreg payload_FRAG_Z;
        struct qreg payload_FRAG_W;

        uint8_t vattr_sizes[8];

        /**
         * Array of the VARYING_SLOT_* of all FS QFILE_VARY reads.
         *
         * This includes those that aren't part of the VPM varyings, like
         * point/line coordinates.
         */
        struct vc4_varying_slot *input_slots;
        uint32_t num_input_slots;
        uint32_t input_slots_array_size;

        /**
         * An entry per outputs[] in the VS indicating what the VARYING_SLOT_*
         * of the output is.  Used to emit from the VS in the order that the
         * FS needs.
         */
        struct vc4_varying_slot *output_slots;

        struct pipe_shader_state *shader_state;
        struct vc4_key *key;
        struct vc4_fs_key *fs_key;
        struct vc4_vs_key *vs_key;

        /* Live ranges of temps. */
        int *temp_start, *temp_end;

        uint32_t *uniform_data;
        enum quniform_contents *uniform_contents;
        uint32_t uniform_array_size;
        uint32_t num_uniforms;
        uint32_t num_outputs;
        uint32_t num_texture_samples;
        uint32_t output_position_index;
        uint32_t output_color_index;
        uint32_t output_point_size_index;
        uint32_t output_sample_mask_index;

        struct qreg undef;
        enum qstage stage;
        uint32_t num_temps;

        struct list_head blocks;
        int next_block_index;
        struct qblock *cur_block;
        struct qblock *loop_cont_block;
        struct qblock *loop_break_block;
        struct qblock *last_top_block;

        struct list_head qpu_inst_list;

        /* Pre-QPU-scheduled instruction containing the last THRSW */
        uint64_t *last_thrsw;

        uint64_t *qpu_insts;
        uint32_t qpu_inst_count;
        uint32_t qpu_inst_size;
        uint32_t num_inputs;

        /**
         * Number of inputs from num_inputs remaining to be queued to the read
         * FIFO in the VS/CS.
         */
        uint32_t num_inputs_remaining;

        /* Number of inputs currently in the read FIFO for the VS/CS */
        uint32_t num_inputs_in_fifo;

        /** Next offset in the VPM to read from in the VS/CS */
        uint32_t vpm_read_offset;

        /* Set to compile program in threaded FS mode, where SIG_THREAD_SWITCH
         * is used to hide texturing latency at the cost of limiting ourselves
         * to the bottom half of physical reg space.
         */
        bool fs_threaded;

        bool last_thrsw_at_top_level;

        bool failed;
};

/* Special nir_load_input intrinsic index for loading the current TLB
 * color.
 */
#define VC4_NIR_TLB_COLOR_READ_INPUT            2000000000

#define VC4_NIR_MS_MASK_OUTPUT                  2000000000

struct vc4_compile *qir_compile_init(void);
void qir_compile_destroy(struct vc4_compile *c);
struct qblock *qir_new_block(struct vc4_compile *c);
void qir_set_emit_block(struct vc4_compile *c, struct qblock *block);
void qir_link_blocks(struct qblock *predecessor, struct qblock *successor);
struct qblock *qir_entry_block(struct vc4_compile *c);
struct qblock *qir_exit_block(struct vc4_compile *c);
struct qinst *qir_inst(enum qop op, struct qreg dst,
                       struct qreg src0, struct qreg src1);
void qir_remove_instruction(struct vc4_compile *c, struct qinst *qinst);
struct qreg qir_uniform(struct vc4_compile *c,
                        enum quniform_contents contents,
                        uint32_t data);

void qir_schedule_instructions(struct vc4_compile *c);
void qir_reorder_uniforms(struct vc4_compile *c);
void qir_emit_uniform_stream_resets(struct vc4_compile *c);

struct qreg qir_emit_def(struct vc4_compile *c, struct qinst *inst);
struct qinst *qir_emit_nondef(struct vc4_compile *c, struct qinst *inst);

struct qreg qir_get_temp(struct vc4_compile *c);
void qir_calculate_live_intervals(struct vc4_compile *c);
int qir_get_nsrc(struct qinst *inst);
int qir_get_non_sideband_nsrc(struct qinst *inst);
int qir_get_tex_uniform_src(struct qinst *inst);
bool qir_reg_equals(struct qreg a, struct qreg b);
bool qir_has_side_effects(struct vc4_compile *c, struct qinst *inst);
bool qir_has_side_effect_reads(struct vc4_compile *c, struct qinst *inst);
bool qir_has_uniform_read(struct qinst *inst);
bool qir_is_mul(struct qinst *inst);
bool qir_is_raw_mov(struct qinst *inst);
bool qir_is_tex(struct qinst *inst);
bool qir_has_implicit_tex_uniform(struct qinst *inst);
bool qir_is_float_input(struct qinst *inst);
bool qir_depends_on_flags(struct qinst *inst);
bool qir_writes_r4(struct qinst *inst);
struct qreg qir_follow_movs(struct vc4_compile *c, struct qreg reg);
uint8_t qir_channels_written(struct qinst *inst);

void qir_dump(struct vc4_compile *c);
void qir_dump_inst(struct vc4_compile *c, struct qinst *inst);
const char *qir_get_stage_name(enum qstage stage);

void qir_validate(struct vc4_compile *c);

void qir_optimize(struct vc4_compile *c);
bool qir_opt_algebraic(struct vc4_compile *c);
bool qir_opt_coalesce_ff_writes(struct vc4_compile *c);
bool qir_opt_constant_folding(struct vc4_compile *c);
bool qir_opt_copy_propagation(struct vc4_compile *c);
bool qir_opt_dead_code(struct vc4_compile *c);
bool qir_opt_peephole_sf(struct vc4_compile *c);
bool qir_opt_small_immediates(struct vc4_compile *c);
bool qir_opt_vpm(struct vc4_compile *c);
void vc4_nir_lower_blend(nir_shader *s, struct vc4_compile *c);
void vc4_nir_lower_io(nir_shader *s, struct vc4_compile *c);
nir_ssa_def *vc4_nir_get_swizzled_channel(struct nir_builder *b,
                                          nir_ssa_def **srcs, int swiz);
void vc4_nir_lower_txf_ms(nir_shader *s, struct vc4_compile *c);
void qir_lower_uniforms(struct vc4_compile *c);

uint32_t qpu_schedule_instructions(struct vc4_compile *c);

void qir_SF(struct vc4_compile *c, struct qreg src);

static inline struct qreg
qir_uniform_ui(struct vc4_compile *c, uint32_t ui)
{
        return qir_uniform(c, QUNIFORM_CONSTANT, ui);
}

static inline struct qreg
qir_uniform_f(struct vc4_compile *c, float f)
{
        return qir_uniform(c, QUNIFORM_CONSTANT, fui(f));
}

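/* Example (sketch): wrap compile-time constants as uniform-stream reads;
 * the variable names are illustrative only:
 *
 *     struct qreg half = qir_uniform_f(c, 0.5f);
 *     struct qreg mask = qir_uniform_ui(c, 0xff);
 */
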
#define QIR_ALU0(name) \
static inline struct qreg \
qir_##name(struct vc4_compile *c) \
{ \
        return qir_emit_def(c, qir_inst(QOP_##name, c->undef, \
                                        c->undef, c->undef)); \
} \
static inline struct qinst * \
qir_##name##_dest(struct vc4_compile *c, struct qreg dest) \
{ \
        return qir_emit_nondef(c, qir_inst(QOP_##name, dest, \
                                           c->undef, c->undef)); \
}

#define QIR_ALU1(name) \
static inline struct qreg \
qir_##name(struct vc4_compile *c, struct qreg a) \
{ \
        return qir_emit_def(c, qir_inst(QOP_##name, c->undef, \
                                        a, c->undef)); \
} \
static inline struct qinst * \
qir_##name##_dest(struct vc4_compile *c, struct qreg dest, \
                  struct qreg a) \
{ \
        return qir_emit_nondef(c, qir_inst(QOP_##name, dest, a, \
                                           c->undef)); \
}

#define QIR_ALU2(name) \
static inline struct qreg \
qir_##name(struct vc4_compile *c, struct qreg a, struct qreg b) \
{ \
        return qir_emit_def(c, qir_inst(QOP_##name, c->undef, a, b)); \
} \
static inline struct qinst * \
qir_##name##_dest(struct vc4_compile *c, struct qreg dest, \
                  struct qreg a, struct qreg b) \
{ \
        return qir_emit_nondef(c, qir_inst(QOP_##name, dest, a, b)); \
}

#define QIR_NODST_1(name) \
static inline struct qinst * \
qir_##name(struct vc4_compile *c, struct qreg a) \
{ \
        return qir_emit_nondef(c, qir_inst(QOP_##name, c->undef, \
                                           a, c->undef)); \
}

#define QIR_NODST_2(name) \
static inline struct qinst * \
qir_##name(struct vc4_compile *c, struct qreg a, struct qreg b) \
{ \
        return qir_emit_nondef(c, qir_inst(QOP_##name, c->undef, \
                                           a, b)); \
}

#define QIR_PAYLOAD(name) \
static inline struct qreg \
qir_##name(struct vc4_compile *c) \
{ \
        struct qreg *payload = &c->payload_##name; \
        if (payload->file != QFILE_NULL) \
                return *payload; \
        *payload = qir_get_temp(c); \
        struct qinst *inst = qir_inst(QOP_##name, *payload, \
                                      c->undef, c->undef); \
        struct qblock *entry = qir_entry_block(c); \
        list_add(&inst->link, &entry->instructions); \
        c->defs[payload->index] = inst; \
        return *payload; \
}

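/* Sketch of what an instantiation provides: QIR_PAYLOAD(FRAG_Z) (suggested
 * by the payload_FRAG_Z field above, and assuming a matching QOP_FRAG_Z
 * opcode in the elided parts of enum qop) defines qir_FRAG_Z(c), which
 * lazily allocates a temp, emits the payload read at the top of the entry
 * block, and caches the result in c->payload_FRAG_Z for later calls.
 */
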
QIR_ALU0(TLB_COLOR_READ)

static inline struct qreg
qir_SEL(struct vc4_compile *c, uint8_t cond, struct qreg src0, struct qreg src1)
{
        struct qreg t = qir_get_temp(c);
        qir_MOV_dest(c, t, src1);
        qir_MOV_dest(c, t, src0)->cond = cond;
        return t;
}

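/* Example (sketch, assuming flags were set by a preceding qir_SF() and that
 * qir_FSUB comes from one of the elided QIR_ALU2() instantiations): pick a
 * per-channel minimum using the negative-flag condition:
 *
 *     qir_SF(c, qir_FSUB(c, a, b));
 *     struct qreg min = qir_SEL(c, QPU_COND_NS, a, b);
 */
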
static inline struct qreg
qir_UNPACK_8_F(struct vc4_compile *c, struct qreg src, int i)
{
        struct qreg t = qir_FMOV(c, src);
        c->defs[t.index]->src[0].pack = QPU_UNPACK_8A + i;
        return t;
}

static inline struct qreg
qir_UNPACK_8_I(struct vc4_compile *c, struct qreg src, int i)
{
        struct qreg t = qir_MOV(c, src);
        c->defs[t.index]->src[0].pack = QPU_UNPACK_8A + i;
        return t;
}

static inline struct qreg
qir_UNPACK_16_F(struct vc4_compile *c, struct qreg src, int i)
{
        struct qreg t = qir_FMOV(c, src);
        c->defs[t.index]->src[0].pack = QPU_UNPACK_16A + i;
        return t;
}

static inline struct qreg
qir_UNPACK_16_I(struct vc4_compile *c, struct qreg src, int i)
{
        struct qreg t = qir_MOV(c, src);
        c->defs[t.index]->src[0].pack = QPU_UNPACK_16A + i;
        return t;
}

static inline void
qir_PACK_8_F(struct vc4_compile *c, struct qreg dest, struct qreg val, int chan)
{
        dest.pack = QPU_PACK_MUL_8A + chan;
        qir_emit_nondef(c, qir_inst(QOP_MMOV, dest, val, c->undef));
}

static inline struct qreg
qir_PACK_8888_F(struct vc4_compile *c, struct qreg val)
{
        struct qreg dest = qir_MMOV(c, val);
        c->defs[dest.index]->dst.pack = QPU_PACK_MUL_8888;
        return dest;
}

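/* pow(x, y) is lowered below using the identity pow(x, y) = 2^(y * log2(x)),
 * with qir_EXP2/qir_LOG2/qir_FMUL coming from the elided ALU instantiations.
 */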
static inline struct qreg
qir_POW(struct vc4_compile *c, struct qreg x, struct qreg y)
{
        return qir_EXP2(c, qir_FMUL(c,
                                    y,
                                    qir_LOG2(c, x)));
}

static inline void
qir_VPM_WRITE(struct vc4_compile *c, struct qreg val)
{
        qir_MOV_dest(c, qir_reg(QFILE_VPM, 0), val);
}

static inline struct qreg
qir_LOAD_IMM(struct vc4_compile *c, uint32_t val)
{
        return qir_emit_def(c, qir_inst(QOP_LOAD_IMM, c->undef,
                                        qir_reg(QFILE_LOAD_IMM, val), c->undef));
}

static inline struct qreg
qir_LOAD_IMM_U2(struct vc4_compile *c, uint32_t val)
{
        return qir_emit_def(c, qir_inst(QOP_LOAD_IMM_U2, c->undef,
                                        qir_reg(QFILE_LOAD_IMM, val),
                                        c->undef));
}

static inline struct qreg
qir_LOAD_IMM_I2(struct vc4_compile *c, uint32_t val)
{
        return qir_emit_def(c, qir_inst(QOP_LOAD_IMM_I2, c->undef,
                                        qir_reg(QFILE_LOAD_IMM, val),
                                        c->undef));
}

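/* Per-element load sketch: per the enum qop comments above, with
 * QOP_LOAD_IMM_U2/_I2 each SIMD channel i receives bits [2*i+1 : 2*i] of
 * val, i.e. (val >> (2 * i)) & 3, sign-extended in the _I2 case.
 */
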
/** Shifts the multiply output to the right by rot channels */
static inline struct qreg
qir_ROT_MUL(struct vc4_compile *c, struct qreg val, uint32_t rot)
{
        return qir_emit_def(c, qir_inst(QOP_ROT_MUL, c->undef,
                                        val,
                                        qir_reg(QFILE_LOAD_IMM,
                                                QPU_SMALL_IMM_MUL_ROT + rot)));
}

static inline struct qinst *
qir_MOV_cond(struct vc4_compile *c, uint8_t cond,
             struct qreg dest, struct qreg src)
{
        struct qinst *mov = qir_MOV_dest(c, dest, src);
        mov->cond = cond;
        return mov;
}

static inline struct qinst *
qir_BRANCH(struct vc4_compile *c, uint8_t cond)
{
        struct qinst *inst = qir_inst(QOP_BRANCH, c->undef, c->undef, c->undef);
        inst->cond = cond;
        qir_emit_nondef(c, inst);
        return inst;
}

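/* Example (sketch): emit a conditional branch and then wire up the CFG
 * edges that QOP_BRANCH consults; assuming qir_link_blocks() fills
 * successors[0] (the taken path) on its first call and successors[1] on the
 * second, with cond being a QPU_COND_BRANCH_* value:
 *
 *     qir_BRANCH(c, cond);
 *     qir_link_blocks(c->cur_block, taken_block);
 *     qir_link_blocks(c->cur_block, fallthrough_block);
 */
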
#define qir_for_each_block(block, c) \
        list_for_each_entry(struct qblock, block, &c->blocks, link)

#define qir_for_each_block_rev(block, c) \
        list_for_each_entry_rev(struct qblock, block, &c->blocks, link)

/* Loop over the non-NULL members of the successors array. */
#define qir_for_each_successor(succ, block) \
        for (struct qblock *succ = block->successors[0]; \
             succ != NULL; \
             succ = (succ == block->successors[1] ? NULL : \
                     block->successors[1]))

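/* Example (sketch): count the QIR instructions across a whole shader using
 * the iteration macros defined here:
 *
 *     uint32_t n = 0;
 *     qir_for_each_block(block, c)
 *             qir_for_each_inst(inst, block)
 *                     n++;
 */
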
#define qir_for_each_inst(inst, block) \
        list_for_each_entry(struct qinst, inst, &block->instructions, link)

#define qir_for_each_inst_rev(inst, block) \
        list_for_each_entry_rev(struct qinst, inst, &block->instructions, link)

#define qir_for_each_inst_safe(inst, block) \
        list_for_each_entry_safe(struct qinst, inst, &block->instructions, link)

#define qir_for_each_inst_inorder(inst, c) \
        qir_for_each_block(_block, c) \
                qir_for_each_inst_safe(inst, _block)

#endif /* VC4_QIR_H */
896 #endif /* VC4_QIR_H */