X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fvc4%2Fvc4_qir.h;h=4d8bf60cf4401193924a18155c6a71503fb55ec3;hb=HEAD;hp=fabdf10e24d28d5d334aa87a0c149464893930fc;hpb=a59da513d3229c883809ac2088c9612abcec1470;p=mesa.git diff --git a/src/gallium/drivers/vc4/vc4_qir.h b/src/gallium/drivers/vc4/vc4_qir.h index fabdf10e24d..4d8bf60cf44 100644 --- a/src/gallium/drivers/vc4/vc4_qir.h +++ b/src/gallium/drivers/vc4/vc4_qir.h @@ -55,12 +55,25 @@ enum qfile { QFILE_TLB_Z_WRITE, QFILE_TLB_STENCIL_SETUP, + /* If tex_s is written on its own without preceding t/r/b setup, it's + * a direct memory access using the input value, without the sideband + * uniform load. We represent these in QIR as a separate write + * destination so we can tell if the sideband uniform is present. + */ + QFILE_TEX_S_DIRECT, + + QFILE_TEX_S, + QFILE_TEX_T, + QFILE_TEX_R, + QFILE_TEX_B, + /* Payload registers that aren't in the physical register file, so we * can just use the corresponding qpu_reg at qpu_emit time. */ QFILE_FRAG_X, QFILE_FRAG_Y, QFILE_FRAG_REV_FLAG, + QFILE_QPU_ELEMENT, /** * Stores an immediate value in the index field that will be used @@ -110,6 +123,7 @@ enum qop { QOP_SHR, QOP_ASR, QOP_MIN, + QOP_MIN_NOIMM, QOP_MAX, QOP_AND, QOP_OR, @@ -131,37 +145,47 @@ enum qop { QOP_FRAG_Z, QOP_FRAG_W, - /** Texture x coordinate parameter write */ - QOP_TEX_S, - /** Texture y coordinate parameter write */ - QOP_TEX_T, - /** Texture border color parameter or cube map z coordinate write */ - QOP_TEX_R, - /** Texture LOD bias parameter write */ - QOP_TEX_B, - - /** - * Texture-unit 4-byte read with address provided direct in S - * cooordinate. - * - * The first operand is the offset from the start of the UBO, and the - * second is the uniform that has the UBO's base pointer. - */ - QOP_TEX_DIRECT, - /** * Signal of texture read being necessary and then reading r4 into * the destination */ QOP_TEX_RESULT, + /** + * Insert the signal for switching threads in a threaded fragment + * shader. No value can be live in an accumulator across a thrsw. + * + * At the QPU level, this will have several delay slots before the + * switch happens. Those slots are the responsibility of the + * scheduler. + */ + QOP_THRSW, + + /* 32-bit immediate loaded to each SIMD channel */ QOP_LOAD_IMM, + /* 32-bit immediate divided into 16 2-bit unsigned int values and + * loaded to each corresponding SIMD channel. + */ + QOP_LOAD_IMM_U2, + /* 32-bit immediate divided into 16 2-bit signed int values and + * loaded to each corresponding SIMD channel. + */ + QOP_LOAD_IMM_I2, + + QOP_ROT_MUL, + /* Jumps to block->successor[0] if the qinst->cond (as a * QPU_COND_BRANCH_*) passes, or block->successor[1] if not. Note * that block->successor[1] may be unset if the condition is ALWAYS. */ QOP_BRANCH, + + /* Emits an ADD from src[0] to src[1], where src[0] must be a + * QOP_LOAD_IMM result and src[1] is a QUNIFORM_UNIFORMS_ADDRESS, + * required by the kernel as part of its branch validation. 
+	 */
+	QOP_UNIFORMS_RESET,
 };
 
 struct queued_qpu_inst {
@@ -174,8 +198,9 @@ struct qinst {
 	enum qop op;
 
 	struct qreg dst;
-	struct qreg *src;
+	struct qreg src[3];
 	bool sf;
+	bool cond_is_exec_mask;
 	uint8_t cond;
 };
 
@@ -240,9 +265,12 @@ enum quniform_contents {
 	/** A reference to a texture config parameter 2 cubemap stride uniform */
 	QUNIFORM_TEXTURE_CONFIG_P2,
 
+	QUNIFORM_TEXTURE_FIRST_LEVEL,
+
 	QUNIFORM_TEXTURE_MSAA_ADDR,
 
-	QUNIFORM_UBO_ADDR,
+	QUNIFORM_UBO0_ADDR,
+	QUNIFORM_UBO1_ADDR,
 
 	QUNIFORM_TEXRECT_SCALE_X,
 	QUNIFORM_TEXRECT_SCALE_Y,
@@ -260,6 +288,11 @@ enum quniform_contents {
 
 	QUNIFORM_ALPHA_REF,
 	QUNIFORM_SAMPLE_MASK,
+
+	/* Placeholder uniform that will be updated by the kernel when used by
+	 * an instruction writing to QPU_W_UNIFORMS_ADDRESS.
+	 */
+	QUNIFORM_UNIFORMS_ADDRESS,
 };
 
 struct vc4_varying_slot {
@@ -267,31 +300,6 @@ struct vc4_varying_slot {
 	uint8_t swizzle;
 };
 
-struct vc4_compiler_ubo_range {
-	/**
-	 * offset in bytes from the start of the ubo where this range is
-	 * uploaded.
-	 *
-	 * Only set once used is set.
-	 */
-	uint32_t dst_offset;
-
-	/**
-	 * offset in bytes from the start of the gallium uniforms where the
-	 * data comes from.
-	 */
-	uint32_t src_offset;
-
-	/** size in bytes of this ubo range */
-	uint32_t size;
-
-	/**
-	 * Set if this range is used by the shader for indirect uniforms
-	 * access.
-	 */
-	bool used;
-};
-
 struct vc4_key {
 	struct vc4_uncompiled_shader *shader_state;
 	struct {
@@ -303,6 +311,7 @@ struct vc4_key {
 		unsigned compare_func:3;
 		unsigned wrap_s:3;
 		unsigned wrap_t:3;
+		bool force_first_level:1;
 	};
 	struct {
 		uint16_t msaa_width, msaa_height;
@@ -321,7 +330,6 @@ struct vc4_fs_key {
 	bool stencil_full_writemasks;
 	bool is_points;
 	bool is_lines;
-	bool alpha_test;
 	bool point_coord_upper_left;
 	bool light_twoside;
 	bool msaa;
@@ -331,6 +339,7 @@ struct vc4_fs_key {
 	uint8_t alpha_test_func;
 	uint8_t logicop_func;
 	uint32_t point_sprite_mask;
+	uint32_t ubo_1_size;
 
 	struct pipe_rt_blend_state blend;
 };
@@ -338,12 +347,7 @@ struct vc4_fs_key {
 struct vc4_vs_key {
 	struct vc4_key base;
 
-	/**
-	 * This is a proxy for the array of FS input semantics, which is
-	 * larger than we would want to put in the key.
-	 */
-	uint64_t compiled_fs_id;
-
+	const struct vc4_fs_inputs *fs_inputs;
 	enum pipe_format attr_formats[8];
 	bool is_coord;
 	bool per_vertex_point_size;
@@ -362,6 +366,17 @@ struct qblock {
 
 	int index;
 
+	/* Instruction IPs for the first and last instruction of the block.
+	 * Set by vc4_qpu_schedule.c.
+	 */
+	uint32_t start_qpu_ip;
+	uint32_t end_qpu_ip;
+
+	/* Instruction IP for the branch instruction of the block.  Set by
+	 * vc4_qpu_schedule.c.
+	 */
+	uint32_t branch_qpu_ip;
+
 	/** @{ used by vc4_qir_live_variables.c */
 	BITSET_WORD *def;
 	BITSET_WORD *use;
@@ -401,20 +416,13 @@ struct vc4_compile {
 	uint32_t outputs_array_size;
 	uint32_t uniforms_array_size;
 
-	struct vc4_compiler_ubo_range *ubo_ranges;
-	uint32_t ubo_ranges_array_size;
-	/** Number of uniform areas declared in ubo_ranges. */
-	uint32_t num_uniform_ranges;
-	/** Number of uniform areas used for indirect addressed loads. */
-	uint32_t num_ubo_ranges;
-	uint32_t next_ubo_dst_offset;
-
 	/* State for whether we're executing on each channel currently.  0 if
 	 * yes, otherwise a block number + 1 that the channel jumped to.
 	 */
 	struct qreg execute;
 
 	struct qreg line_x, point_x, point_y;
+	/** boolean (~0 -> true) if the fragment has been discarded. */
 	struct qreg discard;
 	struct qreg payload_FRAG_Z;
 	struct qreg payload_FRAG_W;
@@ -466,16 +474,42 @@ struct vc4_compile {
 	struct qblock *cur_block;
 	struct qblock *loop_cont_block;
 	struct qblock *loop_break_block;
+	struct qblock *last_top_block;
 
 	struct list_head qpu_inst_list;
 
+	/* Pre-QPU-scheduled instruction containing the last THRSW */
+	uint64_t *last_thrsw;
+
 	uint64_t *qpu_insts;
 	uint32_t qpu_inst_count;
 	uint32_t qpu_inst_size;
 	uint32_t num_inputs;
 
+	/**
+	 * Number of inputs from num_inputs remaining to be queued to the read
+	 * FIFO in the VS/CS.
+	 */
+	uint32_t num_inputs_remaining;
+
+	/* Number of inputs currently in the read FIFO for the VS/CS */
+	uint32_t num_inputs_in_fifo;
+
+	/** Next offset in the VPM to read from in the VS/CS */
+	uint32_t vpm_read_offset;
+
 	uint32_t program_id;
 	uint32_t variant_id;
+
+	/* Set to compile program in threaded FS mode, where SIG_THREAD_SWITCH
+	 * is used to hide texturing latency at the cost of limiting ourselves
+	 * to the bottom half of physical reg space.
+	 */
+	bool fs_threaded;
+
+	bool last_thrsw_at_top_level;
+
+	bool failed;
 };
 
 /* Special nir_load_input intrinsic index for loading the current TLB
@@ -485,11 +519,6 @@ struct vc4_compile {
 
 #define VC4_NIR_MS_MASK_OUTPUT 2000000000
 
-/* Special offset for nir_load_uniform values to get a QUNIFORM_*
- * state-dependent value.
- */
-#define VC4_NIR_STATE_UNIFORM_OFFSET 1000000000
-
 struct vc4_compile *qir_compile_init(void);
 void qir_compile_destroy(struct vc4_compile *c);
 struct qblock *qir_new_block(struct vc4_compile *c);
@@ -499,30 +528,30 @@ struct qblock *qir_entry_block(struct vc4_compile *c);
 struct qblock *qir_exit_block(struct vc4_compile *c);
 struct qinst *qir_inst(enum qop op, struct qreg dst,
                        struct qreg src0, struct qreg src1);
-struct qinst *qir_inst4(enum qop op, struct qreg dst,
-                        struct qreg a,
-                        struct qreg b,
-                        struct qreg c,
-                        struct qreg d);
 void qir_remove_instruction(struct vc4_compile *c, struct qinst *qinst);
 struct qreg qir_uniform(struct vc4_compile *c,
                         enum quniform_contents contents,
                         uint32_t data);
 void qir_schedule_instructions(struct vc4_compile *c);
 void qir_reorder_uniforms(struct vc4_compile *c);
+void qir_emit_uniform_stream_resets(struct vc4_compile *c);
 
 struct qreg qir_emit_def(struct vc4_compile *c, struct qinst *inst);
 struct qinst *qir_emit_nondef(struct vc4_compile *c, struct qinst *inst);
 
 struct qreg qir_get_temp(struct vc4_compile *c);
 void qir_calculate_live_intervals(struct vc4_compile *c);
-int qir_get_op_nsrc(enum qop qop);
+int qir_get_nsrc(struct qinst *inst);
+int qir_get_non_sideband_nsrc(struct qinst *inst);
+int qir_get_tex_uniform_src(struct qinst *inst);
 bool qir_reg_equals(struct qreg a, struct qreg b);
 bool qir_has_side_effects(struct vc4_compile *c, struct qinst *inst);
 bool qir_has_side_effect_reads(struct vc4_compile *c, struct qinst *inst);
+bool qir_has_uniform_read(struct qinst *inst);
 bool qir_is_mul(struct qinst *inst);
 bool qir_is_raw_mov(struct qinst *inst);
 bool qir_is_tex(struct qinst *inst);
+bool qir_has_implicit_tex_uniform(struct qinst *inst);
 bool qir_is_float_input(struct qinst *inst);
 bool qir_depends_on_flags(struct qinst *inst);
 bool qir_writes_r4(struct qinst *inst);
@@ -531,12 +560,15 @@ uint8_t qir_channels_written(struct qinst *inst);
 
 void qir_dump(struct vc4_compile *c);
 void qir_dump_inst(struct vc4_compile *c, struct qinst *inst);
+char *qir_describe_uniform(enum quniform_contents contents, uint32_t data,
+                           const uint32_t *uniforms);
 const char *qir_get_stage_name(enum qstage stage);
 
 void qir_validate(struct vc4_compile *c);
 
 void qir_optimize(struct vc4_compile *c);
 bool qir_opt_algebraic(struct vc4_compile *c);
+bool qir_opt_coalesce_ff_writes(struct vc4_compile *c);
 bool qir_opt_constant_folding(struct vc4_compile *c);
 bool qir_opt_copy_propagation(struct vc4_compile *c);
 bool qir_opt_dead_code(struct vc4_compile *c);
@@ -545,8 +577,6 @@ bool qir_opt_small_immediates(struct vc4_compile *c);
 bool qir_opt_vpm(struct vc4_compile *c);
 void vc4_nir_lower_blend(nir_shader *s, struct vc4_compile *c);
 void vc4_nir_lower_io(nir_shader *s, struct vc4_compile *c);
-nir_ssa_def *vc4_nir_get_state_uniform(struct nir_builder *b,
-                                       enum quniform_contents contents);
 nir_ssa_def *vc4_nir_get_swizzled_channel(struct nir_builder *b,
                                           nir_ssa_def **srcs, int swiz);
 void vc4_nir_lower_txf_ms(nir_shader *s, struct vc4_compile *c);
@@ -667,6 +697,7 @@ QIR_ALU2(SHL)
 QIR_ALU2(SHR)
 QIR_ALU2(ASR)
 QIR_ALU2(MIN)
+QIR_ALU2(MIN_NOIMM)
 QIR_ALU2(MAX)
 QIR_ALU2(AND)
 QIR_ALU2(OR)
@@ -678,11 +709,6 @@ QIR_ALU1(RSQ)
 QIR_ALU1(EXP2)
 QIR_ALU1(LOG2)
 QIR_ALU1(VARY_ADD_C)
-QIR_NODST_2(TEX_S)
-QIR_NODST_2(TEX_T)
-QIR_NODST_2(TEX_R)
-QIR_NODST_2(TEX_B)
-QIR_NODST_2(TEX_DIRECT)
 QIR_PAYLOAD(FRAG_Z)
 QIR_PAYLOAD(FRAG_W)
 QIR_ALU0(TEX_RESULT)
@@ -693,10 +719,8 @@ static inline struct qreg
 qir_SEL(struct vc4_compile *c, uint8_t cond, struct qreg src0, struct qreg src1)
 {
 	struct qreg t = qir_get_temp(c);
-	struct qinst *a = qir_MOV_dest(c, t, src0);
-	struct qinst *b = qir_MOV_dest(c, t, src1);
-	a->cond = cond;
-	b->cond = qpu_cond_complement(cond);
+	qir_MOV_dest(c, t, src1);
+	qir_MOV_dest(c, t, src0)->cond = cond;
 
 	return t;
 }
@@ -769,11 +793,39 @@ qir_LOAD_IMM(struct vc4_compile *c, uint32_t val)
 	                                qir_reg(QFILE_LOAD_IMM, val),
 	                                c->undef));
 }
 
-static inline void
+static inline struct qreg
+qir_LOAD_IMM_U2(struct vc4_compile *c, uint32_t val)
+{
+	return qir_emit_def(c, qir_inst(QOP_LOAD_IMM_U2, c->undef,
+	                                qir_reg(QFILE_LOAD_IMM, val),
+	                                c->undef));
+}
+
+static inline struct qreg
+qir_LOAD_IMM_I2(struct vc4_compile *c, uint32_t val)
+{
+	return qir_emit_def(c, qir_inst(QOP_LOAD_IMM_I2, c->undef,
+	                                qir_reg(QFILE_LOAD_IMM, val),
+	                                c->undef));
+}
+
+/** Shifts the multiply output to the right by rot channels */
+static inline struct qreg
+qir_ROT_MUL(struct vc4_compile *c, struct qreg val, uint32_t rot)
+{
+	return qir_emit_def(c, qir_inst(QOP_ROT_MUL, c->undef,
+	                                val,
+	                                qir_reg(QFILE_LOAD_IMM,
+	                                        QPU_SMALL_IMM_MUL_ROT + rot)));
+}
+
+static inline struct qinst *
 qir_MOV_cond(struct vc4_compile *c, uint8_t cond,
              struct qreg dest, struct qreg src)
 {
-	qir_MOV_dest(c, dest, src)->cond = cond;
+	struct qinst *mov = qir_MOV_dest(c, dest, src);
+	mov->cond = cond;
+	return mov;
 }
 
 static inline struct qinst *
@@ -809,6 +861,6 @@ qir_BRANCH(struct vc4_compile *c, uint8_t cond)
 
 #define qir_for_each_inst_inorder(inst, c) \
 	qir_for_each_block(_block, c) \
-		qir_for_each_inst(inst, _block)
+		qir_for_each_inst_safe(inst, _block)
 
 #endif /* VC4_QIR_H */
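
A note on the qir_SEL() rework in the last hunks: instead of emitting a pair of conditional MOVs under complementary condition codes, the helper now writes src1 unconditionally and then overwrites it with src0 under the single condition code, so no complement of the condition is ever required. A minimal usage sketch follows; it is not part of the patch, and it assumes qir_SF() (declared elsewhere in this header) and the QPU_COND_* codes from vc4_qpu_defines.h:

/* Hypothetical helper: select x where bool_val is nonzero, else y.
 * qir_SF() sets the condition flags from its operand; qir_SEL() then
 * emits "t = y; t = x (if Z clear);" per the two-MOV scheme above.
 */
static inline struct qreg
emit_bcsel_sketch(struct vc4_compile *c,
                  struct qreg bool_val, struct qreg x, struct qreg y)
{
        qir_SF(c, bool_val);                  /* Z flag <- (bool_val == 0) */
        return qir_SEL(c, QPU_COND_ZC, x, y); /* take x when Z is clear */
}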
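
On the new qir_LOAD_IMM_U2()/qir_LOAD_IMM_I2() helpers: the QIR comment above says the 32-bit immediate is divided into 16 2-bit values, one per SIMD channel. The sketch below shows one way a caller might pack such an immediate. The bit layout here is an assumption based on the VideoCore IV per-element load-immediate format (bits [15:0] hold each channel's LS bit, bits [31:16] its MS bit), not something stated by this patch:

/* Hypothetical sketch: build the per-channel pattern 0,1,2,3 repeating
 * across the 16 SIMD channels, assuming the LS-half/MS-half encoding
 * described above, and load it with the new helper.
 */
static inline struct qreg
load_channel_pattern_sketch(struct vc4_compile *c)
{
        uint32_t ls = 0, ms = 0;

        for (int chan = 0; chan < 16; chan++) {
                uint32_t v = chan % 4;     /* 2-bit value for this channel */
                ls |= (v & 1) << chan;
                ms |= ((v >> 1) & 1) << chan;
        }

        return qir_LOAD_IMM_U2(c, (ms << 16) | ls);
}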
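
Finally, on QOP_UNIFORMS_RESET and QUNIFORM_UNIFORMS_ADDRESS: per the comments in the diff, the instruction combines a QOP_LOAD_IMM result (src[0]) with the kernel-patched uniforms-address uniform (src[1]) so the kernel's branch validation can reset the uniform stream pointer. A sketch of how such an instruction might be constructed, using only helpers declared in this header; the real emission lives in qir_emit_uniform_stream_resets(), whose implementation is not shown here:

/* Hypothetical sketch: reset the uniform stream at a block boundary.
 * src[0] comes from QOP_LOAD_IMM and src[1] is the placeholder
 * QUNIFORM_UNIFORMS_ADDRESS uniform; the write is assumed to land on
 * QPU_W_UNIFORMS_ADDRESS at QPU emission time.
 */
static inline void
emit_uniforms_reset_sketch(struct vc4_compile *c)
{
        struct qreg offset = qir_LOAD_IMM(c, 0);
        struct qreg base = qir_uniform(c, QUNIFORM_UNIFORMS_ADDRESS, 0);

        qir_emit_nondef(c, qir_inst(QOP_UNIFORMS_RESET, c->undef,
                                    offset, base));
}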