return slot.slot_and_component & 3;
}
-struct v3d_ubo_range {
- /**
- * offset in bytes from the start of the ubo where this range is
- * uploaded.
- *
- * Only set once used is set.
- */
- uint32_t dst_offset;
-
- /**
- * offset in bytes from the start of the gallium uniforms where the
- * data comes from.
- */
- uint32_t src_offset;
-
- /** size in bytes of this ubo range */
- uint32_t size;
-};
-
struct v3d_key {
void *shader_state;
struct {
bool clamp_r:1;
} tex[V3D_MAX_TEXTURE_SAMPLERS];
uint8_t ucp_enables;
+ /* NOTE(review): presumably true when this is the last geometry
+ * stage before rasterization (e.g. a VS with no GS bound) —
+ * confirm against the code that populates the key.
+ */
+ bool is_last_geometry_stage;
};
struct v3d_fs_key {
*/
uint8_t int_color_rb;
uint8_t uint_color_rb;
+
+ /* Color format information per render target. Only set when logic
+ * operations are enabled.
+ */
+ struct {
+ enum pipe_format format;
+ /* NOTE(review): the key does not own this pointer — it appears to
+ * reference a long-lived swizzle table; confirm its lifetime where
+ * the key is filled in.
+ */
+ const uint8_t *swizzle;
+ } color_fmt[V3D_MAX_DRAW_BUFFERS];
+
uint8_t alpha_test_func;
uint8_t logicop_func;
uint32_t point_sprite_mask;
struct pipe_rt_blend_state blend;
};
+/* Compile-time key selecting a geometry-shader variant.
+ *
+ * NOTE(review): mirrors the used-outputs scheme of v3d_vs_key — only the
+ * outputs consumed by the following stage are kept, allowing dead GS
+ * outputs to be pruned. Confirm against the variant-lookup code.
+ */
+struct v3d_gs_key {
+ struct v3d_key base;
+
+ /* Downstream (FS) inputs actually consumed; outputs not listed here
+ * need not be written.
+ */
+ struct v3d_varying_slot used_outputs[V3D_MAX_FS_INPUTS];
+ uint8_t num_used_outputs;
+
+ /* presumably: binning ("coordinate") variant of the shader — TODO confirm */
+ bool is_coord;
+ bool per_vertex_point_size;
+};
+
struct v3d_vs_key {
struct v3d_key base;
- struct v3d_varying_slot fs_inputs[V3D_MAX_FS_INPUTS];
- uint8_t num_fs_inputs;
+ /* Outputs actually consumed by the next stage; renamed from
+ * fs_inputs (and widened to V3D_MAX_ANY_STAGE_INPUTS) since the
+ * consumer may now be a GS rather than the FS.
+ */
+ struct v3d_varying_slot used_outputs[V3D_MAX_ANY_STAGE_INPUTS];
+ uint8_t num_used_outputs;
bool is_coord;
bool per_vertex_point_size;
struct qreg *inputs;
struct qreg *outputs;
bool msaa_per_sample_output;
- struct qreg color_reads[V3D_MAX_SAMPLES];
- struct qreg sample_colors[V3D_MAX_SAMPLES];
+ struct qreg color_reads[V3D_MAX_DRAW_BUFFERS * V3D_MAX_SAMPLES * 4];
+ struct qreg sample_colors[V3D_MAX_DRAW_BUFFERS * V3D_MAX_SAMPLES * 4];
uint32_t inputs_array_size;
uint32_t outputs_array_size;
uint32_t uniforms_array_size;
bool uses_center_w;
bool writes_z;
-
- struct v3d_ubo_range *ubo_ranges;
- bool *ubo_range_used;
- uint32_t ubo_ranges_array_size;
- /** Number of uniform areas tracked in ubo_ranges. */
- uint32_t num_ubo_ranges;
- uint32_t next_ubo_dst_offset;
+ bool uses_implicit_point_line_varyings;
/* State for whether we're executing on each channel currently. 0 if
* yes, otherwise a block number + 1 that the channel jumped to.
int local_invocation_index_bits;
uint8_t vattr_sizes[V3D_MAX_VS_INPUTS / 4];
+ uint8_t gs_input_sizes[V3D_MAX_GS_INPUTS];
uint32_t vpm_output_size;
/* Size in bytes of registers that have been spilled. This is how much
struct pipe_shader_state *shader_state;
struct v3d_key *key;
struct v3d_fs_key *fs_key;
+ struct v3d_gs_key *gs_key;
struct v3d_vs_key *vs_key;
/* Live ranges of temps. */
uint64_t *qpu_insts;
uint32_t qpu_inst_count;
uint32_t qpu_inst_size;
+ uint32_t qpu_inst_stalled_count;
/* For the FS, the number of varying inputs not counting the
* point/line varyings payload
*/
uint32_t num_inputs;
- /**
- * Number of inputs from num_inputs remaining to be queued to the read
- * FIFO in the VS/CS.
- */
- uint32_t num_inputs_remaining;
-
- /* Number of inputs currently in the read FIFO for the VS/CS */
- uint32_t num_inputs_in_fifo;
-
- /** Next offset in the VPM to read from in the VS/CS */
- uint32_t vpm_read_offset;
-
uint32_t program_id;
uint32_t variant_id;
struct qinst *last_thrsw;
bool last_thrsw_at_top_level;
+ bool emitted_tlb_load;
+ bool lock_scoreboard_on_first_thrsw;
+
bool failed;
+
+ bool tmu_dirty_rcl;
};
struct v3d_uniform_list {
struct v3d_prog_data {
struct v3d_uniform_list uniforms;
- struct v3d_ubo_range *ubo_ranges;
- uint32_t num_ubo_ranges;
- uint32_t ubo_size;
uint32_t spill_size;
uint8_t threads;
* after-final-THRSW state.
*/
bool single_seg;
+
+ bool tmu_dirty_rcl;
};
struct v3d_vs_prog_data {
uint8_t vcm_cache_size;
};
+/* Compiled-program metadata for a geometry-shader variant, consumed by
+ * the driver when emitting shader state records.
+ */
+struct v3d_gs_prog_data {
+ struct v3d_prog_data base;
+
+ /* Whether the program reads gl_PrimitiveIDIn */
+ bool uses_pid;
+
+ /* Number of components read from each input varying. */
+ /* NOTE(review): the array holds V3D_MAX_GS_INPUTS / 4 bytes, so the
+ * per-input sizes are presumably packed four to a byte (2 bits each)
+ * — confirm with the code that fills this in.
+ */
+ uint8_t input_sizes[V3D_MAX_GS_INPUTS / 4];
+
+ /* Number of inputs */
+ uint8_t num_inputs;
+ struct v3d_varying_slot input_slots[V3D_MAX_GS_INPUTS];
+
+ /* Total number of components written, for the shader state record. */
+ uint32_t vpm_output_size;
+
+ /* Maximum SIMD dispatch width to not exceed VPM output size limits
+ * in the geometry shader. Notice that the final dispatch width has to
+ * be decided at draw time and could be lower based on the VPM pressure
+ * added by other shader stages.
+ */
+ uint8_t simd_width;
+
+ /* Output primitive type */
+ uint8_t out_prim_type;
+
+ /* Number of GS invocations */
+ uint8_t num_invocations;
+};
+
+
struct v3d_fs_prog_data {
struct v3d_prog_data base;
bool writes_z;
bool disable_ez;
bool uses_center_w;
+ /* True if the FS consumes the implicit point/line coordinate
+ * varyings (mirrors the same-named flag on v3d_compile).
+ */
+ bool uses_implicit_point_line_varyings;
+ /* NOTE(review): forces the first thrsw to lock the TLB scoreboard —
+ * presumably needed when a TLB load is emitted before the first
+ * thread switch; confirm against the emitted_tlb_load handling in
+ * the compiler.
+ */
+ bool lock_scoreboard_on_first_thrsw;
+};
+
+/* Compiled-program metadata for a compute-shader variant. */
+struct v3d_compute_prog_data {
+ struct v3d_prog_data base;
+ /* Size in bytes of the workgroup's shared space. */
+ uint32_t shared_size;
};
static inline bool
return inst->uniform != ~0;
}
-/* Special nir_load_input intrinsic index for loading the current TLB
- * destination color.
- */
-#define V3D_NIR_TLB_COLOR_READ_INPUT 2000000000
-
-#define V3D_NIR_MS_MASK_OUTPUT 2000000000
-
extern const nir_shader_compiler_options v3d_nir_options;
const struct v3d_compiler *v3d_compiler_init(const struct v3d_device_info *devinfo);
enum quniform_contents contents,
uint32_t data);
void vir_schedule_instructions(struct v3d_compile *c);
+void v3d_setup_spill_base(struct v3d_compile *c);
struct v3d_qpu_instr v3d_qpu_nop(void);
struct qreg vir_emit_def(struct v3d_compile *c, struct qinst *inst);
bool vir_opt_copy_propagate(struct v3d_compile *c);
bool vir_opt_dead_code(struct v3d_compile *c);
bool vir_opt_peephole_sf(struct v3d_compile *c);
+bool vir_opt_redundant_flags(struct v3d_compile *c);
bool vir_opt_small_immediates(struct v3d_compile *c);
bool vir_opt_vpm(struct v3d_compile *c);
void v3d_nir_lower_blend(nir_shader *s, struct v3d_compile *c);
void v3d_nir_lower_io(nir_shader *s, struct v3d_compile *c);
+void v3d_nir_lower_logic_ops(nir_shader *s, struct v3d_compile *c);
+void v3d_nir_lower_scratch(nir_shader *s);
void v3d_nir_lower_txf_ms(nir_shader *s, struct v3d_compile *c);
void v3d_nir_lower_image_load_store(nir_shader *s);
void vir_lower_uniforms(struct v3d_compile *c);
bool v3d_gl_format_is_return_32(GLenum format);
+uint32_t
+v3d_get_op_for_atomic_add(nir_intrinsic_instr *instr, unsigned src);
+
static inline bool
quniform_contents_is_texture_p0(enum quniform_contents contents)
{
VIR_A_ALU0(EIDX)
VIR_A_ALU1(LDVPMV_IN)
VIR_A_ALU1(LDVPMV_OUT)
+VIR_A_ALU1(LDVPMD_IN)
+VIR_A_ALU1(LDVPMD_OUT)
+VIR_A_ALU2(LDVPMG_IN)
+VIR_A_ALU2(LDVPMG_OUT)
VIR_A_ALU0(TMUWT)
+VIR_A_ALU0(IID)
VIR_A_ALU0(FXCD)
VIR_A_ALU0(XCD)
VIR_A_ALU0(FYCD)
return vir_UMUL24(c, src0, src1);
}
+/* Emits a TLB color read whose per-read configuration is consumed from
+ * the uniform stream (ldtlbu signal).
+ *
+ * Only the low byte of 'config' carries information: the assert below
+ * requires the top 24 bits to be all-ones.
+ */
+static inline struct qreg
+vir_TLBU_COLOR_READ(struct v3d_compile *c, uint32_t config)
+{
+ /* V3D 4.1+ only for now, per the existing XXX marker. */
+ assert(c->devinfo->ver >= 41); /* XXX */
+ assert((config & 0xffffff00) == 0xffffff00);
+
+ /* The read is a NOP ALU op carrying the ldtlbu signal; the config
+ * word rides along as a constant uniform.
+ */
+ struct qinst *ldtlb = vir_add_inst(V3D_QPU_A_NOP, c->undef,
+ c->undef, c->undef);
+ ldtlb->qpu.sig.ldtlbu = true;
+ ldtlb->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT, config);
+ return vir_emit_def(c, ldtlb);
+}
+
+/* Emits a TLB color read that reuses the current TLB configuration
+ * (plain ldtlb signal — no uniform is consumed, unlike
+ * vir_TLBU_COLOR_READ).
+ */
+static inline struct qreg
+vir_TLB_COLOR_READ(struct v3d_compile *c)
+{
+ /* V3D 4.1+ only for now, per the existing XXX marker. */
+ assert(c->devinfo->ver >= 41); /* XXX */
+
+ struct qinst *ldtlb = vir_add_inst(V3D_QPU_A_NOP, c->undef,
+ c->undef, c->undef);
+ ldtlb->qpu.sig.ldtlb = true;
+ return vir_emit_def(c, ldtlb);
+}
+
/*
static inline struct qreg
vir_LOAD_IMM(struct v3d_compile *c, uint32_t val)