QUNIFORM_SHARED_OFFSET,
};
-static inline uint32_t v3d_tmu_config_data_create(uint32_t unit, uint32_t value)
+/* Packs a unit index (texture/image/SSBO/UBO slot, top 8 bits) together
+ * with a 24-bit value (typically a byte offset) into one uniform word.
+ */
+static inline uint32_t v3d_unit_data_create(uint32_t unit, uint32_t value)
{
+ /* The value must fit in 24 bits so it cannot clobber the unit field. */
+ assert(value < (1 << 24));
 return unit << 24 | value;
}
-static inline uint32_t v3d_tmu_config_data_get_unit(uint32_t data)
+/* Returns the unit index from a word packed by v3d_unit_data_create()
+ * (the top 8 bits).
+ */
+static inline uint32_t v3d_unit_data_get_unit(uint32_t data)
{
 return data >> 24;
}
-static inline uint32_t v3d_tmu_config_data_get_value(uint32_t data)
+/* Returns the 24-bit value/offset from a word packed by
+ * v3d_unit_data_create() (the low 24 bits).
+ */
+static inline uint32_t v3d_unit_data_get_offset(uint32_t data)
{
 return data & 0xffffff;
}
return slot.slot_and_component & 3;
}
-struct v3d_ubo_range {
- /**
- * offset in bytes from the start of the ubo where this range is
- * uploaded.
- *
- * Only set once used is set.
- */
- uint32_t dst_offset;
-
- /**
- * offset in bytes from the start of the gallium uniforms where the
- * data comes from.
- */
- uint32_t src_offset;
-
- /** size in bytes of this ubo range */
- uint32_t size;
-};
-
struct v3d_key {
void *shader_state;
struct {
*/
uint8_t int_color_rb;
uint8_t uint_color_rb;
+
+ /* Color format information per render target. Only set when logic
+ * operations are enabled.
+ */
+ struct {
+ enum pipe_format format;
+ const uint8_t *swizzle;
+ } color_fmt[V3D_MAX_DRAW_BUFFERS];
+
uint8_t alpha_test_func;
uint8_t logicop_func;
uint32_t point_sprite_mask;
struct qreg *inputs;
struct qreg *outputs;
bool msaa_per_sample_output;
- struct qreg color_reads[V3D_MAX_SAMPLES];
- struct qreg sample_colors[V3D_MAX_SAMPLES];
+ struct qreg color_reads[V3D_MAX_DRAW_BUFFERS * V3D_MAX_SAMPLES * 4];
+ struct qreg sample_colors[V3D_MAX_DRAW_BUFFERS * V3D_MAX_SAMPLES * 4];
uint32_t inputs_array_size;
uint32_t outputs_array_size;
uint32_t uniforms_array_size;
bool uses_center_w;
bool writes_z;
-
- struct v3d_ubo_range *ubo_ranges;
- bool *ubo_range_used;
- uint32_t ubo_ranges_array_size;
- /** Number of uniform areas tracked in ubo_ranges. */
- uint32_t num_ubo_ranges;
- uint32_t next_ubo_dst_offset;
+ bool uses_implicit_point_line_varyings;
/* State for whether we're executing on each channel currently. 0 if
* yes, otherwise a block number + 1 that the channel jumped to.
uint64_t *qpu_insts;
uint32_t qpu_inst_count;
uint32_t qpu_inst_size;
+ uint32_t qpu_inst_stalled_count;
/* For the FS, the number of varying inputs not counting the
* point/line varyings payload
*/
uint32_t num_inputs;
- /**
- * Number of inputs from num_inputs remaining to be queued to the read
- * FIFO in the VS/CS.
- */
- uint32_t num_inputs_remaining;
-
- /* Number of inputs currently in the read FIFO for the VS/CS */
- uint32_t num_inputs_in_fifo;
-
- /** Next offset in the VPM to read from in the VS/CS */
- uint32_t vpm_read_offset;
-
uint32_t program_id;
uint32_t variant_id;
struct qinst *last_thrsw;
bool last_thrsw_at_top_level;
+ bool emitted_tlb_load;
+ bool lock_scoreboard_on_first_thrsw;
+
bool failed;
+
+ bool tmu_dirty_rcl;
};
struct v3d_uniform_list {
struct v3d_prog_data {
struct v3d_uniform_list uniforms;
- struct v3d_ubo_range *ubo_ranges;
- uint32_t num_ubo_ranges;
- uint32_t ubo_size;
uint32_t spill_size;
uint8_t threads;
* after-final-THRSW state.
*/
bool single_seg;
+
+ bool tmu_dirty_rcl;
};
struct v3d_vs_prog_data {
bool writes_z;
bool disable_ez;
bool uses_center_w;
+ bool uses_implicit_point_line_varyings;
+ bool lock_scoreboard_on_first_thrsw;
+};
+
+struct v3d_compute_prog_data {
+ struct v3d_prog_data base;
+ /* Size in bytes of the workgroup's shared space. */
+ uint32_t shared_size;
};
static inline bool
return inst->uniform != ~0;
}
-/* Special nir_load_input intrinsic index for loading the current TLB
- * destination color.
- */
-#define V3D_NIR_TLB_COLOR_READ_INPUT 2000000000
-
-#define V3D_NIR_MS_MASK_OUTPUT 2000000000
-
extern const nir_shader_compiler_options v3d_nir_options;
const struct v3d_compiler *v3d_compiler_init(const struct v3d_device_info *devinfo);
enum quniform_contents contents,
uint32_t data);
void vir_schedule_instructions(struct v3d_compile *c);
+void v3d_setup_spill_base(struct v3d_compile *c);
struct v3d_qpu_instr v3d_qpu_nop(void);
struct qreg vir_emit_def(struct v3d_compile *c, struct qinst *inst);
bool vir_opt_copy_propagate(struct v3d_compile *c);
bool vir_opt_dead_code(struct v3d_compile *c);
bool vir_opt_peephole_sf(struct v3d_compile *c);
+bool vir_opt_redundant_flags(struct v3d_compile *c);
bool vir_opt_small_immediates(struct v3d_compile *c);
bool vir_opt_vpm(struct v3d_compile *c);
void v3d_nir_lower_blend(nir_shader *s, struct v3d_compile *c);
void v3d_nir_lower_io(nir_shader *s, struct v3d_compile *c);
+void v3d_nir_lower_logic_ops(nir_shader *s, struct v3d_compile *c);
+void v3d_nir_lower_scratch(nir_shader *s);
void v3d_nir_lower_txf_ms(nir_shader *s, struct v3d_compile *c);
void v3d_nir_lower_image_load_store(nir_shader *s);
void vir_lower_uniforms(struct v3d_compile *c);
bool v3d_gl_format_is_return_32(GLenum format);
+uint32_t
+v3d_get_op_for_atomic_add(nir_intrinsic_instr *instr, unsigned src);
+
static inline bool
quniform_contents_is_texture_p0(enum quniform_contents contents)
{
return vir_UMUL24(c, src0, src1);
}
+/* Emits a TLB color read that also supplies a per-read configuration
+ * byte through an implicit uniform (the "ldtlbu" signature). Only the
+ * low 8 bits of config carry configuration; the upper 24 bits must be
+ * all ones, matching the hardware's uniform encoding.
+ */
+static inline struct qreg
+vir_TLBU_COLOR_READ(struct v3d_compile *c, uint32_t config)
+{
+ /* Only implemented for V3D 4.1+ so far. */
+ assert(c->devinfo->ver >= 41); /* XXX */
+ assert((config & 0xffffff00) == 0xffffff00);
+
+ /* The read is expressed as a NOP ALU instruction carrying the
+ * ldtlbu signal; the config byte rides along as a uniform.
+ */
+ struct qinst *ldtlb = vir_add_inst(V3D_QPU_A_NOP, c->undef,
+ c->undef, c->undef);
+ ldtlb->qpu.sig.ldtlbu = true;
+ ldtlb->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT, config);
+ return vir_emit_def(c, ldtlb);
+}
+
+/* Emits a TLB color read that reuses the previously-established TLB
+ * configuration (the plain "ldtlb" signature, no config uniform).
+ */
+static inline struct qreg
+vir_TLB_COLOR_READ(struct v3d_compile *c)
+{
+ /* Only implemented for V3D 4.1+ so far. */
+ assert(c->devinfo->ver >= 41); /* XXX */
+
+ struct qinst *ldtlb = vir_add_inst(V3D_QPU_A_NOP, c->undef,
+ c->undef, c->undef);
+ ldtlb->qpu.sig.ldtlb = true;
+ return vir_emit_def(c, ldtlb);
+}
+
/*
static inline struct qreg
vir_LOAD_IMM(struct v3d_compile *c, uint32_t val)