* L2T cache will effectively be the shared memory area.
*/
QUNIFORM_SHARED_OFFSET,
+
+ /**
+ * Returns the number of layers in the framebuffer.
+ *
+ * This is used to cap gl_Layer in geometry shaders to avoid
+ * out-of-bounds accesses into the tile state during binning.
+ */
+ QUNIFORM_FB_LAYERS,
};
static inline uint32_t v3d_unit_data_create(uint32_t unit, uint32_t value)
bool clamp_r:1;
} tex[V3D_MAX_TEXTURE_SAMPLERS];
uint8_t ucp_enables;
+ bool is_last_geometry_stage;
};
struct v3d_fs_key {
struct pipe_rt_blend_state blend;
};
+/**
+ * Per-variant compile key for geometry shaders, parallel to v3d_fs_key
+ * and v3d_vs_key in this header.
+ *
+ * NOTE(review): field comments below describe intent inferred from the
+ * names and the sibling v3d_vs_key — confirm against the code that fills
+ * this key in at draw/compile time.
+ */
+struct v3d_gs_key {
+ struct v3d_key base;
+
+ /* Outputs actually consumed by the next stage, so the compiler can
+  * emit only those. Sized by V3D_MAX_FS_INPUTS — presumably because GS
+  * outputs feed the fragment stage; confirm this bound is sufficient.
+  */
+ struct v3d_varying_slot used_outputs[V3D_MAX_FS_INPUTS];
+ uint8_t num_used_outputs;
+
+ /* NOTE(review): presumably selects the binning-pass ("coordinate")
+  * shader variant, matching is_coord in v3d_vs_key — confirm.
+  */
+ bool is_coord;
+ /* Whether point size is emitted per-vertex by the shader. */
+ bool per_vertex_point_size;
+};
+
struct v3d_vs_key {
struct v3d_key base;
- struct v3d_varying_slot fs_inputs[V3D_MAX_FS_INPUTS];
- uint8_t num_fs_inputs;
+ struct v3d_varying_slot used_outputs[V3D_MAX_ANY_STAGE_INPUTS];
+ uint8_t num_used_outputs;
bool is_coord;
bool per_vertex_point_size;
struct pipe_shader_state *shader_state;
struct v3d_key *key;
struct v3d_fs_key *fs_key;
+ struct v3d_gs_key *gs_key;
struct v3d_vs_key *vs_key;
/* Live ranges of temps. */
uint64_t *qpu_insts;
uint32_t qpu_inst_count;
uint32_t qpu_inst_size;
+ uint32_t qpu_inst_stalled_count;
/* For the FS, the number of varying inputs not counting the
* point/line varyings payload
struct qinst *last_thrsw;
bool last_thrsw_at_top_level;
+ bool emitted_tlb_load;
+ bool lock_scoreboard_on_first_thrsw;
+
bool failed;
+
+ bool tmu_dirty_rcl;
};
struct v3d_uniform_list {
* after-final-THRSW state.
*/
bool single_seg;
+
+ bool tmu_dirty_rcl;
};
struct v3d_vs_prog_data {
uint8_t vcm_cache_size;
};
+/**
+ * Compiled-program metadata for a geometry shader, consumed by the
+ * driver when filling the hardware shader state record.
+ */
+struct v3d_gs_prog_data {
+ struct v3d_prog_data base;
+
+ /* Whether the program reads gl_PrimitiveIDIn */
+ bool uses_pid;
+
+ /* Number of components read from each input varying.
+  * NOTE(review): the array holds V3D_MAX_GS_INPUTS / 4 bytes, i.e. one
+  * byte per four inputs — presumably a packed encoding; confirm the
+  * packing scheme against the code that writes it.
+  */
+ uint8_t input_sizes[V3D_MAX_GS_INPUTS / 4];
+
+ /* Number of inputs */
+ uint8_t num_inputs;
+ /* Varying slots for each of the num_inputs inputs. */
+ struct v3d_varying_slot input_slots[V3D_MAX_GS_INPUTS];
+
+ /* Total number of components written, for the shader state record. */
+ uint32_t vpm_output_size;
+
+ /* Maximum SIMD dispatch width to not exceed VPM output size limits
+ * in the geometry shader. Notice that the final dispatch width has to
+ * be decided at draw time and could be lower based on the VPM pressure
+ * added by other shader stages.
+ */
+ uint8_t simd_width;
+
+ /* Output primitive type */
+ uint8_t out_prim_type;
+
+ /* Number of GS invocations */
+ uint8_t num_invocations;
+};
+
struct v3d_fs_prog_data {
struct v3d_prog_data base;
bool disable_ez;
bool uses_center_w;
bool uses_implicit_point_line_varyings;
+ bool lock_scoreboard_on_first_thrsw;
};
struct v3d_compute_prog_data {
return inst->uniform != ~0;
}
-/* Special nir_load_input intrinsic index for loading the current TLB
- * destination color.
- */
-#define V3D_NIR_TLB_COLOR_READ_INPUT 2000000000
-
-#define V3D_NIR_MS_MASK_OUTPUT 2000000000
-
extern const nir_shader_compiler_options v3d_nir_options;
const struct v3d_compiler *v3d_compiler_init(const struct v3d_device_info *devinfo);
bool vir_opt_vpm(struct v3d_compile *c);
void v3d_nir_lower_blend(nir_shader *s, struct v3d_compile *c);
void v3d_nir_lower_io(nir_shader *s, struct v3d_compile *c);
+void v3d_nir_lower_logic_ops(nir_shader *s, struct v3d_compile *c);
void v3d_nir_lower_scratch(nir_shader *s);
void v3d_nir_lower_txf_ms(nir_shader *s, struct v3d_compile *c);
void v3d_nir_lower_image_load_store(nir_shader *s);
bool v3d_gl_format_is_return_32(GLenum format);
+uint32_t
+v3d_get_op_for_atomic_add(nir_intrinsic_instr *instr, unsigned src);
+
static inline bool
quniform_contents_is_texture_p0(enum quniform_contents contents)
{
VIR_A_ALU0(EIDX)
VIR_A_ALU1(LDVPMV_IN)
VIR_A_ALU1(LDVPMV_OUT)
+VIR_A_ALU1(LDVPMD_IN)
+VIR_A_ALU1(LDVPMD_OUT)
+VIR_A_ALU2(LDVPMG_IN)
+VIR_A_ALU2(LDVPMG_OUT)
VIR_A_ALU0(TMUWT)
+VIR_A_ALU0(IID)
VIR_A_ALU0(FXCD)
VIR_A_ALU0(XCD)
VIR_A_ALU0(FYCD)