struct nir_shader;
struct brw_program;

/* Convenience typedef so prototypes below can say "nir_shader *" without
 * pulling in nir.h.  Redeclaring an identical typedef is valid C11, so this
 * coexists with the typedef in nir.h when both are included.
 */
typedef struct nir_shader nir_shader;
struct brw_compiler {
const struct gen_device_info *devinfo;
void (*shader_debug_log)(void *, const char *str, ...) PRINTFLIKE(2, 3);
void (*shader_perf_log)(void *, const char *str, ...) PRINTFLIKE(2, 3);
- bool scalar_stage[MESA_SHADER_STAGES];
+ bool scalar_stage[MESA_ALL_SHADER_STAGES];
bool use_tcs_8_patch;
- struct gl_shader_compiler_options glsl_compiler_options[MESA_SHADER_STAGES];
+ struct gl_shader_compiler_options glsl_compiler_options[MESA_ALL_SHADER_STAGES];
   /**
    * Whether or not the driver wants uniform params to be compacted by the
    * back-end compiler.
    */
bool compact_params;
+
+ /**
+ * Whether or not the driver wants variable group size to be lowered by the
+ * back-end compiler.
+ */
+ bool lower_variable_group_size;
};
/**
uint32_t xy_uxvx_image_mask;
uint32_t ayuv_image_mask;
uint32_t xyuv_image_mask;
+ uint32_t bt709_mask;
+ uint32_t bt2020_mask;
/* Scale factor for each texture. */
float scale_factors[32];
bool high_quality_derivatives:1;
bool force_dual_color_blend:1;
bool coherent_fb_fetch:1;
+ bool ignore_sample_mask_out:1;
uint8_t color_outputs_valid;
uint64_t input_slots_valid;
BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Y,
BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Z,
BRW_PARAM_BUILTIN_SUBGROUP_ID,
+ BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X,
+ BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_Y,
+ BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_Z,
};
#define BRW_PARAM_BUILTIN_CLIP_PLANE(idx, comp) \
#define BRW_PARAM_BUILTIN_CLIP_PLANE_COMP(param) \
(((param) - BRW_PARAM_BUILTIN_CLIP_PLANE_0_X) & 0x3)
/** Represents a code relocation
 *
 * Relocatable constants are immediates in the code which we want to be able
 * to replace post-compile with the actual value.
 */
struct brw_shader_reloc {
   /** The 32-bit ID of the relocatable constant */
   uint32_t id;

   /** The offset in the shader to the relocatable instruction
    *
    * This is the offset to the instruction rather than the immediate value
    * itself.  This allows us to do some sanity checking while we relocate.
    */
   uint32_t offset;
};
+
/** A value to write to a relocation */
struct brw_shader_reloc_value {
   /** The 32-bit ID of the relocatable constant */
   uint32_t id;

   /** The value with which to replace the relocated immediate */
   uint32_t value;
};
+
struct brw_stage_prog_data {
struct {
/** size of our binding table. */
GLuint nr_params; /**< number of float params/constants */
GLuint nr_pull_params;
+ /* zero_push_reg is a bitfield which indicates what push registers (if any)
+ * should be zeroed by SW at the start of the shader. The corresponding
+ * push_reg_mask_param specifies the param index (in 32-bit units) where
+ * the actual runtime 64-bit mask will be pushed. The shader will zero
+ * push reg i if
+ *
+ * reg_used & zero_push_reg & ~*push_reg_mask_param & (1ull << i)
+ *
+ * If this field is set, brw_compiler::compact_params must be false.
+ */
+ uint64_t zero_push_reg;
+ unsigned push_reg_mask_param;
+
unsigned curb_read_length;
unsigned total_scratch;
unsigned total_shared;
unsigned program_size;
+ unsigned const_data_size;
+ unsigned const_data_offset;
+
+ unsigned num_relocs;
+ const struct brw_shader_reloc *relocs;
+
/** Does this program pull from any UBO or other constant buffers? */
bool has_ubo_pull;
*/
uint32_t flat_inputs;
+ /**
+ * The FS inputs
+ */
+ uint64_t inputs;
+
/* Mapping of VUE slots to interpolation modes.
* Used by the Gen4-5 clip/sf/wm stages.
*/
struct brw_stage_prog_data base;
unsigned local_size[3];
- unsigned simd_size;
- unsigned threads;
unsigned slm_size;
+
+ /* Program offsets for the 8/16/32 SIMD variants. Multiple variants are
+ * kept when using variable group size, and the right one can only be
+ * decided at dispatch time.
+ */
+ unsigned prog_offset[3];
+
+ /* Bitmask indicating which program offsets are valid. */
+ unsigned prog_mask;
+
+ /* Bitmask indicating which programs have spilled. */
+ unsigned prog_spilled;
+
bool uses_barrier;
bool uses_num_work_groups;
struct {
struct brw_push_const_block cross_thread;
struct brw_push_const_block per_thread;
- struct brw_push_const_block total;
} push;
struct {
} binding_table;
};
+static inline uint32_t
+brw_cs_prog_data_prog_offset(const struct brw_cs_prog_data *prog_data,
+ unsigned dispatch_width)
+{
+ assert(dispatch_width == 8 ||
+ dispatch_width == 16 ||
+ dispatch_width == 32);
+ const unsigned index = dispatch_width / 16;
+ assert(prog_data->prog_mask & (1 << index));
+ return prog_data->prog_offset[index];
+}
+
/**
* Enum representing the i965-specific vertex results that don't correspond
* exactly to any element of gl_varying_slot. The values of this enum are
void brw_compute_vue_map(const struct gen_device_info *devinfo,
struct brw_vue_map *vue_map,
uint64_t slots_valid,
- bool separate_shader);
+ bool separate_shader,
+ uint32_t pos_slots);
void brw_compute_tess_vue_map(struct brw_vue_map *const vue_map,
uint64_t slots_valid,
/** Number vertices in output patch */
int instances;
+
+ /** Track patch count threshold */
+ int patch_count_threshold;
};
struct brw_cs_prog_data cs;
};
/* Generate downcast helpers from the generic brw_stage_prog_data to the
 * stage-specific prog_data type, in both mutable and const flavors.
 */
#define DEFINE_PROG_DATA_DOWNCAST(stage)                                   \
static inline struct brw_##stage##_prog_data *                             \
brw_##stage##_prog_data(struct brw_stage_prog_data *prog_data)             \
{                                                                          \
   return (struct brw_##stage##_prog_data *) prog_data;                    \
}                                                                          \
                                                                           \
static inline const struct brw_##stage##_prog_data *                       \
brw_##stage##_prog_data_const(const struct brw_stage_prog_data *prog_data) \
{                                                                          \
   return (const struct brw_##stage##_prog_data *) prog_data;              \
}
DEFINE_PROG_DATA_DOWNCAST(vue)
DEFINE_PROG_DATA_DOWNCAST(vs)
struct brw_compile_stats {
uint32_t dispatch_width; /**< 0 for vec4 */
uint32_t instructions;
+ uint32_t sends;
uint32_t loops;
uint32_t cycles;
uint32_t spills;
void *mem_ctx,
const struct brw_vs_prog_key *key,
struct brw_vs_prog_data *prog_data,
- struct nir_shader *shader,
+ nir_shader *nir,
int shader_time_index,
struct brw_compile_stats *stats,
char **error_str);
void *mem_ctx,
const struct brw_tcs_prog_key *key,
struct brw_tcs_prog_data *prog_data,
- struct nir_shader *nir,
+ nir_shader *nir,
int shader_time_index,
struct brw_compile_stats *stats,
char **error_str);
const struct brw_tes_prog_key *key,
const struct brw_vue_map *input_vue_map,
struct brw_tes_prog_data *prog_data,
- struct nir_shader *shader,
+ nir_shader *nir,
int shader_time_index,
struct brw_compile_stats *stats,
char **error_str);
void *mem_ctx,
const struct brw_gs_prog_key *key,
struct brw_gs_prog_data *prog_data,
- struct nir_shader *shader,
+ nir_shader *nir,
struct gl_program *prog,
int shader_time_index,
struct brw_compile_stats *stats,
void *mem_ctx,
const struct brw_wm_prog_key *key,
struct brw_wm_prog_data *prog_data,
- struct nir_shader *shader,
+ nir_shader *nir,
int shader_time_index8,
int shader_time_index16,
int shader_time_index32,
void *mem_ctx,
const struct brw_cs_prog_key *key,
struct brw_cs_prog_data *prog_data,
- const struct nir_shader *shader,
+ const nir_shader *nir,
int shader_time_index,
struct brw_compile_stats *stats,
char **error_str);
return slm_size;
}
/* Total size in bytes of the CS push constant data for the given thread
 * count (presumably cross-thread plus per-thread blocks -- see
 * brw_cs_prog_data::push).
 */
unsigned
brw_cs_push_const_total_size(const struct brw_cs_prog_data *cs_prog_data,
                             unsigned threads);

/* Select which compiled SIMD size to dispatch for the given workgroup size;
 * used with variable group size, where the choice is made at dispatch time.
 */
unsigned
brw_cs_simd_size_for_group_size(const struct gen_device_info *devinfo,
                                const struct brw_cs_prog_data *cs_prog_data,
                                unsigned group_size);

/* Patch the relocatable immediates recorded in prog_data::relocs into the
 * assembled shader at @p program, using the matching entries of @p values.
 */
void
brw_write_shader_relocs(const struct gen_device_info *devinfo,
                        void *program,
                        const struct brw_stage_prog_data *prog_data,
                        struct brw_shader_reloc_value *values,
                        unsigned num_values);
/**
 * Calculate the RightExecutionMask field used in GPGPU_WALKER.
 *
 * Returns a mask with one bit set per channel enabled in the final
 * (possibly partial) SIMD group of a row: `remainder` bits when group_size
 * is not a multiple of simd_size, otherwise all simd_size bits.
 */
static inline unsigned
brw_cs_right_mask(unsigned group_size, unsigned simd_size)
{
   /* simd_size is assumed to be a power of two (8/16/32), so & (simd_size-1)
    * is the cheap modulo.  The shift counts stay in [0, 31] because
    * remainder >= 1 on that path and simd_size >= 1 on the other, avoiding
    * the undefined full-width shift.
    */
   const uint32_t remainder = group_size & (simd_size - 1);
   if (remainder > 0)
      return ~0u >> (32 - remainder);
   else
      return ~0u >> (32 - simd_size);
}
+
/**
* Return true if the given shader stage is dispatched contiguously by the
* relevant fixed function starting from channel 0 of the SIMD thread, which