* back-end compiler.
*/
bool compact_params;
+
+ /**
+ * Whether or not the driver wants variable group size to be lowered by the
+ * back-end compiler.
+ */
+ bool lower_variable_group_size;
};
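A driver opts in to this lowering when it creates the compiler. A minimal sketch, assuming the flag is simply set right after brw_compiler_create():

    struct brw_compiler *compiler = brw_compiler_create(mem_ctx, devinfo);
    /* Illustrative: request backend lowering of variable group size. */
    compiler->lower_variable_group_size = true;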
/**
BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Y,
BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Z,
BRW_PARAM_BUILTIN_SUBGROUP_ID,
+ BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X,
+ BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_Y,
+ BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_Z,
};
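With variable group size, the group dimensions are only known at dispatch time, so they are passed through these new builtins. A sketch of how a driver could resolve them when filling push constants; push_constants and group_size are hypothetical names here:

    for (unsigned i = 0; i < prog_data->nr_params; i++) {
       switch (prog_data->param[i]) {
       case BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X:
          push_constants[i] = group_size[0];
          break;
       case BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_Y:
          push_constants[i] = group_size[1];
          break;
       case BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_Z:
          push_constants[i] = group_size[2];
          break;
       default:
          break;
       }
    }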
#define BRW_PARAM_BUILTIN_CLIP_PLANE(idx, comp) \
GLuint nr_params; /**< number of float params/constants */
GLuint nr_pull_params;
+ /* zero_push_reg is a bitfield which indicates what push registers (if any)
+ * should be zeroed by SW at the start of the shader. The corresponding
+ * push_reg_mask_param specifies the param index (in 32-bit units) where
+ * the actual runtime 64-bit mask will be pushed. The shader will zero
+ * push reg i if
+ *
+ * reg_used & zero_push_reg & ~*push_reg_mask_param & (1ull << i)
+ *
+ * If this field is set, brw_compiler::compact_params must be false.
+ */
+ uint64_t zero_push_reg;
+ unsigned push_reg_mask_param;
+
unsigned curb_read_length;
unsigned total_scratch;
unsigned total_shared;
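As a worked example of the zeroing condition for zero_push_reg above, here it is as plain C; runtime_mask stands for the 64-bit value pushed at push_reg_mask_param, and the helper itself is hypothetical:

    static bool
    should_zero_push_reg(uint64_t reg_used, uint64_t zero_push_reg,
                         uint64_t runtime_mask, unsigned i)
    {
       /* Zero register i only if the shader uses it, the compiler asked for
        * zeroing, and the runtime mask does not exempt it. */
       return (reg_used & zero_push_reg & ~runtime_mask & (1ull << i)) != 0;
    }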
*/
uint32_t *param;
uint32_t *pull_param;
+
+ /* Whether the shader uses atomic load/store operations. */
+ bool uses_atomic_load_store;
};
static inline uint32_t *
bool dispatch_16;
bool dispatch_32;
bool dual_src_blend;
- bool replicate_alpha;
bool persample_dispatch;
bool uses_pos_offset;
bool uses_omask;
*/
uint32_t flat_inputs;
+ /**
+ * Bitmask of input varyings read by the fragment shader, one bit per
+ * gl_varying_slot.
+ */
+ uint64_t inputs;
+
/* Mapping of VUE slots to interpolation modes.
* Used by the Gen4-5 clip/sf/wm stages.
*/
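A usage sketch for the new inputs field, assuming it is a bitmask over gl_varying_slot (BITFIELD64_BIT is the usual Mesa helper from util/macros.h); the helper below is hypothetical:

    static bool
    fs_reads_varying(const struct brw_wm_prog_data *wm_prog_data,
                     gl_varying_slot slot)
    {
       /* Test whether the fragment shader reads the given varying. */
       return (wm_prog_data->inputs & BITFIELD64_BIT(slot)) != 0;
    }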
* For varying slots that are not used by the FS, the value is -1.
*/
int urb_setup[VARYING_SLOT_MAX];
+
+ /**
+ * Cache of the active varying slots in the urb_setup array above: the
+ * attribute (slot) numbers whose urb_setup entry is not -1, stored in
+ * order. The number of valid entries is given by
+ * urb_setup_attribs_count.
+ */
+ uint8_t urb_setup_attribs[VARYING_SLOT_MAX];
+ uint8_t urb_setup_attribs_count;
};
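A minimal sketch of how this cache could be populated from urb_setup; the helper name is hypothetical:

    static void
    build_urb_setup_attribs(struct brw_wm_prog_data *prog_data)
    {
       prog_data->urb_setup_attribs_count = 0;
       for (unsigned slot = 0; slot < VARYING_SLOT_MAX; slot++) {
          /* Record every slot that has a valid urb_setup mapping. */
          if (prog_data->urb_setup[slot] >= 0) {
             prog_data->urb_setup_attribs[prog_data->urb_setup_attribs_count++] =
                slot;
          }
       }
    }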
/** Returns the SIMD width corresponding to a given KSP index
struct brw_stage_prog_data base;
unsigned local_size[3];
- unsigned simd_size;
- unsigned threads;
unsigned slm_size;
+
+ /* Program offsets for the 8/16/32 SIMD variants. Multiple variants are
+ * kept when using variable group size, and the right one can only be
+ * decided at dispatch time.
+ */
+ unsigned prog_offset[3];
+
+ /* Bitmask indicating which program offsets are valid. */
+ unsigned prog_mask;
+
+ /* Bitmask indicating which programs have spilled. */
+ unsigned prog_spilled;
+
bool uses_barrier;
bool uses_num_work_groups;
struct {
struct brw_push_const_block cross_thread;
struct brw_push_const_block per_thread;
- struct brw_push_const_block total;
} push;
struct {
} binding_table;
};
+static inline uint32_t
+brw_cs_prog_data_prog_offset(const struct brw_cs_prog_data *prog_data,
+ unsigned dispatch_width)
+{
+ assert(dispatch_width == 8 ||
+ dispatch_width == 16 ||
+ dispatch_width == 32);
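+ /* Map dispatch width 8/16/32 to variant array index 0/1/2. */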
+ const unsigned index = dispatch_width / 16;
+ assert(prog_data->prog_mask & (1 << index));
+ return prog_data->prog_offset[index];
+}
+
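Usage sketch for the helper above (the wrapper is hypothetical): fetch the offset of the SIMD16 variant, which is valid only when bit 1 of prog_mask is set:

    static uint32_t
    cs_simd16_prog_offset(const struct brw_cs_prog_data *prog_data)
    {
       return brw_cs_prog_data_prog_offset(prog_data, 16);
    }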
/**
* Enum representing the i965-specific vertex results that don't correspond
* exactly to any element of gl_varying_slot. The values of this enum are
void brw_compute_vue_map(const struct gen_device_info *devinfo,
struct brw_vue_map *vue_map,
uint64_t slots_valid,
- bool separate_shader);
+ bool separate_shader,
+ uint32_t pos_slots);
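Callers now pass the number of position slots explicitly. A hedged example, assuming 1 is the value for the common single-position case:

    brw_compute_vue_map(devinfo, &vue_map, nir->info.outputs_written,
                        nir->info.separate_shader, 1 /* pos_slots */);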
void brw_compute_tess_vue_map(struct brw_vue_map *const vue_map,
uint64_t slots_valid,
/** Number of vertices in the output patch */
int instances;
+
+ /** Track patch count threshold */
+ int patch_count_threshold;
};
struct brw_cs_prog_data cs;
};
-#define DEFINE_PROG_DATA_DOWNCAST(stage) \
-static inline struct brw_##stage##_prog_data * \
-brw_##stage##_prog_data(struct brw_stage_prog_data *prog_data) \
-{ \
- return (struct brw_##stage##_prog_data *) prog_data; \
+#define DEFINE_PROG_DATA_DOWNCAST(stage) \
+static inline struct brw_##stage##_prog_data * \
+brw_##stage##_prog_data(struct brw_stage_prog_data *prog_data) \
+{ \
+ return (struct brw_##stage##_prog_data *) prog_data; \
+} \
+static inline const struct brw_##stage##_prog_data * \
+brw_##stage##_prog_data_const(const struct brw_stage_prog_data *prog_data) \
+{ \
+ return (const struct brw_##stage##_prog_data *) prog_data; \
}
DEFINE_PROG_DATA_DOWNCAST(vue)
DEFINE_PROG_DATA_DOWNCAST(vs)
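Usage sketch for the new const-qualified downcasts, assuming brw_vue_prog_data has a urb_entry_size field; the helper itself is hypothetical:

    static unsigned
    vue_urb_entry_size(const struct brw_stage_prog_data *base)
    {
       /* The _const variant avoids casting away constness at call sites. */
       const struct brw_vue_prog_data *vue = brw_vue_prog_data_const(base);
       return vue->urb_entry_size;
    }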
struct brw_compile_stats {
uint32_t dispatch_width; /**< 0 for vec4 */
uint32_t instructions;
+ uint32_t sends;
uint32_t loops;
uint32_t cycles;
uint32_t spills;
return slm_size;
}
+unsigned
+brw_cs_push_const_total_size(const struct brw_cs_prog_data *cs_prog_data,
+ unsigned threads);
+
+unsigned
+brw_cs_simd_size_for_group_size(const struct gen_device_info *devinfo,
+ const struct brw_cs_prog_data *cs_prog_data,
+ unsigned group_size);
+
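These helpers replace the removed push.total block: with variable group size the thread count is only known at dispatch time, so the total push constant size must be computed then. A sketch, assuming group_size is the flattened local size and DIV_ROUND_UP is the Mesa macro:

    static unsigned
    cs_push_size_at_dispatch(const struct gen_device_info *devinfo,
                             const struct brw_cs_prog_data *cs_prog_data,
                             unsigned group_size)
    {
       const unsigned simd_size =
          brw_cs_simd_size_for_group_size(devinfo, cs_prog_data, group_size);
       /* Each HW thread covers simd_size invocations of the group. */
       const unsigned threads = DIV_ROUND_UP(group_size, simd_size);
       return brw_cs_push_const_total_size(cs_prog_data, threads);
    }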
/**
* Return true if the given shader stage is dispatched contiguously by the
* relevant fixed function starting from channel 0 of the SIMD thread, which