extern "C" {
#endif
/* Type-size callbacks (e.g. for nir_lower_io / uniform layout): number of
 * scalar slots, vec4 slots, or dvec4 slots a variable of the given type
 * occupies.  "bindless" selects how bindless sampler/image handles are
 * counted.
 */
int type_size_scalar(const struct glsl_type *type, bool bindless);
int type_size_vec4(const struct glsl_type *type, bool bindless);
int type_size_dvec4(const struct glsl_type *type, bool bindless);
/* Byte size of a scalar-layout variable: 4 bytes per scalar slot. */
static inline int
type_size_scalar_bytes(const struct glsl_type *type, bool bindless)
{
   return type_size_scalar(type, bindless) * 4;
}
/* Byte size of a vec4-layout variable: 16 bytes per vec4 slot. */
static inline int
type_size_vec4_bytes(const struct glsl_type *type, bool bindless)
{
   return type_size_vec4(type, bindless) * 16;
}
/* Flags set in the instr->pass_flags field by i965 analysis passes */
void brw_nir_analyze_boolean_resolves(nir_shader *nir);

/* Run the generic early NIR lowering/optimization pipeline in place;
 * "softfp64" supplies the soft-fp64 helper shader when fp64 must be
 * emulated (may be NULL when unused).
 */
void brw_preprocess_nir(const struct brw_compiler *compiler,
                        nir_shader *nir,
                        const nir_shader *softfp64);

/* Cross-stage linking: varying packing/compaction between the two
 * shaders; operates on the shaders in place.
 */
void
brw_nir_link_shaders(const struct brw_compiler *compiler,
                     nir_shader *producer, nir_shader *consumer);

bool brw_nir_lower_cs_intrinsics(nir_shader *nir,
                                 unsigned dispatch_width);
void brw_nir_lower_legacy_clipping(nir_shader *nir,
                                   int nr_userclip_plane_consts,
                                   struct brw_stage_prog_data *prog_data);
void brw_nir_lower_vs_inputs(nir_shader *nir,
                             const uint8_t *vs_attrib_wa_flags);
void brw_nir_lower_vue_inputs(nir_shader *nir,
                              GLenum tes_primitive_mode);
void brw_nir_lower_fs_outputs(nir_shader *nir);
-nir_shader *brw_postprocess_nir(nir_shader *nir,
- const struct brw_compiler *compiler,
- bool is_scalar);
+bool brw_nir_lower_conversions(nir_shader *nir);
+
+bool brw_nir_lower_image_load_store(nir_shader *nir,
+ const struct gen_device_info *devinfo);
+void brw_nir_rewrite_image_intrinsic(nir_intrinsic_instr *intrin,
+ nir_ssa_def *index);
+void brw_nir_rewrite_bindless_image_intrinsic(nir_intrinsic_instr *intrin,
+ nir_ssa_def *handle);
+
+bool brw_nir_lower_mem_access_bit_sizes(nir_shader *shader);
+
+void brw_postprocess_nir(nir_shader *nir,
+ const struct brw_compiler *compiler,
+ bool is_scalar);
bool brw_nir_apply_attribute_workarounds(nir_shader *nir,
const uint8_t *attrib_wa_flags);
void brw_nir_apply_tcs_quads_workaround(nir_shader *nir);
-nir_shader *brw_nir_apply_sampler_key(nir_shader *nir,
- const struct brw_compiler *compiler,
- const struct brw_sampler_prog_key_data *key,
- bool is_scalar);
+void brw_nir_apply_key(nir_shader *nir,
+ const struct brw_compiler *compiler,
+ const struct brw_base_prog_key *key,
+ unsigned max_subgroup_size,
+ bool is_scalar);
+enum brw_conditional_mod brw_cmod_for_nir_comparison(nir_op op);
+uint32_t brw_aop_for_nir_intrinsic(const nir_intrinsic_instr *atomic);
enum brw_reg_type brw_type_for_nir_type(const struct gen_device_info *devinfo,
nir_alu_type type);
struct gl_program *prog,
struct brw_stage_prog_data *stage_prog_data);
+void brw_nir_lower_gl_images(nir_shader *shader,
+ const struct gl_program *prog);
+
void brw_nir_analyze_ubo_ranges(const struct brw_compiler *compiler,
nir_shader *nir,
const struct brw_vs_prog_key *vs_key,
bool brw_nir_opt_peephole_ffma(nir_shader *shader);
-nir_shader *brw_nir_optimize(nir_shader *nir,
- const struct brw_compiler *compiler,
- bool is_scalar,
- bool allow_copies);
+void brw_nir_optimize(nir_shader *nir,
+ const struct brw_compiler *compiler,
+ bool is_scalar,
+ bool allow_copies);
+
+nir_shader *brw_nir_create_passthrough_tcs(void *mem_ctx,
+ const struct brw_compiler *compiler,
+ const nir_shader_compiler_options *options,
+ const struct brw_tcs_prog_key *key);
#define BRW_NIR_FRAG_OUTPUT_INDEX_SHIFT 0
#define BRW_NIR_FRAG_OUTPUT_INDEX_MASK INTEL_MASK(0, 0)