unsigned base_offset,
int (*type_size)(const struct glsl_type *));
+typedef enum {
+ /* If set, this forces all non-flat fragment shader inputs to be
+ * interpolated as if with the "sample" qualifier. This requires
+ * nir_shader_compiler_options::use_interpolated_input_intrinsics.
+ */
+ nir_lower_io_force_sample_interpolation = (1 << 1),
+} nir_lower_io_options;
void nir_lower_io(nir_shader *shader,
nir_variable_mode modes,
- int (*type_size)(const struct glsl_type *));
+ int (*type_size)(const struct glsl_type *),
+ nir_lower_io_options);
nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);
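For context, nir_lower_io now takes a flags word, and setting the new flag makes every non-flat fragment input behave as though declared with GLSL's "sample" qualifier. A minimal caller sketch (reusing the type_size_vec4 callback that appears at the i965 call sites below):

    nir_lower_io(nir, nir_var_shader_in, type_size_vec4,
                 nir_lower_io_force_sample_interpolation);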
void *mem_ctx;
int (*type_size)(const struct glsl_type *type);
nir_variable_mode modes;
+ nir_lower_io_options options;
};
void
assert(vertex_index == NULL);
nir_intrinsic_op bary_op;
- if (var->data.sample)
+ if (var->data.sample ||
+ (state->options & nir_lower_io_force_sample_interpolation))
bary_op = nir_intrinsic_load_barycentric_sample;
else if (var->data.centroid)
bary_op = nir_intrinsic_load_barycentric_centroid;
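When neither qualifier applies (and the flag is unset), the chain presumably falls through to ordinary per-pixel interpolation; the else arm this excerpt cuts off would read roughly:

    else
       bary_op = nir_intrinsic_load_barycentric_pixel;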
nir_intrinsic_op bary_op;
switch (intrin->intrinsic) {
case nir_intrinsic_interp_var_at_centroid:
- bary_op = nir_intrinsic_load_barycentric_centroid;
+ bary_op = (state->options & nir_lower_io_force_sample_interpolation) ?
+ nir_intrinsic_load_barycentric_sample :
+ nir_intrinsic_load_barycentric_centroid;
break;
case nir_intrinsic_interp_var_at_sample:
bary_op = nir_intrinsic_load_barycentric_at_sample;
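Only the centroid arm is affected by the flag; interpolateAtSample() and interpolateAtOffset() encode explicit programmer intent, so they keep their own barycentric intrinsics. For completeness, the arms this excerpt cuts off presumably look like:

    case nir_intrinsic_interp_var_at_offset:
       bary_op = nir_intrinsic_load_barycentric_at_offset;
       break;
    default:
       unreachable("Bogus interpolateAt() intrinsic.");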
static void
nir_lower_io_impl(nir_function_impl *impl,
nir_variable_mode modes,
- int (*type_size)(const struct glsl_type *))
+ int (*type_size)(const struct glsl_type *),
+ nir_lower_io_options options)
{
struct lower_io_state state;
state.mem_ctx = ralloc_parent(impl);
state.modes = modes;
state.type_size = type_size;
+ state.options = options;
nir_foreach_block(block, impl) {
nir_lower_io_block(block, &state);
void
nir_lower_io(nir_shader *shader, nir_variable_mode modes,
- int (*type_size)(const struct glsl_type *))
+ int (*type_size)(const struct glsl_type *),
+ nir_lower_io_options options)
{
nir_foreach_function(function, shader) {
- if (function->impl)
- nir_lower_io_impl(function->impl, modes, type_size);
+ if (function->impl)
+ nir_lower_io_impl(function->impl, modes, type_size, options);
}
}
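Callers that only sometimes want this behavior can build the flag word conditionally; a hedged sketch, where key->persample_interp stands in for whatever shader-key bit a driver uses to request per-sample shading (the name is illustrative, not part of this patch):

    nir_lower_io_options opts = (nir_lower_io_options)0;
    if (key->persample_interp) /* hypothetical shader-key bit */
       opts |= nir_lower_io_force_sample_interpolation;
    nir_lower_io(nir, nir_var_shader_in, type_size_vec4, opts);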
// TODO nir_assign_var_locations??
NIR_PASS_V(nir, nir_lower_system_values);
- NIR_PASS_V(nir, nir_lower_io, nir_var_all, st_glsl_type_size);
+ NIR_PASS_V(nir, nir_lower_io, nir_var_all, st_glsl_type_size,
+ (nir_lower_io_options)0);
NIR_PASS_V(nir, nir_lower_samplers, prog);
return nir;
unsigned end = var->data.location + nir_uniform_type_size(var->type);
nir->num_uniforms = MAX2(nir->num_uniforms, end);
}
- nir_lower_io(nir, nir_var_uniform, nir_uniform_type_size);
+ nir_lower_io(nir, nir_var_uniform, nir_uniform_type_size, 0);
const unsigned *program =
brw_compile_fs(compiler, blorp->driver_ctx, mem_ctx,
* loaded as one vec4 or dvec4 per element (or matrix column), depending on
* whether it is a double-precision type or not.
*/
- nir_lower_io(nir, nir_var_shader_in, type_size_vs_input);
+ nir_lower_io(nir, nir_var_shader_in, type_size_vs_input, 0);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
}
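For readers tracking the assorted type_size callbacks threaded through these call sites: each maps a glsl_type to the number of driver-location slots (or bytes) it occupies, and nir_lower_io uses the callback to turn variable offsets into intrinsic offsets. A simplified sketch of a vec4-slot counter, assuming Mesa's glsl_count_attribute_slots helper (the real type_size_vec4 special-cases matrices, doubles, and structs itself):

    static int
    example_type_size_vec4(const struct glsl_type *type)
    {
       /* Illustrative stand-in: count vec4-sized driver locations. */
       return glsl_count_attribute_slots(type, false);
    }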
/* Inputs are stored in vec4 slots, so use type_size_vec4(). */
- nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+ nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);
if (is_scalar || nir->stage != MESA_SHADER_GEOMETRY) {
/* This pass needs actual constants */
var->data.driver_location = var->data.location;
}
- nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+ nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
var->data.driver_location = var->data.location;
}
- nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+ nir_lower_io(nir, nir_var_shader_in, type_size_vec4, 0);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
VARYING_SLOT_VAR0,
type_size_vec4_times_4);
- nir_lower_io(nir, nir_var_shader_out, type_size_vec4_times_4);
+ nir_lower_io(nir, nir_var_shader_out, type_size_vec4_times_4, 0);
} else {
nir_foreach_variable(var, &nir->outputs)
var->data.driver_location = var->data.location;
- nir_lower_io(nir, nir_var_shader_out, type_size_vec4);
+ nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);
}
}
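The scalar path uses type_size_vec4_times_4 because each vec4 output slot expands to four scalar locations there; presumably it is just the vec4 count scaled up:

    static int
    type_size_vec4_times_4(const struct glsl_type *type)
    {
       return 4 * type_size_vec4(type); /* one location per component */
    }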
var->data.driver_location = var->data.location;
}
- nir_lower_io(nir, nir_var_shader_out, type_size_vec4);
+ nir_lower_io(nir, nir_var_shader_out, type_size_vec4, 0);
/* This pass needs actual constants */
nir_opt_constant_folding(nir);
SET_FIELD(var->data.location, BRW_NIR_FRAG_OUTPUT_LOCATION);
}
- nir_lower_io(nir, nir_var_shader_out, type_size_dvec4);
+ nir_lower_io(nir, nir_var_shader_out, type_size_dvec4, 0);
}
void
{
nir_assign_var_locations(&nir->shared, &nir->num_shared, 0,
type_size_scalar_bytes);
- nir_lower_io(nir, nir_var_shared, type_size_scalar_bytes);
+ nir_lower_io(nir, nir_var_shared, type_size_scalar_bytes, 0);
}
#define OPT(pass, ...) ({ \
if (is_scalar) {
nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, 0,
type_size_scalar_bytes);
- nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes);
+ nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes, 0);
} else {
nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, 0,
type_size_vec4_bytes);
- nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes);
+ nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes, 0);
}
}
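The *_bytes callbacks appear here because shared and (in scalar mode) uniform storage is byte-addressed rather than slot-addressed; presumably they just scale the slot counts to bytes, along these lines (4 bytes per scalar channel, 16 per vec4 slot):

    static int
    type_size_scalar_bytes(const struct glsl_type *type)
    {
       return type_size_scalar(type) * 4;
    }

    static int
    type_size_vec4_bytes(const struct glsl_type *type)
    {
       return type_size_vec4(type) * 16;
    }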
&nir->uniforms, &nir->num_uniforms);
NIR_PASS_V(nir, nir_lower_system_values);
- NIR_PASS_V(nir, nir_lower_io, nir_var_all, st_glsl_type_size);
+ NIR_PASS_V(nir, nir_lower_io, nir_var_all, st_glsl_type_size,
+ (nir_lower_io_options)0);
NIR_PASS_V(nir, nir_lower_samplers, shader_program);
}
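One last detail: the explicit (nir_lower_io_options)0 casts at these state-tracker call sites are a C++ requirement, since C++ will not implicitly convert an int to an enum type; the bare 0 passed at the C call sites above is fine. Side by side:

    /* C: 0 converts implicitly to the enum parameter. */
    nir_lower_io(nir, nir_var_uniform, nir_uniform_type_size, 0);

    /* C++: an explicit conversion (or a named enumerator) is required. */
    NIR_PASS_V(nir, nir_lower_io, nir_var_all, st_glsl_type_size,
               (nir_lower_io_options)0);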