*
*/
+#include "float64_glsl.h"
#include "glsl_to_nir.h"
#include "ir_visitor.h"
#include "ir_hierarchical_visitor.h"
#include "ir.h"
+#include "ir_optimization.h"
+#include "program.h"
#include "compiler/nir/nir_control_flow.h"
#include "compiler/nir/nir_builder.h"
+#include "main/errors.h"
#include "main/imports.h"
#include "main/mtypes.h"
+#include "main/shaderobj.h"
#include "util/u_math.h"
/*
class nir_visitor : public ir_visitor
{
public:
- nir_visitor(nir_shader *shader);
+ nir_visitor(gl_context *ctx, nir_shader *shader);
~nir_visitor();
virtual void visit(ir_variable *);
nir_ssa_def *src2, nir_ssa_def *src3);
bool supports_ints;
+ bool supports_std430;
nir_shader *shader;
nir_function_impl *impl;
nir_visitor *visitor;
};
+/* glsl_to_nir can only handle converting certain function parameters
+ * to NIR. This visitor checks for parameters it can't currently handle.
+ */
+class ir_function_param_visitor : public ir_hierarchical_visitor
+{
+public:
+ ir_function_param_visitor()
+ : unsupported(false)
+ {
+ }
+
+ virtual ir_visitor_status visit_enter(ir_function_signature *ir)
+ {
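+      /* Intrinsics are translated directly to NIR intrinsics rather than
+       * converted as calls, so their parameters never take this path.
+       */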
+ if (ir->is_intrinsic())
+ return visit_continue;
+
+ foreach_in_list(ir_variable, param, &ir->parameters) {
+      if (!param->type->is_vector() && !param->type->is_scalar()) {
+ unsupported = true;
+ return visit_stop;
+ }
+
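+      /* Only plain in or out parameters are converted; inout would need
+       * copy-in/copy-out semantics that glsl_to_nir doesn't implement.
+       */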
+ if (param->data.mode == ir_var_function_inout) {
+ unsupported = true;
+ return visit_stop;
+ }
+ }
+
+ return visit_continue;
+ }
+
+ bool unsupported;
+};
+
} /* end of anonymous namespace */
+
+static bool
+has_unsupported_function_param(exec_list *ir)
+{
+ ir_function_param_visitor visitor;
+ visit_list_elements(&visitor, ir);
+ return visitor.unsupported;
+}
+
nir_shader *
-glsl_to_nir(const struct gl_shader_program *shader_prog,
+glsl_to_nir(struct gl_context *ctx,
+ const struct gl_shader_program *shader_prog,
gl_shader_stage stage,
const nir_shader_compiler_options *options)
{
struct gl_linked_shader *sh = shader_prog->_LinkedShaders[stage];
+ const struct gl_shader_compiler_options *gl_options =
+ &ctx->Const.ShaderCompilerOptions[stage];
+
+   /* glsl_to_nir can only handle converting certain function parameters
+    * to NIR. If we find something we can't handle then we run the GLSL IR
+    * optimizations to remove it before we continue on.
+    *
+    * TODO: add missing glsl ir to nir support and remove this loop.
+    */
+ while (has_unsupported_function_param(sh->ir)) {
+ do_common_optimization(sh->ir, true, true, gl_options,
+ ctx->Const.NativeIntegers);
+ }
+
nir_shader *shader = nir_shader_create(NULL, stage, options,
&sh->Program->info);
- nir_visitor v1(shader);
+ nir_visitor v1(ctx, shader);
nir_function_visitor v2(&v1);
v2.run(sh->ir);
visit_exec_list(sh->ir, &v1);
+ nir_validate_shader(shader, "after glsl to nir, before function inline");
+
+ /* We have to lower away local constant initializers right before we
+ * inline functions. That way they get properly initialized at the top
+ * of the function and not at the top of its caller.
+ */
nir_lower_constant_initializers(shader, (nir_variable_mode)~0);
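+
+   /* Functions must be single-exit before they can be inlined, so lower
+    * returns first, then inline everything into main() and clean up the
+    * resulting deref chains.
+    */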
+ nir_lower_returns(shader);
+ nir_inline_functions(shader);
+ nir_opt_deref(shader);
+
+ nir_validate_shader(shader, "after function inlining and return lowering");
+
+ /* Now that we have inlined everything remove all of the functions except
+ * main().
+ */
+   foreach_list_typed_safe(nir_function, function, node, &shader->functions) {
+ if (strcmp("main", function->name) != 0) {
+ exec_node_remove(&function->node);
+ }
+ }
/* Remap the locations to slots so those requiring two slots will occupy
* two locations. For instance, if we have in the IR code a dvec3 attr0 in
shader->info.has_transform_feedback_varyings |=
shader_prog->last_vert_prog->sh.LinkedTransformFeedback->NumVarying > 0;
+ if (shader->info.stage == MESA_SHADER_FRAGMENT) {
+ shader->info.fs.pixel_center_integer = sh->Program->info.fs.pixel_center_integer;
+ shader->info.fs.origin_upper_left = sh->Program->info.fs.origin_upper_left;
+ }
+
return shader;
}
-nir_visitor::nir_visitor(nir_shader *shader)
+nir_visitor::nir_visitor(gl_context *ctx, nir_shader *shader)
{
this->supports_ints = shader->options->native_integers;
+ this->supports_std430 = ctx->Const.UseSTD430AsDefaultPacking;
this->shader = shader;
this->is_global = true;
this->var_table = _mesa_pointer_hash_table_create(NULL);
for (unsigned r = 0; r < rows; r++)
if (supports_ints)
- ret->values[0].u32[r] = ir->value.u[r];
+ ret->values[0][r].u32 = ir->value.u[r];
else
- ret->values[0].f32[r] = ir->value.u[r];
+ ret->values[0][r].f32 = ir->value.u[r];
break;
for (unsigned r = 0; r < rows; r++)
if (supports_ints)
- ret->values[0].i32[r] = ir->value.i[r];
+ ret->values[0][r].i32 = ir->value.i[r];
else
- ret->values[0].f32[r] = ir->value.i[r];
+ ret->values[0][r].f32 = ir->value.i[r];
break;
case GLSL_TYPE_FLOAT:
for (unsigned c = 0; c < cols; c++) {
for (unsigned r = 0; r < rows; r++)
- ret->values[c].f32[r] = ir->value.f[c * rows + r];
+ ret->values[c][r].f32 = ir->value.f[c * rows + r];
}
break;
case GLSL_TYPE_DOUBLE:
for (unsigned c = 0; c < cols; c++) {
for (unsigned r = 0; r < rows; r++)
- ret->values[c].f64[r] = ir->value.d[c * rows + r];
+ ret->values[c][r].f64 = ir->value.d[c * rows + r];
}
break;
assert(cols == 1);
for (unsigned r = 0; r < rows; r++)
- ret->values[0].u64[r] = ir->value.u64[r];
+ ret->values[0][r].u64 = ir->value.u64[r];
break;
case GLSL_TYPE_INT64:
assert(cols == 1);
for (unsigned r = 0; r < rows; r++)
- ret->values[0].i64[r] = ir->value.i64[r];
+ ret->values[0][r].i64 = ir->value.i64[r];
break;
case GLSL_TYPE_BOOL:
assert(cols == 1);
for (unsigned r = 0; r < rows; r++)
- ret->values[0].b[r] = ir->value.b[r];
+ ret->values[0][r].b = ir->value.b[r];
break;
return ret;
}
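+/* Recursively rebuild the array dimensions of array_type around elem_type,
+ * e.g. rewrapping an interface block's explicit type as block[4][2].
+ */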
+static const glsl_type *
+wrap_type_in_array(const glsl_type *elem_type, const glsl_type *array_type)
+{
+ if (!array_type->is_array())
+ return elem_type;
+
+ elem_type = wrap_type_in_array(elem_type, array_type->fields.array);
+
+ return glsl_type::get_array_instance(elem_type, array_type->length);
+}
+
void
nir_visitor::visit(ir_variable *ir)
{
unreachable("not reached");
}
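+   /* Note the inversion when translating the memory qualifiers: read-only
+    * maps to ACCESS_NON_WRITEABLE and write-only to ACCESS_NON_READABLE.
+    */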
+ unsigned image_access = 0;
+ if (ir->data.memory_read_only)
+ image_access |= ACCESS_NON_WRITEABLE;
+ if (ir->data.memory_write_only)
+ image_access |= ACCESS_NON_READABLE;
+ if (ir->data.memory_coherent)
+ image_access |= ACCESS_COHERENT;
+ if (ir->data.memory_volatile)
+ image_access |= ACCESS_VOLATILE;
+ if (ir->data.memory_restrict)
+ image_access |= ACCESS_RESTRICT;
+
+ /* For UBO and SSBO variables, we need explicit types */
+ if (var->data.mode & (nir_var_mem_ubo | nir_var_mem_ssbo)) {
+ const glsl_type *explicit_ifc_type =
+ ir->get_interface_type()->get_explicit_interface_type(supports_std430);
+
+ if (ir->type->without_array()->is_interface()) {
+ /* If the type contains the interface, wrap the explicit type in the
+ * right number of arrays.
+ */
+ var->type = wrap_type_in_array(explicit_ifc_type, ir->type);
+ } else {
+ /* Otherwise, this variable is one entry in the interface */
+ UNUSED bool found = false;
+ for (unsigned i = 0; i < explicit_ifc_type->length; i++) {
+ const glsl_struct_field *field =
+ &explicit_ifc_type->fields.structure[i];
+ if (strcmp(ir->name, field->name) != 0)
+ continue;
+
+ var->type = field->type;
+ if (field->memory_read_only)
+ image_access |= ACCESS_NON_WRITEABLE;
+ if (field->memory_write_only)
+ image_access |= ACCESS_NON_READABLE;
+ if (field->memory_coherent)
+ image_access |= ACCESS_COHERENT;
+ if (field->memory_volatile)
+ image_access |= ACCESS_VOLATILE;
+ if (field->memory_restrict)
+ image_access |= ACCESS_RESTRICT;
+
+ found = true;
+ break;
+ }
+ assert(found);
+ }
+ }
+
var->data.interpolation = ir->data.interpolation;
- var->data.origin_upper_left = ir->data.origin_upper_left;
- var->data.pixel_center_integer = ir->data.pixel_center_integer;
var->data.location_frac = ir->data.location_frac;
switch (ir->data.depth_layout) {
var->data.bindless = ir->data.bindless;
var->data.offset = ir->data.offset;
- unsigned image_access = 0;
- if (ir->data.memory_read_only)
- image_access |= ACCESS_NON_WRITEABLE;
- if (ir->data.memory_write_only)
- image_access |= ACCESS_NON_READABLE;
- if (ir->data.memory_coherent)
- image_access |= ACCESS_COHERENT;
- if (ir->data.memory_volatile)
- image_access |= ACCESS_VOLATILE;
- if (ir->data.memory_restrict)
- image_access |= ACCESS_RESTRICT;
var->data.image.access = (gl_access_qualifier)image_access;
var->data.image.format = ir->data.image_format;
nir_intrinsic_op op;
switch (ir->callee->intrinsic_id) {
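+   /* The generic atomics map to deref atomics; a float-typed result selects
+    * the float variant of the NIR opcode where one exists.
+    */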
+ case ir_intrinsic_generic_atomic_add:
+ op = ir->return_deref->type->is_integer_32_64()
+ ? nir_intrinsic_deref_atomic_add : nir_intrinsic_deref_atomic_fadd;
+ break;
+ case ir_intrinsic_generic_atomic_and:
+ op = nir_intrinsic_deref_atomic_and;
+ break;
+ case ir_intrinsic_generic_atomic_or:
+ op = nir_intrinsic_deref_atomic_or;
+ break;
+ case ir_intrinsic_generic_atomic_xor:
+ op = nir_intrinsic_deref_atomic_xor;
+ break;
+ case ir_intrinsic_generic_atomic_min:
+ assert(ir->return_deref);
+ if (ir->return_deref->type == glsl_type::int_type)
+ op = nir_intrinsic_deref_atomic_imin;
+ else if (ir->return_deref->type == glsl_type::uint_type)
+ op = nir_intrinsic_deref_atomic_umin;
+ else if (ir->return_deref->type == glsl_type::float_type)
+ op = nir_intrinsic_deref_atomic_fmin;
+ else
+ unreachable("Invalid type");
+ break;
+ case ir_intrinsic_generic_atomic_max:
+ assert(ir->return_deref);
+ if (ir->return_deref->type == glsl_type::int_type)
+ op = nir_intrinsic_deref_atomic_imax;
+ else if (ir->return_deref->type == glsl_type::uint_type)
+ op = nir_intrinsic_deref_atomic_umax;
+ else if (ir->return_deref->type == glsl_type::float_type)
+ op = nir_intrinsic_deref_atomic_fmax;
+ else
+ unreachable("Invalid type");
+ break;
+ case ir_intrinsic_generic_atomic_exchange:
+ op = nir_intrinsic_deref_atomic_exchange;
+ break;
+ case ir_intrinsic_generic_atomic_comp_swap:
+ op = ir->return_deref->type->is_integer_32_64()
+ ? nir_intrinsic_deref_atomic_comp_swap
+ : nir_intrinsic_deref_atomic_fcomp_swap;
+ break;
case ir_intrinsic_atomic_counter_read:
op = nir_intrinsic_atomic_counter_read_deref;
break;
nir_ssa_def *ret = &instr->dest.ssa;
switch (op) {
+ case nir_intrinsic_deref_atomic_add:
+ case nir_intrinsic_deref_atomic_imin:
+ case nir_intrinsic_deref_atomic_umin:
+ case nir_intrinsic_deref_atomic_imax:
+ case nir_intrinsic_deref_atomic_umax:
+ case nir_intrinsic_deref_atomic_and:
+ case nir_intrinsic_deref_atomic_or:
+ case nir_intrinsic_deref_atomic_xor:
+ case nir_intrinsic_deref_atomic_exchange:
+ case nir_intrinsic_deref_atomic_comp_swap:
+ case nir_intrinsic_deref_atomic_fadd:
+ case nir_intrinsic_deref_atomic_fmin:
+ case nir_intrinsic_deref_atomic_fmax:
+ case nir_intrinsic_deref_atomic_fcomp_swap: {
+ int param_count = ir->actual_parameters.length();
+ assert(param_count == 2 || param_count == 3);
+
+ /* Deref */
+ exec_node *param = ir->actual_parameters.get_head();
+ ir_rvalue *rvalue = (ir_rvalue *) param;
+ ir_dereference *deref = rvalue->as_dereference();
+ ir_swizzle *swizzle = NULL;
+ if (!deref) {
+ /* We may have a swizzle to pick off a single vec4 component */
+ swizzle = rvalue->as_swizzle();
+ assert(swizzle && swizzle->type->vector_elements == 1);
+ deref = swizzle->val->as_dereference();
+ assert(deref);
+ }
+ nir_deref_instr *nir_deref = evaluate_deref(deref);
+ if (swizzle) {
+ nir_deref = nir_build_deref_array_imm(&b, nir_deref,
+ swizzle->mask.x);
+ }
+ instr->src[0] = nir_src_for_ssa(&nir_deref->dest.ssa);
+
+ /* data1 parameter (this is always present) */
+ param = param->get_next();
+ ir_instruction *inst = (ir_instruction *) param;
+ instr->src[1] = nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));
+
+ /* data2 parameter (only with atomic_comp_swap) */
+ if (param_count == 3) {
+ assert(op == nir_intrinsic_deref_atomic_comp_swap ||
+ op == nir_intrinsic_deref_atomic_fcomp_swap);
+ param = param->get_next();
+ inst = (ir_instruction *) param;
+ instr->src[2] = nir_src_for_ssa(evaluate_rvalue(inst->as_rvalue()));
+ }
+
+ /* Atomic result */
+ assert(ir->return_deref);
+ nir_ssa_dest_init(&instr->instr, &instr->dest,
+ ir->return_deref->type->vector_elements, 32, NULL);
+ nir_builder_instr_insert(&b, &instr->instr);
+ break;
+ }
case nir_intrinsic_atomic_counter_read_deref:
case nir_intrinsic_atomic_counter_inc_deref:
case nir_intrinsic_atomic_counter_pre_dec_deref:
/* Set the image variable dereference. */
exec_node *param = ir->actual_parameters.get_head();
ir_dereference *image = (ir_dereference *)param;
- const glsl_type *type =
- image->variable_referenced()->type->without_array();
+ nir_deref_instr *deref = evaluate_deref(image);
+ const glsl_type *type = deref->type;
- instr->src[0] = nir_src_for_ssa(&evaluate_deref(image)->dest.ssa);
+ instr->src[0] = nir_src_for_ssa(&deref->dest.ssa);
param = param->get_next();
/* Set the intrinsic destination. */
return;
}
+ case ir_unop_ssbo_unsized_array_length: {
+ nir_intrinsic_instr *intrin =
+ nir_intrinsic_instr_create(b.shader,
+ nir_intrinsic_deref_buffer_array_length);
+
+ ir_dereference *deref = ir->operands[0]->as_dereference();
+ intrin->src[0] = nir_src_for_ssa(&evaluate_deref(deref)->dest.ssa);
+
+ add_instr(&intrin->instr, 1, 32);
+ return;
+ }
+
default:
break;
}
: nir_isub(&b, srcs[0], srcs[1]);
break;
case ir_binop_mul:
- result = type_is_float(out_type) ? nir_fmul(&b, srcs[0], srcs[1])
- : nir_imul(&b, srcs[0], srcs[1]);
+ if (type_is_float(out_type))
+ result = nir_fmul(&b, srcs[0], srcs[1]);
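+      /* A 64-bit product of 32-bit sources can use the specialized
+       * [iu]mul_2x32_64 opcodes instead of a full 64-bit multiply.
+       */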
+ else if (out_type == GLSL_TYPE_INT64 &&
+ (ir->operands[0]->type->base_type == GLSL_TYPE_INT ||
+ ir->operands[1]->type->base_type == GLSL_TYPE_INT))
+ result = nir_imul_2x32_64(&b, srcs[0], srcs[1]);
+ else if (out_type == GLSL_TYPE_UINT64 &&
+ (ir->operands[0]->type->base_type == GLSL_TYPE_UINT ||
+ ir->operands[1]->type->base_type == GLSL_TYPE_UINT))
+ result = nir_umul_2x32_64(&b, srcs[0], srcs[1]);
+ else
+ result = nir_imul(&b, srcs[0], srcs[1]);
break;
case ir_binop_div:
if (type_is_float(out_type))
num_srcs++;
if (ir->shadow_comparator != NULL)
num_srcs++;
- if (ir->offset != NULL)
+      /* Array offsets (textureGatherOffsets) are constants stored inside
+       * nir_tex_instr::tg4_offsets rather than counted as sources.
+       */
+ if (ir->offset != NULL && !ir->offset->type->is_array())
num_srcs++;
/* Add one for the texture deref */
}
nir_deref_instr *sampler_deref = evaluate_deref(ir->sampler);
- instr->src[0].src = nir_src_for_ssa(&sampler_deref->dest.ssa);
- instr->src[0].src_type = nir_tex_src_texture_deref;
- instr->src[1].src = nir_src_for_ssa(&sampler_deref->dest.ssa);
- instr->src[1].src_type = nir_tex_src_sampler_deref;
+
+   /* check for bindless handles: these are loaded from the variable as
+    * ordinary values, while non-bindless samplers go through the deref
+    */
+ if (sampler_deref->mode != nir_var_uniform ||
+ nir_deref_instr_get_variable(sampler_deref)->data.bindless) {
+ nir_ssa_def *load = nir_load_deref(&b, sampler_deref);
+ instr->src[0].src = nir_src_for_ssa(load);
+ instr->src[0].src_type = nir_tex_src_texture_handle;
+ instr->src[1].src = nir_src_for_ssa(load);
+ instr->src[1].src_type = nir_tex_src_sampler_handle;
+ } else {
+ instr->src[0].src = nir_src_for_ssa(&sampler_deref->dest.ssa);
+ instr->src[0].src_type = nir_tex_src_texture_deref;
+ instr->src[1].src = nir_src_for_ssa(&sampler_deref->dest.ssa);
+ instr->src[1].src_type = nir_tex_src_sampler_deref;
+ }
unsigned src_number = 2;
}
if (ir->offset != NULL) {
- /* we don't support multiple offsets yet */
- assert(ir->offset->type->is_vector() || ir->offset->type->is_scalar());
+ if (ir->offset->type->is_array()) {
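+         /* textureGatherOffsets: up to four constant (x, y) offsets, packed
+          * into the instruction's tg4_offsets table.
+          */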
+ for (int i = 0; i < ir->offset->type->array_size(); i++) {
+ const ir_constant *c =
+ ir->offset->as_constant()->get_array_element(i);
+
+ for (unsigned j = 0; j < 2; ++j) {
+ int val = c->get_int_component(j);
+ assert(val <= 31 && val >= -32);
+ instr->tg4_offsets[i][j] = val;
+ }
+ }
+ } else {
+ assert(ir->offset->type->is_vector() || ir->offset->type->is_scalar());
- instr->src[src_number].src =
- nir_src_for_ssa(evaluate_rvalue(ir->offset));
- instr->src[src_number].src_type = nir_tex_src_offset;
- src_number++;
+ instr->src[src_number].src =
+ nir_src_for_ssa(evaluate_rvalue(ir->offset));
+ instr->src[src_number].src_type = nir_tex_src_offset;
+ src_number++;
+ }
}
switch (ir->op) {
nir_intrinsic_instr_create(this->shader, nir_intrinsic_barrier);
nir_builder_instr_insert(&b, &instr->instr);
}
+
+nir_shader *
+glsl_float64_funcs_to_nir(struct gl_context *ctx,
+ const nir_shader_compiler_options *options)
+{
+ /* We pretend it's a vertex shader. Ultimately, the stage shouldn't
+ * matter because we're not optimizing anything here.
+ */
+ struct gl_shader *sh = _mesa_new_shader(-1, MESA_SHADER_VERTEX);
+ sh->Source = float64_source;
+ sh->CompileStatus = COMPILE_FAILURE;
+ _mesa_glsl_compile_shader(ctx, sh, false, false, true);
+
+ if (!sh->CompileStatus) {
+ if (sh->InfoLog) {
+ _mesa_problem(ctx,
+ "fp64 software impl compile failed:\n%s\nsource:\n%s\n",
+ sh->InfoLog, float64_source);
+ }
+ return NULL;
+ }
+
+ nir_shader *nir = nir_shader_create(NULL, MESA_SHADER_VERTEX, options, NULL);
+
+ nir_visitor v1(ctx, nir);
+ nir_function_visitor v2(&v1);
+ v2.run(sh->ir);
+ visit_exec_list(sh->ir, &v1);
+
+ /* _mesa_delete_shader will try to free sh->Source but it's static const */
+ sh->Source = NULL;
+ _mesa_delete_shader(ctx, sh);
+
+ nir_validate_shader(nir, "float64_funcs_to_nir");
+
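+   /* Run a lowering sequence similar to glsl_to_nir() but keep all of the
+    * functions: they are the payload that gets linked into shaders needing
+    * the fp64 software implementation.
+    */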
+ NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_function_temp);
+ NIR_PASS_V(nir, nir_lower_returns);
+ NIR_PASS_V(nir, nir_inline_functions);
+ NIR_PASS_V(nir, nir_opt_deref);
+
+ return nir;
+}