#include "ast.h"
#include "compiler/glsl_types.h"
#include "util/hash_table.h"
+#include "main/mtypes.h"
#include "main/macros.h"
#include "main/shaderobj.h"
#include "ir.h"
static void
detect_conflicting_assignments(struct _mesa_glsl_parse_state *state,
exec_list *instructions);
+static void
+verify_subroutine_associated_funcs(struct _mesa_glsl_parse_state *state);
+
static void
remove_per_vertex_blocks(exec_list *instructions,
_mesa_glsl_parse_state *state, ir_variable_mode mode);
foreach_list_typed (ast_node, ast, link, & state->translation_unit)
ast->hir(instructions, state);
+ verify_subroutine_associated_funcs(state);
detect_recursion_unlinked(state, instructions);
detect_conflicting_assignments(state, instructions);
}
case GLSL_TYPE_UINT:
- if (!state->is_version(400, 0) && !state->ARB_gpu_shader5_enable
- && !state->MESA_shader_integer_functions_enable)
+ if (!state->has_implicit_uint_to_int_conversion())
return (ir_expression_operation)0;
switch (from->base_type) {
case GLSL_TYPE_INT: return ir_unop_i2u;
return true;
/* Prior to GLSL 1.20, there are no implicit conversions */
- if (!state->is_version(120, 0))
- return false;
-
- /* ESSL does not allow implicit conversions */
- if (state->es_shader)
+ if (!state->has_implicit_conversions())
return false;
/* From page 27 (page 33 of the PDF) of the GLSL 1.50 spec:
const glsl_type *type_a = value_a->type;
const glsl_type *type_b = value_b->type;
- if (!state->check_version(130, 300, loc, "operator '%%' is reserved")) {
+ if (!state->EXT_gpu_shader4_enable &&
+ !state->check_version(130, 300, loc, "operator '%%' is reserved")) {
return glsl_type::error_type;
}
return glsl_type::error_type;
}
- if (!type_b->is_integer()) {
+ if (!type_b->is_integer_32()) {
_mesa_glsl_error(loc, state, "RHS of operator %s must be an integer or "
"integer vector", ast_expression::operator_string(op));
return glsl_type::error_type;
}
if (unsized_array) {
if (is_initializer) {
- return rhs;
+ if (rhs->type->get_scalar_type() == lhs->type->get_scalar_type())
+ return rhs;
} else {
_mesa_glsl_error(&loc, state,
"implicitly sized arrays cannot be assigned");
* The restriction on arrays is lifted in GLSL 1.20 and GLSL ES 3.00.
*/
error_emitted = true;
- } else if (!lhs->is_lvalue()) {
+ } else if (!lhs->is_lvalue(state)) {
_mesa_glsl_error(& lhs_loc, state, "non-lvalue in assignment");
error_emitted = true;
}
mark_whole_array_access(rhs);
mark_whole_array_access(lhs);
}
+ } else {
+ error_emitted = true;
}
/* Most callers of do_assignment (assign, add_assign, pre_inc/dec,
switch (op0->type->base_type) {
case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_FLOAT16:
case GLSL_TYPE_UINT:
case GLSL_TYPE_INT:
case GLSL_TYPE_BOOL:
case GLSL_TYPE_DOUBLE:
case GLSL_TYPE_UINT64:
case GLSL_TYPE_INT64:
+ case GLSL_TYPE_UINT16:
+ case GLSL_TYPE_INT16:
+ case GLSL_TYPE_UINT8:
+ case GLSL_TYPE_INT8:
return new(mem_ctx) ir_expression(operation, op0, op1);
case GLSL_TYPE_ARRAY: {
* scalar booleans. If it isn't, emit an error and return a constant
* boolean to avoid triggering cascading error messages.
*/
-ir_rvalue *
+static ir_rvalue *
get_scalar_boolean_operand(exec_list *instructions,
struct _mesa_glsl_parse_state *state,
ast_expression *parent_expr,
ir_binop_lshift,
ir_binop_rshift,
ir_binop_less,
- ir_binop_greater,
- ir_binop_lequal,
+ ir_binop_less, /* This is correct. See the ast_greater case below. */
+ ir_binop_gequal, /* This is correct. See the ast_lequal case below. */
ir_binop_gequal,
ir_binop_all_equal,
ir_binop_any_nequal,
switch (this->oper) {
case ast_aggregate:
- assert(!"ast_aggregate: Should never get here.");
- break;
+ unreachable("ast_aggregate: Should never get here.");
case ast_assign: {
this->subexpressions[0]->set_is_lhs(true);
assert(type->is_error()
|| (type->is_boolean() && type->is_scalar()));
+ /* Like NIR, GLSL IR does not have opcodes for > or <=. Instead, swap
+ * the arguments and use < or >=.
+ */
+ if (this->oper == ast_greater || this->oper == ast_lequal) {
+ ir_rvalue *const tmp = op[0];
+ op[0] = op[1];
+ op[1] = tmp;
+ }
+
result = new(ctx) ir_expression(operations[this->oper], type,
op[0], op[1]);
error_emitted = type->is_error();
if (rhs_instructions.is_empty()) {
result = new(ctx) ir_expression(ir_binop_logic_and, op[0], op[1]);
- type = result->type;
} else {
ir_variable *const tmp = new(ctx) ir_variable(glsl_type::bool_type,
"and_tmp",
stmt->else_instructions.push_tail(else_assign);
result = new(ctx) ir_dereference_variable(tmp);
- type = tmp->type;
}
break;
}
if (rhs_instructions.is_empty()) {
result = new(ctx) ir_expression(ir_binop_logic_or, op[0], op[1]);
- type = result->type;
} else {
ir_variable *const tmp = new(ctx) ir_variable(glsl_type::bool_type,
"or_tmp",
stmt->else_instructions.push_tail(else_assign);
result = new(ctx) ir_dereference_variable(tmp);
- type = tmp->type;
}
break;
}
op[1] = this->subexpressions[1]->hir(instructions, state);
orig_type = op[0]->type;
+
+ /* Break out if operand types were not parsed successfully. */
+ if ((op[0]->type == glsl_type::error_type ||
+ op[1]->type == glsl_type::error_type))
+ break;
+
type = arithmetic_result_type(op[0], op[1],
(this->oper == ast_mul_assign),
state, & loc);
* expressions; such use results in a compile-time error."
*/
if (type->contains_opaque()) {
- _mesa_glsl_error(&loc, state, "opaque variables cannot be operands "
- "of the ?: operator");
- error_emitted = true;
+ if (!(state->has_bindless() && (type->is_image() || type->is_sampler()))) {
+ _mesa_glsl_error(&loc, state, "variables of type %s cannot be "
+ "operands of the ?: operator", type->name);
+ error_emitted = true;
+ }
}
- ir_constant *cond_val = op[0]->constant_expression_value();
+ ir_constant *cond_val = op[0]->constant_expression_value(ctx);
if (then_instructions.is_empty()
&& else_instructions.is_empty()
error_emitted = op[0]->type->is_error() || op[1]->type->is_error();
+ if (error_emitted) {
+ result = ir_rvalue::error_value(ctx);
+ break;
+ }
+
type = arithmetic_result_type(op[0], op[1], false, state, & loc);
ir_rvalue *temp_rhs;
}
case ast_unsized_array_dim:
- assert(!"ast_unsized_array_dim: Should never get here.");
- break;
+ unreachable("ast_unsized_array_dim: Should never get here.");
case ast_function_call:
/* Should *NEVER* get here. ast_function_call should always be handled
* by ast_function_expression::hir.
*/
- assert(0);
- break;
+ unreachable("ast_function_call: handled elsewhere ");
case ast_identifier: {
/* ast_identifier can appear several places in a full abstract syntax
_mesa_glsl_warning(&loc, state, "`%s' used uninitialized",
this->primary_expression.identifier);
}
+
+ /* From the EXT_shader_framebuffer_fetch spec:
+ *
+ * "Unless the GL_EXT_shader_framebuffer_fetch extension has been
+ * enabled in addition, it's an error to use gl_LastFragData if it
+ * hasn't been explicitly redeclared with layout(noncoherent)."
+ */
+ if (var->data.fb_fetch_output && var->data.memory_coherent &&
+ !state->EXT_shader_framebuffer_fetch_enable) {
+ _mesa_glsl_error(&loc, state,
+ "invalid use of framebuffer fetch output not "
+ "qualified with layout(noncoherent)");
+ }
+
} else {
_mesa_glsl_error(& loc, state, "`%s' undeclared",
this->primary_expression.identifier);
process_array_size(exec_node *node,
struct _mesa_glsl_parse_state *state)
{
+ void *mem_ctx = state;
+
exec_list dummy_instructions;
ast_node *array_size = exec_node_data(ast_node, node, link);
return 0;
}
- if (!ir->type->is_integer()) {
+ if (!ir->type->is_integer_32()) {
_mesa_glsl_error(& loc, state,
"array size must be integer type");
return 0;
return 0;
}
- ir_constant *const size = ir->constant_expression_value();
+ ir_constant *const size = ir->constant_expression_value(mem_ctx);
if (size == NULL ||
(state->is_version(120, 300) &&
array_size->has_sequence_subexpression())) {
*/
const glsl_type *const t = type->without_array();
- return (t->is_float() || t->is_integer() || t->contains_opaque()) &&
- !t->is_record();
+ return (t->is_float() || t->is_integer_32() || t->contains_opaque()) &&
+ !t->is_struct();
}
const glsl_type *
{
const struct glsl_type *type;
- type = state->symbols->get_type(this->type_name);
+ if (this->type != NULL)
+ type = this->type;
+ else if (structure)
+ type = structure->type;
+ else
+ type = state->symbols->get_type(this->type_name);
*name = this->type_name;
YYLTYPE loc = this->get_location();
case MESA_SHADER_VERTEX:
return var->data.mode == ir_var_shader_out;
case MESA_SHADER_FRAGMENT:
- return var->data.mode == ir_var_shader_in;
+ return var->data.mode == ir_var_shader_in ||
+ (var->data.mode == ir_var_system_value &&
+ var->data.location == SYSTEM_VALUE_FRAG_COORD);
default:
return var->data.mode == ir_var_shader_out || var->data.mode == ir_var_shader_in;
}
* "Only variables output from a vertex shader can be candidates
* for invariance".
*/
- if (!state->is_version(130, 0))
+ if (!state->is_version(130, 100))
return false;
/*
/* Make sure nested structs don't contain unsized arrays, and validate
* any xfb_offsets on interface members.
*/
- if (t_without_array->is_record() || t_without_array->is_interface())
+ if (t_without_array->is_struct() || t_without_array->is_interface())
for (unsigned int i = 0; i < t_without_array->length; i++) {
const glsl_type *member_t = t_without_array->fields.structure[i].type;
* reasonable way to interpolate a fragment shader input that contains
* an integer. See Khronos bug #15671.
*/
- if (state->is_version(130, 300)
+ if ((state->is_version(130, 300) || state->EXT_gpu_shader4_enable)
&& var_type->contains_integer()) {
_mesa_glsl_error(loc, state, "if a fragment input is (or contains) "
"an integer, then it must be qualified with 'flat'");
_mesa_glsl_error(loc, state, "if a fragment input is (or contains) "
"a double, then it must be qualified with 'flat'");
}
+
+ /* Bindless sampler/image fragment inputs must be qualified with 'flat'.
+ *
+ * From section 4.3.4 of the ARB_bindless_texture spec:
+ *
+ * "(modify last paragraph, p. 35, allowing samplers and images as
+ * fragment shader inputs) ... Fragment inputs can only be signed and
+ * unsigned integers and integer vectors, floating point scalars,
+ * floating-point vectors, matrices, sampler and image types, or arrays
+ * or structures of these. Fragment shader inputs that are signed or
+ * unsigned integers, integer vectors, or any double-precision floating-
+ * point type, or any sampler or image type must be qualified with the
+ * interpolation qualifier "flat"."
+ */
+ if (state->has_bindless()
+ && (var_type->contains_sampler() || var_type->contains_image())) {
+ _mesa_glsl_error(loc, state, "if a fragment input is (or contains) "
+ "a bindless sampler (or image), then it must be "
+ "qualified with 'flat'");
+ }
}
static void
* not apply to inputs into a vertex shader or outputs from a
* fragment shader."
*/
- if (state->is_version(130, 300)
+ if ((state->is_version(130, 300) || state->EXT_gpu_shader4_enable)
&& interpolation != INTERP_MODE_NONE) {
const char *i = interpolation_string(interpolation);
if (mode != ir_var_shader_in && mode != ir_var_shader_out)
* to the deprecated storage qualifiers varying or centroid varying."
*
* These deprecated storage qualifiers do not exist in GLSL ES 3.00.
+ *
+ * GL_EXT_gpu_shader4 allows this.
*/
- if (state->is_version(130, 0)
+ if (state->is_version(130, 0) && !state->EXT_gpu_shader4_enable
&& interpolation != INTERP_MODE_NONE
&& qual->flags.q.varying) {
interpolation = INTERP_MODE_NOPERSPECTIVE;
else if (qual->flags.q.smooth)
interpolation = INTERP_MODE_SMOOTH;
- else if (state->es_shader &&
- ((mode == ir_var_shader_in &&
- state->stage != MESA_SHADER_VERTEX) ||
- (mode == ir_var_shader_out &&
- state->stage != MESA_SHADER_FRAGMENT)))
- /* Section 4.3.9 (Interpolation) of the GLSL ES 3.00 spec says:
- *
- * "When no interpolation qualifier is present, smooth interpolation
- * is used."
- */
- interpolation = INTERP_MODE_SMOOTH;
else
interpolation = INTERP_MODE_NONE;
"compute shader variables cannot be given "
"explicit locations");
return;
+ default:
+ fail = true;
+ break;
};
if (fail) {
? (qual_location + FRAG_RESULT_DATA0)
: (qual_location + VARYING_SLOT_VAR0);
break;
- case MESA_SHADER_COMPUTE:
+ default:
assert(!"Unexpected shader type");
break;
}
}
static bool
-validate_image_qualifier_for_type(struct _mesa_glsl_parse_state *state,
-                                  YYLTYPE *loc,
-                                  const struct ast_type_qualifier *qual,
-                                  const glsl_type *type)
+validate_storage_for_sampler_image_types(ir_variable *var,
+                                         struct _mesa_glsl_parse_state *state,
+                                         YYLTYPE *loc)
+{
+   /* From section 4.1.7 of the GLSL 4.40 spec:
+    *
+    * "[Opaque types] can only be declared as function
+    * parameters or uniform-qualified variables."
+    *
+    * From section 4.1.7 of the ARB_bindless_texture spec:
+    *
+    * "Samplers may be declared as shader inputs and outputs, as uniform
+    * variables, as temporary variables, and as function parameters."
+    *
+    * From section 4.1.X of the ARB_bindless_texture spec:
+    *
+    * "Images may be declared as shader inputs and outputs, as uniform
+    * variables, as temporary variables, and as function parameters."
+    */
+   /* With ARB_bindless_texture samplers/images are plain values, so the
+    * permitted storage classes are wider than in core GLSL.  Emits a
+    * compile error and returns false when var's mode is not allowed.
+    */
+   if (state->has_bindless()) {
+      if (var->data.mode != ir_var_auto &&
+          var->data.mode != ir_var_uniform &&
+          var->data.mode != ir_var_shader_in &&
+          var->data.mode != ir_var_shader_out &&
+          var->data.mode != ir_var_function_in &&
+          var->data.mode != ir_var_function_out &&
+          var->data.mode != ir_var_function_inout) {
+         _mesa_glsl_error(loc, state, "bindless image/sampler variables may "
+                          "only be declared as shader inputs and outputs, as "
+                          "uniform variables, as temporary variables and as "
+                          "function parameters");
+         return false;
+      }
+   } else {
+      /* Core GLSL path: opaque types are restricted to uniforms and "in"
+       * function parameters.
+       */
+      if (var->data.mode != ir_var_uniform &&
+          var->data.mode != ir_var_function_in) {
+         _mesa_glsl_error(loc, state, "image/sampler variables may only be "
+                          "declared as function parameters or "
+                          "uniform-qualified global variables");
+         return false;
+      }
+   }
+   return true;
+}
+
+static bool
+validate_memory_qualifier_for_type(struct _mesa_glsl_parse_state *state,
+                                   YYLTYPE *loc,
+                                   const struct ast_type_qualifier *qual,
+                                   const glsl_type *type)
{
-   if (!type->is_image()) {
+   /* From Section 4.10 (Memory Qualifiers) of the GLSL 4.50 spec:
+    *
+    * "Memory qualifiers are only supported in the declarations of image
+    * variables, buffer variables, and shader storage blocks; it is an error
+    * to use such qualifiers in any other declarations.
+    */
+   /* qual->flags.q.buffer marks buffer (SSBO) variables, which may legally
+    * carry memory qualifiers, hence the exemption alongside images.
+    */
+   if (!type->is_image() && !qual->flags.q.buffer) {
      if (qual->flags.q.read_only ||
          qual->flags.q.write_only ||
          qual->flags.q.coherent ||
          qual->flags.q._volatile ||
          qual->flags.q.restrict_flag) {
         _mesa_glsl_error(loc, state, "memory qualifiers may only be applied "
-                          "to images");
+                          "in the declarations of image variables, buffer "
+                          "variables, and shader storage blocks");
+         return false;
      }
+   }
+   return true;
+}
- if (qual->flags.q.explicit_image_format) {
- _mesa_glsl_error(loc, state, "format layout qualifiers may only be "
- "applied to images");
- }
+static bool
+validate_image_format_qualifier_for_type(struct _mesa_glsl_parse_state *state,
+ YYLTYPE *loc,
+ const struct ast_type_qualifier *qual,
+ const glsl_type *type)
+{
+ /* From section 4.4.6.2 (Format Layout Qualifiers) of the GLSL 4.50 spec:
+ *
+ * "Format layout qualifiers can be used on image variable declarations
+ * (those declared with a basic type having “image ” in its keyword)."
+ */
+ if (!type->is_image() && qual->flags.q.explicit_image_format) {
+ _mesa_glsl_error(loc, state, "format layout qualifiers may only be "
+ "applied to images");
return false;
}
return true;
{
const glsl_type *base_type = var->type->without_array();
- if (!validate_image_qualifier_for_type(state, loc, qual, base_type))
+ if (!validate_image_format_qualifier_for_type(state, loc, qual, base_type) ||
+ !validate_memory_qualifier_for_type(state, loc, qual, base_type))
return;
- if (var->data.mode != ir_var_uniform &&
- var->data.mode != ir_var_function_in) {
- _mesa_glsl_error(loc, state, "image variables may only be declared as "
- "function parameters or uniform-qualified "
- "global variables");
- }
+ if (!base_type->is_image())
+ return;
+
+ if (!validate_storage_for_sampler_image_types(var, state, loc))
+ return;
var->data.memory_read_only |= qual->flags.q.read_only;
var->data.memory_write_only |= qual->flags.q.write_only;
var->data.memory_coherent |= qual->flags.q.coherent;
var->data.memory_volatile |= qual->flags.q._volatile;
var->data.memory_restrict |= qual->flags.q.restrict_flag;
- var->data.read_only = true;
if (qual->flags.q.explicit_image_format) {
if (var->data.mode == ir_var_function_in) {
}
var->data.image_format = qual->image_format;
+ } else if (state->has_image_load_formatted()) {
+ if (var->data.mode == ir_var_uniform &&
+ state->EXT_shader_image_load_formatted_warn) {
+ _mesa_glsl_warning(loc, state, "GL_EXT_image_load_formatted used");
+ }
} else {
if (var->data.mode == ir_var_uniform) {
- if (state->es_shader) {
+ if (state->es_shader ||
+ !(state->is_version(420, 310) || state->ARB_shader_image_load_store_enable)) {
_mesa_glsl_error(loc, state, "all image uniforms must have a "
"format layout qualifier");
} else if (!qual->flags.q.write_only) {
}
}
+/* Validate the ARB_bindless_texture layout qualifiers
+ * (bindless_sampler/bindless_image, bound_sampler/bound_image) on var and
+ * record the outcome in var->data.bindless / var->data.bound.  Emits a
+ * compile error and returns early on misuse.
+ */
+static void
+apply_bindless_qualifier_to_variable(const struct ast_type_qualifier *qual,
+                                     ir_variable *var,
+                                     struct _mesa_glsl_parse_state *state,
+                                     YYLTYPE *loc)
+{
+   bool has_local_qualifiers = qual->flags.q.bindless_sampler ||
+                               qual->flags.q.bindless_image ||
+                               qual->flags.q.bound_sampler ||
+                               qual->flags.q.bound_image;
+
+   /* The ARB_bindless_texture spec says:
+    *
+    * "Modify Section 4.4.6 Opaque-Uniform Layout Qualifiers of the GLSL 4.30
+    * spec"
+    *
+    * "If these layout qualifiers are applied to other types of default block
+    * uniforms, or variables with non-uniform storage, a compile-time error
+    * will be generated."
+    */
+   if (has_local_qualifiers && !qual->flags.q.uniform) {
+      _mesa_glsl_error(loc, state, "ARB_bindless_texture layout qualifiers "
+                       "can only be applied to default block uniforms or "
+                       "variables with uniform storage");
+      return;
+   }
+
+   /* The ARB_bindless_texture spec doesn't state anything in this situation,
+    * but it makes sense to only allow bindless_sampler/bound_sampler for
+    * sampler types, and respectively bindless_image/bound_image for image
+    * types.
+    */
+   if ((qual->flags.q.bindless_sampler || qual->flags.q.bound_sampler) &&
+       !var->type->contains_sampler()) {
+      _mesa_glsl_error(loc, state, "bindless_sampler or bound_sampler can only "
+                       "be applied to sampler types");
+      return;
+   }
+
+   if ((qual->flags.q.bindless_image || qual->flags.q.bound_image) &&
+       !var->type->contains_image()) {
+      _mesa_glsl_error(loc, state, "bindless_image or bound_image can only be "
+                       "applied to image types");
+      return;
+   }
+
+   /* The bindless_sampler/bindless_image (and respectively
+    * bound_sampler/bound_image) layout qualifiers can be set at global and at
+    * local scope.
+    */
+   /* The shader-wide defaults recorded in the parse state
+    * (*_specified flags) are ORed in alongside the per-variable qualifiers.
+    */
+   if (var->type->contains_sampler() || var->type->contains_image()) {
+      var->data.bindless = qual->flags.q.bindless_sampler ||
+                           qual->flags.q.bindless_image ||
+                           state->bindless_sampler_specified ||
+                           state->bindless_image_specified;
+
+      var->data.bound = qual->flags.q.bound_sampler ||
+                        qual->flags.q.bound_image ||
+                        state->bound_sampler_specified ||
+                        state->bound_image_specified;
+   }
+}
+
static void
apply_layout_qualifier_to_variable(const struct ast_type_qualifier *qual,
ir_variable *var,
state->fs_redeclares_gl_fragcoord_with_no_layout_qualifiers;
}
- var->data.pixel_center_integer = qual->flags.q.pixel_center_integer;
- var->data.origin_upper_left = qual->flags.q.origin_upper_left;
if ((qual->flags.q.origin_upper_left || qual->flags.q.pixel_center_integer)
&& (strcmp(var->name, "gl_FragCoord") != 0)) {
const char *const qual_string = (qual->flags.q.origin_upper_left)
const glsl_type *type = var->type->without_array();
unsigned components = type->component_slots();
- if (type->is_matrix() || type->is_record()) {
+ if (type->is_matrix() || type->is_struct()) {
_mesa_glsl_error(loc, state, "component layout qualifier "
"cannot be applied to a matrix, a structure, "
"a block, or an array containing any of "
"these.");
+ } else if (components > 4 && type->is_64bit()) {
+ _mesa_glsl_error(loc, state, "component layout qualifier "
+ "cannot be applied to dvec%u.",
+ components / 2);
} else if (qual_component != 0 &&
(qual_component + components - 1) > 3) {
_mesa_glsl_error(loc, state, "component overflow (%u > 3)",
}
}
- if (var->type->contains_sampler()) {
- if (var->data.mode != ir_var_uniform &&
- var->data.mode != ir_var_function_in) {
- _mesa_glsl_error(loc, state, "sampler variables may only be declared "
- "as function parameters or uniform-qualified "
- "global variables");
- }
- }
+ if (var->type->contains_sampler() &&
+ !validate_storage_for_sampler_image_types(var, state, loc))
+ return;
/* Is the 'layout' keyword used with parameters that allow relaxed checking.
* Many implementations of GL_ARB_fragment_coord_conventions_enable and some
_mesa_glsl_error(loc, state, "post_depth_coverage layout qualifier only "
"valid in fragment shader input layout declaration.");
}
+
+ if (state->has_bindless())
+ apply_bindless_qualifier_to_variable(qual, var, state, loc);
+
+ if (qual->flags.q.pixel_interlock_ordered ||
+ qual->flags.q.pixel_interlock_unordered ||
+ qual->flags.q.sample_interlock_ordered ||
+ qual->flags.q.sample_interlock_unordered) {
+ _mesa_glsl_error(loc, state, "interlock layout qualifiers: "
+ "pixel_interlock_ordered, pixel_interlock_unordered, "
+ "sample_interlock_ordered and sample_interlock_unordered, "
+ "only valid in fragment shader input layout declaration.");
+ }
}
static void
"`invariant' after being used",
var->name);
} else {
- var->data.invariant = 1;
+ var->data.explicit_invariant = true;
+ var->data.invariant = true;
}
}
else if (qual->flags.q.shared_storage)
var->data.mode = ir_var_shader_shared;
- var->data.fb_fetch_output = state->stage == MESA_SHADER_FRAGMENT &&
- qual->flags.q.in && qual->flags.q.out;
+ if (!is_parameter && state->has_framebuffer_fetch() &&
+ state->stage == MESA_SHADER_FRAGMENT) {
+ if (state->is_version(130, 300))
+ var->data.fb_fetch_output = qual->flags.q.in && qual->flags.q.out;
+ else
+ var->data.fb_fetch_output = (strcmp(var->name, "gl_LastFragData") == 0);
+ }
+
+ if (var->data.fb_fetch_output) {
+ var->data.assigned = true;
+ var->data.memory_coherent = !qual->flags.q.non_coherent;
+
+ /* From the EXT_shader_framebuffer_fetch spec:
+ *
+ * "It is an error to declare an inout fragment output not qualified
+ * with layout(noncoherent) if the GL_EXT_shader_framebuffer_fetch
+ * extension hasn't been enabled."
+ */
+ if (var->data.memory_coherent &&
+ !state->EXT_shader_framebuffer_fetch_enable)
+ _mesa_glsl_error(loc, state,
+ "invalid declaration of framebuffer fetch output not "
+ "qualified with layout(noncoherent)");
+
+ } else {
+ /* From the EXT_shader_framebuffer_fetch spec:
+ *
+ * "Fragment outputs declared inout may specify the following layout
+ * qualifier: [...] noncoherent"
+ */
+ if (qual->flags.q.non_coherent)
+ _mesa_glsl_error(loc, state,
+ "invalid layout(noncoherent) qualifier not part of "
+ "framebuffer fetch output declaration");
+ }
if (!is_parameter && is_varying_var(var, state->stage)) {
/* User-defined ins/outs are not permitted in compute shaders. */
* Similar text exists in the GLSL ES 3.00 spec, except that the GLSL ES
* 3.00 spec allows structs as well. Varying structs are also allowed
* in GLSL 1.50.
+ *
+ * From section 4.3.4 of the ARB_bindless_texture spec:
+ *
+ * "(modify third paragraph of the section to allow sampler and image
+ * types) ... Vertex shader inputs can only be float,
+ * single-precision floating-point scalars, single-precision
+ * floating-point vectors, matrices, signed and unsigned integers
+ * and integer vectors, sampler and image types."
+ *
+ * From section 4.3.6 of the ARB_bindless_texture spec:
+ *
+ * "Output variables can only be floating-point scalars,
+ * floating-point vectors, matrices, signed or unsigned integers or
+ * integer vectors, sampler or image types, or arrays or structures
+ * of any these."
*/
switch (var->type->without_array()->base_type) {
case GLSL_TYPE_FLOAT:
break;
case GLSL_TYPE_UINT:
case GLSL_TYPE_INT:
- if (state->is_version(130, 300))
+ if (state->is_version(130, 300) || state->EXT_gpu_shader4_enable)
break;
_mesa_glsl_error(loc, state,
"varying variables must be of base type float in %s",
case GLSL_TYPE_UINT64:
case GLSL_TYPE_INT64:
break;
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ if (state->has_bindless())
+ break;
+ /* fallthrough */
default:
_mesa_glsl_error(loc, state, "illegal type for a varying variable");
break;
}
}
- if (state->all_invariant && (state->current_function == NULL)) {
- switch (state->stage) {
- case MESA_SHADER_VERTEX:
- if (var->data.mode == ir_var_shader_out)
- var->data.invariant = true;
- break;
- case MESA_SHADER_TESS_CTRL:
- case MESA_SHADER_TESS_EVAL:
- case MESA_SHADER_GEOMETRY:
- if ((var->data.mode == ir_var_shader_in)
- || (var->data.mode == ir_var_shader_out))
- var->data.invariant = true;
- break;
- case MESA_SHADER_FRAGMENT:
- if (var->data.mode == ir_var_shader_in)
- var->data.invariant = true;
- break;
- case MESA_SHADER_COMPUTE:
- /* Invariance isn't meaningful in compute shaders. */
- break;
- }
+ if (state->all_invariant && var->data.mode == ir_var_shader_out) {
+ var->data.explicit_invariant = true;
+ var->data.invariant = true;
}
var->data.interpolation =
* otherwise.
*/
static ir_variable *
-get_variable_being_redeclared(ir_variable *var, YYLTYPE loc,
+get_variable_being_redeclared(ir_variable **var_ptr, YYLTYPE loc,
struct _mesa_glsl_parse_state *state,
bool allow_all_redeclarations,
bool *is_redeclaration)
{
+ ir_variable *var = *var_ptr;
+
/* Check if this declaration is actually a re-declaration, either to
* resize an array or add qualifiers to an existing variable.
*
*is_redeclaration = true;
+ if (earlier->data.how_declared == ir_var_declared_implicitly) {
+ /* Verify that the redeclaration of a built-in does not change the
+ * storage qualifier. There are a couple special cases.
+ *
+ * 1. Some built-in variables that are defined as 'in' in the
+ * specification are implemented as system values. Allow
+ * ir_var_system_value -> ir_var_shader_in.
+ *
+ * 2. gl_LastFragData is implemented as a ir_var_shader_out, but the
+ * specification requires that redeclarations omit any qualifier.
+ * Allow ir_var_shader_out -> ir_var_auto for this one variable.
+ */
+ if (earlier->data.mode != var->data.mode &&
+ !(earlier->data.mode == ir_var_system_value &&
+ var->data.mode == ir_var_shader_in) &&
+ !(strcmp(var->name, "gl_LastFragData") == 0 &&
+ var->data.mode == ir_var_auto)) {
+ _mesa_glsl_error(&loc, state,
+ "redeclaration cannot change qualification of `%s'",
+ var->name);
+ }
+ }
+
/* From page 24 (page 30 of the PDF) of the GLSL 1.50 spec,
*
* "It is legal to declare an array without a size and then
*/
if (earlier->type->is_unsized_array() && var->type->is_array()
&& (var->type->fields.array == earlier->type->fields.array)) {
- /* FINISHME: This doesn't match the qualifiers on the two
- * FINISHME: declarations. It's not 100% clear whether this is
- * FINISHME: required or not.
- */
-
const int size = var->type->array_size();
check_builtin_array_max_size(var->name, size, loc, state);
if ((size > 0) && (size <= earlier->data.max_array_access)) {
earlier->type = var->type;
delete var;
var = NULL;
+ *var_ptr = NULL;
+ } else if (earlier->type != var->type) {
+ _mesa_glsl_error(&loc, state,
+ "redeclaration of `%s' has incorrect type",
+ var->name);
} else if ((state->ARB_fragment_coord_conventions_enable ||
state->is_version(150, 0))
- && strcmp(var->name, "gl_FragCoord") == 0
- && earlier->type == var->type
- && var->data.mode == ir_var_shader_in) {
+ && strcmp(var->name, "gl_FragCoord") == 0) {
/* Allow redeclaration of gl_FragCoord for ARB_fcc layout
* qualifiers.
+ *
+ * We don't really need to do anything here, just allow the
+ * redeclaration. Any error on the gl_FragCoord is handled on the ast
+ * level at apply_layout_qualifier_to_variable using the
+ * ast_type_qualifier and _mesa_glsl_parse_state, or later at
+ * linker.cpp.
*/
- earlier->data.origin_upper_left = var->data.origin_upper_left;
- earlier->data.pixel_center_integer = var->data.pixel_center_integer;
-
/* According to section 4.3.7 of the GLSL 1.30 spec,
* the following built-in varaibles can be redeclared with an
* interpolation qualifier:
|| strcmp(var->name, "gl_FrontSecondaryColor") == 0
|| strcmp(var->name, "gl_BackSecondaryColor") == 0
|| strcmp(var->name, "gl_Color") == 0
- || strcmp(var->name, "gl_SecondaryColor") == 0)
- && earlier->type == var->type
- && earlier->data.mode == var->data.mode) {
+ || strcmp(var->name, "gl_SecondaryColor") == 0)) {
earlier->data.interpolation = var->data.interpolation;
/* Layout qualifiers for gl_FragDepth. */
} else if ((state->is_version(420, 0) ||
state->AMD_conservative_depth_enable ||
state->ARB_conservative_depth_enable)
- && strcmp(var->name, "gl_FragDepth") == 0
- && earlier->type == var->type
- && earlier->data.mode == var->data.mode) {
+ && strcmp(var->name, "gl_FragDepth") == 0) {
/** From the AMD_conservative_depth spec:
* Within any shader, the first redeclarations of gl_FragDepth
} else if (state->has_framebuffer_fetch() &&
strcmp(var->name, "gl_LastFragData") == 0 &&
- var->type == earlier->type &&
var->data.mode == ir_var_auto) {
/* According to the EXT_shader_framebuffer_fetch spec:
*
* "By default, gl_LastFragData is declared with the mediump precision
* qualifier. This can be changed by redeclaring the corresponding
* variables with the desired precision qualifier."
+ *
+ * "Fragment shaders may specify the following layout qualifier only for
+ * redeclaring the built-in gl_LastFragData array [...]: noncoherent"
*/
earlier->data.precision = var->data.precision;
+ earlier->data.memory_coherent = var->data.memory_coherent;
- } else if (allow_all_redeclarations) {
- if (earlier->data.mode != var->data.mode) {
- _mesa_glsl_error(&loc, state,
- "redeclaration of `%s' with incorrect qualifiers",
- var->name);
- } else if (earlier->type != var->type) {
- _mesa_glsl_error(&loc, state,
- "redeclaration of `%s' has incorrect type",
- var->name);
- }
+ } else if ((earlier->data.how_declared == ir_var_declared_implicitly &&
+ state->allow_builtin_variable_redeclaration) ||
+ allow_all_redeclarations) {
+ /* Allow verbatim redeclarations of built-in variables. Not explicitly
+ * valid, but some applications do it.
+ */
} else {
_mesa_glsl_error(&loc, state, "`%s' redeclared", var->name);
}
/**
* Generate the IR for an initializer in a variable declaration
*/
-ir_rvalue *
+static ir_rvalue *
process_initializer(ir_variable *var, ast_declaration *decl,
ast_fully_specified_type *type,
exec_list *initializer_instructions,
struct _mesa_glsl_parse_state *state)
{
+ void *mem_ctx = state;
ir_rvalue *result = NULL;
YYLTYPE initializer_loc = decl->initializer->get_location();
* "Opaque variables [...] are initialized only through the
* OpenGL API; they cannot be declared with an initializer in a
* shader."
+ *
+ * From section 4.1.7 of the ARB_bindless_texture spec:
+ *
+ * "Samplers may be declared as shader inputs and outputs, as uniform
+ * variables, as temporary variables, and as function parameters."
+ *
+ * From section 4.1.X of the ARB_bindless_texture spec:
+ *
+ * "Images may be declared as shader inputs and outputs, as uniform
+ * variables, as temporary variables, and as function parameters."
*/
- if (var->type->contains_opaque()) {
+ if (var->type->contains_atomic() ||
+ (!state->has_bindless() && var->type->contains_opaque())) {
_mesa_glsl_error(&initializer_loc, state,
- "cannot initialize opaque variable %s",
- var->name);
+ "cannot initialize %s variable %s",
+ var->name, state->has_bindless() ? "atomic" : "opaque");
}
if ((var->data.mode == ir_var_shader_in) && (state->current_function == NULL)) {
* GLSL ES 3.00.4 spec. This is a new limitation for these GLSL
* versions.
*/
- ir_constant *constant_value = rhs->constant_expression_value();
+ ir_constant *constant_value =
+ rhs->constant_expression_value(mem_ctx);
+
if (!constant_value ||
(state->is_version(430, 300) &&
decl->initializer->has_sequence_subexpression())) {
} else {
if (var->type->is_numeric()) {
/* Reduce cascading errors. */
- var->constant_value = type->qualifier.flags.q.constant
+ rhs = var->constant_value = type->qualifier.flags.q.constant
? ir_constant::zero(state, var->type) : NULL;
}
}
/* Never emit code to initialize a uniform.
*/
const glsl_type *initializer_type;
+ bool error_emitted = false;
if (!type->qualifier.flags.q.uniform) {
- do_assignment(initializer_instructions, state,
- NULL,
- lhs, rhs,
- &result, true,
- true,
- type->get_location());
+ error_emitted =
+ do_assignment(initializer_instructions, state,
+ NULL, lhs, rhs,
+ &result, true, true,
+ type->get_location());
initializer_type = result->type;
} else
initializer_type = rhs->type;
- var->constant_initializer = rhs->constant_expression_value();
- var->data.has_initializer = true;
+ if (!error_emitted) {
+ var->constant_initializer = rhs->constant_expression_value(mem_ctx);
+ var->data.has_initializer = true;
- /* If the declared variable is an unsized array, it must inherrit
- * its full type from the initializer. A declaration such as
- *
- * uniform float a[] = float[](1.0, 2.0, 3.0, 3.0);
- *
- * becomes
- *
- * uniform float a[4] = float[](1.0, 2.0, 3.0, 3.0);
- *
- * The assignment generated in the if-statement (below) will also
- * automatically handle this case for non-uniforms.
- *
- * If the declared variable is not an array, the types must
- * already match exactly. As a result, the type assignment
- * here can be done unconditionally. For non-uniforms the call
- * to do_assignment can change the type of the initializer (via
- * the implicit conversion rules). For uniforms the initializer
- * must be a constant expression, and the type of that expression
- * was validated above.
- */
- var->type = initializer_type;
+ /* If the declared variable is an unsized array, it must inherit
+ * its full type from the initializer. A declaration such as
+ *
+ * uniform float a[] = float[](1.0, 2.0, 3.0, 3.0);
+ *
+ * becomes
+ *
+ * uniform float a[4] = float[](1.0, 2.0, 3.0, 3.0);
+ *
+ * The assignment generated in the if-statement (below) will also
+ * automatically handle this case for non-uniforms.
+ *
+ * If the declared variable is not an array, the types must
+ * already match exactly. As a result, the type assignment
+ * here can be done unconditionally. For non-uniforms the call
+ * to do_assignment can change the type of the initializer (via
+ * the implicit conversion rules). For uniforms the initializer
+ * must be a constant expression, and the type of that expression
+ * was validated above.
+ */
+ var->type = initializer_type;
+ }
var->data.read_only = temp;
}
"geometry shader input");
}
-void
+static void
validate_identifier(const char *identifier, YYLTYPE loc,
struct _mesa_glsl_parse_state *state)
{
"`invariant' after being used",
earlier->name);
} else {
+ earlier->data.explicit_invariant = true;
earlier->data.invariant = true;
}
}
assert(!this->invariant);
assert(!this->precise);
+ /* GL_EXT_shader_image_load_store base type uses GLSL_TYPE_VOID as a special value to
+ * indicate that it needs to be updated later (see glsl_parser.yy).
+ * This is done here, based on the layout qualifier and the type of the image var
+ */
+ if (this->type->qualifier.flags.q.explicit_image_format &&
+ this->type->specifier->type->is_image() &&
+ this->type->qualifier.image_base_type == GLSL_TYPE_VOID) {
+ /* The ARB_shader_image_load_store spec says:
+ * "If both extensions are enabled in the shading language, the "size*" layout
+ * qualifiers are treated as format qualifiers, and are mapped to equivalent
+ * format qualifiers in the table below, according to the type of image
+ * variable.
+ * image* iimage* uimage*
+ * -------- -------- --------
+ * size1x8 n/a r8i r8ui
+ * size1x16 r16f r16i r16ui
+ * size1x32 r32f r32i r32ui
+ * size2x32 rg32f rg32i rg32ui
+ * size4x32 rgba32f rgba32i rgba32ui"
+ */
+ if (strncmp(this->type->specifier->type_name, "image", strlen("image")) == 0) {
+ this->type->qualifier.image_format = GL_R8 +
+ this->type->qualifier.image_format - GL_R8I;
+ this->type->qualifier.image_base_type = GLSL_TYPE_FLOAT;
+ } else if (strncmp(this->type->specifier->type_name, "uimage", strlen("uimage")) == 0) {
+ this->type->qualifier.image_format = GL_R8UI +
+ this->type->qualifier.image_format - GL_R8I;
+ this->type->qualifier.image_base_type = GLSL_TYPE_UINT;
+ } else if (strncmp(this->type->specifier->type_name, "iimage", strlen("iimage")) == 0) {
+ this->type->qualifier.image_base_type = GLSL_TYPE_INT;
+ } else {
+ assert(false);
+ }
+ }
+
/* The type specifier may contain a structure definition. Process that
* before any of the variable declarations.
*/
&& process_qualifier_constant(state, &loc, "offset",
type->qualifier.offset,
&qual_offset)) {
- state->atomic_counter_offsets[qual_binding] = qual_offset;
+ if (qual_binding < ARRAY_SIZE(state->atomic_counter_offsets))
+ state->atomic_counter_offsets[qual_binding] = qual_offset;
}
}
&& !state->has_explicit_attrib_location()
&& !state->has_separate_shader_objects()
&& !state->ARB_fragment_coord_conventions_enable) {
- if (this->type->qualifier.flags.q.out) {
+ /* GL_EXT_gpu_shader4 only allows "varying out" on fragment shader
+ * outputs. (the varying flag is not set by the parser)
+ */
+ if (this->type->qualifier.flags.q.out &&
+ (!state->EXT_gpu_shader4_enable ||
+ state->stage != MESA_SHADER_FRAGMENT)) {
_mesa_glsl_error(& loc, state,
"`out' qualifier in declaration of `%s' "
"only valid for function parameters in %s",
* vectors, matrices, signed and unsigned integers and integer
* vectors. Vertex shader inputs cannot be arrays or
* structures."
+ *
+ * From section 4.3.4 of the ARB_bindless_texture spec:
+ *
+ * "(modify third paragraph of the section to allow sampler and
+ * image types) ... Vertex shader inputs can only be float,
+ * single-precision floating-point scalars, single-precision
+ * floating-point vectors, matrices, signed and unsigned
+ * integers and integer vectors, sampler and image types."
*/
const glsl_type *check_type = var->type->without_array();
break;
case GLSL_TYPE_UINT:
case GLSL_TYPE_INT:
- if (state->is_version(120, 300))
+ if (state->is_version(120, 300) || state->EXT_gpu_shader4_enable)
break;
case GLSL_TYPE_DOUBLE:
if (check_type->is_double() && (state->is_version(410, 0) || state->ARB_vertex_attrib_64bit_enable))
break;
+ case GLSL_TYPE_SAMPLER:
+ if (check_type->is_sampler() && state->has_bindless())
+ break;
+ case GLSL_TYPE_IMAGE:
+ if (check_type->is_image() && state->has_bindless())
+ break;
/* FALLTHROUGH */
default:
_mesa_glsl_error(& loc, state,
_mesa_shader_stage_to_string(state->stage));
}
if (var->type->is_array() &&
- var->type->fields.array->is_record()) {
+ var->type->fields.array->is_struct()) {
_mesa_glsl_error(&loc, state,
"fragment shader input "
"cannot have an array of structs");
}
- if (var->type->is_record()) {
+ if (var->type->is_struct()) {
for (unsigned i = 0; i < var->type->length; i++) {
if (var->type->fields.structure[i].type->is_array() ||
- var->type->fields.structure[i].type->is_record())
+ var->type->fields.structure[i].type->is_struct())
_mesa_glsl_error(&loc, state,
- "fragement shader input cannot have "
+ "fragment shader input cannot have "
"a struct that contains an "
"array or struct");
}
* * A structure
*/
if (state->stage == MESA_SHADER_FRAGMENT) {
- if (check_type->is_record() || check_type->is_matrix())
+ if (check_type->is_struct() || check_type->is_matrix())
_mesa_glsl_error(&loc, state,
"fragment shader output "
"cannot have struct or matrix type");
type = var->type->fields.array;
}
- if (type->is_array() && type->fields.array->is_record()) {
+ if (type->is_array() && type->fields.array->is_struct()) {
_mesa_glsl_error(&loc, state,
"%s shader output cannot have "
"an array of structs",
_mesa_shader_stage_to_string(state->stage));
}
- if (type->is_record()) {
+ if (type->is_struct()) {
for (unsigned i = 0; i < type->length; i++) {
if (type->fields.structure[i].type->is_array() ||
- type->fields.structure[i].type->is_record())
+ type->fields.structure[i].type->is_struct())
_mesa_glsl_error(&loc, state,
"%s shader output cannot have a "
"struct that contains an "
*
* "[Opaque types] can only be declared as function
* parameters or uniform-qualified variables."
+ *
+ * From section 4.1.7 of the ARB_bindless_texture spec:
+ *
+ * "Samplers may be declared as shader inputs and outputs, as uniform
+ * variables, as temporary variables, and as function parameters."
+ *
+ * From section 4.1.X of the ARB_bindless_texture spec:
+ *
+ * "Images may be declared as shader inputs and outputs, as uniform
+ * variables, as temporary variables, and as function parameters."
*/
- if (var_type->contains_opaque() &&
- !this->type->qualifier.flags.q.uniform) {
+ if (!this->type->qualifier.flags.q.uniform &&
+ (var_type->contains_atomic() ||
+ (!state->has_bindless() && var_type->contains_opaque()))) {
_mesa_glsl_error(&loc, state,
- "opaque variables must be declared uniform");
+ "%s variables must be declared uniform",
+ state->has_bindless() ? "atomic" : "opaque");
}
/* Process the initializer and add its instructions to a temporary
bool var_is_gl_id = is_gl_identifier(var->name);
bool is_redeclaration;
- ir_variable *declared_var =
- get_variable_being_redeclared(var, decl->get_location(), state,
- false /* allow_all_redeclarations */,
- &is_redeclaration);
+ var = get_variable_being_redeclared(&var, decl->get_location(), state,
+ false /* allow_all_redeclarations */,
+ &is_redeclaration);
if (is_redeclaration) {
if (var_is_gl_id &&
- declared_var->data.how_declared == ir_var_declared_in_block) {
+ var->data.how_declared == ir_var_declared_in_block) {
_mesa_glsl_error(&loc, state,
"`%s' has already been redeclared using "
- "gl_PerVertex", declared_var->name);
+ "gl_PerVertex", var->name);
}
- declared_var->data.how_declared = ir_var_declared_normally;
+ var->data.how_declared = ir_var_declared_normally;
}
if (decl->initializer != NULL) {
- result = process_initializer(declared_var,
+ result = process_initializer(var,
decl, this->type,
&initializer_instructions, state);
} else {
}
if (state->es_shader) {
- const glsl_type *const t = declared_var->type;
+ const glsl_type *const t = var->type;
/* Skip the unsized array check for TCS/TES/GS inputs & TCS outputs.
*
* present, as per the following table."
*/
const bool implicitly_sized =
- (declared_var->data.mode == ir_var_shader_in &&
+ (var->data.mode == ir_var_shader_in &&
state->stage >= MESA_SHADER_TESS_CTRL &&
state->stage <= MESA_SHADER_GEOMETRY) ||
- (declared_var->data.mode == ir_var_shader_out &&
+ (var->data.mode == ir_var_shader_out &&
state->stage == MESA_SHADER_TESS_CTRL);
if (t->is_unsized_array() && !implicitly_sized)
"GLSL ES");
}
+ /* Section 4.4.6.1 Atomic Counter Layout Qualifiers of the GLSL 4.60 spec:
+ *
+ * "It is a compile-time error to declare an unsized array of
+ * atomic_uint"
+ */
+ if (var->type->is_unsized_array() &&
+ var->type->without_array()->base_type == GLSL_TYPE_ATOMIC_UINT) {
+ _mesa_glsl_error(& loc, state,
+ "Unsized array of atomic_uint is not allowed");
+ }
+
/* If the declaration is not a redeclaration, there are a few additional
* semantic checks that must be applied. In addition, variable that was
* created for the declaration should be added to the IR stream.
* after the initializer if present or immediately after the name
* being declared if not."
*/
- if (!state->symbols->add_variable(declared_var)) {
+ if (!state->symbols->add_variable(var)) {
YYLTYPE loc = this->get_location();
_mesa_glsl_error(&loc, state, "name `%s' already taken in the "
"current scope", decl->identifier);
* global var is decled, then the function is defined with usage of
* the global var. See glslparsertest's CorrectModule.frag.
*/
- instructions->push_head(declared_var);
+ instructions->push_head(var);
}
instructions->append_list(&initializer_instructions);
* "Opaque variables cannot be treated as l-values; hence cannot
* be used as out or inout function parameters, nor can they be
* assigned into."
+ *
+ * From section 4.1.7 of the ARB_bindless_texture spec:
+ *
+ * "Samplers can be used as l-values, so can be assigned into and used
+ * as "out" and "inout" function parameters."
+ *
+ * From section 4.1.X of the ARB_bindless_texture spec:
+ *
+ * "Images can be used as l-values, so can be assigned into and used as
+ * "out" and "inout" function parameters."
*/
if ((var->data.mode == ir_var_function_inout || var->data.mode == ir_var_function_out)
- && type->contains_opaque()) {
+ && (type->contains_atomic() ||
+ (!state->has_bindless() && type->contains_opaque()))) {
_mesa_glsl_error(&loc, state, "out and inout parameters cannot "
- "contain opaque variables");
+ "contain %s variables",
+ state->has_bindless() ? "atomic" : "opaque");
type = glsl_type::error_type;
}
"sized", name);
}
+ /* From Section 6.1 (Function Definitions) of the GLSL 1.00 spec:
+ *
+ * "Arrays are allowed as arguments, but not as the return type. [...]
+ * The return type can also be a structure if the structure does not
+ * contain an array."
+ */
+ if (state->language_version == 100 && return_type->contains_array()) {
+ YYLTYPE loc = this->get_location();
+ _mesa_glsl_error(& loc, state,
+ "function `%s' return type contains an array", name);
+ }
+
/* From section 4.1.7 of the GLSL 4.40 spec:
*
* "[Opaque types] can only be declared as function parameters
* or uniform-qualified variables."
+ *
+ * The ARB_bindless_texture spec doesn't clearly state this, but as it says
+ * "Replace Section 4.1.7 (Samplers), p. 25" and, "Replace Section 4.1.X,
+ * (Images)", this should be allowed.
*/
- if (return_type->contains_opaque()) {
+ if (return_type->contains_atomic() ||
+ (!state->has_bindless() && return_type->contains_opaque())) {
YYLTYPE loc = this->get_location();
_mesa_glsl_error(&loc, state,
- "function `%s' return type can't contain an opaque type",
- name);
+ "function `%s' return type can't contain an %s type",
+ name, state->has_bindless() ? "atomic" : "opaque");
}
/**/
name);
}
+ /* Get the precision for the return type */
+ unsigned return_precision;
+
+ if (state->es_shader) {
+ YYLTYPE loc = this->get_location();
+ return_precision =
+ select_gles_precision(this->return_type->qualifier.precision,
+ return_type,
+ state,
+ &loc);
+ } else {
+ return_precision = GLSL_PRECISION_NONE;
+ }
/* Create an ir_function if one doesn't already exist. */
f = state->symbols->get_function(name);
* "User code can overload the built-in functions but cannot redefine
* them."
*/
- if (state->es_shader && state->language_version >= 300) {
+ if (state->es_shader) {
/* Local shader has no exact candidates; check the built-ins. */
- _mesa_glsl_initialize_builtin_functions();
- if (_mesa_glsl_has_builtin_function(name)) {
+ if (state->language_version >= 300 &&
+ _mesa_glsl_has_builtin_function(state, name)) {
YYLTYPE loc = this->get_location();
_mesa_glsl_error(& loc, state,
"A shader cannot redefine or overload built-in "
"function `%s' in GLSL ES 3.00", name);
return NULL;
}
+
+ if (state->language_version == 100) {
+ ir_function_signature *sig =
+ _mesa_glsl_find_builtin_function(state, name, &hir_parameters);
+ if (sig && sig->is_builtin()) {
+ _mesa_glsl_error(& loc, state,
+ "A shader cannot redefine built-in "
+ "function `%s' in GLSL ES 1.00", name);
+ }
+ }
}
/* Verify that this function's signature either doesn't match a previously
"match prototype", name);
}
+ if (sig->return_precision != return_precision) {
+ YYLTYPE loc = this->get_location();
+
+ _mesa_glsl_error(&loc, state, "function `%s' return type precision "
+ "doesn't match prototype", name);
+ }
+
if (sig->is_defined) {
if (is_definition) {
YYLTYPE loc = this->get_location();
*/
return NULL;
}
+ } else if (state->language_version == 100 && !is_definition) {
+ /* From the GLSL 1.00 spec, section 4.2.7:
+ *
+ * "A particular variable, structure or function declaration
+ * may occur at most once within a scope with the exception
+ * that a single function prototype plus the corresponding
+ * function definition are allowed."
+ */
+ YYLTYPE loc = this->get_location();
+ _mesa_glsl_error(&loc, state, "function `%s' redeclared", name);
}
}
}
*/
if (sig == NULL) {
sig = new(ctx) ir_function_signature(return_type);
+ sig->return_precision = return_precision;
f->add_signature(sig);
}
assert(state->current_function == NULL);
state->current_function = signature;
state->found_return = false;
+ state->found_begin_interlock = false;
+ state->found_end_interlock = false;
/* Duplicate parameters declared in the prototype as concrete variables.
* Add these to the symbol table.
if (state->has_420pack()) {
if (!apply_implicit_conversion(state->current_function->return_type,
- ret, state)) {
+ ret, state)
+ || (ret->type != state->current_function->return_type)) {
_mesa_glsl_error(& loc, state,
"could not implicitly convert return value "
"to %s, in function `%s'",
}
+/**
+ * Generate IR for a `demote' statement.
+ *
+ * Emits a compile error when `demote' appears outside a fragment shader,
+ * but still appends the ir_demote instruction so processing can continue.
+ * The statement itself yields no r-value (returns NULL).
+ */
+ir_rvalue *
+ast_demote_statement::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *ctx = state;
+
+ if (state->stage != MESA_SHADER_FRAGMENT) {
+ YYLTYPE loc = this->get_location();
+
+ _mesa_glsl_error(& loc, state,
+ "`demote' may only appear in a fragment shader");
+ }
+
+ instructions->push_tail(new(ctx) ir_demote);
+
+ return NULL;
+}
+
+
ir_rvalue *
ast_selection_statement::hir(exec_list *instructions,
struct _mesa_glsl_parse_state *state)
}
+/**
+ * Per-case-label bookkeeping stored in the switch statement's label hash
+ * table. Used to detect duplicate case values and to identify labels that
+ * occur after the `default' label.
+ */
+struct case_label {
+ /** Value of the case label. */
+ unsigned value;
+
+ /** Does this label occur after the default? */
+ bool after_default;
+
+ /**
+ * AST for the case label.
+ *
+ * This is only used to generate error messages for duplicate labels.
+ */
+ ast_expression *ast;
+};
+
+
/* Used for detection of duplicate case values, compare
* given contents directly.
*/
static bool
compare_case_value(const void *a, const void *b)
{
- return *(unsigned *) a == *(unsigned *) b;
+ /* Keys may point either at a `struct case_label' or at a bare `unsigned'
+ * case value; `value' is the first member of the struct, so both pointer
+ * kinds read the same slot. NOTE(review): relies on `value' remaining
+ * the first member — confirm if the struct layout changes.
+ */
+ return ((struct case_label *) a)->value == ((struct case_label *) b)->value;
}
static unsigned
key_contents(const void *key)
{
- return *(unsigned *) key;
+ /* Hash the label by its case value (the first member of the key object). */
+ return ((struct case_label *) key)->value;
}
* scalar integer."
*/
if (!test_expression->type->is_scalar() ||
- !test_expression->type->is_integer()) {
+ !test_expression->type->is_integer_32()) {
YYLTYPE loc = this->test_expression->get_location();
_mesa_glsl_error(& loc,
state,
"switch-statement expression must be scalar "
"integer");
+ return NULL;
}
/* Track the switch-statement nesting in a stack-like manner.
* if default should be chosen or not.
*/
if (!default_case.is_empty()) {
+ ir_factory body(instructions, state);
- ir_rvalue *const true_val = new (state) ir_constant(true);
- ir_dereference_variable *deref_run_default_var =
- new(state) ir_dereference_variable(state->switch_state.run_default);
+ ir_expression *cmp = NULL;
- /* Choose to run default case initially, following conditional
- * assignments might change this.
- */
- ir_assignment *const init_var =
- new(state) ir_assignment(deref_run_default_var, true_val);
- instructions->push_tail(init_var);
+ hash_table_foreach(state->switch_state.labels_ht, entry) {
+ const struct case_label *const l = (struct case_label *) entry->data;
- /* Default case was the last one, no checks required. */
- if (after_default.is_empty()) {
- instructions->append_list(&default_case);
- return NULL;
+ /* If the switch init-value is the value of one of the labels that
+ * occurs after the default case, disable execution of the default
+ * case.
+ */
+ if (l->after_default) {
+ ir_constant *const cnst =
+ state->switch_state.test_var->type->base_type == GLSL_TYPE_UINT
+ ? body.constant(unsigned(l->value))
+ : body.constant(int(l->value));
+
+ cmp = cmp == NULL
+ ? equal(cnst, state->switch_state.test_var)
+ : logic_or(cmp, equal(cnst, state->switch_state.test_var));
+ }
}
- foreach_in_list(ir_instruction, ir, &after_default) {
- ir_assignment *assign = ir->as_assignment();
-
- if (!assign)
- continue;
-
- /* Clone the check between case label and init expression. */
- ir_expression *exp = (ir_expression*) assign->condition;
- ir_expression *clone = exp->clone(state, NULL);
-
- ir_dereference_variable *deref_var =
- new(state) ir_dereference_variable(state->switch_state.run_default);
- ir_rvalue *const false_val = new (state) ir_constant(false);
-
- ir_assignment *const set_false =
- new(state) ir_assignment(deref_var, false_val, clone);
-
- instructions->push_tail(set_false);
- }
+ if (cmp != NULL)
+ body.emit(assign(state->switch_state.run_default, logic_not(cmp)));
+ else
+ body.emit(assign(state->switch_state.run_default, body.constant(true)));
/* Append default case and all cases after it. */
instructions->append_list(&default_case);
ast_case_label::hir(exec_list *instructions,
struct _mesa_glsl_parse_state *state)
{
- void *ctx = state;
-
- ir_dereference_variable *deref_fallthru_var =
- new(ctx) ir_dereference_variable(state->switch_state.is_fallthru_var);
+ ir_factory body(instructions, state);
- ir_rvalue *const true_val = new(ctx) ir_constant(true);
+ ir_variable *const fallthru_var = state->switch_state.is_fallthru_var;
/* If not default case, ... */
if (this->test_value != NULL) {
* comparison of cached test expression value to case label.
*/
ir_rvalue *const label_rval = this->test_value->hir(instructions, state);
- ir_constant *label_const = label_rval->constant_expression_value();
+ ir_constant *label_const =
+ label_rval->constant_expression_value(body.mem_ctx);
if (!label_const) {
YYLTYPE loc = this->test_value->get_location();
"constant expression");
/* Stuff a dummy value in to allow processing to continue. */
- label_const = new(ctx) ir_constant(0);
+ label_const = body.constant(0);
} else {
hash_entry *entry =
_mesa_hash_table_search(state->switch_state.labels_ht,
- (void *)(uintptr_t)&label_const->value.u[0]);
+ &label_const->value.u[0]);
if (entry) {
- ast_expression *previous_label = (ast_expression *) entry->data;
+ const struct case_label *const l =
+ (struct case_label *) entry->data;
+ const ast_expression *const previous_label = l->ast;
YYLTYPE loc = this->test_value->get_location();
+
_mesa_glsl_error(& loc, state, "duplicate case value");
loc = previous_label->get_location();
_mesa_glsl_error(& loc, state, "this is the previous case label");
} else {
+ struct case_label *l = ralloc(state->switch_state.labels_ht,
+ struct case_label);
+
+ l->value = label_const->value.u[0];
+ l->after_default = state->switch_state.previous_default != NULL;
+ l->ast = this->test_value;
+
_mesa_hash_table_insert(state->switch_state.labels_ht,
- (void *)(uintptr_t)&label_const->value.u[0],
- this->test_value);
+ &label_const->value.u[0],
+ l);
}
}
- ir_dereference_variable *deref_test_var =
- new(ctx) ir_dereference_variable(state->switch_state.test_var);
+ /* Create an r-value version of the ir_constant label here (after we may
+ * have created a fake one in error cases) that can be passed to
+ * apply_implicit_conversion below.
+ */
+ ir_rvalue *label = label_const;
- ir_expression *test_cond = new(ctx) ir_expression(ir_binop_all_equal,
- label_const,
- deref_test_var);
+ ir_rvalue *deref_test_var =
+ new(body.mem_ctx) ir_dereference_variable(state->switch_state.test_var);
/*
* From GLSL 4.40 specification section 6.2 ("Selection"):
* uint (see section 4.1.10 “Implicit Conversions”) before the compare
* is done."
*/
- if (label_const->type != state->switch_state.test_var->type) {
+ if (label->type != state->switch_state.test_var->type) {
YYLTYPE loc = this->test_value->get_location();
- const glsl_type *type_a = label_const->type;
+ const glsl_type *type_a = label->type;
const glsl_type *type_b = state->switch_state.test_var->type;
/* Check if int->uint implicit conversion is supported. */
glsl_type::int_type->can_implicitly_convert_to(glsl_type::uint_type,
state);
- if ((!type_a->is_integer() || !type_b->is_integer()) ||
+ if ((!type_a->is_integer_32() || !type_b->is_integer_32()) ||
!integer_conversion_supported) {
_mesa_glsl_error(&loc, state, "type mismatch with switch "
"init-expression and case label (%s != %s)",
/* Conversion of the case label. */
if (type_a->base_type == GLSL_TYPE_INT) {
if (!apply_implicit_conversion(glsl_type::uint_type,
- test_cond->operands[0], state))
+ label, state))
_mesa_glsl_error(&loc, state, "implicit type conversion error");
} else {
/* Conversion of the init-expression value. */
if (!apply_implicit_conversion(glsl_type::uint_type,
- test_cond->operands[1], state))
+ deref_test_var, state))
_mesa_glsl_error(&loc, state, "implicit type conversion error");
}
}
- }
- ir_assignment *set_fallthru_on_test =
- new(ctx) ir_assignment(deref_fallthru_var, true_val, test_cond);
+ /* If the implicit conversion was allowed, the types will already be
+ * the same. If the implicit conversion wasn't allowed, smash the
+ * type of the label anyway. This will prevent the expression
+ * constructor (below) from failing an assertion.
+ */
+ label->type = deref_test_var->type;
+ }
- instructions->push_tail(set_fallthru_on_test);
+ body.emit(assign(fallthru_var,
+ logic_or(fallthru_var, equal(label, deref_test_var))));
} else { /* default case */
if (state->switch_state.previous_default) {
YYLTYPE loc = this->get_location();
state->switch_state.previous_default = this;
/* Set fallthru condition on 'run_default' bool. */
- ir_dereference_variable *deref_run_default =
- new(ctx) ir_dereference_variable(state->switch_state.run_default);
- ir_rvalue *const cond_true = new(ctx) ir_constant(true);
- ir_expression *test_cond = new(ctx) ir_expression(ir_binop_all_equal,
- cond_true,
- deref_run_default);
-
- /* Set falltrhu state. */
- ir_assignment *set_fallthru =
- new(ctx) ir_assignment(deref_fallthru_var, true_val, test_cond);
-
- instructions->push_tail(set_fallthru);
+ body.emit(assign(fallthru_var,
+ logic_or(fallthru_var,
+ state->switch_state.run_default)));
}
/* Case statements do not have r-values. */
assert(decl_type);
if (is_interface) {
- if (decl_type->contains_opaque()) {
+ /* From section 4.3.7 of the ARB_bindless_texture spec:
+ *
+ * "(remove the following bullet from the last list on p. 39,
+ * thereby permitting sampler types in interface blocks; image
+ * types are also permitted in blocks by this extension)"
+ *
+ * * sampler types are not allowed
+ */
+ if (decl_type->contains_atomic() ||
+ (!state->has_bindless() && decl_type->contains_opaque())) {
_mesa_glsl_error(&loc, state, "uniform/buffer in non-default "
- "interface block contains opaque variable");
+ "interface block contains %s variable",
+ state->has_bindless() ? "atomic" : "opaque");
}
} else {
if (decl_type->contains_atomic()) {
_mesa_glsl_error(&loc, state, "atomic counter in structure");
}
- if (decl_type->contains_image()) {
+ if (!state->has_bindless() && decl_type->contains_image()) {
/* FINISHME: Same problem as with atomic counters.
* FINISHME: Request clarification from Khronos and add
* FINISHME: spec quotation here.
"to struct or interface block members");
}
+ validate_memory_qualifier_for_type(state, &loc, qual, decl_type);
+ validate_image_format_qualifier_for_type(state, &loc, qual, decl_type);
+
/* From Section 4.4.2.3 (Geometry Outputs) of the GLSL 4.50 spec:
*
* "A block member may be declared with a stream identifier, but
validate_matrix_layout_for_type(state, &loc, decl_type, NULL);
}
- if (qual->flags.q.read_only && qual->flags.q.write_only) {
- _mesa_glsl_error(&loc, state, "buffer variable can't be both "
- "readonly and writeonly.");
- }
-
foreach_list_typed (ast_declaration, decl, link,
&decl_list->declarations) {
YYLTYPE loc = decl->get_location();
fields[i].centroid = qual->flags.q.centroid ? 1 : 0;
fields[i].sample = qual->flags.q.sample ? 1 : 0;
fields[i].patch = qual->flags.q.patch ? 1 : 0;
- fields[i].precision = qual->precision;
fields[i].offset = -1;
fields[i].explicit_xfb_buffer = explicit_xfb_buffer;
fields[i].xfb_buffer = xfb_buffer;
"alignment of %s", field_type->name);
}
fields[i].offset = qual_offset;
- next_offset = glsl_align(qual_offset + size, align);
+ next_offset = qual_offset + size;
} else {
_mesa_glsl_error(&loc, state, "offset can only be used "
"with std430 and std140 layouts");
if (member_align == 0 ||
member_align & (member_align - 1)) {
_mesa_glsl_error(&loc, state, "align layout qualifier "
- "in not a power of 2");
+ "is not a power of 2");
} else {
fields[i].offset = glsl_align(offset, member_align);
- next_offset = glsl_align(fields[i].offset + size, align);
+ next_offset = fields[i].offset + size;
}
}
} else {
fields[i].offset = glsl_align(offset, expl_align);
- next_offset = glsl_align(fields[i].offset + size, align);
+ next_offset = fields[i].offset + size;
}
} else if (!qual->flags.q.explicit_offset) {
if (align != 0 && size != 0)
- next_offset = glsl_align(next_offset + size, align);
+ next_offset = glsl_align(next_offset, align) + size;
}
/* From the ARB_enhanced_layouts spec:
qual->offset, &xfb_offset)) {
fields[i].offset = xfb_offset;
block_xfb_offset = fields[i].offset +
- MAX2(xfb_stride, (int) (4 * field_type->component_slots()));
+ 4 * field_type->component_slots();
}
} else {
if (layout && layout->flags.q.explicit_xfb_offset) {
unsigned align = field_type->is_64bit() ? 8 : 4;
fields[i].offset = glsl_align(block_xfb_offset, align);
- block_xfb_offset +=
- MAX2(xfb_stride, (int) (4 * field_type->component_slots()));
+ block_xfb_offset += 4 * field_type->component_slots();
}
}
if (is_interface && layout &&
(layout->flags.q.uniform || layout->flags.q.buffer) &&
(field_type->without_array()->is_matrix()
- || field_type->without_array()->is_record())) {
+ || field_type->without_array()->is_struct())) {
/* If no layout is specified for the field, inherit the layout
* from the block.
*/
|| fields[i].matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR);
}
- /* Image qualifiers are allowed on buffer variables, which can only
- * be defined inside shader storage buffer objects
+ /* Memory qualifiers are allowed on buffer and image variables, while
+ * the format qualifier is only accepted for images.
*/
- if (layout && var_mode == ir_var_shader_storage) {
+ if (var_mode == ir_var_shader_storage ||
+ field_type->without_array()->is_image()) {
/* For readonly and writeonly qualifiers the field definition,
* if set, overwrites the layout qualifier.
*/
- if (qual->flags.q.read_only) {
- fields[i].memory_read_only = true;
- fields[i].memory_write_only = false;
- } else if (qual->flags.q.write_only) {
- fields[i].memory_read_only = false;
- fields[i].memory_write_only = true;
+ if (qual->flags.q.read_only || qual->flags.q.write_only) {
+ fields[i].memory_read_only = qual->flags.q.read_only;
+ fields[i].memory_write_only = qual->flags.q.write_only;
} else {
- fields[i].memory_read_only = layout->flags.q.read_only;
- fields[i].memory_write_only = layout->flags.q.write_only;
+ fields[i].memory_read_only =
+ layout ? layout->flags.q.read_only : 0;
+ fields[i].memory_write_only =
+ layout ? layout->flags.q.write_only : 0;
}
/* For other qualifiers, we set the flag if either the layout
* qualifier or the field qualifier are set
*/
fields[i].memory_coherent = qual->flags.q.coherent ||
- layout->flags.q.coherent;
+ (layout && layout->flags.q.coherent);
fields[i].memory_volatile = qual->flags.q._volatile ||
- layout->flags.q._volatile;
+ (layout && layout->flags.q._volatile);
fields[i].memory_restrict = qual->flags.q.restrict_flag ||
- layout->flags.q.restrict_flag;
+ (layout && layout->flags.q.restrict_flag);
+
+ if (field_type->without_array()->is_image()) {
+ if (qual->flags.q.explicit_image_format) {
+ if (qual->image_base_type !=
+ field_type->without_array()->sampled_type) {
+ _mesa_glsl_error(&loc, state, "format qualifier doesn't "
+ "match the base data type of the image");
+ }
+
+ fields[i].image_format = qual->image_format;
+ } else {
+ if (!qual->flags.q.write_only) {
+ _mesa_glsl_error(&loc, state, "image not qualified with "
+ "`writeonly' must have a format layout "
+ "qualifier");
+ }
+
+ fields[i].image_format = GL_NONE;
+ }
+ }
+ }
+
+ /* Precision qualifiers do not hold any meaning in Desktop GLSL */
+ if (state->es_shader) {
+ fields[i].precision = select_gles_precision(qual->precision,
+ field_type,
+ state,
+ &loc);
+ } else {
+ fields[i].precision = qual->precision;
}
i++;
validate_identifier(this->name, loc, state);
- const glsl_type *t =
- glsl_type::get_record_instance(fields, decl_count, this->name);
+ type = glsl_type::get_struct_instance(fields, decl_count, this->name);
- if (!state->symbols->add_type(name, t)) {
+ if (!type->is_anonymous() && !state->symbols->add_type(name, type)) {
const glsl_type *match = state->symbols->get_type(name);
/* allow struct matching for desktop GL - older UE4 does this */
- if (match != NULL && state->is_version(130, 0) && match->record_compare(t, false))
+ if (match != NULL && state->is_version(130, 0) && match->record_compare(type, true, false))
_mesa_glsl_warning(& loc, state, "struct `%s' previously defined", name);
else
_mesa_glsl_error(& loc, state, "struct `%s' previously defined", name);
const glsl_type *,
state->num_user_structures + 1);
if (s != NULL) {
- s[state->num_user_structures] = t;
+ s[state->num_user_structures] = type;
state->user_structures = s;
state->num_user_structures++;
}
"invalid qualifier for block",
this->block_name);
- /* The ast_interface_block has a list of ast_declarator_lists. We
- * need to turn those into ir_variables with an association
- * with this uniform block.
- */
enum glsl_interface_packing packing;
- if (this->layout.flags.q.shared) {
- packing = GLSL_INTERFACE_PACKING_SHARED;
+ if (this->layout.flags.q.std140) {
+ packing = GLSL_INTERFACE_PACKING_STD140;
} else if (this->layout.flags.q.packed) {
packing = GLSL_INTERFACE_PACKING_PACKED;
} else if (this->layout.flags.q.std430) {
packing = GLSL_INTERFACE_PACKING_STD430;
} else {
- /* The default layout is std140.
+ /* The default layout is shared.
*/
- packing = GLSL_INTERFACE_PACKING_STD140;
+ packing = GLSL_INTERFACE_PACKING_SHARED;
}
ir_variable_mode var_mode;
if (redeclaring_per_vertex) {
bool is_redeclaration;
- ir_variable *declared_var =
- get_variable_being_redeclared(var, loc, state,
+ var =
+ get_variable_being_redeclared(&var, loc, state,
true /* allow_all_redeclarations */,
&is_redeclaration);
if (!var_is_gl_id || !is_redeclaration) {
_mesa_glsl_error(&loc, state,
"redeclaration of gl_PerVertex can only "
"include built-in variables");
- } else if (declared_var->data.how_declared == ir_var_declared_normally) {
+ } else if (var->data.how_declared == ir_var_declared_normally) {
_mesa_glsl_error(&loc, state,
"`%s' has already been redeclared",
- declared_var->name);
+ var->name);
} else {
- declared_var->data.how_declared = ir_var_declared_in_block;
- declared_var->reinit_interface_type(block_type);
+ var->data.how_declared = ir_var_declared_in_block;
+ var->reinit_interface_type(block_type);
}
continue;
}
}
}
+/* Enforce the GLSL 4.00 subroutine uniqueness rule: for every function
+ * name that has been associated with a subroutine type (collected in
+ * state->subroutines during AST-to-IR conversion), at most one function
+ * definition with that name may exist in this shader stage.
+ *
+ * Emits a compile error (with a zeroed location, since the conflict is
+ * a whole-shader property rather than a single line) and returns after
+ * the first violation found.
+ */
+static void
+verify_subroutine_associated_funcs(struct _mesa_glsl_parse_state *state)
+{
+   YYLTYPE loc;
+   memset(&loc, 0, sizeof(loc));
+
+   /* Section 6.1.2 (Subroutines) of the GLSL 4.00 spec says:
+    *
+    *   "A program will fail to compile or link if any shader
+    *    or stage contains two or more functions with the same
+    *    name if the name is associated with a subroutine type."
+    */
+
+   for (int i = 0; i < state->num_subroutines; i++) {
+      unsigned definitions = 0;
+      ir_function *fn = state->subroutines[i];
+      /* Calculate number of function definitions with the same name */
+      foreach_in_list(ir_function_signature, sig, &fn->signatures) {
+         if (sig->is_defined) {
+            /* Overloads share one ir_function; two *defined* signatures
+             * under a subroutine-associated name is the error case.
+             */
+            if (++definitions > 1) {
+               _mesa_glsl_error(&loc, state,
+                                "%s shader contains two or more function "
+                                "definitions with name `%s', which is "
+                                "associated with a subroutine type.\n",
+                                _mesa_shader_stage_to_string(state->stage),
+                                fn->name);
+               return;
+            }
+         }
+      }
+   }
+}
static void
remove_per_vertex_blocks(exec_list *instructions,
}
}
}
+
+/* Convert an ast_warnings_toggle node to IR: it produces no IR at all,
+ * only the side effect of flipping the parse state's warning flag.
+ *
+ * NOTE(review): `enable' is a member of ast_warnings_toggle set outside
+ * this view — presumably by the parser from a warning-control pragma;
+ * confirm against the grammar.
+ */
+ir_rvalue *
+ast_warnings_toggle::hir(exec_list *,
+                         struct _mesa_glsl_parse_state *state)
+{
+   state->warnings_enabled = enable;
+   /* No instructions are emitted and there is no resulting rvalue. */
+   return NULL;
+}