#include "glsl_parser_extras.h"
#include "ast.h"
#include "compiler/glsl_types.h"
-#include "program/hash_table.h"
+#include "util/hash_table.h"
#include "main/macros.h"
#include "main/shaderobj.h"
#include "ir.h"
static void
detect_conflicting_assignments(struct _mesa_glsl_parse_state *state,
- exec_list *instructions);
+ exec_list *instructions);
static void
remove_per_vertex_blocks(exec_list *instructions,
_mesa_glsl_parse_state *state, ir_variable_mode mode);
case GLSL_TYPE_INT: return ir_unop_i2d;
case GLSL_TYPE_UINT: return ir_unop_u2d;
case GLSL_TYPE_FLOAT: return ir_unop_f2d;
+ case GLSL_TYPE_INT64: return ir_unop_i642d;
+ case GLSL_TYPE_UINT64: return ir_unop_u642d;
+ default: return (ir_expression_operation)0;
+ }
+
+ case GLSL_TYPE_UINT64:
+ if (!state->has_int64())
+ return (ir_expression_operation)0;
+ switch (from->base_type) {
+ case GLSL_TYPE_INT: return ir_unop_i2u64;
+ case GLSL_TYPE_UINT: return ir_unop_u2u64;
+ case GLSL_TYPE_INT64: return ir_unop_i642u64;
+ default: return (ir_expression_operation)0;
+ }
+
+ case GLSL_TYPE_INT64:
+ if (!state->has_int64())
+ return (ir_expression_operation)0;
+ switch (from->base_type) {
+ case GLSL_TYPE_INT: return ir_unop_i2i64;
default: return (ir_expression_operation)0;
}
* If a conversion is possible (or unnecessary), \c true is returned.
* Otherwise \c false is returned.
*/
-bool
+static bool
apply_implicit_conversion(const glsl_type *to, ir_rvalue * &from,
struct _mesa_glsl_parse_state *state)
{
* (|). The operands must be of type signed or unsigned integers or
* integer vectors."
*/
- if (!type_a->is_integer()) {
+ if (!type_a->is_integer_32_64()) {
_mesa_glsl_error(loc, state, "LHS of `%s' must be an integer",
ast_expression::operator_string(op));
return glsl_type::error_type;
}
- if (!type_b->is_integer()) {
+ if (!type_b->is_integer_32_64()) {
_mesa_glsl_error(loc, state, "RHS of `%s' must be an integer",
ast_expression::operator_string(op));
return glsl_type::error_type;
* "The operator modulus (%) operates on signed or unsigned integers or
* integer vectors."
*/
- if (!type_a->is_integer()) {
+ if (!type_a->is_integer_32_64()) {
_mesa_glsl_error(loc, state, "LHS of operator %% must be an integer");
return glsl_type::error_type;
}
- if (!type_b->is_integer()) {
+ if (!type_b->is_integer_32_64()) {
_mesa_glsl_error(loc, state, "RHS of operator %% must be an integer");
return glsl_type::error_type;
}
* must be signed or unsigned integers or integer vectors. One operand
* can be signed while the other is unsigned."
*/
- if (!type_a->is_integer()) {
+ if (!type_a->is_integer_32_64()) {
_mesa_glsl_error(loc, state, "LHS of operator %s must be an integer or "
"integer vector", ast_expression::operator_string(op));
return glsl_type::error_type;
/* Check for implicit conversion in GLSL 1.20 */
if (apply_implicit_conversion(lhs->type, rhs, state)) {
if (rhs->type == lhs->type)
- return rhs;
+ return rhs;
}
_mesa_glsl_error(&loc, state,
* i = j += 1;
*/
if (needs_rvalue) {
- ir_variable *var = new(ctx) ir_variable(rhs->type, "assignment_tmp",
- ir_var_temporary);
- instructions->push_tail(var);
- instructions->push_tail(assign(var, rhs));
-
+ ir_rvalue *rvalue;
if (!error_emitted) {
- ir_dereference_variable *deref_var = new(ctx) ir_dereference_variable(var);
+ ir_variable *var = new(ctx) ir_variable(rhs->type, "assignment_tmp",
+ ir_var_temporary);
+ instructions->push_tail(var);
+ instructions->push_tail(assign(var, rhs));
+
+ ir_dereference_variable *deref_var =
+ new(ctx) ir_dereference_variable(var);
instructions->push_tail(new(ctx) ir_assignment(lhs, deref_var));
+ rvalue = new(ctx) ir_dereference_variable(var);
+ } else {
+ rvalue = ir_rvalue::error_value(ctx);
}
- ir_rvalue *rvalue = new(ctx) ir_dereference_variable(var);
-
*out_rvalue = rvalue;
} else {
if (!error_emitted)
ir_variable *var;
var = new(ctx) ir_variable(lvalue->type, "_post_incdec_tmp",
- ir_var_temporary);
+ ir_var_temporary);
instructions->push_tail(var);
instructions->push_tail(new(ctx) ir_assignment(new(ctx) ir_dereference_variable(var),
- lvalue));
+ lvalue));
return new(ctx) ir_dereference_variable(var);
}
case GLSL_TYPE_INT:
case GLSL_TYPE_BOOL:
case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
return new(mem_ctx) ir_expression(operation, op0, op1);
case GLSL_TYPE_ARRAY: {
*/
ir_rvalue *
get_scalar_boolean_operand(exec_list *instructions,
- struct _mesa_glsl_parse_state *state,
- ast_expression *parent_expr,
- int operand,
- const char *operand_name,
- bool *error_emitted)
+ struct _mesa_glsl_parse_state *state,
+ ast_expression *parent_expr,
+ int operand,
+ const char *operand_name,
+ bool *error_emitted)
{
ast_expression *expr = parent_expr->subexpressions[operand];
void *ctx = state;
return new(ctx) ir_constant((unsigned) 1);
case GLSL_TYPE_INT:
return new(ctx) ir_constant(1);
+ case GLSL_TYPE_UINT64:
+ return new(ctx) ir_constant((uint64_t) 1);
+ case GLSL_TYPE_INT64:
+ return new(ctx) ir_constant((int64_t) 1);
default:
case GLSL_TYPE_FLOAT:
return new(ctx) ir_constant(1.0f);
-1, /* ast_float_constant doesn't conv to ir_expression. */
-1, /* ast_bool_constant doesn't conv to ir_expression. */
-1, /* ast_sequence doesn't convert to ir_expression. */
+ -1, /* ast_aggregate shouldn't ever even get here. */
};
ir_rvalue *result = NULL;
ir_rvalue *op[3];
- const struct glsl_type *type; /* a temporary variable for switch cases */
+ const struct glsl_type *type, *orig_type;
bool error_emitted = false;
YYLTYPE loc;
* in a scalar boolean. See page 57 of the GLSL 1.50 spec.
*/
assert(type->is_error()
- || ((type->base_type == GLSL_TYPE_BOOL)
- && type->is_scalar()));
+ || ((type->base_type == GLSL_TYPE_BOOL)
+ && type->is_scalar()));
result = new(ctx) ir_expression(operations[this->oper], type,
op[0], op[1]);
error_emitted = true;
}
- if (!op[0]->type->is_integer()) {
+ if (!op[0]->type->is_integer_32_64()) {
_mesa_glsl_error(&loc, state, "operand of `~' must be an integer");
error_emitted = true;
}
op[0] = this->subexpressions[0]->hir(instructions, state);
op[1] = this->subexpressions[1]->hir(instructions, state);
+ orig_type = op[0]->type;
type = arithmetic_result_type(op[0], op[1],
(this->oper == ast_mul_assign),
state, & loc);
+ if (type != orig_type) {
+ _mesa_glsl_error(& loc, state,
+ "could not implicitly convert "
+ "%s to %s", type->name, orig_type->name);
+ type = glsl_type::error_type;
+ }
+
ir_rvalue *temp_rhs = new(ctx) ir_expression(operations[this->oper], type,
op[0], op[1]);
op[0] = this->subexpressions[0]->hir(instructions, state);
op[1] = this->subexpressions[1]->hir(instructions, state);
+ orig_type = op[0]->type;
type = modulus_result_type(op[0], op[1], state, &loc);
+ if (type != orig_type) {
+ _mesa_glsl_error(& loc, state,
+ "could not implicitly convert "
+ "%s to %s", type->name, orig_type->name);
+ type = glsl_type::error_type;
+ }
+
assert(operations[this->oper] == ir_binop_mod);
ir_rvalue *temp_rhs;
this->subexpressions[0]->set_is_lhs(true);
op[0] = this->subexpressions[0]->hir(instructions, state);
op[1] = this->subexpressions[1]->hir(instructions, state);
+
+ orig_type = op[0]->type;
type = bit_logic_result_type(op[0], op[1], this->oper, state, &loc);
+
+ if (type != orig_type) {
+ _mesa_glsl_error(& loc, state,
+ "could not implicitly convert "
+ "%s to %s", type->name, orig_type->name);
+ type = glsl_type::error_type;
+ }
+
ir_rvalue *temp_rhs = new(ctx) ir_expression(operations[this->oper],
type, op[0], op[1]);
error_emitted =
result = new(ctx) ir_constant(this->primary_expression.double_constant);
break;
+ case ast_uint64_constant:
+ result = new(ctx) ir_constant(this->primary_expression.uint64_constant);
+ break;
+
+ case ast_int64_constant:
+ result = new(ctx) ir_constant(this->primary_expression.int64_constant);
+ break;
+
case ast_sequence: {
/* It should not be possible to generate a sequence in the AST without
* any expressions in it.
case ast_float_constant:
case ast_bool_constant:
case ast_double_constant:
+ case ast_int64_constant:
+ case ast_uint64_constant:
return false;
case ast_aggregate:
type->name);
}
}
+
+
+ /* Section 4.1.7.3 (Atomic Counters) of the GLSL ES 3.10 spec says:
+ *
+ * "The default precision of all atomic types is highp. It is an error to
+ * declare an atomic type with a different precision or to specify the
+ * default precision for an atomic type to be lowp or mediump."
+ */
+ if (type->base_type == GLSL_TYPE_ATOMIC_UINT &&
+ precision != ast_precision_high) {
+ _mesa_glsl_error(loc, state,
+ "atomic_uint can only have highp precision qualifier");
+ }
+
return precision;
}
}
}
+/* Determine whether \c var is a legal candidate for the "invariant"
+ * qualifier in the current shader stage.
+ *
+ * Varyings (stage-interface variables) always qualify.  From GLSL 1.30
+ * onward, fragment shader outputs additionally qualify, because later
+ * specs dropped the 1.20-era restriction quoted below.
+ */
+static bool
+is_allowed_invariant(ir_variable *var, struct _mesa_glsl_parse_state *state)
+{
+   if (is_varying_var(var, state->stage))
+      return true;
+
+   /* From Section 4.6.1 ("The Invariant Qualifier") GLSL 1.20 spec:
+    * "Only variables output from a vertex shader can be candidates
+    * for invariance".
+    */
+   if (!state->is_version(130, 0))
+      return false;
+
+   /*
+    * Later specs remove this language - so allow invariant
+    * on fragment shader outputs as well.
+    */
+   if (state->stage == MESA_SHADER_FRAGMENT &&
+       var->data.mode == ir_var_shader_out)
+      return true;
+   return false;
+}
/**
* Matrix layout qualifiers are only allowed on certain types
if (state->is_version(130, 300)
&& var_type->contains_integer()
&& interpolation != INTERP_MODE_FLAT
- && ((state->stage == MESA_SHADER_FRAGMENT && mode == ir_var_shader_in)
- || (state->stage == MESA_SHADER_VERTEX && mode == ir_var_shader_out
- && state->es_shader))) {
- const char *shader_var_type = (state->stage == MESA_SHADER_VERTEX) ?
- "vertex output" : "fragment input";
- _mesa_glsl_error(loc, state, "if a %s is (or contains) "
- "an integer, then it must be qualified with 'flat'",
- shader_var_type);
+ && state->stage == MESA_SHADER_FRAGMENT
+ && mode == ir_var_shader_in) {
+ _mesa_glsl_error(loc, state, "if a fragment input is (or contains) "
+ "an integer, then it must be qualified with 'flat'");
}
/* Double fragment inputs must be qualified with 'flat'.
? "origin_upper_left" : "pixel_center_integer";
_mesa_glsl_error(loc, state,
- "layout qualifier `%s' can only be applied to "
- "fragment shader input `gl_FragCoord'",
- qual_string);
+ "layout qualifier `%s' can only be applied to "
+ "fragment shader input `gl_FragCoord'",
+ qual_string);
}
if (qual->flags.q.explicit_location) {
_mesa_glsl_error(loc, state, "early_fragment_tests layout qualifier only "
"valid in fragment shader input layout declaration.");
}
+
+ if (qual->flags.q.inner_coverage) {
+ _mesa_glsl_error(loc, state, "inner_coverage layout qualifier only "
+ "valid in fragment shader input layout declaration.");
+ }
+
+ if (qual->flags.q.post_depth_coverage) {
+ _mesa_glsl_error(loc, state, "post_depth_coverage layout qualifier only "
+ "valid in fragment shader input layout declaration.");
+ }
}
static void
*/
assert(var->data.mode != ir_var_temporary);
if (qual->flags.q.in && qual->flags.q.out)
- var->data.mode = ir_var_function_inout;
+ var->data.mode = is_parameter ? ir_var_function_inout : ir_var_shader_out;
else if (qual->flags.q.in)
var->data.mode = is_parameter ? ir_var_function_in : ir_var_shader_in;
else if (qual->flags.q.attribute
- || (qual->flags.q.varying && (state->stage == MESA_SHADER_FRAGMENT)))
+ || (qual->flags.q.varying && (state->stage == MESA_SHADER_FRAGMENT)))
var->data.mode = ir_var_shader_in;
else if (qual->flags.q.out)
var->data.mode = is_parameter ? ir_var_function_out : ir_var_shader_out;
else if (qual->flags.q.shared_storage)
var->data.mode = ir_var_shader_shared;
+ var->data.fb_fetch_output = state->stage == MESA_SHADER_FRAGMENT &&
+ qual->flags.q.in && qual->flags.q.out;
+
if (!is_parameter && is_varying_var(var, state->stage)) {
/* User-defined ins/outs are not permitted in compute shaders. */
if (state->stage == MESA_SHADER_COMPUTE) {
earlier->data.depth_layout = var->data.depth_layout;
+ } else if (state->has_framebuffer_fetch() &&
+ strcmp(var->name, "gl_LastFragData") == 0 &&
+ var->type == earlier->type &&
+ var->data.mode == ir_var_auto) {
+ /* According to the EXT_shader_framebuffer_fetch spec:
+ *
+ * "By default, gl_LastFragData is declared with the mediump precision
+ * qualifier. This can be changed by redeclaring the corresponding
+ * variables with the desired precision qualifier."
+ */
+ earlier->data.precision = var->data.precision;
+
} else if (allow_all_redeclarations) {
if (earlier->data.mode != var->data.mode) {
_mesa_glsl_error(&loc, state,
*/
ir_rvalue *
process_initializer(ir_variable *var, ast_declaration *decl,
- ast_fully_specified_type *type,
- exec_list *initializer_instructions,
- struct _mesa_glsl_parse_state *state)
+ ast_fully_specified_type *type,
+ exec_list *initializer_instructions,
+ struct _mesa_glsl_parse_state *state)
{
ir_rvalue *result = NULL;
if (var->data.patch)
return;
+ var->data.tess_varying_implicit_sized_array = var->type->is_unsized_array();
+
validate_layout_qualifier_vertex_count(state, loc, var, num_vertices,
&state->tcs_output_size,
"tessellation control shader output");
if (var->data.patch)
return;
- /* Unsized arrays are implicitly sized to gl_MaxPatchVertices. */
+ /* The ARB_tessellation_shader spec says:
+ *
+ * "Declaring an array size is optional. If no size is specified, it
+ * will be taken from the implementation-dependent maximum patch size
+ * (gl_MaxPatchVertices). If a size is specified, it must match the
+ * maximum patch size; otherwise, a compile or link error will occur."
+ *
+ * This text appears twice, once for TCS inputs, and again for TES inputs.
+ */
if (var->type->is_unsized_array()) {
var->type = glsl_type::get_array_instance(var->type->fields.array,
state->Const.MaxPatchVertices);
+ var->data.tess_varying_implicit_sized_array = true;
+ } else if (var->type->length != state->Const.MaxPatchVertices) {
+ _mesa_glsl_error(&loc, state,
+ "per-vertex tessellation shader input arrays must be "
+ "sized to gl_MaxPatchVertices (%d).",
+ state->Const.MaxPatchVertices);
}
}
_mesa_glsl_error(& loc, state,
"undeclared variable `%s' cannot be marked "
"invariant", decl->identifier);
- } else if (!is_varying_var(earlier, state->stage)) {
+ } else if (!is_allowed_invariant(earlier, state)) {
_mesa_glsl_error(&loc, state,
"`%s' cannot be marked invariant; interfaces between "
"shader stages only.", decl->identifier);
* confusing error.
*/
assert(this->type->specifier->structure == NULL || decl_type != NULL
- || state->error);
+ || state->error);
if (decl_type == NULL) {
_mesa_glsl_error(&loc, state,
}
apply_type_qualifier_to_variable(& this->type->qualifier, var, state,
- & loc, false);
+ & loc, false);
apply_layout_qualifier_to_variable(&this->type->qualifier, var, state,
&loc);
}
if (this->type->qualifier.flags.q.invariant) {
- if (!is_varying_var(var, state->stage)) {
+ if (!is_allowed_invariant(var, state)) {
_mesa_glsl_error(&loc, state,
"`%s' cannot be marked invariant; interfaces between "
"shader stages only", var->name);
* * A matrix
* * A structure
* * An array of array
+ *
+ * ES 3.20 updates this to apply to tessellation and geometry shaders
+ * as well. Because there are per-vertex arrays in the new stages,
+ * it strikes the "array of..." rules and replaces them with these:
+ *
+ * * For per-vertex-arrayed variables (applies to tessellation
+ * control, tessellation evaluation and geometry shaders):
+ *
+ * * Per-vertex-arrayed arrays of arrays
+ * * Per-vertex-arrayed arrays of structures
+ *
+ * * For non-per-vertex-arrayed variables:
+ *
+ * * An array of arrays
+ * * An array of structures
+ *
+ * which basically says to unwrap the per-vertex aspect and apply
+ * the old rules.
*/
if (state->es_shader) {
if (var->type->is_array() &&
"cannot have an array of arrays",
_mesa_shader_stage_to_string(state->stage));
}
- if (state->stage == MESA_SHADER_VERTEX) {
- if (var->type->is_array() &&
- var->type->fields.array->is_record()) {
+ if (state->stage <= MESA_SHADER_GEOMETRY) {
+ const glsl_type *type = var->type;
+
+ if (state->stage == MESA_SHADER_TESS_CTRL &&
+ !var->data.patch && var->type->is_array()) {
+ type = var->type->fields.array;
+ }
+
+ if (type->is_array() && type->fields.array->is_record()) {
_mesa_glsl_error(&loc, state,
- "vertex shader output "
- "cannot have an array of structs");
+ "%s shader output cannot have "
+ "an array of structs",
+ _mesa_shader_stage_to_string(state->stage));
}
- if (var->type->is_record()) {
- for (unsigned i = 0; i < var->type->length; i++) {
- if (var->type->fields.structure[i].type->is_array() ||
- var->type->fields.structure[i].type->is_record())
+ if (type->is_record()) {
+ for (unsigned i = 0; i < type->length; i++) {
+ if (type->fields.structure[i].type->is_array() ||
+ type->fields.structure[i].type->is_record())
_mesa_glsl_error(&loc, state,
- "vertex shader output cannot have a "
+ "%s shader output cannot have a "
"struct that contains an "
- "array or struct");
+ "array or struct",
+ _mesa_shader_stage_to_string(state->stage));
}
}
}
const glsl_type *const t = (earlier == NULL)
? var->type : earlier->type;
- if (t->is_unsized_array())
+ /* Skip the unsized array check for TCS/TES/GS inputs & TCS outputs.
+ *
+ * The GL_OES_tessellation_shader spec says about inputs:
+ *
+ * "Declaring an array size is optional. If no size is specified,
+ * it will be taken from the implementation-dependent maximum
+ * patch size (gl_MaxPatchVertices)."
+ *
+ * and about TCS outputs:
+ *
+ * "If no size is specified, it will be taken from output patch
+ * size declared in the shader."
+ *
+ * The GL_OES_geometry_shader spec says:
+ *
+ * "All geometry shader input unsized array declarations will be
+ * sized by an earlier input primitive layout qualifier, when
+ * present, as per the following table."
+ */
+ const bool implicitly_sized =
+ (var->data.mode == ir_var_shader_in &&
+ state->stage >= MESA_SHADER_TESS_CTRL &&
+ state->stage <= MESA_SHADER_GEOMETRY) ||
+ (var->data.mode == ir_var_shader_out &&
+ state->stage == MESA_SHADER_TESS_CTRL);
+
+ if (t->is_unsized_array() && !implicitly_sized)
/* Section 10.17 of the GLSL ES 1.00 specification states that
* unsized array declarations have been removed from the language.
* Arrays that are sized using an initializer are still explicitly
state->is_version(120, 100)) {
YYLTYPE loc = this->get_location();
_mesa_glsl_error(&loc, state,
- "declaration of function `%s' not allowed within "
- "function body", name);
+ "declaration of function `%s' not allowed within "
+ "function body", name);
}
validate_identifier(name, this->get_location(), state);
}
+/* Hash table comparison callback used for detection of duplicate switch
+ * case values.  Keys are pointers to the 32-bit case label value, so the
+ * pointed-to contents are compared directly rather than the pointers
+ * themselves.
+ */
+static bool
+compare_case_value(const void *a, const void *b)
+{
+   return *(unsigned *) a == *(unsigned *) b;
+}
+
+
+/* Hash function used for detection of duplicate switch case values.
+ * The key is a pointer to the 32-bit case label value; the pointed-to
+ * value itself serves as the hash (identity hash), matching
+ * compare_case_value() which also dereferences the key.
+ */
+static unsigned
+key_contents(const void *key)
+{
+   return *(unsigned *) key;
+}
+
+
ir_rvalue *
ast_switch_statement::hir(exec_list *instructions,
struct _mesa_glsl_parse_state *state)
state->switch_state.is_switch_innermost = true;
state->switch_state.switch_nesting_ast = this;
- state->switch_state.labels_ht = hash_table_ctor(0, hash_table_pointer_hash,
- hash_table_pointer_compare);
+ state->switch_state.labels_ht =
+ _mesa_hash_table_create(NULL, key_contents,
+ compare_case_value);
state->switch_state.previous_default = NULL;
/* Initalize is_fallthru state to false.
instructions->push_tail(irif);
}
- hash_table_dtor(state->switch_state.labels_ht);
+ _mesa_hash_table_destroy(state->switch_state.labels_ht, NULL);
state->switch_state = saved;
*/
test_expression->set_is_lhs(true);
/* Cache value of test expression. */
- ir_rvalue *const test_val =
- test_expression->hir(instructions,
- state);
+ ir_rvalue *const test_val = test_expression->hir(instructions, state);
state->switch_state.test_var = new(ctx) ir_variable(test_val->type,
"switch_test_tmp",
/* Stuff a dummy value in to allow processing to continue. */
label_const = new(ctx) ir_constant(0);
} else {
- ast_expression *previous_label = (ast_expression *)
- hash_table_find(state->switch_state.labels_ht,
- (void *)(uintptr_t)label_const->value.u[0]);
+ hash_entry *entry =
+ _mesa_hash_table_search(state->switch_state.labels_ht,
+ (void *)(uintptr_t)&label_const->value.u[0]);
- if (previous_label) {
+ if (entry) {
+ ast_expression *previous_label = (ast_expression *) entry->data;
YYLTYPE loc = this->test_value->get_location();
_mesa_glsl_error(& loc, state, "duplicate case value");
loc = previous_label->get_location();
_mesa_glsl_error(& loc, state, "this is the previous case label");
} else {
- hash_table_insert(state->switch_state.labels_ht,
- this->test_value,
- (void *)(uintptr_t)label_const->value.u[0]);
+ _mesa_hash_table_insert(state->switch_state.labels_ht,
+ (void *)(uintptr_t)&label_const->value.u[0],
+ this->test_value);
}
}
* the types to HIR. This ensures that structure definitions embedded in
* other structure definitions or in interface blocks are processed.
*/
- glsl_struct_field *const fields = ralloc_array(state, glsl_struct_field,
- decl_count);
+ glsl_struct_field *const fields = rzalloc_array(state, glsl_struct_field,
+ decl_count);
bool first_member = true;
bool first_member_has_explicit_location = false;
unsigned qual_location;
if (process_qualifier_constant(state, &loc, "location",
qual->location, &qual_location)) {
- fields[i].location = VARYING_SLOT_VAR0 + qual_location;
+ fields[i].location = qual_location +
+ (fields[i].patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0);
expl_location = fields[i].location +
fields[i].type->count_attribute_slots(false);
}
this->block_name);
}
- if (!this->layout.flags.q.buffer &&
- this->layout.flags.q.std430) {
- _mesa_glsl_error(&loc, state,
- "std430 storage block layout qualifier is supported "
- "only for shader storage blocks");
+ /* Validate qualifiers:
+ *
+ * - Layout Qualifiers as per the table in Section 4.4
+ * ("Layout Qualifiers") of the GLSL 4.50 spec.
+ *
+ * - Memory Qualifiers as per Section 4.10 ("Memory Qualifiers") of the
+ * GLSL 4.50 spec:
+ *
+ * "Additionally, memory qualifiers may also be used in the declaration
+ * of shader storage blocks"
+ *
+ * Note the table in Section 4.4 says std430 is allowed on both uniform and
+ * buffer blocks however Section 4.4.5 (Uniform and Shader Storage Block
+ * Layout Qualifiers) of the GLSL 4.50 spec says:
+ *
+ * "The std430 qualifier is supported only for shader storage blocks;
+ * using std430 on a uniform block will result in a compile-time error."
+ */
+ ast_type_qualifier allowed_blk_qualifiers;
+ allowed_blk_qualifiers.flags.i = 0;
+ if (this->layout.flags.q.buffer || this->layout.flags.q.uniform) {
+ allowed_blk_qualifiers.flags.q.shared = 1;
+ allowed_blk_qualifiers.flags.q.packed = 1;
+ allowed_blk_qualifiers.flags.q.std140 = 1;
+ allowed_blk_qualifiers.flags.q.row_major = 1;
+ allowed_blk_qualifiers.flags.q.column_major = 1;
+ allowed_blk_qualifiers.flags.q.explicit_align = 1;
+ allowed_blk_qualifiers.flags.q.explicit_binding = 1;
+ if (this->layout.flags.q.buffer) {
+ allowed_blk_qualifiers.flags.q.buffer = 1;
+ allowed_blk_qualifiers.flags.q.std430 = 1;
+ allowed_blk_qualifiers.flags.q.coherent = 1;
+ allowed_blk_qualifiers.flags.q._volatile = 1;
+ allowed_blk_qualifiers.flags.q.restrict_flag = 1;
+ allowed_blk_qualifiers.flags.q.read_only = 1;
+ allowed_blk_qualifiers.flags.q.write_only = 1;
+ } else {
+ allowed_blk_qualifiers.flags.q.uniform = 1;
+ }
+ } else {
+ /* Interface block */
+ assert(this->layout.flags.q.in || this->layout.flags.q.out);
+
+ allowed_blk_qualifiers.flags.q.explicit_location = 1;
+ if (this->layout.flags.q.out) {
+ allowed_blk_qualifiers.flags.q.out = 1;
+ if (state->stage == MESA_SHADER_GEOMETRY ||
+ state->stage == MESA_SHADER_TESS_CTRL ||
+ state->stage == MESA_SHADER_TESS_EVAL ||
+ state->stage == MESA_SHADER_VERTEX ) {
+ allowed_blk_qualifiers.flags.q.explicit_xfb_offset = 1;
+ allowed_blk_qualifiers.flags.q.explicit_xfb_buffer = 1;
+ allowed_blk_qualifiers.flags.q.xfb_buffer = 1;
+ allowed_blk_qualifiers.flags.q.explicit_xfb_stride = 1;
+ allowed_blk_qualifiers.flags.q.xfb_stride = 1;
+ if (state->stage == MESA_SHADER_GEOMETRY) {
+ allowed_blk_qualifiers.flags.q.stream = 1;
+ allowed_blk_qualifiers.flags.q.explicit_stream = 1;
+ }
+ if (state->stage == MESA_SHADER_TESS_CTRL) {
+ allowed_blk_qualifiers.flags.q.patch = 1;
+ }
+ }
+ } else {
+ allowed_blk_qualifiers.flags.q.in = 1;
+ if (state->stage == MESA_SHADER_TESS_EVAL) {
+ allowed_blk_qualifiers.flags.q.patch = 1;
+ }
+ }
}
+ this->layout.validate_flags(&loc, state, allowed_blk_qualifiers,
+ "invalid qualifier for block",
+ this->block_name);
+
/* The ast_interface_block has a list of ast_declarator_lists. We
* need to turn those into ir_variables with an association
* with this uniform block.
"Interface block sets both readonly and writeonly");
}
- if (this->layout.flags.q.explicit_component) {
- _mesa_glsl_error(&loc, state, "component layout qualifier cannot be "
- "applied to a matrix, a structure, a block, or an "
- "array containing any of these.");
- }
-
unsigned qual_stream;
if (!process_qualifier_constant(state, &loc, "stream", this->layout.stream,
&qual_stream) ||
layout.location, &expl_location)) {
return NULL;
} else {
- expl_location = VARYING_SLOT_VAR0 + expl_location;
+ expl_location += this->layout.flags.q.patch ? VARYING_SLOT_PATCH0
+ : VARYING_SLOT_VAR0;
}
}
glsl_type::get_interface_instance(fields,
num_variables,
packing,
+ matrix_layout ==
+ GLSL_MATRIX_LAYOUT_ROW_MAJOR,
this->block_name);
unsigned component_size = block_type->contains_double() ? 8 : 4;
_mesa_glsl_error(&loc, state, "geometry shader inputs must be arrays");
} else if ((state->stage == MESA_SHADER_TESS_CTRL ||
state->stage == MESA_SHADER_TESS_EVAL) &&
+ !this->layout.flags.q.patch &&
this->array_specifier == NULL &&
var_mode == ir_var_shader_in) {
_mesa_glsl_error(&loc, state, "per-vertex tessellation shader inputs must be arrays");
} else if (state->stage == MESA_SHADER_TESS_CTRL &&
+ !this->layout.flags.q.patch &&
this->array_specifier == NULL &&
var_mode == ir_var_shader_out) {
_mesa_glsl_error(&loc, state, "tessellation control shader outputs must be arrays");
if (var_mode == ir_var_shader_in || var_mode == ir_var_uniform)
var->data.read_only = true;
+ var->data.patch = this->layout.flags.q.patch;
+
if (state->stage == MESA_SHADER_GEOMETRY && var_mode == ir_var_shader_in)
handle_geometry_shader_input_decl(state, loc, var);
else if ((state->stage == MESA_SHADER_TESS_CTRL ||
ir_rvalue *
ast_tcs_output_layout::hir(exec_list *instructions,
- struct _mesa_glsl_parse_state *state)
+ struct _mesa_glsl_parse_state *state)
{
YYLTYPE loc = this->get_location();
*/
if (state->tcs_output_size != 0 && state->tcs_output_size != num_vertices) {
_mesa_glsl_error(&loc, state,
- "this tessellation control shader output layout "
- "specifies %u vertices, but a previous output "
- "is declared with size %u",
- num_vertices, state->tcs_output_size);
+ "this tessellation control shader output layout "
+ "specifies %u vertices, but a previous output "
+ "is declared with size %u",
+ num_vertices, state->tcs_output_size);
return NULL;
}
foreach_in_list (ir_instruction, node, instructions) {
ir_variable *var = node->as_variable();
if (var == NULL || var->data.mode != ir_var_shader_out)
- continue;
+ continue;
/* Note: Not all tessellation control shader output are arrays. */
if (!var->type->is_unsized_array() || var->data.patch)
continue;
if (var->data.max_array_access >= (int)num_vertices) {
- _mesa_glsl_error(&loc, state,
- "this tessellation control shader output layout "
- "specifies %u vertices, but an access to element "
- "%u of output `%s' already exists", num_vertices,
- var->data.max_array_access, var->name);
+ _mesa_glsl_error(&loc, state,
+ "this tessellation control shader output layout "
+ "specifies %u vertices, but an access to element "
+ "%u of output `%s' already exists", num_vertices,
+ var->data.max_array_access, var->name);
} else {
- var->type = glsl_type::get_array_instance(var->type->fields.array,
- num_vertices);
+ var->type = glsl_type::get_array_instance(var->type->fields.array,
+ num_vertices);
}
}
{
YYLTYPE loc = this->get_location();
- /* If any geometry input layout declaration preceded this one, make sure it
- * was consistent with this one.
- */
- if (state->gs_input_prim_type_specified &&
- state->in_qualifier->prim_type != this->prim_type) {
- _mesa_glsl_error(&loc, state,
- "geometry shader input layout does not match"
- " previous declaration");
- return NULL;
- }
+ /* Should have been prevented by the parser. */
+ assert(!state->gs_input_prim_type_specified
+ || state->in_qualifier->prim_type == this->prim_type);
/* If any shader inputs occurred before this declaration and specified an
* array size, make sure the size they specified is consistent with the
}
}
+ /* The ARB_compute_variable_group_size spec says:
+ *
+ * If a compute shader including a *local_size_variable* qualifier also
+ * declares a fixed local group size using the *local_size_x*,
+ * *local_size_y*, or *local_size_z* qualifiers, a compile-time error
+ * results
+ */
+ if (state->cs_input_local_size_variable_specified) {
+ _mesa_glsl_error(&loc, state,
+ "compute shader can't include both a variable and a "
+ "fixed local group size");
+ return NULL;
+ }
+
state->cs_input_local_size_specified = true;
for (int i = 0; i < 3; i++)
state->cs_input_local_size[i] = qual_local_size[i];
gl_FragColor_assigned = true;
else if (strcmp(var->name, "gl_FragData") == 0)
gl_FragData_assigned = true;
- else if (strcmp(var->name, "gl_SecondaryFragColorEXT") == 0)
+ else if (strcmp(var->name, "gl_SecondaryFragColorEXT") == 0)
gl_FragSecondaryColor_assigned = true;
- else if (strcmp(var->name, "gl_SecondaryFragDataEXT") == 0)
+ else if (strcmp(var->name, "gl_SecondaryFragDataEXT") == 0)
gl_FragSecondaryData_assigned = true;
else if (!is_gl_identifier(var->name)) {
if (state->stage == MESA_SHADER_FRAGMENT &&