*/
unsigned how_declared:2;
+ /**
+ * Is this variable per-view? If so, we know it must be an array with
+ * size corresponding to the number of views.
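+ *
+ * For example, a per-view vec4 output in a shader compiled for 2 views
+ * is declared with type vec4[2].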
+ */
+ unsigned per_view:1;
+
/**
* \brief Layout qualifier for gl_FragDepth.
*
type = glsl_get_array_element(type);
}
+ if (var->data.per_view) {
+ /* TODO: Per-view and per-vertex are not currently used together. When
+ * they start to be used together (e.g. when adding Primitive Replication
+ * for GS on Intel), verify that "peeling" the type twice is correct. The
+ * assert below is here as a reminder.
+ */
+ assert(!nir_is_per_vertex_io(var, shader->info.stage));
+ assert(glsl_type_is_array(type));
+ type = glsl_get_array_element(type);
+ }
+
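+ /* At this point any per-vertex/per-view arrays have been peeled, so the
+ * slot count below is for a single vertex and view.
+ */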
const unsigned slots =
var->data.compact ? DIV_ROUND_UP(glsl_get_length(type), 4)
: glsl_count_attribute_slots(type, false);
type = glsl_get_array_element(type);
}
+ /* Per-view variables are treated as a whole; don't handle them here. */
+ if (var->data.per_view)
+ return false;
+
/* The code below only handles:
*
* - Indexing into matrices
assert(var->data.location >= 0);
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, stage)) {
+ if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, stage)) {
+ if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, stage)) {
+ if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
continue;
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, producer->info.stage)) {
+ if (nir_is_per_vertex_io(var, producer->info.stage) ||
+ var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
if (!vc_info->initialised) {
const struct glsl_type *type = in_var->type;
- if (nir_is_per_vertex_io(in_var, consumer->info.stage)) {
+ if (nir_is_per_vertex_io(in_var, consumer->info.stage) ||
+ in_var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
bool last_partial = false;
nir_foreach_variable(var, var_list) {
const struct glsl_type *type = var->type;
- if (nir_is_per_vertex_io(var, stage)) {
+ if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
if (var->data.compact)
continue;
+ /* Per-view variables are expected to remain arrays. */
+ if (var->data.per_view)
+ continue;
+
/* Skip indirects */
int loc = var->data.location * 4 + var->data.location_frac;
if (BITSET_TEST(indirects, loc))
if (a->data.compact || b->data.compact)
return false;
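+ /* Likewise, never merge a per-view variable with another variable. */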
+ if (a->data.per_view || b->data.per_view)
+ return false;
+
const struct glsl_type *a_type_tail = a->type;
const struct glsl_type *b_type_tail = b->type;
const char *const samp = (var->data.sample) ? "sample " : "";
const char *const patch = (var->data.patch) ? "patch " : "";
const char *const inv = (var->data.invariant) ? "invariant " : "";
- fprintf(fp, "%s%s%s%s%s %s ",
- cent, samp, patch, inv, get_variable_mode_str(var->data.mode, false),
+ const char *const per_view = (var->data.per_view) ? "per_view " : "";
+ fprintf(fp, "%s%s%s%s%s%s %s ",
+ cent, samp, patch, inv, per_view,
+ get_variable_mode_str(var->data.mode, false),
glsl_interp_mode_name(var->data.interpolation));
enum gl_access_qualifier access = var->data.access;
validate_assert(state, var->members != NULL);
}
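+ /* Per-view variables must be arrays with one element per view. */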
+ if (var->data.per_view)
+ validate_assert(state, glsl_type_is_array(var->type));
+
/*
* TODO validate some things ir_validate.cpp does (requires more GLSL type
* support)