--- /dev/null
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jason Ekstrand (jason@jlekstrand.net)
+ *
+ */
+
+#include "spirv_to_nir_private.h"
+#include "nir_vla.h"
+#include "nir_control_flow.h"
+
+static struct vtn_ssa_value *
+vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
+ const struct glsl_type *type)
+{
+ struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
+
+ if (entry)
+ return entry->data;
+
+ struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
+ val->type = type;
+
+ switch (glsl_get_base_type(type)) {
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_DOUBLE:
+ if (glsl_type_is_vector_or_scalar(type)) {
+ unsigned num_components = glsl_get_vector_elements(val->type);
+ nir_load_const_instr *load =
+ nir_load_const_instr_create(b->shader, num_components);
+
+ for (unsigned i = 0; i < num_components; i++)
+ load->value.u[i] = constant->value.u[i];
+
+ nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
+ val->def = &load->def;
+ } else {
+ assert(glsl_type_is_matrix(type));
+ unsigned rows = glsl_get_vector_elements(val->type);
+ unsigned columns = glsl_get_matrix_columns(val->type);
+ val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);
+
+ for (unsigned i = 0; i < columns; i++) {
+ struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
+ col_val->type = glsl_get_column_type(val->type);
+ nir_load_const_instr *load =
+ nir_load_const_instr_create(b->shader, rows);
+
+ for (unsigned j = 0; j < rows; j++)
+ load->value.u[j] = constant->value.u[rows * i + j];
+
+ nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
+ col_val->def = &load->def;
+
+ val->elems[i] = col_val;
+ }
+ }
+ break;
+
+ case GLSL_TYPE_ARRAY: {
+ unsigned elems = glsl_get_length(val->type);
+ val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+ const struct glsl_type *elem_type = glsl_get_array_element(val->type);
+ for (unsigned i = 0; i < elems; i++)
+ val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
+ elem_type);
+ break;
+ }
+
+ case GLSL_TYPE_STRUCT: {
+ unsigned elems = glsl_get_length(val->type);
+ val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+ for (unsigned i = 0; i < elems; i++) {
+ const struct glsl_type *elem_type =
+ glsl_get_struct_field(val->type, i);
+ val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
+ elem_type);
+ }
+ break;
+ }
+
+ default:
+ unreachable("bad constant type");
+ }
+
+ return val;
+}
+
+struct vtn_ssa_value *
+vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
+{
+ struct vtn_value *val = vtn_untyped_value(b, value_id);
+ switch (val->value_type) {
+ case vtn_value_type_constant:
+ return vtn_const_ssa_value(b, val->constant, val->const_type);
+
+ case vtn_value_type_ssa:
+ return val->ssa;
+ default:
+ unreachable("Invalid type for an SSA value");
+ }
+}
+
+static char *
+vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
+ unsigned word_count)
+{
+ return ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
+}
+
+static const uint32_t *
+vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
+ const uint32_t *end, vtn_instruction_handler handler)
+{
+ const uint32_t *w = start;
+ while (w < end) {
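+      /* The first word of each SPIR-V instruction packs the opcode into the
+       * low 16 bits and the total word count (including this word) into the
+       * high 16 bits.
+       */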
+ SpvOp opcode = w[0] & SpvOpCodeMask;
+ unsigned count = w[0] >> SpvWordCountShift;
+ assert(count >= 1 && w + count <= end);
+
+ if (!handler(b, opcode, w, count))
+ return w;
+
+ w += count;
+ }
+ assert(w == end);
+ return w;
+}
+
+static void
+vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpExtInstImport: {
+ struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
+ if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
+ val->ext_handler = vtn_handle_glsl450_instruction;
+ } else {
+ assert(!"Unsupported extension");
+ }
+ break;
+ }
+
+ case SpvOpExtInst: {
+ struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
+ bool handled = val->ext_handler(b, w[4], w, count);
+ (void)handled;
+ assert(handled);
+ break;
+ }
+
+ default:
+ unreachable("Unhandled opcode");
+ }
+}
+
+static void
+_foreach_decoration_helper(struct vtn_builder *b,
+ struct vtn_value *base_value,
+ int member,
+ struct vtn_value *value,
+ vtn_decoration_foreach_cb cb, void *data)
+{
+ int new_member = member;
+
+ for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
+ if (dec->member >= 0) {
+ assert(member == -1);
+ new_member = dec->member;
+ }
+
+ if (dec->group) {
+ assert(dec->group->value_type == vtn_value_type_decoration_group);
+ _foreach_decoration_helper(b, base_value, new_member, dec->group,
+ cb, data);
+ } else {
+ cb(b, base_value, new_member, dec, data);
+ }
+ }
+}
+
+/** Iterates (recursively if needed) over all of the decorations on a value
+ *
+ * This function iterates over all of the decorations applied to a given
+ * value. If it encounters a decoration group, it recurses into the group
+ * and iterates over all of those decorations as well.
+ */
+void
+vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
+ vtn_decoration_foreach_cb cb, void *data)
+{
+ _foreach_decoration_helper(b, value, -1, value, cb, data);
+}
+
+static void
+vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ const uint32_t *w_end = w + count;
+ const uint32_t target = w[1];
+ w += 2;
+
+ int member = -1;
+ switch (opcode) {
+ case SpvOpDecorationGroup:
+ vtn_push_value(b, target, vtn_value_type_undef);
+ break;
+
+ case SpvOpMemberDecorate:
+ member = *(w++);
+ /* fallthrough */
+ case SpvOpDecorate: {
+ struct vtn_value *val = &b->values[target];
+
+ struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
+ dec->member = member;
+ dec->decoration = *(w++);
+ dec->literals = w;
+
+ /* Link into the list */
+ dec->next = val->decoration;
+ val->decoration = dec;
+ break;
+ }
+
+ case SpvOpGroupMemberDecorate:
+ member = *(w++);
+ /* fallthrough */
+ case SpvOpGroupDecorate: {
+ struct vtn_value *group = &b->values[target];
+ assert(group->value_type == vtn_value_type_decoration_group);
+
+ for (; w < w_end; w++) {
+ struct vtn_value *val = &b->values[*w];
+ struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
+ dec->member = member;
+ dec->group = group;
+
+ /* Link into the list */
+ dec->next = val->decoration;
+ val->decoration = dec;
+ }
+ break;
+ }
+
+ default:
+ unreachable("Unhandled opcode");
+ }
+}
+
+struct member_decoration_ctx {
+ struct glsl_struct_field *fields;
+ struct vtn_type *type;
+};
+
+/* does a shallow copy of a vtn_type */
+
+static struct vtn_type *
+vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
+{
+ struct vtn_type *dest = ralloc(b, struct vtn_type);
+ dest->type = src->type;
+ dest->is_builtin = src->is_builtin;
+ if (src->is_builtin)
+ dest->builtin = src->builtin;
+
+ if (!glsl_type_is_vector_or_scalar(src->type)) {
+ switch (glsl_get_base_type(src->type)) {
+ case GLSL_TYPE_ARRAY:
+ dest->array_element = src->array_element;
+ dest->stride = src->stride;
+ break;
+
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_DOUBLE:
+ /* matrices */
+ dest->row_major = src->row_major;
+ dest->stride = src->stride;
+ break;
+
+ case GLSL_TYPE_STRUCT: {
+ unsigned elems = glsl_get_length(src->type);
+
+ dest->members = ralloc_array(b, struct vtn_type *, elems);
+ memcpy(dest->members, src->members, elems * sizeof(struct vtn_type *));
+
+ dest->offsets = ralloc_array(b, unsigned, elems);
+ memcpy(dest->offsets, src->offsets, elems * sizeof(unsigned));
+ break;
+ }
+
+ default:
+ unreachable("unhandled type");
+ }
+ }
+
+ return dest;
+}
+
+static void
+struct_member_decoration_cb(struct vtn_builder *b,
+ struct vtn_value *val, int member,
+ const struct vtn_decoration *dec, void *void_ctx)
+{
+ struct member_decoration_ctx *ctx = void_ctx;
+
+ if (member < 0)
+ return;
+
+ switch (dec->decoration) {
+ case SpvDecorationRelaxedPrecision:
+ break; /* FIXME: Do nothing with this for now. */
+ case SpvDecorationSmooth:
+ ctx->fields[member].interpolation = INTERP_QUALIFIER_SMOOTH;
+ break;
+ case SpvDecorationNoperspective:
+ ctx->fields[member].interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
+ break;
+ case SpvDecorationFlat:
+ ctx->fields[member].interpolation = INTERP_QUALIFIER_FLAT;
+ break;
+ case SpvDecorationCentroid:
+ ctx->fields[member].centroid = true;
+ break;
+ case SpvDecorationSample:
+ ctx->fields[member].sample = true;
+ break;
+ case SpvDecorationLocation:
+ ctx->fields[member].location = dec->literals[0];
+ break;
+ case SpvDecorationBuiltIn:
+ ctx->type->members[member] = vtn_type_copy(b,
+ ctx->type->members[member]);
+ ctx->type->members[member]->is_builtin = true;
+ ctx->type->members[member]->builtin = dec->literals[0];
+ ctx->type->builtin_block = true;
+ break;
+ case SpvDecorationOffset:
+ ctx->type->offsets[member] = dec->literals[0];
+ break;
+ default:
+ unreachable("Unhandled member decoration");
+ }
+}
+
+static void
+type_decoration_cb(struct vtn_builder *b,
+ struct vtn_value *val, int member,
+ const struct vtn_decoration *dec, void *ctx)
+{
+ struct vtn_type *type = val->type;
+
+ if (member != -1)
+ return;
+
+ switch (dec->decoration) {
+ case SpvDecorationArrayStride:
+ type->stride = dec->literals[0];
+ break;
+ case SpvDecorationBlock:
+ type->block = true;
+ break;
+ case SpvDecorationBufferBlock:
+ type->buffer_block = true;
+ break;
+ case SpvDecorationGLSLShared:
+ case SpvDecorationGLSLPacked:
+ /* Ignore these, since we get explicit offsets anyways */
+ break;
+
+ default:
+ unreachable("Unhandled type decoration");
+ }
+}
+
+static void
+vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);
+
+ val->type = rzalloc(b, struct vtn_type);
+ val->type->is_builtin = false;
+
+ switch (opcode) {
+ case SpvOpTypeVoid:
+ val->type->type = glsl_void_type();
+ break;
+ case SpvOpTypeBool:
+ val->type->type = glsl_bool_type();
+ break;
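+   /* Note: the width operand (and the signedness operand of OpTypeInt) is
+    * ignored for now; these always map to 32-bit GLSL types.
+    */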
+ case SpvOpTypeInt:
+ val->type->type = glsl_int_type();
+ break;
+ case SpvOpTypeFloat:
+ val->type->type = glsl_float_type();
+ break;
+
+ case SpvOpTypeVector: {
+ const struct glsl_type *base =
+ vtn_value(b, w[2], vtn_value_type_type)->type->type;
+ unsigned elems = w[3];
+
+ assert(glsl_type_is_scalar(base));
+ val->type->type = glsl_vector_type(glsl_get_base_type(base), elems);
+ break;
+ }
+
+ case SpvOpTypeMatrix: {
+ struct vtn_type *base =
+ vtn_value(b, w[2], vtn_value_type_type)->type;
+ unsigned columns = w[3];
+
+ assert(glsl_type_is_vector(base->type));
+ val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
+ glsl_get_vector_elements(base->type),
+ columns);
+ val->type->array_element = base;
+ val->type->row_major = false;
+ val->type->stride = 0;
+ break;
+ }
+
+ case SpvOpTypeArray: {
+ struct vtn_type *array_element =
+ vtn_value(b, w[2], vtn_value_type_type)->type;
+ val->type->type = glsl_array_type(array_element->type, w[3]);
+ val->type->array_element = array_element;
+ val->type->stride = 0;
+ break;
+ }
+
+ case SpvOpTypeStruct: {
+ unsigned num_fields = count - 2;
+ val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
+ val->type->offsets = ralloc_array(b, unsigned, num_fields);
+
+ NIR_VLA(struct glsl_struct_field, fields, count);
+ for (unsigned i = 0; i < num_fields; i++) {
+ /* TODO: Handle decorators */
+ val->type->members[i] =
+ vtn_value(b, w[i + 2], vtn_value_type_type)->type;
+ fields[i].type = val->type->members[i]->type;
+ fields[i].name = ralloc_asprintf(b, "field%d", i);
+ fields[i].location = -1;
+ fields[i].interpolation = 0;
+ fields[i].centroid = 0;
+ fields[i].sample = 0;
+ fields[i].matrix_layout = 2;
+ fields[i].stream = -1;
+ }
+
+ struct member_decoration_ctx ctx = {
+ .fields = fields,
+ .type = val->type
+ };
+
+ vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
+
+ const char *name = val->name ? val->name : "struct";
+
+ val->type->type = glsl_struct_type(fields, num_fields, name);
+ break;
+ }
+
+ case SpvOpTypeFunction: {
+ const struct glsl_type *return_type =
+ vtn_value(b, w[2], vtn_value_type_type)->type->type;
+ NIR_VLA(struct glsl_function_param, params, count - 3);
+ for (unsigned i = 0; i < count - 3; i++) {
+ params[i].type = vtn_value(b, w[i + 3], vtn_value_type_type)->type->type;
+
+ /* FIXME: */
+ params[i].in = true;
+ params[i].out = true;
+ }
+ val->type->type = glsl_function_type(return_type, params, count - 3);
+ break;
+ }
+
+ case SpvOpTypePointer:
+ /* FIXME: For now, we'll just do the really lame thing and return
+ * the same type. The validator should ensure that the proper number
+ * of dereferences happen
+ */
+ val->type = vtn_value(b, w[3], vtn_value_type_type)->type;
+ break;
+
+ case SpvOpTypeImage: {
+ const struct glsl_type *sampled_type =
+ vtn_value(b, w[2], vtn_value_type_type)->type->type;
+
+ assert(glsl_type_is_vector_or_scalar(sampled_type));
+
+ enum glsl_sampler_dim dim;
+ switch ((SpvDim)w[3]) {
+ case SpvDim1D: dim = GLSL_SAMPLER_DIM_1D; break;
+ case SpvDim2D: dim = GLSL_SAMPLER_DIM_2D; break;
+ case SpvDim3D: dim = GLSL_SAMPLER_DIM_3D; break;
+ case SpvDimCube: dim = GLSL_SAMPLER_DIM_CUBE; break;
+ case SpvDimRect: dim = GLSL_SAMPLER_DIM_RECT; break;
+ case SpvDimBuffer: dim = GLSL_SAMPLER_DIM_BUF; break;
+ default:
+ unreachable("Invalid SPIR-V Sampler dimension");
+ }
+
+ bool is_shadow = w[4];
+ bool is_array = w[5];
+
+      assert(w[6] == 0 && "FIXME: Handle multi-sampled textures");
+ assert(w[7] == 1 && "FIXME: Add support for non-sampled images");
+
+ val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
+ glsl_get_base_type(sampled_type));
+ break;
+ }
+
+ case SpvOpTypeSampledImage:
+ val->type = vtn_value(b, w[2], vtn_value_type_type)->type;
+ break;
+
+ case SpvOpTypeRuntimeArray:
+ case SpvOpTypeOpaque:
+ case SpvOpTypeEvent:
+ case SpvOpTypeDeviceEvent:
+ case SpvOpTypeReserveId:
+ case SpvOpTypeQueue:
+ case SpvOpTypePipe:
+ default:
+ unreachable("Unhandled opcode");
+ }
+
+ vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
+}
+
+static void
+vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
+ val->const_type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
+ val->constant = ralloc(b, nir_constant);
+ switch (opcode) {
+ case SpvOpConstantTrue:
+ assert(val->const_type == glsl_bool_type());
+ val->constant->value.u[0] = NIR_TRUE;
+ break;
+ case SpvOpConstantFalse:
+ assert(val->const_type == glsl_bool_type());
+ val->constant->value.u[0] = NIR_FALSE;
+ break;
+ case SpvOpConstant:
+ assert(glsl_type_is_scalar(val->const_type));
+ val->constant->value.u[0] = w[3];
+ break;
+ case SpvOpConstantComposite: {
+ unsigned elem_count = count - 3;
+ nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
+ for (unsigned i = 0; i < elem_count; i++)
+ elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;
+
+ switch (glsl_get_base_type(val->const_type)) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_BOOL:
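+         /* Matrix constants are flattened column-major into value.u[],
+          * matching the layout vtn_const_ssa_value() uses to rebuild them.
+          */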
+ if (glsl_type_is_matrix(val->const_type)) {
+ unsigned rows = glsl_get_vector_elements(val->const_type);
+ assert(glsl_get_matrix_columns(val->const_type) == elem_count);
+ for (unsigned i = 0; i < elem_count; i++)
+ for (unsigned j = 0; j < rows; j++)
+ val->constant->value.u[rows * i + j] = elems[i]->value.u[j];
+ } else {
+ assert(glsl_type_is_vector(val->const_type));
+ assert(glsl_get_vector_elements(val->const_type) == elem_count);
+ for (unsigned i = 0; i < elem_count; i++)
+ val->constant->value.u[i] = elems[i]->value.u[0];
+ }
+ ralloc_free(elems);
+ break;
+
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_ARRAY:
+ ralloc_steal(val->constant, elems);
+ val->constant->elements = elems;
+ break;
+
+ default:
+ unreachable("Unsupported type for constants");
+ }
+ break;
+ }
+
+ default:
+ unreachable("Unhandled opcode");
+ }
+}
+
+static void
+vtn_get_builtin_location(SpvBuiltIn builtin, int *location,
+ nir_variable_mode *mode)
+{
+ switch (builtin) {
+ case SpvBuiltInPosition:
+ *location = VARYING_SLOT_POS;
+ *mode = nir_var_shader_out;
+ break;
+ case SpvBuiltInPointSize:
+ *location = VARYING_SLOT_PSIZ;
+ *mode = nir_var_shader_out;
+ break;
+ case SpvBuiltInClipVertex:
+ *location = VARYING_SLOT_CLIP_VERTEX;
+ *mode = nir_var_shader_out;
+ break;
+ case SpvBuiltInClipDistance:
+ *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
+ *mode = nir_var_shader_in;
+ break;
+ case SpvBuiltInCullDistance:
+ /* XXX figure this out */
+ unreachable("unhandled builtin");
+ case SpvBuiltInVertexId:
+ *location = SYSTEM_VALUE_VERTEX_ID;
+ *mode = nir_var_system_value;
+ break;
+ case SpvBuiltInInstanceId:
+ *location = SYSTEM_VALUE_INSTANCE_ID;
+ *mode = nir_var_system_value;
+ break;
+ case SpvBuiltInPrimitiveId:
+ *location = VARYING_SLOT_PRIMITIVE_ID;
+ *mode = nir_var_shader_out;
+ break;
+ case SpvBuiltInInvocationId:
+ *location = SYSTEM_VALUE_INVOCATION_ID;
+ *mode = nir_var_system_value;
+ break;
+ case SpvBuiltInLayer:
+ *location = VARYING_SLOT_LAYER;
+ *mode = nir_var_shader_out;
+ break;
+ case SpvBuiltInTessLevelOuter:
+ case SpvBuiltInTessLevelInner:
+ case SpvBuiltInTessCoord:
+ case SpvBuiltInPatchVertices:
+ unreachable("no tessellation support");
+ case SpvBuiltInFragCoord:
+ *location = VARYING_SLOT_POS;
+ *mode = nir_var_shader_in;
+ break;
+   case SpvBuiltInPointCoord:
+      *location = VARYING_SLOT_PNTC;
+      *mode = nir_var_shader_in;
+      break;
+   case SpvBuiltInFrontFacing:
+      *location = VARYING_SLOT_FACE;
+      *mode = nir_var_shader_in;
+      break;
+ case SpvBuiltInSampleId:
+ *location = SYSTEM_VALUE_SAMPLE_ID;
+ *mode = nir_var_shader_in;
+ break;
+ case SpvBuiltInSamplePosition:
+ *location = SYSTEM_VALUE_SAMPLE_POS;
+ *mode = nir_var_shader_in;
+ break;
+ case SpvBuiltInSampleMask:
+ *location = SYSTEM_VALUE_SAMPLE_MASK_IN; /* XXX out? */
+ *mode = nir_var_shader_in;
+ break;
+ case SpvBuiltInFragColor:
+ *location = FRAG_RESULT_COLOR;
+ *mode = nir_var_shader_out;
+ break;
+ case SpvBuiltInFragDepth:
+ *location = FRAG_RESULT_DEPTH;
+ *mode = nir_var_shader_out;
+ break;
+ case SpvBuiltInHelperInvocation:
+ unreachable("unsupported builtin"); /* XXX */
+ break;
+ case SpvBuiltInNumWorkgroups:
+ case SpvBuiltInWorkgroupSize:
+ /* these are constants, need to be handled specially */
+ unreachable("unsupported builtin");
+ case SpvBuiltInWorkgroupId:
+ case SpvBuiltInLocalInvocationId:
+ case SpvBuiltInGlobalInvocationId:
+ case SpvBuiltInLocalInvocationIndex:
+ unreachable("no compute shader support");
+ default:
+ unreachable("unsupported builtin");
+ }
+}
+
+static void
+var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
+ const struct vtn_decoration *dec, void *void_var)
+{
+ assert(val->value_type == vtn_value_type_deref);
+ assert(val->deref->deref.child == NULL);
+ assert(val->deref->var == void_var);
+
+ nir_variable *var = void_var;
+ switch (dec->decoration) {
+ case SpvDecorationRelaxedPrecision:
+ break; /* FIXME: Do nothing with this for now. */
+ case SpvDecorationSmooth:
+ var->data.interpolation = INTERP_QUALIFIER_SMOOTH;
+ break;
+ case SpvDecorationNoperspective:
+ var->data.interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
+ break;
+ case SpvDecorationFlat:
+ var->data.interpolation = INTERP_QUALIFIER_FLAT;
+ break;
+ case SpvDecorationCentroid:
+ var->data.centroid = true;
+ break;
+ case SpvDecorationSample:
+ var->data.sample = true;
+ break;
+ case SpvDecorationInvariant:
+ var->data.invariant = true;
+ break;
+ case SpvDecorationConstant:
+ assert(var->constant_initializer != NULL);
+ var->data.read_only = true;
+ break;
+ case SpvDecorationNonwritable:
+ var->data.read_only = true;
+ break;
+ case SpvDecorationLocation:
+ var->data.explicit_location = true;
+ var->data.location = dec->literals[0];
+ break;
+ case SpvDecorationComponent:
+ var->data.location_frac = dec->literals[0];
+ break;
+ case SpvDecorationIndex:
+ var->data.explicit_index = true;
+ var->data.index = dec->literals[0];
+ break;
+ case SpvDecorationBinding:
+ var->data.explicit_binding = true;
+ var->data.binding = dec->literals[0];
+ break;
+ case SpvDecorationDescriptorSet:
+ var->data.descriptor_set = dec->literals[0];
+ break;
+ case SpvDecorationBuiltIn: {
+ nir_variable_mode mode;
+ vtn_get_builtin_location(dec->literals[0], &var->data.location,
+ &mode);
+ var->data.mode = mode;
+ if (mode == nir_var_shader_in || mode == nir_var_system_value)
+ var->data.read_only = true;
+ b->builtins[dec->literals[0]] = var;
+ break;
+ }
+ case SpvDecorationNoStaticUse:
+ /* This can safely be ignored */
+ break;
+ case SpvDecorationRowMajor:
+ case SpvDecorationColMajor:
+ case SpvDecorationGLSLShared:
+ case SpvDecorationPatch:
+ case SpvDecorationRestrict:
+ case SpvDecorationAliased:
+ case SpvDecorationVolatile:
+ case SpvDecorationCoherent:
+ case SpvDecorationNonreadable:
+ case SpvDecorationUniform:
+ /* This is really nice but we have no use for it right now. */
+ case SpvDecorationCPacked:
+ case SpvDecorationSaturatedConversion:
+ case SpvDecorationStream:
+ case SpvDecorationOffset:
+ case SpvDecorationXfbBuffer:
+ case SpvDecorationFuncParamAttr:
+ case SpvDecorationFPRoundingMode:
+ case SpvDecorationFPFastMathMode:
+ case SpvDecorationLinkageAttributes:
+ case SpvDecorationSpecId:
+ break;
+ default:
+ unreachable("Unhandled variable decoration");
+ }
+}
+
+static nir_variable *
+get_builtin_variable(struct vtn_builder *b,
+ const struct glsl_type *type,
+ SpvBuiltIn builtin)
+{
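+   /* Builtins are lowered to standalone variables; one nir_variable is
+    * created per SPIR-V builtin and cached on the builder for reuse.
+    */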
+ nir_variable *var = b->builtins[builtin];
+
+ if (!var) {
+ var = ralloc(b->shader, nir_variable);
+ var->type = type;
+
+ nir_variable_mode mode;
+ vtn_get_builtin_location(builtin, &var->data.location, &mode);
+ var->data.mode = mode;
+ var->name = ralloc_strdup(var, "builtin");
+
+ switch (mode) {
+ case nir_var_shader_in:
+ exec_list_push_tail(&b->shader->inputs, &var->node);
+ break;
+ case nir_var_shader_out:
+ exec_list_push_tail(&b->shader->outputs, &var->node);
+ break;
+ case nir_var_system_value:
+ exec_list_push_tail(&b->shader->system_values, &var->node);
+ break;
+ default:
+ unreachable("bad builtin mode");
+ }
+
+ b->builtins[builtin] = var;
+ }
+
+ return var;
+}
+
+static void
+vtn_builtin_load(struct vtn_builder *b,
+ struct vtn_ssa_value *val,
+ SpvBuiltIn builtin)
+{
+ assert(glsl_type_is_vector_or_scalar(val->type));
+
+ nir_variable *var = get_builtin_variable(b, val->type, builtin);
+
+ nir_intrinsic_instr *load =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_var);
+ nir_ssa_dest_init(&load->instr, &load->dest,
+ glsl_get_vector_elements(val->type), NULL);
+
+ load->variables[0] = nir_deref_var_create(load, var);
+ load->num_components = glsl_get_vector_elements(val->type);
+ nir_builder_instr_insert(&b->nb, &load->instr);
+ val->def = &load->dest.ssa;
+}
+
+static void
+vtn_builtin_store(struct vtn_builder *b,
+ struct vtn_ssa_value *val,
+ SpvBuiltIn builtin)
+{
+ assert(glsl_type_is_vector_or_scalar(val->type));
+
+ nir_variable *var = get_builtin_variable(b, val->type, builtin);
+
+ nir_intrinsic_instr *store =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_var);
+
+ store->variables[0] = nir_deref_var_create(store, var);
+ store->num_components = glsl_get_vector_elements(val->type);
+ store->src[0] = nir_src_for_ssa(val->def);
+ nir_builder_instr_insert(&b->nb, &store->instr);
+}
+
+static struct vtn_ssa_value *
+_vtn_variable_load(struct vtn_builder *b,
+ nir_deref_var *src_deref, struct vtn_type *src_type,
+ nir_deref *src_deref_tail)
+{
+ struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
+ val->type = src_deref_tail->type;
+
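+   /* Builtin members don't correspond to a real part of the NIR variable;
+    * load them through the per-builtin variable instead.
+    */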
+ if (src_type->is_builtin) {
+ vtn_builtin_load(b, val, src_type->builtin);
+ return val;
+ }
+
+ /* The deref tail may contain a deref to select a component of a vector (in
+ * other words, it might not be an actual tail) so we have to save it away
+ * here since we overwrite it later.
+ */
+ nir_deref *old_child = src_deref_tail->child;
+
+ if (glsl_type_is_vector_or_scalar(val->type)) {
+ nir_intrinsic_instr *load =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_var);
+ load->variables[0] =
+ nir_deref_as_var(nir_copy_deref(load, &src_deref->deref));
+ load->num_components = glsl_get_vector_elements(val->type);
+ nir_ssa_dest_init(&load->instr, &load->dest, load->num_components, NULL);
+
+ nir_builder_instr_insert(&b->nb, &load->instr);
+
+ if (src_deref->var->data.mode == nir_var_uniform &&
+ glsl_get_base_type(val->type) == GLSL_TYPE_BOOL) {
+ /* Uniform boolean loads need to be fixed up since they're defined
+ * to be zero/nonzero rather than NIR_FALSE/NIR_TRUE.
+ */
+ val->def = nir_ine(&b->nb, &load->dest.ssa, nir_imm_int(&b->nb, 0));
+ } else {
+ val->def = &load->dest.ssa;
+ }
+ } else if (glsl_get_base_type(val->type) == GLSL_TYPE_ARRAY ||
+ glsl_type_is_matrix(val->type)) {
+ unsigned elems = glsl_get_length(val->type);
+ val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+
+ nir_deref_array *deref = nir_deref_array_create(b);
+ deref->deref_array_type = nir_deref_array_type_direct;
+ deref->deref.type = glsl_get_array_element(val->type);
+ src_deref_tail->child = &deref->deref;
+ for (unsigned i = 0; i < elems; i++) {
+ deref->base_offset = i;
+ val->elems[i] = _vtn_variable_load(b, src_deref,
+ src_type->array_element,
+ &deref->deref);
+ }
+ } else {
+ assert(glsl_get_base_type(val->type) == GLSL_TYPE_STRUCT);
+ unsigned elems = glsl_get_length(val->type);
+ val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+
+ nir_deref_struct *deref = nir_deref_struct_create(b, 0);
+ src_deref_tail->child = &deref->deref;
+ for (unsigned i = 0; i < elems; i++) {
+ deref->index = i;
+ deref->deref.type = glsl_get_struct_field(val->type, i);
+ val->elems[i] = _vtn_variable_load(b, src_deref,
+ src_type->members[i],
+ &deref->deref);
+ }
+ }
+
+ src_deref_tail->child = old_child;
+
+ return val;
+}
+
+static void
+_vtn_variable_store(struct vtn_builder *b, struct vtn_type *dest_type,
+ nir_deref_var *dest_deref, nir_deref *dest_deref_tail,
+ struct vtn_ssa_value *src)
+{
+ if (dest_type->is_builtin) {
+ vtn_builtin_store(b, src, dest_type->builtin);
+ return;
+ }
+
+ nir_deref *old_child = dest_deref_tail->child;
+
+ if (glsl_type_is_vector_or_scalar(src->type)) {
+ nir_intrinsic_instr *store =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_var);
+ store->variables[0] =
+ nir_deref_as_var(nir_copy_deref(store, &dest_deref->deref));
+ store->num_components = glsl_get_vector_elements(src->type);
+ store->src[0] = nir_src_for_ssa(src->def);
+
+ nir_builder_instr_insert(&b->nb, &store->instr);
+ } else if (glsl_get_base_type(src->type) == GLSL_TYPE_ARRAY ||
+ glsl_type_is_matrix(src->type)) {
+ unsigned elems = glsl_get_length(src->type);
+
+ nir_deref_array *deref = nir_deref_array_create(b);
+ deref->deref_array_type = nir_deref_array_type_direct;
+ deref->deref.type = glsl_get_array_element(src->type);
+ dest_deref_tail->child = &deref->deref;
+ for (unsigned i = 0; i < elems; i++) {
+ deref->base_offset = i;
+ _vtn_variable_store(b, dest_type->array_element, dest_deref,
+ &deref->deref, src->elems[i]);
+ }
+ } else {
+ assert(glsl_get_base_type(src->type) == GLSL_TYPE_STRUCT);
+ unsigned elems = glsl_get_length(src->type);
+
+ nir_deref_struct *deref = nir_deref_struct_create(b, 0);
+ dest_deref_tail->child = &deref->deref;
+ for (unsigned i = 0; i < elems; i++) {
+ deref->index = i;
+ deref->deref.type = glsl_get_struct_field(src->type, i);
+ _vtn_variable_store(b, dest_type->members[i], dest_deref,
+ &deref->deref, src->elems[i]);
+ }
+ }
+
+ dest_deref_tail->child = old_child;
+}
+
+static struct vtn_ssa_value *
+_vtn_block_load(struct vtn_builder *b, nir_intrinsic_op op,
+ unsigned set, nir_ssa_def *binding,
+ unsigned offset, nir_ssa_def *indirect,
+ struct vtn_type *type)
+{
+ struct vtn_ssa_value *val = ralloc(b, struct vtn_ssa_value);
+ val->type = type->type;
+ val->transposed = NULL;
+ if (glsl_type_is_vector_or_scalar(type->type)) {
+ nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
+ load->num_components = glsl_get_vector_elements(type->type);
+ load->const_index[0] = set;
+ load->src[0] = nir_src_for_ssa(binding);
+ load->const_index[1] = offset;
+ if (indirect)
+ load->src[1] = nir_src_for_ssa(indirect);
+ nir_ssa_dest_init(&load->instr, &load->dest, load->num_components, NULL);
+ nir_builder_instr_insert(&b->nb, &load->instr);
+ val->def = &load->dest.ssa;
+ } else {
+ unsigned elems = glsl_get_length(type->type);
+ val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+ if (glsl_type_is_struct(type->type)) {
+ for (unsigned i = 0; i < elems; i++) {
+ val->elems[i] = _vtn_block_load(b, op, set, binding,
+ offset + type->offsets[i],
+ indirect, type->members[i]);
+ }
+ } else {
+ for (unsigned i = 0; i < elems; i++) {
+ val->elems[i] = _vtn_block_load(b, op, set, binding,
+ offset + i * type->stride,
+ indirect, type->array_element);
+ }
+ }
+ }
+
+ return val;
+}
+
+static struct vtn_ssa_value *
+vtn_block_load(struct vtn_builder *b, nir_deref_var *src,
+ struct vtn_type *type, nir_deref *src_tail)
+{
+ unsigned set = src->var->data.descriptor_set;
+
+ nir_ssa_def *binding = nir_imm_int(&b->nb, src->var->data.binding);
+ nir_deref *deref = &src->deref;
+
+ /* The block variable may be an array, in which case the array index adds
+ * an offset to the binding. Figure out that index now.
+ */
+
+ if (deref->child->deref_type == nir_deref_type_array) {
+ deref = deref->child;
+ type = type->array_element;
+ nir_deref_array *deref_array = nir_deref_as_array(deref);
+ if (deref_array->deref_array_type == nir_deref_array_type_direct) {
+ binding = nir_imm_int(&b->nb, src->var->data.binding +
+ deref_array->base_offset);
+ } else {
+ binding = nir_iadd(&b->nb, binding, deref_array->indirect.ssa);
+ }
+ }
+
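+   /* Walk the rest of the access chain, folding constant array indices and
+    * struct member offsets into a constant offset and accumulating any SSA
+    * array indices into a single indirect offset using the strides and
+    * offsets recorded on the vtn_type.
+    */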
+ unsigned offset = 0;
+ nir_ssa_def *indirect = NULL;
+ while (deref != src_tail) {
+ deref = deref->child;
+ switch (deref->deref_type) {
+ case nir_deref_type_array: {
+ nir_deref_array *deref_array = nir_deref_as_array(deref);
+ if (deref_array->deref_array_type == nir_deref_array_type_direct) {
+ offset += type->stride * deref_array->base_offset;
+ } else {
+ nir_ssa_def *offset = nir_imul(&b->nb, deref_array->indirect.ssa,
+ nir_imm_int(&b->nb, type->stride));
+ indirect = indirect ? nir_iadd(&b->nb, indirect, offset) : offset;
+ }
+ type = type->array_element;
+ break;
+ }
+
+ case nir_deref_type_struct: {
+ nir_deref_struct *deref_struct = nir_deref_as_struct(deref);
+ offset += type->offsets[deref_struct->index];
+ type = type->members[deref_struct->index];
+ break;
+ }
+
+ default:
+ unreachable("unknown deref type");
+ }
+ }
+
+ /* TODO SSBO's */
+ nir_intrinsic_op op = indirect ? nir_intrinsic_load_ubo_indirect
+ : nir_intrinsic_load_ubo;
+
+ return _vtn_block_load(b, op, set, binding, offset, indirect, type);
+}
+
+/*
+ * Gets the NIR-level deref tail, which may have as a child an array deref
+ * selecting which component due to OpAccessChain supporting per-component
+ * indexing in SPIR-V.
+ */
+
+static nir_deref *
+get_deref_tail(nir_deref_var *deref)
+{
+ nir_deref *cur = &deref->deref;
+ while (!glsl_type_is_vector_or_scalar(cur->type) && cur->child)
+ cur = cur->child;
+
+ return cur;
+}
+
+static nir_ssa_def *vtn_vector_extract(struct vtn_builder *b,
+ nir_ssa_def *src, unsigned index);
+
+static nir_ssa_def *vtn_vector_extract_dynamic(struct vtn_builder *b,
+ nir_ssa_def *src,
+ nir_ssa_def *index);
+
+static struct vtn_ssa_value *
+vtn_variable_load(struct vtn_builder *b, nir_deref_var *src,
+ struct vtn_type *src_type)
+{
+ nir_deref *src_tail = get_deref_tail(src);
+
+ struct vtn_ssa_value *val;
+ if (src->var->interface_type)
+ val = vtn_block_load(b, src, src_type, src_tail);
+ else
+ val = _vtn_variable_load(b, src, src_type, src_tail);
+
+ if (src_tail->child) {
+ nir_deref_array *vec_deref = nir_deref_as_array(src_tail->child);
+ assert(vec_deref->deref.child == NULL);
+ val->type = vec_deref->deref.type;
+ if (vec_deref->deref_array_type == nir_deref_array_type_direct)
+ val->def = vtn_vector_extract(b, val->def, vec_deref->base_offset);
+ else
+ val->def = vtn_vector_extract_dynamic(b, val->def,
+ vec_deref->indirect.ssa);
+ }
+
+ return val;
+}
+
+static nir_ssa_def * vtn_vector_insert(struct vtn_builder *b,
+ nir_ssa_def *src, nir_ssa_def *insert,
+ unsigned index);
+
+static nir_ssa_def * vtn_vector_insert_dynamic(struct vtn_builder *b,
+ nir_ssa_def *src,
+ nir_ssa_def *insert,
+ nir_ssa_def *index);
+static void
+vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
+ nir_deref_var *dest, struct vtn_type *dest_type)
+{
+ nir_deref *dest_tail = get_deref_tail(dest);
+ if (dest_tail->child) {
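+      /* Storing a single component requires a read-modify-write of the
+       * containing vector: load it, insert the component, store it back.
+       */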
+ struct vtn_ssa_value *val = _vtn_variable_load(b, dest, dest_type,
+ dest_tail);
+ nir_deref_array *deref = nir_deref_as_array(dest_tail->child);
+ assert(deref->deref.child == NULL);
+ if (deref->deref_array_type == nir_deref_array_type_direct)
+ val->def = vtn_vector_insert(b, val->def, src->def,
+ deref->base_offset);
+ else
+ val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
+ deref->indirect.ssa);
+ _vtn_variable_store(b, dest_type, dest, dest_tail, val);
+ } else {
+ _vtn_variable_store(b, dest_type, dest, dest_tail, src);
+ }
+}
+
+static void
+vtn_variable_copy(struct vtn_builder *b, nir_deref_var *src,
+ nir_deref_var *dest, struct vtn_type *type)
+{
+ nir_deref *src_tail = get_deref_tail(src);
+
+ if (src_tail->child || src->var->interface_type) {
+ assert(get_deref_tail(dest)->child);
+ struct vtn_ssa_value *val = vtn_variable_load(b, src, type);
+ vtn_variable_store(b, val, dest, type);
+ } else {
+ nir_intrinsic_instr *copy =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_copy_var);
+ copy->variables[0] = nir_deref_as_var(nir_copy_deref(copy, &dest->deref));
+ copy->variables[1] = nir_deref_as_var(nir_copy_deref(copy, &src->deref));
+
+      nir_builder_instr_insert(&b->nb, &copy->instr);
+ }
+}
+
+static void
+vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpVariable: {
+ struct vtn_type *type =
+ vtn_value(b, w[1], vtn_value_type_type)->type;
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_deref);
+
+ nir_variable *var = ralloc(b->shader, nir_variable);
+
+ var->type = type->type;
+ var->name = ralloc_strdup(var, val->name);
+
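+      /* Remember the interface type for block variables (including arrays
+       * of blocks) so loads can take the block-load path, and note whether
+       * the block consists entirely of builtins, which get split into
+       * separate variables.
+       */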
+ bool builtin_block = false;
+ if (type->block) {
+ var->interface_type = type->type;
+ builtin_block = type->builtin_block;
+ } else if (glsl_type_is_array(type->type) &&
+ (type->array_element->block ||
+ type->array_element->buffer_block)) {
+ var->interface_type = type->array_element->type;
+ builtin_block = type->array_element->builtin_block;
+ } else {
+ var->interface_type = NULL;
+ }
+
+ switch ((SpvStorageClass)w[3]) {
+ case SpvStorageClassUniform:
+ case SpvStorageClassUniformConstant:
+ var->data.mode = nir_var_uniform;
+ var->data.read_only = true;
+ break;
+ case SpvStorageClassInput:
+ var->data.mode = nir_var_shader_in;
+ var->data.read_only = true;
+ break;
+ case SpvStorageClassOutput:
+ var->data.mode = nir_var_shader_out;
+ break;
+ case SpvStorageClassPrivateGlobal:
+ var->data.mode = nir_var_global;
+ break;
+ case SpvStorageClassFunction:
+ var->data.mode = nir_var_local;
+ break;
+ case SpvStorageClassWorkgroupLocal:
+ case SpvStorageClassWorkgroupGlobal:
+ case SpvStorageClassGeneric:
+ case SpvStorageClassAtomicCounter:
+ default:
+ unreachable("Unhandled variable storage class");
+ }
+
+ if (count > 4) {
+ assert(count == 5);
+ var->constant_initializer =
+ vtn_value(b, w[4], vtn_value_type_constant)->constant;
+ }
+
+ val->deref = nir_deref_var_create(b, var);
+ val->deref_type = type;
+
+ vtn_foreach_decoration(b, val, var_decoration_cb, var);
+
+ if (b->execution_model == SpvExecutionModelFragment &&
+ var->data.mode == nir_var_shader_out) {
+ var->data.location += FRAG_RESULT_DATA0;
+ } else if (b->execution_model == SpvExecutionModelVertex &&
+ var->data.mode == nir_var_shader_in) {
+ var->data.location += VERT_ATTRIB_GENERIC0;
+ } else if (var->data.mode == nir_var_shader_in ||
+ var->data.mode == nir_var_shader_out) {
+ var->data.location += VARYING_SLOT_VAR0;
+ }
+
+ /* If this was a uniform block, then we're not going to actually use the
+ * variable (we're only going to use it to compute offsets), so don't
+ * declare it in the shader.
+ */
+ if (var->data.mode == nir_var_uniform && var->interface_type)
+ break;
+
+ /* Builtin blocks are lowered to individual variables during SPIR-V ->
+ * NIR, so don't declare them either.
+ */
+ if (builtin_block)
+ break;
+
+ switch (var->data.mode) {
+ case nir_var_shader_in:
+ exec_list_push_tail(&b->shader->inputs, &var->node);
+ break;
+ case nir_var_shader_out:
+ exec_list_push_tail(&b->shader->outputs, &var->node);
+ break;
+ case nir_var_global:
+ exec_list_push_tail(&b->shader->globals, &var->node);
+ break;
+ case nir_var_local:
+ exec_list_push_tail(&b->impl->locals, &var->node);
+ break;
+ case nir_var_uniform:
+ exec_list_push_tail(&b->shader->uniforms, &var->node);
+ break;
+ case nir_var_system_value:
+ exec_list_push_tail(&b->shader->system_values, &var->node);
+ break;
+ }
+ break;
+ }
+
+ case SpvOpAccessChain:
+ case SpvOpInBoundsAccessChain: {
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_deref);
+ nir_deref_var *base = vtn_value(b, w[3], vtn_value_type_deref)->deref;
+ val->deref = nir_deref_as_var(nir_copy_deref(b, &base->deref));
+ struct vtn_type *deref_type = vtn_value(b, w[3], vtn_value_type_deref)->deref_type;
+
+ nir_deref *tail = &val->deref->deref;
+ while (tail->child)
+ tail = tail->child;
+
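+      /* Each remaining operand adds one level to the deref chain: array,
+       * matrix, and vector indices become array derefs (direct or
+       * indirect), while struct member indices must be constants and
+       * become struct derefs.
+       */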
+ for (unsigned i = 0; i < count - 4; i++) {
+ assert(w[i + 4] < b->value_id_bound);
+ struct vtn_value *idx_val = &b->values[w[i + 4]];
+
+ enum glsl_base_type base_type = glsl_get_base_type(tail->type);
+ switch (base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_ARRAY: {
+ nir_deref_array *deref_arr = nir_deref_array_create(b);
+ if (base_type == GLSL_TYPE_ARRAY ||
+ glsl_type_is_matrix(tail->type)) {
+ deref_type = deref_type->array_element;
+ } else {
+ assert(glsl_type_is_vector(tail->type));
+ deref_type = ralloc(b, struct vtn_type);
+ deref_type->type = glsl_scalar_type(base_type);
+ }
+
+ deref_arr->deref.type = deref_type->type;
+
+ if (idx_val->value_type == vtn_value_type_constant) {
+ unsigned idx = idx_val->constant->value.u[0];
+ deref_arr->deref_array_type = nir_deref_array_type_direct;
+ deref_arr->base_offset = idx;
+ } else {
+ assert(idx_val->value_type == vtn_value_type_ssa);
+ deref_arr->deref_array_type = nir_deref_array_type_indirect;
+ deref_arr->base_offset = 0;
+ deref_arr->indirect =
+                  nir_src_for_ssa(vtn_ssa_value(b, w[i + 4])->def);
+ }
+ tail->child = &deref_arr->deref;
+ break;
+ }
+
+ case GLSL_TYPE_STRUCT: {
+ assert(idx_val->value_type == vtn_value_type_constant);
+ unsigned idx = idx_val->constant->value.u[0];
+ deref_type = deref_type->members[idx];
+ nir_deref_struct *deref_struct = nir_deref_struct_create(b, idx);
+ deref_struct->deref.type = deref_type->type;
+ tail->child = &deref_struct->deref;
+ break;
+ }
+ default:
+ unreachable("Invalid type for deref");
+ }
+ tail = tail->child;
+ }
+
+ /* For uniform blocks, we don't resolve the access chain until we
+ * actually access the variable, so we need to keep around the original
+ * type of the variable.
+ */
+ if (base->var->interface_type && base->var->data.mode == nir_var_uniform)
+ val->deref_type = vtn_value(b, w[3], vtn_value_type_deref)->deref_type;
+ else
+ val->deref_type = deref_type;
+
+ break;
+ }
+
+ case SpvOpCopyMemory: {
+ nir_deref_var *dest = vtn_value(b, w[1], vtn_value_type_deref)->deref;
+ nir_deref_var *src = vtn_value(b, w[2], vtn_value_type_deref)->deref;
+ struct vtn_type *type =
+ vtn_value(b, w[1], vtn_value_type_deref)->deref_type;
+
+ vtn_variable_copy(b, src, dest, type);
+ break;
+ }
+
+ case SpvOpLoad: {
+ nir_deref_var *src = vtn_value(b, w[3], vtn_value_type_deref)->deref;
+ struct vtn_type *src_type =
+ vtn_value(b, w[3], vtn_value_type_deref)->deref_type;
+
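+      /* Loading from a sampler doesn't produce an SSA value; just forward
+       * the deref so the texture instructions can find it.
+       */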
+ if (glsl_get_base_type(src_type->type) == GLSL_TYPE_SAMPLER) {
+ vtn_push_value(b, w[2], vtn_value_type_deref)->deref = src;
+ return;
+ }
+
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ val->ssa = vtn_variable_load(b, src, src_type);
+ break;
+ }
+
+ case SpvOpStore: {
+ nir_deref_var *dest = vtn_value(b, w[1], vtn_value_type_deref)->deref;
+ struct vtn_type *dest_type =
+ vtn_value(b, w[1], vtn_value_type_deref)->deref_type;
+ struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
+ vtn_variable_store(b, src, dest, dest_type);
+ break;
+ }
+
+ case SpvOpCopyMemorySized:
+ case SpvOpArrayLength:
+ case SpvOpImageTexelPointer:
+ default:
+ unreachable("Unhandled opcode");
+ }
+}
+
+static void
+vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ unreachable("Unhandled opcode");
+}
+
+static struct vtn_ssa_value *
+vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
+{
+ struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
+ val->type = type;
+
+ if (!glsl_type_is_vector_or_scalar(type)) {
+ unsigned elems = glsl_get_length(type);
+ val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+ for (unsigned i = 0; i < elems; i++) {
+ const struct glsl_type *child_type;
+
+ switch (glsl_get_base_type(type)) {
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_DOUBLE:
+ child_type = glsl_get_column_type(type);
+ break;
+ case GLSL_TYPE_ARRAY:
+ child_type = glsl_get_array_element(type);
+ break;
+ case GLSL_TYPE_STRUCT:
+ child_type = glsl_get_struct_field(type, i);
+ break;
+ default:
+ unreachable("unkown base type");
+ }
+
+ val->elems[i] = vtn_create_ssa_value(b, child_type);
+ }
+ }
+
+ return val;
+}
+
+static nir_tex_src
+vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
+{
+ nir_tex_src src;
+ src.src = nir_src_for_ssa(vtn_value(b, index, vtn_value_type_ssa)->ssa->def);
+ src.src_type = type;
+ return src;
+}
+
+static void
+vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ nir_deref_var *sampler = vtn_value(b, w[3], vtn_value_type_deref)->deref;
+
+ nir_tex_src srcs[8]; /* 8 should be enough */
+ nir_tex_src *p = srcs;
+
+ unsigned idx = 4;
+
+ unsigned coord_components = 0;
+ switch (opcode) {
+ case SpvOpImageSampleImplicitLod:
+ case SpvOpImageSampleExplicitLod:
+ case SpvOpImageSampleDrefImplicitLod:
+ case SpvOpImageSampleDrefExplicitLod:
+ case SpvOpImageSampleProjImplicitLod:
+ case SpvOpImageSampleProjExplicitLod:
+ case SpvOpImageSampleProjDrefImplicitLod:
+ case SpvOpImageSampleProjDrefExplicitLod:
+ case SpvOpImageFetch:
+ case SpvOpImageGather:
+ case SpvOpImageDrefGather:
+ case SpvOpImageQueryLod: {
+ /* All these types have the coordinate as their first real argument */
+ struct vtn_ssa_value *coord = vtn_ssa_value(b, w[idx++]);
+ coord_components = glsl_get_vector_elements(coord->type);
+ p->src = nir_src_for_ssa(coord->def);
+ p->src_type = nir_tex_src_coord;
+ p++;
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ nir_texop texop;
+ switch (opcode) {
+ case SpvOpImageSampleImplicitLod:
+ texop = nir_texop_tex;
+ break;
+
+ case SpvOpImageSampleExplicitLod:
+ case SpvOpImageSampleDrefImplicitLod:
+ case SpvOpImageSampleDrefExplicitLod:
+ case SpvOpImageSampleProjImplicitLod:
+ case SpvOpImageSampleProjExplicitLod:
+ case SpvOpImageSampleProjDrefImplicitLod:
+ case SpvOpImageSampleProjDrefExplicitLod:
+ case SpvOpImageFetch:
+ case SpvOpImageGather:
+ case SpvOpImageDrefGather:
+ case SpvOpImageQuerySizeLod:
+ case SpvOpImageQuerySize:
+ case SpvOpImageQueryLod:
+ case SpvOpImageQueryLevels:
+ case SpvOpImageQuerySamples:
+ default:
+ unreachable("Unhandled opcode");
+ }
+
+ /* From now on, the remaining sources are "Optional Image Operands." */
+ if (idx < count) {
+ /* XXX handle these (bias, lod, etc.) */
+ assert(0);
+ }
+
+
+ nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
+
+ const struct glsl_type *sampler_type = nir_deref_tail(&sampler->deref)->type;
+ instr->sampler_dim = glsl_get_sampler_dim(sampler_type);
+
+ switch (glsl_get_sampler_result_type(sampler_type)) {
+ case GLSL_TYPE_FLOAT: instr->dest_type = nir_type_float; break;
+ case GLSL_TYPE_INT: instr->dest_type = nir_type_int; break;
+ case GLSL_TYPE_UINT: instr->dest_type = nir_type_unsigned; break;
+ case GLSL_TYPE_BOOL: instr->dest_type = nir_type_bool; break;
+ default:
+ unreachable("Invalid base type for sampler result");
+ }
+
+ instr->op = texop;
+ memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));
+ instr->coord_components = coord_components;
+ instr->is_array = glsl_sampler_type_is_array(sampler_type);
+ instr->is_shadow = glsl_sampler_type_is_shadow(sampler_type);
+
+ instr->sampler = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));
+
+ nir_ssa_dest_init(&instr->instr, &instr->dest, 4, NULL);
+ val->ssa = vtn_create_ssa_value(b, glsl_vector_type(GLSL_TYPE_FLOAT, 4));
+ val->ssa->def = &instr->dest.ssa;
+
+ nir_builder_instr_insert(&b->nb, &instr->instr);
+}
+
+
+static nir_alu_instr *
+create_vec(void *mem_ctx, unsigned num_components)
+{
+ nir_op op;
+ switch (num_components) {
+ case 1: op = nir_op_fmov; break;
+ case 2: op = nir_op_vec2; break;
+ case 3: op = nir_op_vec3; break;
+ case 4: op = nir_op_vec4; break;
+ default: unreachable("bad vector size");
+ }
+
+ nir_alu_instr *vec = nir_alu_instr_create(mem_ctx, op);
+ nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components, NULL);
+ vec->dest.write_mask = (1 << num_components) - 1;
+
+ return vec;
+}
+
+static struct vtn_ssa_value *
+vtn_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
+{
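+   /* Transposes are memoized: if this value was produced by a transpose
+    * (or one has already been computed for it), src->transposed points at
+    * it and we can return it directly.
+    */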
+ if (src->transposed)
+ return src->transposed;
+
+ struct vtn_ssa_value *dest =
+ vtn_create_ssa_value(b, glsl_transposed_type(src->type));
+
+ for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
+ nir_alu_instr *vec = create_vec(b, glsl_get_matrix_columns(src->type));
+ if (glsl_type_is_vector_or_scalar(src->type)) {
+ vec->src[0].src = nir_src_for_ssa(src->def);
+ vec->src[0].swizzle[0] = i;
+ } else {
+ for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
+ vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
+ vec->src[j].swizzle[0] = i;
+ }
+ }
+ nir_builder_instr_insert(&b->nb, &vec->instr);
+ dest->elems[i]->def = &vec->dest.dest.ssa;
+ }
+
+ dest->transposed = src;
+
+ return dest;
+}
+
+/*
+ * Normally, column vectors in SPIR-V correspond to a single NIR SSA
+ * definition. But for matrix multiplies, we want to do one routine for
+ * multiplying a matrix by a matrix and then pretend that vectors are matrices
+ * with one column. So we "wrap" these things, and unwrap the result before we
+ * send it off.
+ */
+
+static struct vtn_ssa_value *
+vtn_wrap_matrix(struct vtn_builder *b, struct vtn_ssa_value *val)
+{
+ if (val == NULL)
+ return NULL;
+
+ if (glsl_type_is_matrix(val->type))
+ return val;
+
+ struct vtn_ssa_value *dest = rzalloc(b, struct vtn_ssa_value);
+ dest->type = val->type;
+ dest->elems = ralloc_array(b, struct vtn_ssa_value *, 1);
+ dest->elems[0] = val;
+
+ return dest;
+}
+
+static struct vtn_ssa_value *
+vtn_unwrap_matrix(struct vtn_ssa_value *val)
+{
+ if (glsl_type_is_matrix(val->type))
+ return val;
+
+ return val->elems[0];
+}
+
+static struct vtn_ssa_value *
+vtn_matrix_multiply(struct vtn_builder *b,
+ struct vtn_ssa_value *_src0, struct vtn_ssa_value *_src1)
+{
+
+ struct vtn_ssa_value *src0 = vtn_wrap_matrix(b, _src0);
+ struct vtn_ssa_value *src1 = vtn_wrap_matrix(b, _src1);
+ struct vtn_ssa_value *src0_transpose = vtn_wrap_matrix(b, _src0->transposed);
+ struct vtn_ssa_value *src1_transpose = vtn_wrap_matrix(b, _src1->transposed);
+
+ unsigned src0_rows = glsl_get_vector_elements(src0->type);
+ unsigned src0_columns = glsl_get_matrix_columns(src0->type);
+ unsigned src1_columns = glsl_get_matrix_columns(src1->type);
+
+ struct vtn_ssa_value *dest =
+ vtn_create_ssa_value(b, glsl_matrix_type(glsl_get_base_type(src0->type),
+ src0_rows, src1_columns));
+
+ dest = vtn_wrap_matrix(b, dest);
+
+ bool transpose_result = false;
+ if (src0_transpose && src1_transpose) {
+ /* transpose(A) * transpose(B) = transpose(B * A) */
+ src1 = src0_transpose;
+ src0 = src1_transpose;
+ src0_transpose = NULL;
+ src1_transpose = NULL;
+ transpose_result = true;
+ }
+
+ if (src0_transpose && !src1_transpose &&
+ glsl_get_base_type(src0->type) == GLSL_TYPE_FLOAT) {
+ /* We already have the rows of src0 and the columns of src1 available,
+ * so we can just take the dot product of each row with each column to
+ * get the result.
+ */
+
+ for (unsigned i = 0; i < src1_columns; i++) {
+ nir_alu_instr *vec = create_vec(b, src0_rows);
+ for (unsigned j = 0; j < src0_rows; j++) {
+ vec->src[j].src =
+ nir_src_for_ssa(nir_fdot(&b->nb, src0_transpose->elems[j]->def,
+ src1->elems[i]->def));
+ }
+
+ nir_builder_instr_insert(&b->nb, &vec->instr);
+ dest->elems[i]->def = &vec->dest.dest.ssa;
+ }
+ } else {
+ /* We don't handle the case where src1 is transposed but not src0, since
+ * the general case only uses individual components of src1 so the
+ * optimizer should chew through the transpose we emitted for src1.
+ */
+
+ for (unsigned i = 0; i < src1_columns; i++) {
+ /* dest[i] = sum(src0[j] * src1[i][j] for all j) */
+ dest->elems[i]->def =
+ nir_fmul(&b->nb, src0->elems[0]->def,
+ vtn_vector_extract(b, src1->elems[i]->def, 0));
+ for (unsigned j = 1; j < src0_columns; j++) {
+ dest->elems[i]->def =
+ nir_fadd(&b->nb, dest->elems[i]->def,
+ nir_fmul(&b->nb, src0->elems[j]->def,
+ vtn_vector_extract(b,
+ src1->elems[i]->def, j)));
+ }
+ }
+ }
+
+ dest = vtn_unwrap_matrix(dest);
+
+ if (transpose_result)
+ dest = vtn_transpose(b, dest);
+
+ return dest;
+}
+
+static struct vtn_ssa_value *
+vtn_mat_times_scalar(struct vtn_builder *b,
+ struct vtn_ssa_value *mat,
+ nir_ssa_def *scalar)
+{
+ struct vtn_ssa_value *dest = vtn_create_ssa_value(b, mat->type);
+ for (unsigned i = 0; i < glsl_get_matrix_columns(mat->type); i++) {
+ if (glsl_get_base_type(mat->type) == GLSL_TYPE_FLOAT)
+ dest->elems[i]->def = nir_fmul(&b->nb, mat->elems[i]->def, scalar);
+ else
+ dest->elems[i]->def = nir_imul(&b->nb, mat->elems[i]->def, scalar);
+ }
+
+ return dest;
+}
+
+static void
+vtn_handle_matrix_alu(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+
+ switch (opcode) {
+ case SpvOpTranspose: {
+ struct vtn_ssa_value *src = vtn_ssa_value(b, w[3]);
+ val->ssa = vtn_transpose(b, src);
+ break;
+ }
+
+ case SpvOpOuterProduct: {
+ struct vtn_ssa_value *src0 = vtn_ssa_value(b, w[3]);
+ struct vtn_ssa_value *src1 = vtn_ssa_value(b, w[4]);
+
+ val->ssa = vtn_matrix_multiply(b, src0, vtn_transpose(b, src1));
+ break;
+ }
+
+ case SpvOpMatrixTimesScalar: {
+ struct vtn_ssa_value *mat = vtn_ssa_value(b, w[3]);
+ struct vtn_ssa_value *scalar = vtn_ssa_value(b, w[4]);
+
+ if (mat->transposed) {
+ val->ssa = vtn_transpose(b, vtn_mat_times_scalar(b, mat->transposed,
+ scalar->def));
+ } else {
+ val->ssa = vtn_mat_times_scalar(b, mat, scalar->def);
+ }
+ break;
+ }
+
+ case SpvOpVectorTimesMatrix:
+ case SpvOpMatrixTimesVector:
+ case SpvOpMatrixTimesMatrix: {
+ struct vtn_ssa_value *src0 = vtn_ssa_value(b, w[3]);
+ struct vtn_ssa_value *src1 = vtn_ssa_value(b, w[4]);
+
+ val->ssa = vtn_matrix_multiply(b, src0, src1);
+ break;
+ }
+
+ default: unreachable("unknown matrix opcode");
+ }
+}
+
+static void
+vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ const struct glsl_type *type =
+ vtn_value(b, w[1], vtn_value_type_type)->type->type;
+ val->ssa = vtn_create_ssa_value(b, type);
+
+ /* Collect the various SSA sources */
+ unsigned num_inputs = count - 3;
+ nir_ssa_def *src[4];
+ for (unsigned i = 0; i < num_inputs; i++)
+ src[i] = vtn_ssa_value(b, w[i + 3])->def;
+
+ /* Indicates that the first two arguments should be swapped. This is
+ * used for implementing greater-than and less-than-or-equal.
+ */
+ bool swap = false;
+
+ nir_op op;
+ switch (opcode) {
+ /* Basic ALU operations */
+ case SpvOpSNegate: op = nir_op_ineg; break;
+ case SpvOpFNegate: op = nir_op_fneg; break;
+ case SpvOpNot: op = nir_op_inot; break;
+
+ case SpvOpAny:
+ switch (src[0]->num_components) {
+ case 1: op = nir_op_imov; break;
+ case 2: op = nir_op_bany2; break;
+ case 3: op = nir_op_bany3; break;
+ case 4: op = nir_op_bany4; break;
+ }
+ break;
+
+ case SpvOpAll:
+ switch (src[0]->num_components) {
+ case 1: op = nir_op_imov; break;
+ case 2: op = nir_op_ball2; break;
+ case 3: op = nir_op_ball3; break;
+ case 4: op = nir_op_ball4; break;
+ }
+ break;
+
+ case SpvOpIAdd: op = nir_op_iadd; break;
+ case SpvOpFAdd: op = nir_op_fadd; break;
+ case SpvOpISub: op = nir_op_isub; break;
+ case SpvOpFSub: op = nir_op_fsub; break;
+ case SpvOpIMul: op = nir_op_imul; break;
+ case SpvOpFMul: op = nir_op_fmul; break;
+ case SpvOpUDiv: op = nir_op_udiv; break;
+ case SpvOpSDiv: op = nir_op_idiv; break;
+ case SpvOpFDiv: op = nir_op_fdiv; break;
+ case SpvOpUMod: op = nir_op_umod; break;
+ case SpvOpSMod: op = nir_op_umod; break; /* FIXME? */
+ case SpvOpFMod: op = nir_op_fmod; break;
+
+ case SpvOpDot:
+ assert(src[0]->num_components == src[1]->num_components);
+ switch (src[0]->num_components) {
+ case 1: op = nir_op_fmul; break;
+ case 2: op = nir_op_fdot2; break;
+ case 3: op = nir_op_fdot3; break;
+ case 4: op = nir_op_fdot4; break;
+ }
+ break;
+
+ case SpvOpShiftRightLogical: op = nir_op_ushr; break;
+ case SpvOpShiftRightArithmetic: op = nir_op_ishr; break;
+ case SpvOpShiftLeftLogical: op = nir_op_ishl; break;
+ case SpvOpLogicalOr: op = nir_op_ior; break;
+ case SpvOpLogicalEqual: op = nir_op_ieq; break;
+ case SpvOpLogicalNotEqual: op = nir_op_ine; break;
+ case SpvOpLogicalAnd: op = nir_op_iand; break;
+ case SpvOpBitwiseOr: op = nir_op_ior; break;
+ case SpvOpBitwiseXor: op = nir_op_ixor; break;
+ case SpvOpBitwiseAnd: op = nir_op_iand; break;
+ case SpvOpSelect: op = nir_op_bcsel; break;
+ case SpvOpIEqual: op = nir_op_ieq; break;
+
+   /* Comparisons: (TODO: How do we want to handle ordered/unordered?) */
+ case SpvOpFOrdEqual: op = nir_op_feq; break;
+ case SpvOpFUnordEqual: op = nir_op_feq; break;
+ case SpvOpINotEqual: op = nir_op_ine; break;
+ case SpvOpFOrdNotEqual: op = nir_op_fne; break;
+ case SpvOpFUnordNotEqual: op = nir_op_fne; break;
+ case SpvOpULessThan: op = nir_op_ult; break;
+ case SpvOpSLessThan: op = nir_op_ilt; break;
+ case SpvOpFOrdLessThan: op = nir_op_flt; break;
+ case SpvOpFUnordLessThan: op = nir_op_flt; break;
+ case SpvOpUGreaterThan: op = nir_op_ult; swap = true; break;
+ case SpvOpSGreaterThan: op = nir_op_ilt; swap = true; break;
+ case SpvOpFOrdGreaterThan: op = nir_op_flt; swap = true; break;
+ case SpvOpFUnordGreaterThan: op = nir_op_flt; swap = true; break;
+ case SpvOpULessThanEqual: op = nir_op_uge; swap = true; break;
+ case SpvOpSLessThanEqual: op = nir_op_ige; swap = true; break;
+ case SpvOpFOrdLessThanEqual: op = nir_op_fge; swap = true; break;
+ case SpvOpFUnordLessThanEqual: op = nir_op_fge; swap = true; break;
+ case SpvOpUGreaterThanEqual: op = nir_op_uge; break;
+ case SpvOpSGreaterThanEqual: op = nir_op_ige; break;
+ case SpvOpFOrdGreaterThanEqual: op = nir_op_fge; break;
+ case SpvOpFUnordGreaterThanEqual:op = nir_op_fge; break;
+
+ /* Conversions: */
+ case SpvOpConvertFToU: op = nir_op_f2u; break;
+ case SpvOpConvertFToS: op = nir_op_f2i; break;
+ case SpvOpConvertSToF: op = nir_op_i2f; break;
+ case SpvOpConvertUToF: op = nir_op_u2f; break;
+ case SpvOpBitcast: op = nir_op_imov; break;
+ case SpvOpUConvert:
+ case SpvOpSConvert:
+ op = nir_op_imov; /* TODO: NIR is 32-bit only; these are no-ops. */
+ break;
+ case SpvOpFConvert:
+ op = nir_op_fmov;
+ break;
+
+ /* Derivatives: */
+ case SpvOpDPdx: op = nir_op_fddx; break;
+ case SpvOpDPdy: op = nir_op_fddy; break;
+ case SpvOpDPdxFine: op = nir_op_fddx_fine; break;
+ case SpvOpDPdyFine: op = nir_op_fddy_fine; break;
+ case SpvOpDPdxCoarse: op = nir_op_fddx_coarse; break;
+ case SpvOpDPdyCoarse: op = nir_op_fddy_coarse; break;
+ case SpvOpFwidth:
+ /* fwidth(p) = |ddx(p)| + |ddy(p)| */
+ val->ssa->def = nir_fadd(&b->nb,
+ nir_fabs(&b->nb, nir_fddx(&b->nb, src[0])),
+ nir_fabs(&b->nb, nir_fddy(&b->nb, src[0])));
+ return;
+ case SpvOpFwidthFine:
+ val->ssa->def = nir_fadd(&b->nb,
+ nir_fabs(&b->nb, nir_fddx_fine(&b->nb, src[0])),
+ nir_fabs(&b->nb, nir_fddy_fine(&b->nb, src[0])));
+ return;
+ case SpvOpFwidthCoarse:
+ val->ssa->def = nir_fadd(&b->nb,
+ nir_fabs(&b->nb, nir_fddx_coarse(&b->nb, src[0])),
+ nir_fabs(&b->nb, nir_fddy_coarse(&b->nb, src[0])));
+ return;
+
+ case SpvOpVectorTimesScalar:
+ /* The builder will take care of splatting for us. */
+ val->ssa->def = nir_fmul(&b->nb, src[0], src[1]);
+ return;
+
+ case SpvOpSRem:
+ case SpvOpFRem:
+ unreachable("No NIR equivalent");
+
+ case SpvOpIsNan:
+ case SpvOpIsInf:
+ case SpvOpIsFinite:
+ case SpvOpIsNormal:
+ case SpvOpSignBitSet:
+ case SpvOpLessOrGreater:
+ case SpvOpOrdered:
+ case SpvOpUnordered:
+ default:
+ unreachable("Unhandled opcode");
+ }
+
+ if (swap) {
+ nir_ssa_def *tmp = src[0];
+ src[0] = src[1];
+ src[1] = tmp;
+ }
+
+ nir_alu_instr *instr = nir_alu_instr_create(b->shader, op);
+ nir_ssa_dest_init(&instr->instr, &instr->dest.dest,
+ glsl_get_vector_elements(type), val->name);
+ instr->dest.write_mask = (1 << glsl_get_vector_elements(type)) - 1;
+ val->ssa->def = &instr->dest.dest.ssa;
+
+ for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++)
+ instr->src[i].src = nir_src_for_ssa(src[i]);
+
+ nir_builder_instr_insert(&b->nb, &instr->instr);
+}
+
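+/* Extracts component "index" from a vector by emitting a single-component
+ * swizzle of the source.
+ */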
+static nir_ssa_def *
+vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
+{
+ unsigned swiz[4] = { index };
+ return nir_swizzle(&b->nb, src, swiz, 1, true);
+}
+
+static nir_ssa_def *
+vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
+ unsigned index)
+{
+ nir_alu_instr *vec = create_vec(b->shader, src->num_components);
+
+ for (unsigned i = 0; i < src->num_components; i++) {
+ if (i == index) {
+ vec->src[i].src = nir_src_for_ssa(insert);
+ } else {
+ vec->src[i].src = nir_src_for_ssa(src);
+ vec->src[i].swizzle[0] = i;
+ }
+ }
+
+ nir_builder_instr_insert(&b->nb, &vec->instr);
+
+ return &vec->dest.dest.ssa;
+}
+
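+/* OpVectorExtractDynamic and OpVectorInsertDynamic take a run-time index,
+ * which a NIR swizzle cannot express, so the helpers below lower them to a
+ * chain of bcsels that tests the index against each possible component.
+ */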
+static nir_ssa_def *
+vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
+ nir_ssa_def *index)
+{
+ nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
+ for (unsigned i = 1; i < src->num_components; i++)
+ dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
+ vtn_vector_extract(b, src, i), dest);
+
+ return dest;
+}
+
+static nir_ssa_def *
+vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
+ nir_ssa_def *insert, nir_ssa_def *index)
+{
+ nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
+ for (unsigned i = 1; i < src->num_components; i++)
+ dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
+ vtn_vector_insert(b, src, insert, i), dest);
+
+ return dest;
+}
+
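+/* Implements OpVectorShuffle. A component literal of 0xffffffff means the
+ * result component is undefined, so it is sourced from an SSA undef.
+ */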
+static nir_ssa_def *
+vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
+ nir_ssa_def *src0, nir_ssa_def *src1,
+ const uint32_t *indices)
+{
+ nir_alu_instr *vec = create_vec(b->shader, num_components);
+
+ nir_ssa_undef_instr *undef = nir_ssa_undef_instr_create(b->shader, 1);
+ nir_builder_instr_insert(&b->nb, &undef->instr);
+
+ for (unsigned i = 0; i < num_components; i++) {
+ uint32_t index = indices[i];
+ if (index == 0xffffffff) {
+ vec->src[i].src = nir_src_for_ssa(&undef->def);
+ } else if (index < src0->num_components) {
+ vec->src[i].src = nir_src_for_ssa(src0);
+ vec->src[i].swizzle[0] = index;
+ } else {
+ vec->src[i].src = nir_src_for_ssa(src1);
+ vec->src[i].swizzle[0] = index - src0->num_components;
+ }
+ }
+
+ nir_builder_instr_insert(&b->nb, &vec->instr);
+
+ return &vec->dest.dest.ssa;
+}
+
+/*
+ * Concatenates a number of vectors and/or scalars to produce a vector.
+ */
+static nir_ssa_def *
+vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
+ unsigned num_srcs, nir_ssa_def **srcs)
+{
+ nir_alu_instr *vec = create_vec(b->shader, num_components);
+
+ unsigned dest_idx = 0;
+ for (unsigned i = 0; i < num_srcs; i++) {
+ nir_ssa_def *src = srcs[i];
+ for (unsigned j = 0; j < src->num_components; j++) {
+ vec->src[dest_idx].src = nir_src_for_ssa(src);
+ vec->src[dest_idx].swizzle[0] = j;
+ dest_idx++;
+ }
+ }
+
+ nir_builder_instr_insert(&b->nb, &vec->instr);
+
+ return &vec->dest.dest.ssa;
+}
+
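+/* Makes a structural copy of a vtn_ssa_value. Aggregates get fresh element
+ * arrays (so a single leaf can later be replaced without touching the
+ * source), while vector/scalar leaves share the underlying SSA def, which
+ * is never modified in place.
+ */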
+static struct vtn_ssa_value *
+vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
+{
+ struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
+ dest->type = src->type;
+
+ if (glsl_type_is_vector_or_scalar(src->type)) {
+ dest->def = src->def;
+ } else {
+ unsigned elems = glsl_get_length(src->type);
+
+ dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
+ for (unsigned i = 0; i < elems; i++)
+ dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
+ }
+
+ return dest;
+}
+
+static struct vtn_ssa_value *
+vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
+ struct vtn_ssa_value *insert, const uint32_t *indices,
+ unsigned num_indices)
+{
+ struct vtn_ssa_value *dest = vtn_composite_copy(b, src);
+
+ struct vtn_ssa_value *cur = dest;
+ unsigned i;
+ for (i = 0; i < num_indices - 1; i++) {
+ cur = cur->elems[indices[i]];
+ }
+
+ if (glsl_type_is_vector_or_scalar(cur->type)) {
+ /* According to the SPIR-V spec, OpCompositeInsert may work down to
+ * component granularity. In that case the last index is the component
+ * within the vector at which to insert the scalar.
+ */
+
+ cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
+ } else {
+ cur->elems[indices[i]] = insert;
+ }
+
+ return dest;
+}
+
+static struct vtn_ssa_value *
+vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
+ const uint32_t *indices, unsigned num_indices)
+{
+ struct vtn_ssa_value *cur = src;
+ for (unsigned i = 0; i < num_indices; i++) {
+ if (glsl_type_is_vector_or_scalar(cur->type)) {
+ assert(i == num_indices - 1);
+ /* According to the SPIR-V spec, OpCompositeExtract may work down to
+ * component granularity. In that case the last index is the
+ * component to extract from the vector.
+ */
+
+ struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
+ ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
+ ret->def = vtn_vector_extract(b, cur->def, indices[i]);
+ return ret;
+ } else {
+ cur = cur->elems[indices[i]];
+ }
+ }
+
+ return cur;
+}
+
+static void
+vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ const struct glsl_type *type =
+ vtn_value(b, w[1], vtn_value_type_type)->type->type;
+ val->ssa = vtn_create_ssa_value(b, type);
+
+ switch (opcode) {
+ case SpvOpVectorExtractDynamic:
+ val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
+ vtn_ssa_value(b, w[4])->def);
+ break;
+
+ case SpvOpVectorInsertDynamic:
+ val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
+ vtn_ssa_value(b, w[4])->def,
+ vtn_ssa_value(b, w[5])->def);
+ break;
+
+ case SpvOpVectorShuffle:
+ val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
+ vtn_ssa_value(b, w[3])->def,
+ vtn_ssa_value(b, w[4])->def,
+ w + 5);
+ break;
+
+ case SpvOpCompositeConstruct: {
+ unsigned elems = count - 3;
+ if (glsl_type_is_vector_or_scalar(type)) {
+ nir_ssa_def *srcs[4];
+ for (unsigned i = 0; i < elems; i++)
+ srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
+ val->ssa->def =
+ vtn_vector_construct(b, glsl_get_vector_elements(type),
+ elems, srcs);
+ } else {
+ val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+ for (unsigned i = 0; i < elems; i++)
+ val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
+ }
+ break;
+ }
+ case SpvOpCompositeExtract:
+ val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
+ w + 4, count - 4);
+ break;
+
+ case SpvOpCompositeInsert:
+ val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
+ vtn_ssa_value(b, w[3]),
+ w + 5, count - 5);
+ break;
+
+ case SpvOpCopyObject:
+ val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
+ break;
+
+ default:
+ unreachable("unknown composite operation");
+ }
+}
+
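+/* Phi nodes are handled in two passes: the functions below create empty NIR
+ * phi instructions while the body is being emitted, and a second pass over
+ * the function (vtn_handle_phi_second_pass) fills in the phi sources once
+ * every block has been translated.
+ */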
+static void
+vtn_phi_node_init(struct vtn_builder *b, struct vtn_ssa_value *val)
+{
+ if (glsl_type_is_vector_or_scalar(val->type)) {
+ nir_phi_instr *phi = nir_phi_instr_create(b->shader);
+ nir_ssa_dest_init(&phi->instr, &phi->dest,
+ glsl_get_vector_elements(val->type), NULL);
+ exec_list_make_empty(&phi->srcs);
+ nir_builder_instr_insert(&b->nb, &phi->instr);
+ val->def = &phi->dest.ssa;
+ } else {
+ unsigned elems = glsl_get_length(val->type);
+ for (unsigned i = 0; i < elems; i++)
+ vtn_phi_node_init(b, val->elems[i]);
+ }
+}
+
+static struct vtn_ssa_value *
+vtn_phi_node_create(struct vtn_builder *b, const struct glsl_type *type)
+{
+ struct vtn_ssa_value *val = vtn_create_ssa_value(b, type);
+ vtn_phi_node_init(b, val);
+ return val;
+}
+
+static void
+vtn_handle_phi_first_pass(struct vtn_builder *b, const uint32_t *w)
+{
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ const struct glsl_type *type =
+ vtn_value(b, w[1], vtn_value_type_type)->type->type;
+ val->ssa = vtn_phi_node_create(b, type);
+}
+
+static void
+vtn_phi_node_add_src(struct vtn_ssa_value *phi, const nir_block *pred,
+ struct vtn_ssa_value *val)
+{
+ assert(phi->type == val->type);
+ if (glsl_type_is_vector_or_scalar(phi->type)) {
+ nir_phi_instr *phi_instr = nir_instr_as_phi(phi->def->parent_instr);
+ nir_phi_src *src = ralloc(phi_instr, nir_phi_src);
+ src->pred = (nir_block *) pred;
+ src->src = nir_src_for_ssa(val->def);
+ exec_list_push_tail(&phi_instr->srcs, &src->node);
+ } else {
+ unsigned elems = glsl_get_length(phi->type);
+ for (unsigned i = 0; i < elems; i++)
+ vtn_phi_node_add_src(phi->elems[i], pred, val->elems[i]);
+ }
+}
+
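+/* Resolves the value an OpPhi takes when entered from the given NIR block.
+ * If that block does not correspond to one of the OpPhi's listed parents
+ * (structurization can introduce extra blocks), a new phi is created in it
+ * and its sources are resolved recursively from its predecessors.
+ */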
+static struct vtn_ssa_value *
+vtn_get_phi_node_src(struct vtn_builder *b, nir_block *block,
+ const struct glsl_type *type, const uint32_t *w,
+ unsigned count)
+{
+ struct hash_entry *entry = _mesa_hash_table_search(b->block_table, block);
+ if (entry) {
+ struct vtn_block *spv_block = entry->data;
+ for (unsigned off = 4; off < count; off += 2) {
+ if (spv_block == vtn_value(b, w[off], vtn_value_type_block)->block) {
+ return vtn_ssa_value(b, w[off - 1]);
+ }
+ }
+ }
+
+ nir_builder_insert_before_block(&b->nb, block);
+ struct vtn_ssa_value *phi = vtn_phi_node_create(b, type);
+
+ struct set_entry *entry2;
+ set_foreach(block->predecessors, entry2) {
+ nir_block *pred = (nir_block *) entry2->key;
+ struct vtn_ssa_value *val = vtn_get_phi_node_src(b, pred, type, w,
+ count);
+ vtn_phi_node_add_src(phi, pred, val);
+ }
+
+ return phi;
+}
+
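+/* Second phi pass: walks the function after all blocks have been emitted
+ * and adds one phi source per NIR predecessor of the block containing the
+ * OpPhi.
+ */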
+static bool
+vtn_handle_phi_second_pass(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ if (opcode == SpvOpLabel) {
+ b->block = vtn_value(b, w[1], vtn_value_type_block)->block;
+ return true;
+ }
+
+ if (opcode != SpvOpPhi)
+ return true;
+
+ struct vtn_ssa_value *phi = vtn_value(b, w[2], vtn_value_type_ssa)->ssa;
+
+ struct set_entry *entry;
+ set_foreach(b->block->block->predecessors, entry) {
+ nir_block *pred = (nir_block *) entry->key;
+
+ struct vtn_ssa_value *val = vtn_get_phi_node_src(b, pred, phi->type, w,
+ count);
+ vtn_phi_node_add_src(phi, pred, val);
+ }
+
+ return true;
+}
+
+static bool
+vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpSource:
+ case SpvOpSourceExtension:
+ case SpvOpExtension:
+ /* Unhandled, but these are for debug so that's ok. */
+ break;
+
+ case SpvOpCapability:
+ /*
+ * TODO: properly handle these and report a real error if the shader
+ * asks for an unsupported capability.
+ */
+ assert(w[1] == SpvCapabilityMatrix ||
+ w[1] == SpvCapabilityShader);
+ break;
+
+ case SpvOpExtInstImport:
+ vtn_handle_extension(b, opcode, w, count);
+ break;
+
+ case SpvOpMemoryModel:
+ assert(w[1] == SpvAddressingModelLogical);
+ assert(w[2] == SpvMemoryModelGLSL450);
+ break;
+
+ case SpvOpEntryPoint:
+ assert(b->entry_point == NULL);
+ b->entry_point = &b->values[w[2]];
+ b->execution_model = w[1];
+ break;
+
+ case SpvOpExecutionMode:
+ /*
+ * TODO handle these - for Vulkan OriginUpperLeft is always set for
+ * fragment shaders, so we can ignore this for now
+ */
+ break;
+
+ case SpvOpString:
+ vtn_push_value(b, w[1], vtn_value_type_string)->str =
+ vtn_string_literal(b, &w[2], count - 2);
+ break;
+
+ case SpvOpName:
+ b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2);
+ break;
+
+ case SpvOpMemberName:
+ /* TODO */
+ break;
+
+ case SpvOpLine:
+ break; /* Ignored for now */
+
+ case SpvOpDecorationGroup:
+ case SpvOpDecorate:
+ case SpvOpMemberDecorate:
+ case SpvOpGroupDecorate:
+ case SpvOpGroupMemberDecorate:
+ vtn_handle_decoration(b, opcode, w, count);
+ break;
+
+ case SpvOpTypeVoid:
+ case SpvOpTypeBool:
+ case SpvOpTypeInt:
+ case SpvOpTypeFloat:
+ case SpvOpTypeVector:
+ case SpvOpTypeMatrix:
+ case SpvOpTypeImage:
+ case SpvOpTypeSampler:
+ case SpvOpTypeSampledImage:
+ case SpvOpTypeArray:
+ case SpvOpTypeRuntimeArray:
+ case SpvOpTypeStruct:
+ case SpvOpTypeOpaque:
+ case SpvOpTypePointer:
+ case SpvOpTypeFunction:
+ case SpvOpTypeEvent:
+ case SpvOpTypeDeviceEvent:
+ case SpvOpTypeReserveId:
+ case SpvOpTypeQueue:
+ case SpvOpTypePipe:
+ vtn_handle_type(b, opcode, w, count);
+ break;
+
+ case SpvOpConstantTrue:
+ case SpvOpConstantFalse:
+ case SpvOpConstant:
+ case SpvOpConstantComposite:
+ case SpvOpConstantSampler:
+ case SpvOpSpecConstantTrue:
+ case SpvOpSpecConstantFalse:
+ case SpvOpSpecConstant:
+ case SpvOpSpecConstantComposite:
+ vtn_handle_constant(b, opcode, w, count);
+ break;
+
+ case SpvOpVariable:
+ vtn_handle_variables(b, opcode, w, count);
+ break;
+
+ default:
+ return false; /* End of preamble */
+ }
+
+ return true;
+}
+
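+/* First CFG pass: records functions, blocks, their terminating branches,
+ * and selection/loop merge information without emitting any NIR, so that
+ * vtn_walk_blocks can later reconstruct structured control flow.
+ */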
+static bool
+vtn_handle_first_cfg_pass_instruction(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpFunction: {
+ assert(b->func == NULL);
+ b->func = rzalloc(b, struct vtn_function);
+
+ const struct glsl_type *result_type =
+ vtn_value(b, w[1], vtn_value_type_type)->type->type;
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_function);
+ const struct glsl_type *func_type =
+ vtn_value(b, w[4], vtn_value_type_type)->type->type;
+
+ assert(glsl_get_function_return_type(func_type) == result_type);
+
+ nir_function *func =
+ nir_function_create(b->shader, ralloc_strdup(b->shader, val->name));
+
+ nir_function_overload *overload = nir_function_overload_create(func);
+ overload->num_params = glsl_get_length(func_type);
+ overload->params = ralloc_array(overload, nir_parameter,
+ overload->num_params);
+ for (unsigned i = 0; i < overload->num_params; i++) {
+ const struct glsl_function_param *param =
+ glsl_get_function_param(func_type, i);
+ overload->params[i].type = param->type;
+ if (param->in) {
+ if (param->out) {
+ overload->params[i].param_type = nir_parameter_inout;
+ } else {
+ overload->params[i].param_type = nir_parameter_in;
+ }
+ } else {
+ if (param->out) {
+ overload->params[i].param_type = nir_parameter_out;
+ } else {
+ assert(!"Parameter is neither in nor out");
+ }
+ }
+ }
+ b->func->overload = overload;
+ break;
+ }
+
+ case SpvOpFunctionEnd:
+ b->func->end = w;
+ b->func = NULL;
+ break;
+
+ case SpvOpFunctionParameter:
+ break; /* Does nothing */
+
+ case SpvOpLabel: {
+ assert(b->block == NULL);
+ b->block = rzalloc(b, struct vtn_block);
+ b->block->label = w;
+ vtn_push_value(b, w[1], vtn_value_type_block)->block = b->block;
+
+ if (b->func->start_block == NULL) {
+ /* This is the first block encountered for this function. In this
+ * case, we set the start block and add it to the list of
+ * implemented functions that we'll walk later.
+ */
+ b->func->start_block = b->block;
+ exec_list_push_tail(&b->functions, &b->func->node);
+ }
+ break;
+ }
+
+ case SpvOpBranch:
+ case SpvOpBranchConditional:
+ case SpvOpSwitch:
+ case SpvOpKill:
+ case SpvOpReturn:
+ case SpvOpReturnValue:
+ case SpvOpUnreachable:
+ assert(b->block);
+ b->block->branch = w;
+ b->block = NULL;
+ break;
+
+ case SpvOpSelectionMerge:
+ case SpvOpLoopMerge:
+ assert(b->block && b->block->merge_op == SpvOpNop);
+ b->block->merge_op = opcode;
+ b->block->merge_block_id = w[1];
+ break;
+
+ default:
+ /* Continue on as per normal */
+ return true;
+ }
+
+ return true;
+}
+
+static bool
+vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpLabel: {
+ struct vtn_block *block = vtn_value(b, w[1], vtn_value_type_block)->block;
+ assert(block->block == NULL);
+
+ struct exec_node *list_tail = exec_list_get_tail(b->nb.cf_node_list);
+ nir_cf_node *tail_node = exec_node_data(nir_cf_node, list_tail, node);
+ assert(tail_node->type == nir_cf_node_block);
+ block->block = nir_cf_node_as_block(tail_node);
+ break;
+ }
+
+ case SpvOpLoopMerge:
+ case SpvOpSelectionMerge:
+ /* This is handled by cfg pre-pass and walk_blocks */
+ break;
+
+ case SpvOpUndef:
+ vtn_push_value(b, w[2], vtn_value_type_undef);
+ break;
+
+ case SpvOpExtInst:
+ vtn_handle_extension(b, opcode, w, count);
+ break;
+
+ case SpvOpVariable:
+ case SpvOpLoad:
+ case SpvOpStore:
+ case SpvOpCopyMemory:
+ case SpvOpCopyMemorySized:
+ case SpvOpAccessChain:
+ case SpvOpInBoundsAccessChain:
+ case SpvOpArrayLength:
+ case SpvOpImageTexelPointer:
+ vtn_handle_variables(b, opcode, w, count);
+ break;
+
+ case SpvOpFunctionCall:
+ vtn_handle_function_call(b, opcode, w, count);
+ break;
+
+ case SpvOpImageSampleImplicitLod:
+ case SpvOpImageSampleExplicitLod:
+ case SpvOpImageSampleDrefImplicitLod:
+ case SpvOpImageSampleDrefExplicitLod:
+ case SpvOpImageSampleProjImplicitLod:
+ case SpvOpImageSampleProjExplicitLod:
+ case SpvOpImageSampleProjDrefImplicitLod:
+ case SpvOpImageSampleProjDrefExplicitLod:
+ case SpvOpImageFetch:
+ case SpvOpImageGather:
+ case SpvOpImageDrefGather:
+ case SpvOpImageQuerySizeLod:
+ case SpvOpImageQuerySize:
+ case SpvOpImageQueryLod:
+ case SpvOpImageQueryLevels:
+ case SpvOpImageQuerySamples:
+ vtn_handle_texture(b, opcode, w, count);
+ break;
+
+ case SpvOpSNegate:
+ case SpvOpFNegate:
+ case SpvOpNot:
+ case SpvOpAny:
+ case SpvOpAll:
+ case SpvOpConvertFToU:
+ case SpvOpConvertFToS:
+ case SpvOpConvertSToF:
+ case SpvOpConvertUToF:
+ case SpvOpUConvert:
+ case SpvOpSConvert:
+ case SpvOpFConvert:
+ case SpvOpConvertPtrToU:
+ case SpvOpConvertUToPtr:
+ case SpvOpPtrCastToGeneric:
+ case SpvOpGenericCastToPtr:
+ case SpvOpBitcast:
+ case SpvOpIsNan:
+ case SpvOpIsInf:
+ case SpvOpIsFinite:
+ case SpvOpIsNormal:
+ case SpvOpSignBitSet:
+ case SpvOpLessOrGreater:
+ case SpvOpOrdered:
+ case SpvOpUnordered:
+ case SpvOpIAdd:
+ case SpvOpFAdd:
+ case SpvOpISub:
+ case SpvOpFSub:
+ case SpvOpIMul:
+ case SpvOpFMul:
+ case SpvOpUDiv:
+ case SpvOpSDiv:
+ case SpvOpFDiv:
+ case SpvOpUMod:
+ case SpvOpSRem:
+ case SpvOpSMod:
+ case SpvOpFRem:
+ case SpvOpFMod:
+ case SpvOpVectorTimesScalar:
+ case SpvOpDot:
+ case SpvOpShiftRightLogical:
+ case SpvOpShiftRightArithmetic:
+ case SpvOpShiftLeftLogical:
+ case SpvOpLogicalOr:
+ case SpvOpLogicalEqual:
+ case SpvOpLogicalNotEqual:
+ case SpvOpLogicalAnd:
+ case SpvOpBitwiseOr:
+ case SpvOpBitwiseXor:
+ case SpvOpBitwiseAnd:
+ case SpvOpSelect:
+ case SpvOpIEqual:
+ case SpvOpFOrdEqual:
+ case SpvOpFUnordEqual:
+ case SpvOpINotEqual:
+ case SpvOpFOrdNotEqual:
+ case SpvOpFUnordNotEqual:
+ case SpvOpULessThan:
+ case SpvOpSLessThan:
+ case SpvOpFOrdLessThan:
+ case SpvOpFUnordLessThan:
+ case SpvOpUGreaterThan:
+ case SpvOpSGreaterThan:
+ case SpvOpFOrdGreaterThan:
+ case SpvOpFUnordGreaterThan:
+ case SpvOpULessThanEqual:
+ case SpvOpSLessThanEqual:
+ case SpvOpFOrdLessThanEqual:
+ case SpvOpFUnordLessThanEqual:
+ case SpvOpUGreaterThanEqual:
+ case SpvOpSGreaterThanEqual:
+ case SpvOpFOrdGreaterThanEqual:
+ case SpvOpFUnordGreaterThanEqual:
+ case SpvOpDPdx:
+ case SpvOpDPdy:
+ case SpvOpFwidth:
+ case SpvOpDPdxFine:
+ case SpvOpDPdyFine:
+ case SpvOpFwidthFine:
+ case SpvOpDPdxCoarse:
+ case SpvOpDPdyCoarse:
+ case SpvOpFwidthCoarse:
+ vtn_handle_alu(b, opcode, w, count);
+ break;
+
+ case SpvOpTranspose:
+ case SpvOpOuterProduct:
+ case SpvOpMatrixTimesScalar:
+ case SpvOpVectorTimesMatrix:
+ case SpvOpMatrixTimesVector:
+ case SpvOpMatrixTimesMatrix:
+ vtn_handle_matrix_alu(b, opcode, w, count);
+ break;
+
+ case SpvOpVectorExtractDynamic:
+ case SpvOpVectorInsertDynamic:
+ case SpvOpVectorShuffle:
+ case SpvOpCompositeConstruct:
+ case SpvOpCompositeExtract:
+ case SpvOpCompositeInsert:
+ case SpvOpCopyObject:
+ vtn_handle_composite(b, opcode, w, count);
+ break;
+
+ case SpvOpPhi:
+ vtn_handle_phi_first_pass(b, w);
+ break;
+
+ default:
+ unreachable("Unhandled opcode");
+ }
+
+ return true;
+}
+
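+/* Emits NIR control flow for a chain of SPIR-V blocks. break_block and
+ * cont_block are the break/continue targets of the innermost loop, and
+ * end_block is the merge block at which walking stops (NULL when walking a
+ * loop or function body).
+ */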
+static void
+vtn_walk_blocks(struct vtn_builder *b, struct vtn_block *start,
+ struct vtn_block *break_block, struct vtn_block *cont_block,
+ struct vtn_block *end_block)
+{
+ struct vtn_block *block = start;
+ while (block != end_block) {
+ if (block->merge_op == SpvOpLoopMerge) {
+ /* This is the jump into a loop. */
+ struct vtn_block *new_cont_block = block;
+ struct vtn_block *new_break_block =
+ vtn_value(b, block->merge_block_id, vtn_value_type_block)->block;
+
+ nir_loop *loop = nir_loop_create(b->shader);
+ nir_cf_node_insert_end(b->nb.cf_node_list, &loop->cf_node);
+
+ struct exec_list *old_list = b->nb.cf_node_list;
+
+ /* Reset the merge_op to prevent infinite recursion */
+ block->merge_op = SpvOpNop;
+
+ nir_builder_insert_after_cf_list(&b->nb, &loop->body);
+ vtn_walk_blocks(b, block, new_break_block, new_cont_block, NULL);
+
+ nir_builder_insert_after_cf_list(&b->nb, old_list);
+ block = new_break_block;
+ continue;
+ }
+
+ const uint32_t *w = block->branch;
+ SpvOp branch_op = w[0] & SpvOpCodeMask;
+
+ b->block = block;
+ vtn_foreach_instruction(b, block->label, block->branch,
+ vtn_handle_body_instruction);
+
+ nir_cf_node *cur_cf_node =
+ exec_node_data(nir_cf_node, exec_list_get_tail(b->nb.cf_node_list),
+ node);
+ nir_block *cur_block = nir_cf_node_as_block(cur_cf_node);
+ _mesa_hash_table_insert(b->block_table, cur_block, block);
+
+ switch (branch_op) {
+ case SpvOpBranch: {
+ struct vtn_block *branch_block =
+ vtn_value(b, w[1], vtn_value_type_block)->block;
+
+ if (branch_block == break_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_break);
+ nir_builder_instr_insert(&b->nb, &jump->instr);
+
+ return;
+ } else if (branch_block == cont_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_continue);
+ nir_builder_instr_insert(&b->nb, &jump->instr);
+
+ return;
+ } else if (branch_block == end_block) {
+ /* We're branching to the merge block of an if (for loops and
+ * functions end_block == NULL), so we're done here.
+ */
+ return;
+ } else {
+ /* We're branching to another block, and according to the rules,
+ * we can only branch to another block with one predecessor (so
+ * we're the only one jumping to it) so we can just process it
+ * next.
+ */
+ block = branch_block;
+ continue;
+ }
+ }
+
+ case SpvOpBranchConditional: {
+ /* Gather up the branch blocks */
+ struct vtn_block *then_block =
+ vtn_value(b, w[2], vtn_value_type_block)->block;
+ struct vtn_block *else_block =
+ vtn_value(b, w[3], vtn_value_type_block)->block;
+
+ nir_if *if_stmt = nir_if_create(b->shader);
+ if_stmt->condition = nir_src_for_ssa(vtn_ssa_value(b, w[1])->def);
+ nir_cf_node_insert_end(b->nb.cf_node_list, &if_stmt->cf_node);
+
+ if (then_block == break_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_break);
+ nir_instr_insert_after_cf_list(&if_stmt->then_list,
+ &jump->instr);
+ block = else_block;
+ } else if (else_block == break_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_break);
+ nir_instr_insert_after_cf_list(&if_stmt->else_list,
+ &jump->instr);
+ block = then_block;
+ } else if (then_block == cont_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_continue);
+ nir_instr_insert_after_cf_list(&if_stmt->then_list,
+ &jump->instr);
+ block = else_block;
+ } else if (else_block == cont_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_continue);
+ nir_instr_insert_after_cf_list(&if_stmt->else_list,
+ &jump->instr);
+ block = then_block;
+ } else {
+ /* According to the rules we're branching to two blocks that don't
+ * have any other predecessors, so we can handle this as a
+ * conventional if.
+ */
+ assert(block->merge_op == SpvOpSelectionMerge);
+ struct vtn_block *merge_block =
+ vtn_value(b, block->merge_block_id, vtn_value_type_block)->block;
+
+ struct exec_list *old_list = b->nb.cf_node_list;
+
+ nir_builder_insert_after_cf_list(&b->nb, &if_stmt->then_list);
+ vtn_walk_blocks(b, then_block, break_block, cont_block, merge_block);
+
+ nir_builder_insert_after_cf_list(&b->nb, &if_stmt->else_list);
+ vtn_walk_blocks(b, else_block, break_block, cont_block, merge_block);
+
+ nir_builder_insert_after_cf_list(&b->nb, old_list);
+ block = merge_block;
+ continue;
+ }
+
+ /* If we got here then we inserted a predicated break or continue
+ * above and we need to handle the other case. We already set
+ * `block` above to indicate what block to visit after the
+ * predicated break.
+ */
+
+ /* It's possible that the other branch is also a break/continue.
+ * If it is, we handle that here.
+ */
+ if (block == break_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_break);
+ nir_builder_instr_insert(&b->nb, &jump->instr);
+
+ return;
+ } else if (block == cont_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_continue);
+ nir_builder_instr_insert(&b->nb, &jump->instr);
+
+ return;
+ }
+
+ /* If we got here then there was a predicated break/continue but
+ * the other half of the if has stuff in it. `block` was already
+ * set above so there is nothing left for us to do.
+ */
+ continue;
+ }
+
+ case SpvOpReturn: {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_return);
+ nir_builder_instr_insert(&b->nb, &jump->instr);
+ return;
+ }
+
+ case SpvOpKill: {
+ nir_intrinsic_instr *discard =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard);
+ nir_builder_instr_insert(&b->nb, &discard->instr);
+ return;
+ }
+
+ case SpvOpSwitch:
+ case SpvOpReturnValue:
+ case SpvOpUnreachable:
+ default:
+ unreachable("Unhandled opcode");
+ }
+ }
+}
+
+nir_shader *
+spirv_to_nir(const uint32_t *words, size_t word_count,
+ gl_shader_stage stage,
+ const nir_shader_compiler_options *options)
+{
+ const uint32_t *word_end = words + word_count;
+
+ /* Handle the SPIR-V header (first 5 dwords) */
+ assert(word_count > 5);
+
+ assert(words[0] == SpvMagicNumber);
+ assert(words[1] == 99); /* provisional SPIR-V version */
+ /* words[2] == generator magic */
+ unsigned value_id_bound = words[3];
+ assert(words[4] == 0);
+
+ words += 5;
+
+ nir_shader *shader = nir_shader_create(NULL, stage, options);
+
+ /* Initialize the vtn_builder object */
+ struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
+ b->shader = shader;
+ b->value_id_bound = value_id_bound;
+ b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
+ exec_list_make_empty(&b->functions);
+
+ /* Handle all the preamble instructions */
+ words = vtn_foreach_instruction(b, words, word_end,
+ vtn_handle_preamble_instruction);
+
+ /* Do a very quick CFG analysis pass */
+ vtn_foreach_instruction(b, words, word_end,
+ vtn_handle_first_cfg_pass_instruction);
+
+ foreach_list_typed(struct vtn_function, func, node, &b->functions) {
+ b->impl = nir_function_impl_create(func->overload);
+ b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+ b->block_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+ nir_builder_init(&b->nb, b->impl);
+ nir_builder_insert_after_cf_list(&b->nb, &b->impl->body);
+ vtn_walk_blocks(b, func->start_block, NULL, NULL, NULL);
+ vtn_foreach_instruction(b, func->start_block->label, func->end,
+ vtn_handle_phi_second_pass);
+ }
+
+ ralloc_free(b);
+
+ return shader;
+}
--- /dev/null
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <sys/stat.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "anv_private.h"
+
+#include <brw_context.h>
+#include <brw_wm.h> /* brw_new_shader_program is here */
+#include <brw_nir.h>
+
+#include <brw_vs.h>
+#include <brw_gs.h>
+#include <brw_cs.h>
+
+#include <mesa/main/shaderobj.h>
+#include <mesa/main/fbobject.h>
+#include <mesa/main/context.h>
+#include <mesa/program/program.h>
+#include <glsl/program.h>
+
+/* XXX: We need this to keep symbols in nir.h from conflicting with the
+ * generated GEN command packing headers. We need to fix *both* to not
+ * define something as generic as LOAD.
+ */
+#undef LOAD
+
+#include <glsl/nir/nir_spirv.h>
+
+#define SPIR_V_MAGIC_NUMBER 0x07230203
+
+static void
+fail_if(int cond, const char *format, ...)
+{
+ va_list args;
+
+ if (!cond)
+ return;
+
+ va_start(args, format);
+ vfprintf(stderr, format, args);
+ va_end(args);
+
+ exit(1);
+}
+
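+/* Builds the flattened binding table map for one shader stage from the
+ * pipeline layout. Fragment shaders reserve the first MAX_RTS entries for
+ * render targets, hence the bias.
+ */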
+static VkResult
+set_binding_table_layout(struct brw_stage_prog_data *prog_data,
+ struct anv_pipeline *pipeline, uint32_t stage)
+{
+ uint32_t bias, count, k, *map;
+ struct anv_pipeline_layout *layout = pipeline->layout;
+
+ /* Having no layout is valid for shaders that don't bind any resources. */
+ if (pipeline->layout == NULL)
+ return VK_SUCCESS;
+
+ if (stage == VK_SHADER_STAGE_FRAGMENT)
+ bias = MAX_RTS;
+ else
+ bias = 0;
+
+ count = layout->stage[stage].surface_count;
+ prog_data->map_entries =
+ (uint32_t *) malloc(count * sizeof(prog_data->map_entries[0]));
+ if (prog_data->map_entries == NULL)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ k = bias;
+ map = prog_data->map_entries;
+ for (uint32_t i = 0; i < layout->num_sets; i++) {
+ prog_data->bind_map[i].index = map;
+ for (uint32_t j = 0; j < layout->set[i].layout->stage[stage].surface_count; j++)
+ *map++ = k++;
+
+ prog_data->bind_map[i].index_count =
+ layout->set[i].layout->stage[stage].surface_count;
+ }
+
+ return VK_SUCCESS;
+}
+
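+/* Copies a compiled kernel into the pipeline's program stream and returns
+ * its offset within the stream.
+ */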
+static uint32_t
+upload_kernel(struct anv_pipeline *pipeline, const void *data, size_t size)
+{
+ struct anv_state state =
+ anv_state_stream_alloc(&pipeline->program_stream, size, 64);
+
+ assert(size < pipeline->program_stream.block_pool->block_size);
+
+ memcpy(state.map, data, size);
+
+ return state.offset;
+}
+
+static void
+brw_vs_populate_key(struct brw_context *brw,
+ struct brw_vertex_program *vp,
+ struct brw_vs_prog_key *key)
+{
+ struct gl_context *ctx = &brw->ctx;
+ /* BRW_NEW_VERTEX_PROGRAM */
+ struct gl_program *prog = (struct gl_program *) vp;
+
+ memset(key, 0, sizeof(*key));
+
+ /* Just upload the program verbatim for now. Always send it all
+ * the inputs it asks for, whether they are varying or not.
+ */
+ key->base.program_string_id = vp->id;
+ brw_setup_vue_key_clip_info(brw, &key->base,
+ vp->program.Base.UsesClipDistanceOut);
+
+ /* _NEW_POLYGON */
+ if (brw->gen < 6) {
+ key->copy_edgeflag = (ctx->Polygon.FrontMode != GL_FILL ||
+ ctx->Polygon.BackMode != GL_FILL);
+ }
+
+ if (prog->OutputsWritten & (VARYING_BIT_COL0 | VARYING_BIT_COL1 |
+ VARYING_BIT_BFC0 | VARYING_BIT_BFC1)) {
+ /* _NEW_LIGHT | _NEW_BUFFERS */
+ key->clamp_vertex_color = ctx->Light._ClampVertexColor;
+ }
+
+ /* _NEW_POINT */
+ if (brw->gen < 6 && ctx->Point.PointSprite) {
+ for (int i = 0; i < 8; i++) {
+ if (ctx->Point.CoordReplace[i])
+ key->point_coord_replace |= (1 << i);
+ }
+ }
+
+ /* _NEW_TEXTURE */
+ brw_populate_sampler_prog_key_data(ctx, prog, brw->vs.base.sampler_count,
+ &key->base.tex);
+}
+
+static bool
+really_do_vs_prog(struct brw_context *brw,
+ struct gl_shader_program *prog,
+ struct brw_vertex_program *vp,
+ struct brw_vs_prog_key *key, struct anv_pipeline *pipeline)
+{
+ GLuint program_size;
+ const GLuint *program;
+ struct brw_vs_prog_data *prog_data = &pipeline->vs_prog_data;
+ struct brw_stage_prog_data *stage_prog_data = &prog_data->base.base;
+ void *mem_ctx;
+ struct gl_shader *vs = NULL;
+
+ if (prog)
+ vs = prog->_LinkedShaders[MESA_SHADER_VERTEX];
+
+ memset(prog_data, 0, sizeof(*prog_data));
+
+ mem_ctx = ralloc_context(NULL);
+
+ /* Allocate the references to the uniforms that will end up in the
+ * prog_data associated with the compiled program, and which will be freed
+ * by the state cache.
+ */
+ int param_count;
+ if (vs) {
+ /* We add padding around uniform values below vec4 size, with the worst
+ * case being a float value that gets blown up to a vec4, so be
+ * conservative here.
+ */
+ param_count = vs->num_uniform_components * 4;
+
+ } else {
+ param_count = vp->program.Base.Parameters->NumParameters * 4;
+ }
+ /* vec4_visitor::setup_uniform_clipplane_values() also uploads user clip
+ * planes as uniforms.
+ */
+ param_count += key->base.nr_userclip_plane_consts * 4;
+
+ /* nr_params is set here NOT to the size of the param and pull_param
+ * arrays, but to the number of uniform components vec4_visitor needs;
+ * vec4_visitor::setup_uniforms() will set it back to a proper value.
+ */
+ stage_prog_data->nr_params = ALIGN(param_count, 4) / 4;
+ if (vs) {
+ stage_prog_data->nr_params += vs->num_samplers;
+ }
+
+ GLbitfield64 outputs_written = vp->program.Base.OutputsWritten;
+ prog_data->inputs_read = vp->program.Base.InputsRead;
+
+ if (key->copy_edgeflag) {
+ outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);
+ prog_data->inputs_read |= VERT_BIT_EDGEFLAG;
+ }
+
+ if (brw->gen < 6) {
+ /* Put dummy slots into the VUE for the SF to put the replaced
+ * point sprite coords in. We shouldn't need these dummy slots,
+ * which take up precious URB space, but it would mean that the SF
+ * doesn't get nice aligned pairs of input coords into output
+ * coords, which would be a pain to handle.
+ */
+ for (int i = 0; i < 8; i++) {
+ if (key->point_coord_replace & (1 << i))
+ outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
+ }
+
+ /* if back colors are written, allocate slots for front colors too */
+ if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
+ outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0);
+ if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
+ outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1);
+ }
+
+ /* In order for legacy clipping to work, we need to populate the clip
+ * distance varying slots whenever clipping is enabled, even if the vertex
+ * shader doesn't write to gl_ClipDistance.
+ */
+ if (key->base.userclip_active) {
+ outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
+ outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
+ }
+
+ brw_compute_vue_map(brw->intelScreen->devinfo,
+ &prog_data->base.vue_map, outputs_written);
+
+ set_binding_table_layout(&prog_data->base.base, pipeline,
+ VK_SHADER_STAGE_VERTEX);
+
+ /* Emit GEN4 code.
+ */
+ program = brw_vs_emit(brw, mem_ctx, key, prog_data, &vp->program,
+ prog, &program_size);
+ if (program == NULL) {
+ ralloc_free(mem_ctx);
+ return false;
+ }
+
+ const uint32_t offset = upload_kernel(pipeline, program, program_size);
+ if (prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8) {
+ pipeline->vs_simd8 = offset;
+ pipeline->vs_vec4 = NO_KERNEL;
+ } else {
+ pipeline->vs_simd8 = NO_KERNEL;
+ pipeline->vs_vec4 = offset;
+ }
+
+ ralloc_free(mem_ctx);
+
+ return true;
+}
+
+void brw_wm_populate_key(struct brw_context *brw,
+ struct brw_fragment_program *fp,
+ struct brw_wm_prog_key *key)
+{
+ struct gl_context *ctx = &brw->ctx;
+ struct gl_program *prog = (struct gl_program *) brw->fragment_program;
+ GLuint lookup = 0;
+ GLuint line_aa;
+ bool program_uses_dfdy = fp->program.UsesDFdy;
+ struct gl_framebuffer draw_buffer;
+ bool multisample_fbo;
+
+ memset(key, 0, sizeof(*key));
+
+ for (int i = 0; i < MAX_SAMPLERS; i++) {
+ /* Assume color sampler, no swizzling. */
+ key->tex.swizzles[i] = SWIZZLE_XYZW;
+ }
+
+ /* A non-zero framebuffer name indicates that the framebuffer was created by
+ * the user rather than the window system. */
+ draw_buffer.Name = 1;
+ draw_buffer.Visual.samples = 1;
+ draw_buffer._NumColorDrawBuffers = 1;
+ draw_buffer.Width = 400;
+ draw_buffer.Height = 400;
+ ctx->DrawBuffer = &draw_buffer;
+
+ multisample_fbo = ctx->DrawBuffer->Visual.samples > 1;
+
+ /* Build the index for table lookup
+ */
+ if (brw->gen < 6) {
+ /* _NEW_COLOR */
+ if (fp->program.UsesKill || ctx->Color.AlphaEnabled)
+ lookup |= IZ_PS_KILL_ALPHATEST_BIT;
+
+ if (fp->program.Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
+ lookup |= IZ_PS_COMPUTES_DEPTH_BIT;
+
+ /* _NEW_DEPTH */
+ if (ctx->Depth.Test)
+ lookup |= IZ_DEPTH_TEST_ENABLE_BIT;
+
+ if (ctx->Depth.Test && ctx->Depth.Mask) /* ?? */
+ lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;
+
+ /* _NEW_STENCIL | _NEW_BUFFERS */
+ if (ctx->Stencil._Enabled) {
+ lookup |= IZ_STENCIL_TEST_ENABLE_BIT;
+
+ if (ctx->Stencil.WriteMask[0] ||
+ ctx->Stencil.WriteMask[ctx->Stencil._BackFace])
+ lookup |= IZ_STENCIL_WRITE_ENABLE_BIT;
+ }
+ key->iz_lookup = lookup;
+ }
+
+ line_aa = AA_NEVER;
+
+ /* _NEW_LINE, _NEW_POLYGON, BRW_NEW_REDUCED_PRIMITIVE */
+ if (ctx->Line.SmoothFlag) {
+ if (brw->reduced_primitive == GL_LINES) {
+ line_aa = AA_ALWAYS;
+ }
+ else if (brw->reduced_primitive == GL_TRIANGLES) {
+ if (ctx->Polygon.FrontMode == GL_LINE) {
+ line_aa = AA_SOMETIMES;
+
+ if (ctx->Polygon.BackMode == GL_LINE ||
+ (ctx->Polygon.CullFlag &&
+ ctx->Polygon.CullFaceMode == GL_BACK))
+ line_aa = AA_ALWAYS;
+ }
+ else if (ctx->Polygon.BackMode == GL_LINE) {
+ line_aa = AA_SOMETIMES;
+
+ if ((ctx->Polygon.CullFlag &&
+ ctx->Polygon.CullFaceMode == GL_FRONT))
+ line_aa = AA_ALWAYS;
+ }
+ }
+ }
+
+ key->line_aa = line_aa;
+
+ /* _NEW_HINT */
+ key->high_quality_derivatives =
+ ctx->Hint.FragmentShaderDerivative == GL_NICEST;
+
+ if (brw->gen < 6)
+ key->stats_wm = brw->stats_wm;
+
+ /* _NEW_LIGHT */
+ key->flat_shade = (ctx->Light.ShadeModel == GL_FLAT);
+
+ /* _NEW_FRAG_CLAMP | _NEW_BUFFERS */
+ key->clamp_fragment_color = ctx->Color._ClampFragmentColor;
+
+ /* _NEW_TEXTURE */
+ brw_populate_sampler_prog_key_data(ctx, prog, brw->wm.base.sampler_count,
+ &key->tex);
+
+ /* _NEW_BUFFERS */
+ /*
+ * Include the draw buffer origin and height so that we can calculate
+ * fragment position values relative to the bottom left of the drawable,
+ * from the incoming screen origin relative position we get as part of our
+ * payload.
+ *
+ * This is only needed for the WM_WPOSXY opcode when the fragment program
+ * uses the gl_FragCoord input.
+ *
+ * We could avoid recompiling by including this as a constant referenced by
+ * our program, but if we were to do that it would also be nice to handle
+ * getting that constant updated at batchbuffer submit time (when we
+ * hold the lock and know where the buffer really is) rather than at emit
+ * time when we don't hold the lock and are just guessing. We could also
+ * just avoid using this as key data if the program doesn't use
+ * fragment.position.
+ *
+ * For DRI2 the origin_x/y will always be (0,0) but we still need the
+ * drawable height in order to invert the Y axis.
+ */
+ if (fp->program.Base.InputsRead & VARYING_BIT_POS) {
+ key->drawable_height = ctx->DrawBuffer->Height;
+ }
+
+ if ((fp->program.Base.InputsRead & VARYING_BIT_POS) || program_uses_dfdy) {
+ key->render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
+ }
+
+ /* _NEW_BUFFERS */
+ key->nr_color_regions = ctx->DrawBuffer->_NumColorDrawBuffers;
+
+ /* _NEW_MULTISAMPLE, _NEW_COLOR, _NEW_BUFFERS */
+ key->replicate_alpha = ctx->DrawBuffer->_NumColorDrawBuffers > 1 &&
+ (ctx->Multisample.SampleAlphaToCoverage || ctx->Color.AlphaEnabled);
+
+ /* _NEW_BUFFERS _NEW_MULTISAMPLE */
+ /* Ignore sample qualifier while computing this flag. */
+ key->persample_shading =
+ _mesa_get_min_invocations_per_fragment(ctx, &fp->program, true) > 1;
+ if (key->persample_shading)
+ key->persample_2x = ctx->DrawBuffer->Visual.samples == 2;
+
+ key->compute_pos_offset =
+ _mesa_get_min_invocations_per_fragment(ctx, &fp->program, false) > 1 &&
+ fp->program.Base.SystemValuesRead & SYSTEM_BIT_SAMPLE_POS;
+
+ key->compute_sample_id =
+ multisample_fbo &&
+ ctx->Multisample.Enabled &&
+ (fp->program.Base.SystemValuesRead & SYSTEM_BIT_SAMPLE_ID);
+
+ /* BRW_NEW_VUE_MAP_GEOM_OUT */
+ if (brw->gen < 6 || _mesa_bitcount_64(fp->program.Base.InputsRead &
+ BRW_FS_VARYING_INPUT_MASK) > 16)
+ key->input_slots_valid = brw->vue_map_geom_out.slots_valid;
+
+
+ /* _NEW_COLOR | _NEW_BUFFERS */
+ /* Pre-gen6, the hardware alpha test always used each render
+ * target's alpha to do alpha test, as opposed to render target 0's alpha
+ * like GL requires. Fix that by building the alpha test into the
+ * shader, and we'll skip enabling the fixed function alpha test.
+ */
+ if (brw->gen < 6 && ctx->DrawBuffer->_NumColorDrawBuffers > 1 && ctx->Color.AlphaEnabled) {
+ key->alpha_test_func = ctx->Color.AlphaFunc;
+ key->alpha_test_ref = ctx->Color.AlphaRef;
+ }
+
+ /* The unique fragment program ID */
+ key->program_string_id = fp->id;
+
+ ctx->DrawBuffer = NULL;
+}
+
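+/* Translates the fragment program's depth layout qualifier into the
+ * hardware's computed-depth (BRW_PSCDEPTH_*) mode.
+ */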
+static uint8_t
+computed_depth_mode(struct gl_fragment_program *fp)
+{
+ if (fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
+ switch (fp->FragDepthLayout) {
+ case FRAG_DEPTH_LAYOUT_NONE:
+ case FRAG_DEPTH_LAYOUT_ANY:
+ return BRW_PSCDEPTH_ON;
+ case FRAG_DEPTH_LAYOUT_GREATER:
+ return BRW_PSCDEPTH_ON_GE;
+ case FRAG_DEPTH_LAYOUT_LESS:
+ return BRW_PSCDEPTH_ON_LE;
+ case FRAG_DEPTH_LAYOUT_UNCHANGED:
+ return BRW_PSCDEPTH_OFF;
+ }
+ }
+ return BRW_PSCDEPTH_OFF;
+}
+
+static bool
+really_do_wm_prog(struct brw_context *brw,
+ struct gl_shader_program *prog,
+ struct brw_fragment_program *fp,
+ struct brw_wm_prog_key *key, struct anv_pipeline *pipeline)
+{
+ struct gl_context *ctx = &brw->ctx;
+ void *mem_ctx = ralloc_context(NULL);
+ struct brw_wm_prog_data *prog_data = &pipeline->wm_prog_data;
+ struct gl_shader *fs = NULL;
+ unsigned int program_size;
+ const uint32_t *program;
+
+ if (prog)
+ fs = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
+
+ memset(prog_data, 0, sizeof(*prog_data));
+
+ /* key->alpha_test_func means simulating alpha testing via discards,
+ * so the shader definitely kills pixels.
+ */
+ prog_data->uses_kill = fp->program.UsesKill || key->alpha_test_func;
+
+ prog_data->computed_depth_mode = computed_depth_mode(&fp->program);
+
+ /* Allocate the references to the uniforms that will end up in the
+ * prog_data associated with the compiled program, and which will be freed
+ * by the state cache.
+ */
+ int param_count;
+ if (fs) {
+ param_count = fs->num_uniform_components;
+ } else {
+ param_count = fp->program.Base.Parameters->NumParameters * 4;
+ }
+ /* The backend also sometimes adds params for texture size. */
+ param_count += 2 * ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits;
+ prog_data->base.param =
+ rzalloc_array(NULL, const gl_constant_value *, param_count);
+ prog_data->base.pull_param =
+ rzalloc_array(NULL, const gl_constant_value *, param_count);
+ prog_data->base.nr_params = param_count;
+
+ prog_data->barycentric_interp_modes =
+ brw_compute_barycentric_interp_modes(brw, key->flat_shade,
+ key->persample_shading,
+ &fp->program);
+
+ set_binding_table_layout(&prog_data->base, pipeline,
+ VK_SHADER_STAGE_FRAGMENT);
+ /* This needs to come after shader time and pull constant entries, but we
+ * don't have those set up now, so just put it after the layout entries.
+ */
+ prog_data->binding_table.render_target_start = 0;
+
+ program = brw_wm_fs_emit(brw, mem_ctx, key, prog_data,
+ &fp->program, prog, &program_size);
+ if (program == NULL) {
+ ralloc_free(mem_ctx);
+ return false;
+ }
+
+ uint32_t offset = upload_kernel(pipeline, program, program_size);
+
+ if (prog_data->no_8)
+ pipeline->ps_simd8 = NO_KERNEL;
+ else
+ pipeline->ps_simd8 = offset;
+
+ if (prog_data->no_8 || prog_data->prog_offset_16) {
+ pipeline->ps_simd16 = offset + prog_data->prog_offset_16;
+ } else {
+ pipeline->ps_simd16 = NO_KERNEL;
+ }
+
+ ralloc_free(mem_ctx);
+
+ return true;
+}
+
+static void
+brw_gs_populate_key(struct brw_context *brw,
+ struct anv_pipeline *pipeline,
+ struct brw_geometry_program *gp,
+ struct brw_gs_prog_key *key)
+{
+ struct gl_context *ctx = &brw->ctx;
+ struct brw_stage_state *stage_state = &brw->gs.base;
+ struct gl_program *prog = &gp->program.Base;
+
+ memset(key, 0, sizeof(*key));
+
+ key->base.program_string_id = gp->id;
+ brw_setup_vue_key_clip_info(brw, &key->base,
+ gp->program.Base.UsesClipDistanceOut);
+
+ /* _NEW_TEXTURE */
+ brw_populate_sampler_prog_key_data(ctx, prog, stage_state->sampler_count,
+ &key->base.tex);
+
+ struct brw_vs_prog_data *prog_data = &pipeline->vs_prog_data;
+
+ /* BRW_NEW_VUE_MAP_VS */
+ key->input_varyings = prog_data->base.vue_map.slots_valid;
+}
+
+static bool
+really_do_gs_prog(struct brw_context *brw,
+ struct gl_shader_program *prog,
+ struct brw_geometry_program *gp,
+ struct brw_gs_prog_key *key, struct anv_pipeline *pipeline)
+{
+ struct brw_gs_compile_output output;
+
+ /* FIXME: We pass the bind map to the compile in the output struct. Need
+ * something better. */
+ set_binding_table_layout(&output.prog_data.base.base,
+ pipeline, VK_SHADER_STAGE_GEOMETRY);
+
+ brw_compile_gs_prog(brw, prog, gp, key, &output);
+
+ pipeline->gs_vec4 = upload_kernel(pipeline, output.program, output.program_size);
+ pipeline->gs_vertex_count = gp->program.VerticesIn;
+
+ ralloc_free(output.mem_ctx);
+
+ return true;
+}
+
+static bool
+brw_codegen_cs_prog(struct brw_context *brw,
+ struct gl_shader_program *prog,
+ struct brw_compute_program *cp,
+ struct brw_cs_prog_key *key, struct anv_pipeline *pipeline)
+{
+ struct gl_context *ctx = &brw->ctx;
+ const GLuint *program;
+ void *mem_ctx = ralloc_context(NULL);
+ GLuint program_size;
+ struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
+
+ struct gl_shader *cs = prog->_LinkedShaders[MESA_SHADER_COMPUTE];
+ assert(cs);
+
+ memset(prog_data, 0, sizeof(*prog_data));
+
+ set_binding_table_layout(&prog_data->base, pipeline, VK_SHADER_STAGE_COMPUTE);
+
+ /* Allocate the references to the uniforms that will end up in the
+ * prog_data associated with the compiled program, and which will be freed
+ * by the state cache.
+ */
+ int param_count = cs->num_uniform_components;
+
+ /* The backend also sometimes adds params for texture size. */
+ param_count += 2 * ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits;
+ prog_data->base.param =
+ rzalloc_array(NULL, const gl_constant_value *, param_count);
+ prog_data->base.pull_param =
+ rzalloc_array(NULL, const gl_constant_value *, param_count);
+ prog_data->base.nr_params = param_count;
+
+ program = brw_cs_emit(brw, mem_ctx, key, prog_data,
+ &cp->program, prog, &program_size);
+ if (program == NULL) {
+ ralloc_free(mem_ctx);
+ return false;
+ }
+
+ if (unlikely(INTEL_DEBUG & DEBUG_CS))
+ fprintf(stderr, "\n");
+
+ pipeline->cs_simd = upload_kernel(pipeline, program, program_size);
+
+ ralloc_free(mem_ctx);
+
+ return true;
+}
+
+static void
+brw_cs_populate_key(struct brw_context *brw,
+ struct brw_compute_program *bcp, struct brw_cs_prog_key *key)
+{
+ memset(key, 0, sizeof(*key));
+
+ /* The unique compute program ID */
+ key->program_string_id = bcp->id;
+}
+
+static void
+fail_on_compile_error(int status, const char *msg)
+{
+ int source, line, column;
+ char error[256];
+
+ if (status)
+ return;
+
+ if (sscanf(msg, "%d:%d(%d): error: %255[^\n]", &source, &line, &column, error) == 4)
+ fail_if(!status, "%d:%s\n", line, error);
+ else
+ fail_if(!status, "%s\n", msg);
+}
+
+struct anv_compiler {
+ struct anv_device *device;
+ struct intel_screen *screen;
+ struct brw_context *brw;
+ struct gl_pipeline_object pipeline;
+};
+
+extern "C" {
+
+struct anv_compiler *
+anv_compiler_create(struct anv_device *device)
+{
+ const struct brw_device_info *devinfo = &device->info;
+ struct anv_compiler *compiler;
+ struct gl_context *ctx;
+
+ compiler = rzalloc(NULL, struct anv_compiler);
+ if (compiler == NULL)
+ return NULL;
+
+ compiler->screen = rzalloc(compiler, struct intel_screen);
+ if (compiler->screen == NULL)
+ goto fail;
+
+ compiler->brw = rzalloc(compiler, struct brw_context);
+ if (compiler->brw == NULL)
+ goto fail;
+
+ compiler->device = device;
+
+ compiler->brw->optionCache.info = NULL;
+ compiler->brw->bufmgr = NULL;
+ compiler->brw->gen = devinfo->gen;
+ compiler->brw->is_g4x = devinfo->is_g4x;
+ compiler->brw->is_baytrail = devinfo->is_baytrail;
+ compiler->brw->is_haswell = devinfo->is_haswell;
+ compiler->brw->is_cherryview = devinfo->is_cherryview;
+
+ /* We need this at least for CS, which will check brw->max_cs_threads
+ * against the work group size. */
+ compiler->brw->max_vs_threads = devinfo->max_vs_threads;
+ compiler->brw->max_hs_threads = devinfo->max_hs_threads;
+ compiler->brw->max_ds_threads = devinfo->max_ds_threads;
+ compiler->brw->max_gs_threads = devinfo->max_gs_threads;
+ compiler->brw->max_wm_threads = devinfo->max_wm_threads;
+ compiler->brw->max_cs_threads = devinfo->max_cs_threads;
+ compiler->brw->urb.size = devinfo->urb.size;
+ compiler->brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
+ compiler->brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
+ compiler->brw->urb.max_hs_entries = devinfo->urb.max_hs_entries;
+ compiler->brw->urb.max_ds_entries = devinfo->urb.max_ds_entries;
+ compiler->brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;
+
+ compiler->brw->intelScreen = compiler->screen;
+ compiler->screen->devinfo = &device->info;
+
+ brw_process_intel_debug_variable(compiler->screen);
+
+ compiler->screen->compiler = brw_compiler_create(compiler, &device->info);
+
+ ctx = &compiler->brw->ctx;
+ _mesa_init_shader_object_functions(&ctx->Driver);
+
+ _mesa_init_constants(&ctx->Const, API_OPENGL_CORE);
+
+ brw_initialize_context_constants(compiler->brw);
+
+ intelInitExtensions(ctx);
+
+ /* Set dd::NewShader */
+ brwInitFragProgFuncs(&ctx->Driver);
+
+ ctx->_Shader = &compiler->pipeline;
+
+ compiler->brw->precompile = false;
+
+ return compiler;
+
+ fail:
+ ralloc_free(compiler);
+ return NULL;
+}
+
+void
+anv_compiler_destroy(struct anv_compiler *compiler)
+{
+ _mesa_free_errors_data(&compiler->brw->ctx);
+ ralloc_free(compiler);
+}
+
+/* From gen7_urb.c */
+
+/* FIXME: Add to struct intel_device_info */
+
+static const int gen8_push_size = 32 * 1024;
+
+static void
+gen7_compute_urb_partition(struct anv_pipeline *pipeline)
+{
+ const struct brw_device_info *devinfo = &pipeline->device->info;
+ bool vs_present = pipeline->vs_simd8 != NO_KERNEL;
+ unsigned vs_size = vs_present ? pipeline->vs_prog_data.base.urb_entry_size : 1;
+ unsigned vs_entry_size_bytes = vs_size * 64;
+ bool gs_present = pipeline->gs_vec4 != NO_KERNEL;
+ unsigned gs_size = gs_present ? pipeline->gs_prog_data.base.urb_entry_size : 1;
+ unsigned gs_entry_size_bytes = gs_size * 64;
+
+ /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
+ *
+ * VS Number of URB Entries must be divisible by 8 if the VS URB Entry
+ * Allocation Size is less than 9 512-bit URB entries.
+ *
+ * Similar text exists for GS.
+ */
+ unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
+ unsigned gs_granularity = (gs_size < 9) ? 8 : 1;
+
+ /* URB allocations must be done in 8k chunks. */
+ unsigned chunk_size_bytes = 8192;
+
+ /* Determine the size of the URB in chunks. */
+ unsigned urb_chunks = devinfo->urb.size * 1024 / chunk_size_bytes;
+
+ /* Reserve space for push constants */
+ unsigned push_constant_bytes = gen8_push_size;
+ unsigned push_constant_chunks =
+ push_constant_bytes / chunk_size_bytes;
+
+ /* Initially, assign each stage the minimum amount of URB space it needs,
+ * and make a note of how much additional space it "wants" (the amount of
+ * additional space it could actually make use of).
+ */
+
+ /* VS has a lower limit on the number of URB entries */
+ unsigned vs_chunks =
+ ALIGN(devinfo->urb.min_vs_entries * vs_entry_size_bytes,
+ chunk_size_bytes) / chunk_size_bytes;
+ unsigned vs_wants =
+ ALIGN(devinfo->urb.max_vs_entries * vs_entry_size_bytes,
+ chunk_size_bytes) / chunk_size_bytes - vs_chunks;
+
+ unsigned gs_chunks = 0;
+ unsigned gs_wants = 0;
+ if (gs_present) {
+ /* There are two constraints on the minimum amount of URB space we can
+ * allocate:
+ *
+ * (1) We need room for at least 2 URB entries, since we always operate
+ * the GS in DUAL_OBJECT mode.
+ *
+ * (2) We can't allocate less than nr_gs_entries_granularity.
+ */
+ gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
+ chunk_size_bytes) / chunk_size_bytes;
+ gs_wants =
+ ALIGN(devinfo->urb.max_gs_entries * gs_entry_size_bytes,
+ chunk_size_bytes) / chunk_size_bytes - gs_chunks;
+ }
+
+ /* There should always be enough URB space to satisfy the minimum
+ * requirements of each stage.
+ */
+ unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
+ assert(total_needs <= urb_chunks);
+
+ /* Mete out remaining space (if any) in proportion to "wants". */
+ unsigned total_wants = vs_wants + gs_wants;
+ unsigned remaining_space = urb_chunks - total_needs;
+ if (remaining_space > total_wants)
+ remaining_space = total_wants;
+ if (remaining_space > 0) {
+ unsigned vs_additional = (unsigned)
+ round(vs_wants * (((double) remaining_space) / total_wants));
+ vs_chunks += vs_additional;
+ remaining_space -= vs_additional;
+ gs_chunks += remaining_space;
+ }
+
+ /* Sanity check that we haven't over-allocated. */
+ assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);
+
+ /* Finally, compute the number of entries that can fit in the space
+ * allocated to each stage.
+ */
+ unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
+ unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;
+
+ /* Since we rounded up when computing *_wants, this may be slightly more
+ * than the maximum allowed amount, so correct for that.
+ */
+ nr_vs_entries = MIN2(nr_vs_entries, devinfo->urb.max_vs_entries);
+ nr_gs_entries = MIN2(nr_gs_entries, devinfo->urb.max_gs_entries);
+
+ /* Ensure that we program a multiple of the granularity. */
+ nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
+ nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);
+
+ /* Finally, sanity check to make sure we have at least the minimum number
+ * of entries needed for each stage.
+ */
+ assert(nr_vs_entries >= devinfo->urb.min_vs_entries);
+ if (gs_present)
+ assert(nr_gs_entries >= 2);
+
+ /* Lay out the URB in the following order:
+ * - push constants
+ * - VS
+ * - GS
+ */
+ pipeline->urb.vs_start = push_constant_chunks;
+ pipeline->urb.vs_size = vs_size;
+ pipeline->urb.nr_vs_entries = nr_vs_entries;
+
+ pipeline->urb.gs_start = push_constant_chunks + vs_chunks;
+ pipeline->urb.gs_size = gs_size;
+ pipeline->urb.nr_gs_entries = nr_gs_entries;
+}
+
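+/* Table mapping Vulkan shader stages (the array index is the
+ * VK_SHADER_STAGE_* value) to the GL shader type token, the gl_shader_stage
+ * enum and a human-readable name. The tessellation entries use
+ * (gl_shader_stage)-1 since those stages are not supported yet.
+ */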
+static const struct {
+ uint32_t token;
+ gl_shader_stage stage;
+ const char *name;
+} stage_info[] = {
+ { GL_VERTEX_SHADER, MESA_SHADER_VERTEX, "vertex" },
+ { GL_TESS_CONTROL_SHADER, (gl_shader_stage)-1, "tess control" },
+ { GL_TESS_EVALUATION_SHADER, (gl_shader_stage)-1, "tess evaluation" },
+ { GL_GEOMETRY_SHADER, MESA_SHADER_GEOMETRY, "geometry" },
+ { GL_FRAGMENT_SHADER, MESA_SHADER_FRAGMENT, "fragment" },
+ { GL_COMPUTE_SHADER, MESA_SHADER_COMPUTE, "compute" },
+};
+
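+/* First three 32-bit words of a SPIR-V module header: the magic number, the
+ * SPIR-V version and the generator's magic number.
+ */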
+struct spirv_header {
+ uint32_t magic;
+ uint32_t version;
+ uint32_t gen_magic;
+};
+
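+/* If the module data is GLSL source (either plain text, or text hidden behind
+ * the LunarG 12-byte back-door header, which reports a SPIR-V version of 0),
+ * return a pointer to the start of the source; return NULL for genuine
+ * SPIR-V.
+ */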
+static const char *
+src_as_glsl(const char *data)
+{
+ const struct spirv_header *as_spirv = (const struct spirv_header *)data;
+
+ /* Check alignment */
+ if ((intptr_t)data & 0x3) {
+ return data;
+ }
+
+ if (as_spirv->magic == SPIR_V_MAGIC_NUMBER) {
+ /* LunarG back-door */
+ if (as_spirv->version == 0)
+ return data + 12;
+ else
+ return NULL;
+ } else {
+ return data;
+ }
+}
+
+static void
+anv_compile_shader_glsl(struct anv_compiler *compiler,
+ struct gl_shader_program *program,
+ struct anv_pipeline *pipeline, uint32_t stage)
+{
+ struct brw_context *brw = compiler->brw;
+ struct gl_shader *shader;
+ int name = 0;
+
+ shader = brw_new_shader(&brw->ctx, name, stage_info[stage].token);
+ fail_if(shader == NULL,
+ "failed to create %s shader\n", stage_info[stage].name);
+
+ shader->Source = strdup(src_as_glsl(pipeline->shaders[stage]->module->data));
+ _mesa_glsl_compile_shader(&brw->ctx, shader, false, false);
+ fail_on_compile_error(shader->CompileStatus, shader->InfoLog);
+
+ program->Shaders[program->NumShaders] = shader;
+ program->NumShaders++;
+}
+
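+/* Mirror the NIR shader's declared inputs and outputs into the gl_program
+ * bitfields (InputsRead / OutputsWritten) that the brw backend consumes.
+ */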
+static void
+setup_nir_io(struct gl_program *prog,
+ nir_shader *shader)
+{
+ foreach_list_typed(nir_variable, var, node, &shader->inputs) {
+ prog->InputsRead |= BITFIELD64_BIT(var->data.location);
+ }
+
+ foreach_list_typed(nir_variable, var, node, &shader->outputs) {
+ prog->OutputsWritten |= BITFIELD64_BIT(var->data.location);
+ }
+}
+
+static void
+anv_compile_shader_spirv(struct anv_compiler *compiler,
+ struct gl_shader_program *program,
+ struct anv_pipeline *pipeline, uint32_t stage)
+{
+ struct brw_context *brw = compiler->brw;
+ struct anv_shader *shader = pipeline->shaders[stage];
+ struct gl_shader *mesa_shader;
+ int name = 0;
+
+ mesa_shader = brw_new_shader(&brw->ctx, name, stage_info[stage].token);
+ fail_if(mesa_shader == NULL,
+ "failed to create %s shader\n", stage_info[stage].name);
+
+ switch (stage) {
+ case VK_SHADER_STAGE_VERTEX:
+ mesa_shader->Program = &rzalloc(mesa_shader, struct brw_vertex_program)->program.Base;
+ break;
+ case VK_SHADER_STAGE_GEOMETRY:
+ mesa_shader->Program = &rzalloc(mesa_shader, struct brw_geometry_program)->program.Base;
+ break;
+ case VK_SHADER_STAGE_FRAGMENT:
+ mesa_shader->Program = &rzalloc(mesa_shader, struct brw_fragment_program)->program.Base;
+ break;
+ case VK_SHADER_STAGE_COMPUTE:
+ mesa_shader->Program = &rzalloc(mesa_shader, struct brw_compute_program)->program.Base;
+ break;
+ default:
+ unreachable("unsupported shader stage");
+ }
+
+ mesa_shader->Program->Parameters =
+ rzalloc(mesa_shader, struct gl_program_parameter_list);
+
+ mesa_shader->Type = stage_info[stage].token;
+ mesa_shader->Stage = stage_info[stage].stage;
+
+ assert(shader->module->size % 4 == 0);
+
+ struct gl_shader_compiler_options *glsl_options =
+ &compiler->screen->compiler->glsl_compiler_options[stage_info[stage].stage];
+
+ mesa_shader->Program->nir =
+ spirv_to_nir((uint32_t *)shader->module->data, shader->module->size / 4,
+ stage_info[stage].stage, glsl_options->NirOptions);
+ fail_if(mesa_shader->Program->nir == NULL,
+ "failed to translate SPIR-V to NIR\n");
+
+ nir_validate_shader(mesa_shader->Program->nir);
+
+ brw_process_nir(mesa_shader->Program->nir,
+ compiler->screen->devinfo,
+ NULL, mesa_shader->Stage, false);
+
+ setup_nir_io(mesa_shader->Program, mesa_shader->Program->nir);
+
+ program->Shaders[program->NumShaders] = mesa_shader;
+ program->NumShaders++;
+}
+
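+/* Record the stage's prog_data, mark the stage active and account for its
+ * scratch space: per-thread scratch times the maximum thread count for the
+ * stage, added to a running total that is aligned to 1 kB first.
+ */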
+static void
+add_compiled_stage(struct anv_pipeline *pipeline, uint32_t stage,
+ struct brw_stage_prog_data *prog_data)
+{
+ struct brw_device_info *devinfo = &pipeline->device->info;
+ uint32_t max_threads[] = {
+ [VK_SHADER_STAGE_VERTEX] = devinfo->max_vs_threads,
+ [VK_SHADER_STAGE_TESS_CONTROL] = 0,
+ [VK_SHADER_STAGE_TESS_EVALUATION] = 0,
+ [VK_SHADER_STAGE_GEOMETRY] = devinfo->max_gs_threads,
+ [VK_SHADER_STAGE_FRAGMENT] = devinfo->max_wm_threads,
+ [VK_SHADER_STAGE_COMPUTE] = devinfo->max_cs_threads,
+ };
+
+ pipeline->prog_data[stage] = prog_data;
+ pipeline->active_stages |= 1 << stage;
+ pipeline->scratch_start[stage] = pipeline->total_scratch;
+ pipeline->total_scratch =
+ align_u32(pipeline->total_scratch, 1024) +
+ prog_data->total_scratch * max_threads[stage];
+}
+
+int
+anv_compiler_run(struct anv_compiler *compiler, struct anv_pipeline *pipeline)
+{
+ struct gl_shader_program *program;
+ int name = 0;
+ struct brw_context *brw = compiler->brw;
+
+ pipeline->writes_point_size = false;
+
+ /* When we free the pipeline, we detect stages based on the NULL status
+ * of various prog_data pointers. Make them NULL by default.
+ */
+ memset(pipeline->prog_data, 0, sizeof(pipeline->prog_data));
+ memset(pipeline->scratch_start, 0, sizeof(pipeline->scratch_start));
+
+ brw->use_rep_send = pipeline->use_repclear;
+ brw->no_simd8 = pipeline->use_repclear;
+
+ program = brw->ctx.Driver.NewShaderProgram(name);
+ program->Shaders = (struct gl_shader **)
+ calloc(VK_SHADER_STAGE_NUM, sizeof(struct gl_shader *));
+ fail_if(program == NULL || program->Shaders == NULL,
+ "failed to create program\n");
+
+ bool all_spirv = true;
+ for (unsigned i = 0; i < VK_SHADER_STAGE_NUM; i++) {
+ if (pipeline->shaders[i] == NULL)
+ continue;
+
+ /* You need at least this much for "void main() { }" anyway */
+ assert(pipeline->shaders[i]->module->size >= 12);
+
+ if (src_as_glsl(pipeline->shaders[i]->module->data)) {
+ all_spirv = false;
+ break;
+ }
+
+ assert(pipeline->shaders[i]->module->size % 4 == 0);
+ }
+
+ if (all_spirv) {
+ for (unsigned i = 0; i < VK_SHADER_STAGE_NUM; i++) {
+ if (pipeline->shaders[i])
+ anv_compile_shader_spirv(compiler, program, pipeline, i);
+ }
+
+ for (unsigned i = 0; i < program->NumShaders; i++) {
+ struct gl_shader *shader = program->Shaders[i];
+ program->_LinkedShaders[shader->Stage] = shader;
+ }
+ } else {
+ for (unsigned i = 0; i < VK_SHADER_STAGE_NUM; i++) {
+ if (pipeline->shaders[i])
+ anv_compile_shader_glsl(compiler, program, pipeline, i);
+ }
+
+ _mesa_glsl_link_shader(&brw->ctx, program);
+ fail_on_compile_error(program->LinkStatus,
+ program->InfoLog);
+ }
+
+ bool success;
+ pipeline->active_stages = 0;
+ pipeline->total_scratch = 0;
+
+ if (pipeline->shaders[VK_SHADER_STAGE_VERTEX]) {
+ struct brw_vs_prog_key vs_key;
+ struct gl_vertex_program *vp = (struct gl_vertex_program *)
+ program->_LinkedShaders[MESA_SHADER_VERTEX]->Program;
+ struct brw_vertex_program *bvp = brw_vertex_program(vp);
+
+ brw_vs_populate_key(brw, bvp, &vs_key);
+
+ success = really_do_vs_prog(brw, program, bvp, &vs_key, pipeline);
+ fail_if(!success, "do_wm_prog failed\n");
+ add_compiled_stage(pipeline, VK_SHADER_STAGE_VERTEX,
+ &pipeline->vs_prog_data.base.base);
+
+ if (vp->Base.OutputsWritten & BITFIELD64_BIT(VARYING_SLOT_PSIZ))
+ pipeline->writes_point_size = true;
+ } else {
+ memset(&pipeline->vs_prog_data, 0, sizeof(pipeline->vs_prog_data));
+ pipeline->vs_simd8 = NO_KERNEL;
+ pipeline->vs_vec4 = NO_KERNEL;
+ }
+
+ if (pipeline->shaders[VK_SHADER_STAGE_GEOMETRY]) {
+ struct brw_gs_prog_key gs_key;
+ struct gl_geometry_program *gp = (struct gl_geometry_program *)
+ program->_LinkedShaders[MESA_SHADER_GEOMETRY]->Program;
+ struct brw_geometry_program *bgp = brw_geometry_program(gp);
+
+ brw_gs_populate_key(brw, pipeline, bgp, &gs_key);
+
+ success = really_do_gs_prog(brw, program, bgp, &gs_key, pipeline);
+ fail_if(!success, "do_gs_prog failed\n");
+ add_compiled_stage(pipeline, VK_SHADER_STAGE_GEOMETRY,
+ &pipeline->gs_prog_data.base.base);
+
+ if (gp->Base.OutputsWritten & BITFIELD64_BIT(VARYING_SLOT_PSIZ))
+ pipeline->writes_point_size = true;
+ } else {
+ pipeline->gs_vec4 = NO_KERNEL;
+ }
+
+ if (pipeline->shaders[VK_SHADER_STAGE_FRAGMENT]) {
+ struct brw_wm_prog_key wm_key;
+ struct gl_fragment_program *fp = (struct gl_fragment_program *)
+ program->_LinkedShaders[MESA_SHADER_FRAGMENT]->Program;
+ struct brw_fragment_program *bfp = brw_fragment_program(fp);
+
+ brw_wm_populate_key(brw, bfp, &wm_key);
+
+ success = really_do_wm_prog(brw, program, bfp, &wm_key, pipeline);
+ fail_if(!success, "do_wm_prog failed\n");
+ add_compiled_stage(pipeline, VK_SHADER_STAGE_FRAGMENT,
+ &pipeline->wm_prog_data.base);
+ }
+
+ if (pipeline->shaders[VK_SHADER_STAGE_COMPUTE]) {
+ struct brw_cs_prog_key cs_key;
+ struct gl_compute_program *cp = (struct gl_compute_program *)
+ program->_LinkedShaders[MESA_SHADER_COMPUTE]->Program;
+ struct brw_compute_program *bcp = brw_compute_program(cp);
+
+ brw_cs_populate_key(brw, bcp, &cs_key);
+
+ success = brw_codegen_cs_prog(brw, program, bcp, &cs_key, pipeline);
+ fail_if(!success, "brw_codegen_cs_prog failed\n");
+ add_compiled_stage(pipeline, VK_SHADER_STAGE_COMPUTE,
+ &pipeline->cs_prog_data.base);
+ }
+
+ /* XXX: Deleting the shader is broken with our current SPIR-V hacks. We
+ * need to fix this ASAP.
+ */
+ if (!all_spirv)
+ brw->ctx.Driver.DeleteShaderProgram(&brw->ctx, program);
+
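+ /* Grow the shared scratch BO until it is large enough to cover the
+ * combined scratch of every compiled stage.
+ */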
+ struct anv_device *device = compiler->device;
+ while (device->scratch_block_pool.bo.size < pipeline->total_scratch)
+ anv_block_pool_alloc(&device->scratch_block_pool);
+
+ gen7_compute_urb_partition(pipeline);
+
+ return 0;
+}
+
+/* This badly named function frees the data that the compiler allocated into
+ * the pipeline's prog_data structs: currently the map_entries array and the
+ * param/pull_param arrays.
+ */
+void
+anv_compiler_free(struct anv_pipeline *pipeline)
+{
+ for (uint32_t stage = 0; stage < VK_SHADER_STAGE_NUM; stage++) {
+ if (pipeline->prog_data[stage]) {
+ free(pipeline->prog_data[stage]->map_entries);
+ ralloc_free(pipeline->prog_data[stage]->param);
+ ralloc_free(pipeline->prog_data[stage]->pull_param);
+ }
+ }
+}
+
+}