SYSTEM_VALUE(helper_invocation, 1, 0)
/*
- * The format of the indices depends on the type of the load. For uniforms,
- * the first index is the base address and the second index is an offset that
- * should be added to the base address. (This way you can determine in the
- * back-end which variable is being accessed even in an array.) For inputs,
- * the one and only index corresponds to the attribute slot. UBO loads
- * have two indices the first of which is the descriptor set and the second
- * is the base address to load from.
+ * Load operations pull data from some piece of GPU memory. All load
+ * operations operate in terms of offsets into some piece of theoretical
+ * memory. Loads from externally visible memory (UBO and SSBO) simply take a
+ * byte offset as a source. Loads from opaque memory (uniforms, inputs, etc.)
+ * take a base+offset pair where the base (const_index[0]) gives the location
+ * of the start of the variable being loaded and the offset source is an
+ * offset into that variable.
*
- * UBO loads have a (possibly constant) source which is the UBO buffer index.
- * For each type of load, the _indirect variant has one additional source
- * (the second in the case of UBO's) that is the is an indirect to be added to
- * the constant address or base offset to compute the final offset.
+ * Some load operations such as UBO/SSBO load and per_vertex loads take an
+ * additional source to specify which UBO/SSBO/vertex to load from.
*
- * For vector backends, the address is in terms of one vec4, and so each array
- * element is +4 scalar components from the previous array element. For scalar
- * backends, the address is in terms of a single 4-byte float/int and arrays
- * elements begin immediately after the previous array element.
+ * The exact address type depends on the lowering pass that generates the
+ * load/store intrinsics. Typically, this is vec4 units for things such as
+ * varying slots and float units for fragment shader inputs. UBO and SSBO
+ * offsets are always in bytes.
*/
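 /*
  * For illustration, a minimal sketch (not a definitive recipe) of building a
  * UBO load under this convention with nir_builder, assuming "b" is a
  * nir_builder and "block_index" and "byte_offset" are existing nir_ssa_defs:
  *
  *    nir_intrinsic_instr *load =
  *       nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
  *    load->num_components = 4;
  *    load->src[0] = nir_src_for_ssa(block_index);
  *    load->src[1] = nir_src_for_ssa(byte_offset);
  *    nir_ssa_dest_init(&load->instr, &load->dest, load->num_components, NULL);
  *    nir_builder_instr_insert(b, &load->instr);
  */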
- #define LOAD(name, extra_srcs, indices, flags) \
- INTRINSIC(load_##name, extra_srcs, ARR(1), true, 0, 0, indices, flags) \
- INTRINSIC(load_##name##_indirect, extra_srcs + 1, ARR(1, 1), \
- true, 0, 0, indices, flags)
+ #define LOAD(name, srcs, indices, flags) \
+ INTRINSIC(load_##name, srcs, ARR(1, 1, 1, 1), true, 0, 0, indices, flags)
- LOAD(uniform, 0, 2, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
- LOAD(ubo, 1, 2, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
- LOAD(input, 0, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
- LOAD(per_vertex_input, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
- LOAD(ssbo, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE)
- LOAD(output, 0, 1, NIR_INTRINSIC_CAN_ELIMINATE)
- LOAD(per_vertex_output, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE)
- LOAD(push_constant, 0, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+ /* src[] = { offset }. const_index[] = { base } */
+ LOAD(uniform, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+ /* src[] = { buffer_index, offset }. No const_index */
+ LOAD(ubo, 2, 0, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+ /* src[] = { offset }. const_index[] = { base } */
+ LOAD(input, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+ /* src[] = { vertex, offset }. const_index[] = { base } */
+ LOAD(per_vertex_input, 2, 1, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
+ /* src[] = { buffer_index, offset }. No const_index */
+ LOAD(ssbo, 2, 0, NIR_INTRINSIC_CAN_ELIMINATE)
+ /* src[] = { offset }. const_index[] = { base } */
+ LOAD(output, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE)
+ /* src[] = { vertex, offset }. const_index[] = { base } */
+ LOAD(per_vertex_output, 2, 1, NIR_INTRINSIC_CAN_ELIMINATE)
+ /* src[] = { offset }. const_index[] = { base } */
+ LOAD(shared, 1, 1, NIR_INTRINSIC_CAN_ELIMINATE)
++/* src[] = { offset }. const_index[] = { base, size } */
++LOAD(push_constant, 1, 2, NIR_INTRINSIC_CAN_ELIMINATE | NIR_INTRINSIC_CAN_REORDER)
/*
- * Stores work the same way as loads, except now the first register input is
- * the value or array to store and the optional second input is the indirect
- * offset. SSBO stores are similar, but they accept an extra source for the
- * block index and an extra index with the writemask to use.
+ * Stores work the same way as loads, except now the first source is the value
+ * to store and the second (and possibly third) source specifies where to store
+ * the value. SSBO and shared memory stores also have a write mask as
+ * const_index[0].
*/
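 /*
  * Likewise, a minimal sketch of an SSBO store under this convention, with
  * the same assumed nir_builder "b" and SSA defs "value", "block_index" and
  * "byte_offset"; the write mask goes in const_index[0]:
  *
  *    nir_intrinsic_instr *store =
  *       nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_ssbo);
  *    store->num_components = value->num_components;
  *    store->const_index[0] = (1 << store->num_components) - 1;
  *    store->src[0] = nir_src_for_ssa(value);
  *    store->src[1] = nir_src_for_ssa(block_index);
  *    store->src[2] = nir_src_for_ssa(byte_offset);
  *    nir_builder_instr_insert(b, &store->instr);
  */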
- #define STORE(name, extra_srcs, extra_srcs_size, extra_indices, flags) \
- INTRINSIC(store_##name, 1 + extra_srcs, \
- ARR(0, extra_srcs_size, extra_srcs_size, extra_srcs_size), \
- false, 0, 0, 1 + extra_indices, flags) \
- INTRINSIC(store_##name##_indirect, 2 + extra_srcs, \
- ARR(0, 1, extra_srcs_size, extra_srcs_size), \
- false, 0, 0, 1 + extra_indices, flags)
+ #define STORE(name, srcs, indices, flags) \
+ INTRINSIC(store_##name, srcs, ARR(0, 1, 1, 1), false, 0, 0, indices, flags)
- STORE(output, 0, 0, 0, 0)
- STORE(per_vertex_output, 1, 1, 0, 0)
- STORE(ssbo, 1, 1, 1, 0)
+ /* src[] = { value, offset }. const_index[] = { base } */
+ STORE(output, 2, 1, 0)
+ /* src[] = { value, vertex, offset }. const_index[] = { base } */
+ STORE(per_vertex_output, 3, 1, 0)
+ /* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
+ STORE(ssbo, 3, 1, 0)
+ /* src[] = { value, offset }. const_index[] = { base, write_mask } */
+ STORE(shared, 2, 2, 0)
- LAST_INTRINSIC(store_ssbo_indirect)
+ LAST_INTRINSIC(store_shared)
}
/**
- * Return the indirect source for a load/store indirect intrinsic.
+ * Return the offset source for a load/store intrinsic.
*/
nir_src *
- nir_get_io_indirect_src(nir_intrinsic_instr *instr)
+ nir_get_io_offset_src(nir_intrinsic_instr *instr)
{
switch (instr->intrinsic) {
- case nir_intrinsic_load_input_indirect:
- case nir_intrinsic_load_output_indirect:
- case nir_intrinsic_load_uniform_indirect:
+ case nir_intrinsic_load_input:
+ case nir_intrinsic_load_output:
+ case nir_intrinsic_load_uniform:
return &instr->src[0];
- case nir_intrinsic_load_per_vertex_input_indirect:
- case nir_intrinsic_load_per_vertex_output_indirect:
- case nir_intrinsic_store_output_indirect:
++ case nir_intrinsic_load_ubo:
++ case nir_intrinsic_load_ssbo:
+ case nir_intrinsic_load_per_vertex_input:
+ case nir_intrinsic_load_per_vertex_output:
+ case nir_intrinsic_store_output:
return &instr->src[1];
- case nir_intrinsic_store_per_vertex_output_indirect:
++ case nir_intrinsic_store_ssbo:
+ case nir_intrinsic_store_per_vertex_output:
return &instr->src[2];
default:
return NULL;
--- /dev/null
- nir_ssa_def *index, unsigned offset, nir_ssa_def *indirect,
- struct vtn_type *type)
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jason Ekstrand (jason@jlekstrand.net)
+ *
+ */
+
+#include "spirv_to_nir_private.h"
+#include "nir_vla.h"
+#include "nir_control_flow.h"
+
+static struct vtn_ssa_value *
+vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
+ const struct glsl_type *type)
+{
+ struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
+
+ if (entry)
+ return entry->data;
+
+ struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
+ val->type = type;
+
+ switch (glsl_get_base_type(type)) {
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_DOUBLE:
+ if (glsl_type_is_vector_or_scalar(type)) {
+ unsigned num_components = glsl_get_vector_elements(val->type);
+ nir_load_const_instr *load =
+ nir_load_const_instr_create(b->shader, num_components);
+
+ for (unsigned i = 0; i < num_components; i++)
+ load->value.u[i] = constant->value.u[i];
+
+ nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
+ val->def = &load->def;
+ } else {
+ assert(glsl_type_is_matrix(type));
+ unsigned rows = glsl_get_vector_elements(val->type);
+ unsigned columns = glsl_get_matrix_columns(val->type);
+ val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);
+
+ for (unsigned i = 0; i < columns; i++) {
+ struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
+ col_val->type = glsl_get_column_type(val->type);
+ nir_load_const_instr *load =
+ nir_load_const_instr_create(b->shader, rows);
+
+ for (unsigned j = 0; j < rows; j++)
+ load->value.u[j] = constant->value.u[rows * i + j];
+
+ nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
+ col_val->def = &load->def;
+
+ val->elems[i] = col_val;
+ }
+ }
+ break;
+
+ case GLSL_TYPE_ARRAY: {
+ unsigned elems = glsl_get_length(val->type);
+ val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+ const struct glsl_type *elem_type = glsl_get_array_element(val->type);
+ for (unsigned i = 0; i < elems; i++)
+ val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
+ elem_type);
+ break;
+ }
+
+ case GLSL_TYPE_STRUCT: {
+ unsigned elems = glsl_get_length(val->type);
+ val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+ for (unsigned i = 0; i < elems; i++) {
+ const struct glsl_type *elem_type =
+ glsl_get_struct_field(val->type, i);
+ val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
+ elem_type);
+ }
+ break;
+ }
+
+ default:
+ unreachable("bad constant type");
+ }
+
+ return val;
+}
+
+struct vtn_ssa_value *
+vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
+{
+ struct vtn_value *val = vtn_untyped_value(b, value_id);
+ switch (val->value_type) {
+ case vtn_value_type_constant:
+ return vtn_const_ssa_value(b, val->constant, val->const_type);
+
+ case vtn_value_type_ssa:
+ return val->ssa;
+ default:
+ unreachable("Invalid type for an SSA value");
+ }
+}
+
+static char *
+vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
+ unsigned word_count)
+{
+ return ralloc_strndup(b, (char *)words, word_count * sizeof(*words));
+}
+
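+/* Walks the SPIR-V words in [start, end), decoding each instruction's opcode
+ * and word count and passing it to the given handler.  If the handler returns
+ * false, the walk stops and a pointer to the offending instruction is
+ * returned; otherwise the walk runs to the end.  OpNop is skipped.
+ */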
+static const uint32_t *
+vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
+ const uint32_t *end, vtn_instruction_handler handler)
+{
+ const uint32_t *w = start;
+ while (w < end) {
+ SpvOp opcode = w[0] & SpvOpCodeMask;
+ unsigned count = w[0] >> SpvWordCountShift;
+ assert(count >= 1 && w + count <= end);
+
+ if (opcode == SpvOpNop) {
+ w++;
+ continue;
+ }
+
+ if (!handler(b, opcode, w, count))
+ return w;
+
+ w += count;
+ }
+ assert(w == end);
+ return w;
+}
+
+static void
+vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpExtInstImport: {
+ struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
+ if (strcmp((const char *)&w[2], "GLSL.std.450") == 0) {
+ val->ext_handler = vtn_handle_glsl450_instruction;
+ } else {
+ assert(!"Unsupported extension");
+ }
+ break;
+ }
+
+ case SpvOpExtInst: {
+ struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
+ bool handled = val->ext_handler(b, w[4], w, count);
+ (void)handled;
+ assert(handled);
+ break;
+ }
+
+ default:
+ unreachable("Unhandled opcode");
+ }
+}
+
+static void
+_foreach_decoration_helper(struct vtn_builder *b,
+ struct vtn_value *base_value,
+ int parent_member,
+ struct vtn_value *value,
+ vtn_decoration_foreach_cb cb, void *data)
+{
+ for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
+ int member;
+ if (dec->member < 0) {
+ member = parent_member;
+ } else {
+ assert(parent_member == -1);
+ member = dec->member;
+ }
+
+ if (dec->group) {
+ assert(dec->group->value_type == vtn_value_type_decoration_group);
+ _foreach_decoration_helper(b, base_value, member, dec->group,
+ cb, data);
+ } else {
+ cb(b, base_value, member, dec, data);
+ }
+ }
+}
+
+/** Iterates (recursively if needed) over all of the decorations on a value
+ *
+ * This function iterates over all of the decorations applied to a given
+ * value. If it encounters a decoration group, it recurses into the group
+ * and iterates over all of those decorations as well.
+ */
+void
+vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
+ vtn_decoration_foreach_cb cb, void *data)
+{
+ _foreach_decoration_helper(b, value, -1, value, cb, data);
+}
+
+static void
+vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ const uint32_t *w_end = w + count;
+ const uint32_t target = w[1];
+ w += 2;
+
+ int member = -1;
+ switch (opcode) {
+ case SpvOpDecorationGroup:
+ vtn_push_value(b, target, vtn_value_type_undef);
+ break;
+
+ case SpvOpMemberDecorate:
+ member = *(w++);
+ /* fallthrough */
+ case SpvOpDecorate: {
+ struct vtn_value *val = &b->values[target];
+
+ struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
+ dec->member = member;
+ dec->decoration = *(w++);
+ dec->literals = w;
+
+ /* Link into the list */
+ dec->next = val->decoration;
+ val->decoration = dec;
+ break;
+ }
+
+ case SpvOpGroupMemberDecorate:
+ member = *(w++);
+ /* fallthrough */
+ case SpvOpGroupDecorate: {
+ struct vtn_value *group = &b->values[target];
+ assert(group->value_type == vtn_value_type_decoration_group);
+
+ for (; w < w_end; w++) {
+ struct vtn_value *val = &b->values[*w];
+ struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
+ dec->member = member;
+ dec->group = group;
+
+ /* Link into the list */
+ dec->next = val->decoration;
+ val->decoration = dec;
+ }
+ break;
+ }
+
+ default:
+ unreachable("Unhandled opcode");
+ }
+}
+
+struct member_decoration_ctx {
+ struct glsl_struct_field *fields;
+ struct vtn_type *type;
+};
+
+/* does a shallow copy of a vtn_type */
+
+static struct vtn_type *
+vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
+{
+ struct vtn_type *dest = ralloc(b, struct vtn_type);
+ dest->type = src->type;
+ dest->is_builtin = src->is_builtin;
+ if (src->is_builtin)
+ dest->builtin = src->builtin;
+
+ if (!glsl_type_is_vector_or_scalar(src->type)) {
+ switch (glsl_get_base_type(src->type)) {
+ case GLSL_TYPE_ARRAY:
+ dest->array_element = src->array_element;
+ dest->stride = src->stride;
+ break;
+
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_DOUBLE:
+ /* matrices */
+ dest->row_major = src->row_major;
+ dest->stride = src->stride;
+ break;
+
+ case GLSL_TYPE_STRUCT: {
+ unsigned elems = glsl_get_length(src->type);
+
+ dest->members = ralloc_array(b, struct vtn_type *, elems);
+ memcpy(dest->members, src->members, elems * sizeof(struct vtn_type *));
+
+ dest->offsets = ralloc_array(b, unsigned, elems);
+ memcpy(dest->offsets, src->offsets, elems * sizeof(unsigned));
+ break;
+ }
+
+ default:
+ unreachable("unhandled type");
+ }
+ }
+
+ return dest;
+}
+
+static void
+struct_member_decoration_cb(struct vtn_builder *b,
+ struct vtn_value *val, int member,
+ const struct vtn_decoration *dec, void *void_ctx)
+{
+ struct member_decoration_ctx *ctx = void_ctx;
+
+ if (member < 0)
+ return;
+
+ switch (dec->decoration) {
+ case SpvDecorationRelaxedPrecision:
+ break; /* FIXME: Do nothing with this for now. */
+ case SpvDecorationNoPerspective:
+ ctx->fields[member].interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
+ break;
+ case SpvDecorationFlat:
+ ctx->fields[member].interpolation = INTERP_QUALIFIER_FLAT;
+ break;
+ case SpvDecorationCentroid:
+ ctx->fields[member].centroid = true;
+ break;
+ case SpvDecorationSample:
+ ctx->fields[member].sample = true;
+ break;
+ case SpvDecorationLocation:
+ ctx->fields[member].location = dec->literals[0];
+ break;
+ case SpvDecorationBuiltIn:
+ ctx->type->members[member] = vtn_type_copy(b,
+ ctx->type->members[member]);
+ ctx->type->members[member]->is_builtin = true;
+ ctx->type->members[member]->builtin = dec->literals[0];
+ ctx->type->builtin_block = true;
+ break;
+ case SpvDecorationOffset:
+ ctx->type->offsets[member] = dec->literals[0];
+ break;
+ case SpvDecorationMatrixStride:
+ ctx->type->members[member]->stride = dec->literals[0];
+ break;
+ case SpvDecorationColMajor:
+ break; /* Nothing to do here. Column-major is the default. */
+ default:
+ unreachable("Unhandled member decoration");
+ }
+}
+
+static void
+type_decoration_cb(struct vtn_builder *b,
+ struct vtn_value *val, int member,
+ const struct vtn_decoration *dec, void *ctx)
+{
+ struct vtn_type *type = val->type;
+
+ if (member != -1)
+ return;
+
+ switch (dec->decoration) {
+ case SpvDecorationArrayStride:
+ type->stride = dec->literals[0];
+ break;
+ case SpvDecorationBlock:
+ type->block = true;
+ break;
+ case SpvDecorationBufferBlock:
+ type->buffer_block = true;
+ break;
+ case SpvDecorationGLSLShared:
+ case SpvDecorationGLSLPacked:
+ /* Ignore these, since we get explicit offsets anyways */
+ break;
+
+ case SpvDecorationStream:
+ assert(dec->literals[0] == 0);
+ break;
+
+ default:
+ unreachable("Unhandled type decoration");
+ }
+}
+
+static unsigned
+translate_image_format(SpvImageFormat format)
+{
+ switch (format) {
+ case SpvImageFormatUnknown: return 0; /* GL_NONE */
+ case SpvImageFormatRgba32f: return 0x8814; /* GL_RGBA32F */
+ case SpvImageFormatRgba16f: return 0x881A; /* GL_RGBA16F */
+ case SpvImageFormatR32f: return 0x822E; /* GL_R32F */
+ case SpvImageFormatRgba8: return 0x8058; /* GL_RGBA8 */
+ case SpvImageFormatRgba8Snorm: return 0x8F97; /* GL_RGBA8_SNORM */
+ case SpvImageFormatRg32f: return 0x8230; /* GL_RG32F */
+ case SpvImageFormatRg16f: return 0x822F; /* GL_RG16F */
+ case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
+ case SpvImageFormatR16f: return 0x822D; /* GL_R16F */
+ case SpvImageFormatRgba16: return 0x805B; /* GL_RGBA16 */
+ case SpvImageFormatRgb10A2: return 0x8059; /* GL_RGB10_A2 */
+ case SpvImageFormatRg16: return 0x822C; /* GL_RG16 */
+ case SpvImageFormatRg8: return 0x822B; /* GL_RG8 */
+ case SpvImageFormatR16: return 0x822A; /* GL_R16 */
+ case SpvImageFormatR8: return 0x8229; /* GL_R8 */
+ case SpvImageFormatRgba16Snorm: return 0x8F9B; /* GL_RGBA16_SNORM */
+ case SpvImageFormatRg16Snorm: return 0x8F99; /* GL_RG16_SNORM */
+ case SpvImageFormatRg8Snorm: return 0x8F95; /* GL_RG8_SNORM */
+ case SpvImageFormatR16Snorm: return 0x8F98; /* GL_R16_SNORM */
+ case SpvImageFormatR8Snorm: return 0x8F94; /* GL_R8_SNORM */
+ case SpvImageFormatRgba32i: return 0x8D82; /* GL_RGBA32I */
+ case SpvImageFormatRgba16i: return 0x8D88; /* GL_RGBA16I */
+ case SpvImageFormatRgba8i: return 0x8D8E; /* GL_RGBA8I */
+ case SpvImageFormatR32i: return 0x8235; /* GL_R32I */
+ case SpvImageFormatRg32i: return 0x823B; /* GL_RG32I */
+ case SpvImageFormatRg16i: return 0x8239; /* GL_RG16I */
+ case SpvImageFormatRg8i: return 0x8237; /* GL_RG8I */
+ case SpvImageFormatR16i: return 0x8233; /* GL_R16I */
+ case SpvImageFormatR8i: return 0x8231; /* GL_R8I */
+ case SpvImageFormatRgba32ui: return 0x8D70; /* GL_RGBA32UI */
+ case SpvImageFormatRgba16ui: return 0x8D76; /* GL_RGBA16UI */
+ case SpvImageFormatRgba8ui: return 0x8D7C; /* GL_RGBA8UI */
+ case SpvImageFormatR32ui: return 0x8236; /* GL_R32UI */
+ case SpvImageFormatRgb10a2ui: return 0x906F; /* GL_RGB10_A2UI */
+ case SpvImageFormatRg32ui: return 0x823C; /* GL_RG32UI */
+ case SpvImageFormatRg16ui: return 0x823A; /* GL_RG16UI */
+ case SpvImageFormatRg8ui: return 0x8238; /* GL_RG8UI */
+ case SpvImageFormatR16ui: return 0x8234; /* GL_R16UI */
+ case SpvImageFormatR8ui: return 0x8232; /* GL_R8UI */
+ default:
+ assert(!"Invalid image format");
+ return 0;
+ }
+}
+
+static void
+vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);
+
+ val->type = rzalloc(b, struct vtn_type);
+ val->type->is_builtin = false;
+
+ switch (opcode) {
+ case SpvOpTypeVoid:
+ val->type->type = glsl_void_type();
+ break;
+ case SpvOpTypeBool:
+ val->type->type = glsl_bool_type();
+ break;
+ case SpvOpTypeInt:
+ val->type->type = glsl_int_type();
+ break;
+ case SpvOpTypeFloat:
+ val->type->type = glsl_float_type();
+ break;
+
+ case SpvOpTypeVector: {
+ const struct glsl_type *base =
+ vtn_value(b, w[2], vtn_value_type_type)->type->type;
+ unsigned elems = w[3];
+
+ assert(glsl_type_is_scalar(base));
+ val->type->type = glsl_vector_type(glsl_get_base_type(base), elems);
+ break;
+ }
+
+ case SpvOpTypeMatrix: {
+ struct vtn_type *base =
+ vtn_value(b, w[2], vtn_value_type_type)->type;
+ unsigned columns = w[3];
+
+ assert(glsl_type_is_vector(base->type));
+ val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
+ glsl_get_vector_elements(base->type),
+ columns);
+ val->type->array_element = base;
+ val->type->row_major = false;
+ val->type->stride = 0;
+ break;
+ }
+
+ case SpvOpTypeRuntimeArray:
+ case SpvOpTypeArray: {
+ struct vtn_type *array_element =
+ vtn_value(b, w[2], vtn_value_type_type)->type;
+
+ /* A length of 0 is used to denote unsized arrays */
+ unsigned length = (opcode == SpvOpTypeArray) ? w[3] : 0;
+
+ val->type->type = glsl_array_type(array_element->type, length);
+ val->type->array_element = array_element;
+ val->type->stride = 0;
+ break;
+ }
+
+ case SpvOpTypeStruct: {
+ unsigned num_fields = count - 2;
+ val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
+ val->type->offsets = ralloc_array(b, unsigned, num_fields);
+
+ NIR_VLA(struct glsl_struct_field, fields, count);
+ for (unsigned i = 0; i < num_fields; i++) {
+ /* TODO: Handle decorators */
+ val->type->members[i] =
+ vtn_value(b, w[i + 2], vtn_value_type_type)->type;
+ fields[i].type = val->type->members[i]->type;
+ fields[i].name = ralloc_asprintf(b, "field%d", i);
+ fields[i].location = -1;
+ fields[i].interpolation = 0;
+ fields[i].centroid = 0;
+ fields[i].sample = 0;
+ fields[i].matrix_layout = 2;
+ }
+
+ struct member_decoration_ctx ctx = {
+ .fields = fields,
+ .type = val->type
+ };
+
+ vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
+
+ const char *name = val->name ? val->name : "struct";
+
+ val->type->type = glsl_struct_type(fields, num_fields, name);
+ break;
+ }
+
+ case SpvOpTypeFunction: {
+ const struct glsl_type *return_type =
+ vtn_value(b, w[2], vtn_value_type_type)->type->type;
+ NIR_VLA(struct glsl_function_param, params, count - 3);
+ for (unsigned i = 0; i < count - 3; i++) {
+ params[i].type = vtn_value(b, w[i + 3], vtn_value_type_type)->type->type;
+
+ /* FIXME: */
+ params[i].in = true;
+ params[i].out = true;
+ }
+ val->type->type = glsl_function_type(return_type, params, count - 3);
+ break;
+ }
+
+ case SpvOpTypePointer:
+ /* FIXME: For now, we'll just do the really lame thing and return
+ * the same type. The validator should ensure that the proper number
+ * of dereferences happen
+ */
+ val->type = vtn_value(b, w[3], vtn_value_type_type)->type;
+ break;
+
+ case SpvOpTypeImage: {
+ const struct glsl_type *sampled_type =
+ vtn_value(b, w[2], vtn_value_type_type)->type->type;
+
+ assert(glsl_type_is_vector_or_scalar(sampled_type));
+
+ enum glsl_sampler_dim dim;
+ switch ((SpvDim)w[3]) {
+ case SpvDim1D: dim = GLSL_SAMPLER_DIM_1D; break;
+ case SpvDim2D: dim = GLSL_SAMPLER_DIM_2D; break;
+ case SpvDim3D: dim = GLSL_SAMPLER_DIM_3D; break;
+ case SpvDimCube: dim = GLSL_SAMPLER_DIM_CUBE; break;
+ case SpvDimRect: dim = GLSL_SAMPLER_DIM_RECT; break;
+ case SpvDimBuffer: dim = GLSL_SAMPLER_DIM_BUF; break;
+ default:
+ unreachable("Invalid SPIR-V Sampler dimension");
+ }
+
+ bool is_shadow = w[4];
+ bool is_array = w[5];
+ bool multisampled = w[6];
+ unsigned sampled = w[7];
+ SpvImageFormat format = w[8];
+
+ assert(!multisampled && "FIXME: Handle multi-sampled textures");
+
+ val->type->image_format = translate_image_format(format);
+
+ if (sampled == 1) {
+ val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
+ glsl_get_base_type(sampled_type));
+ } else if (sampled == 2) {
+ assert(format);
+ assert(!is_shadow);
+ val->type->type = glsl_image_type(dim, is_array,
+ glsl_get_base_type(sampled_type));
+ } else {
+ assert(!"We need to know if the image will be sampled");
+ }
+ break;
+ }
+
+ case SpvOpTypeSampledImage:
+ val->type = vtn_value(b, w[2], vtn_value_type_type)->type;
+ break;
+
+ case SpvOpTypeSampler:
+ /* The actual sampler type here doesn't really matter. It gets
+ * thrown away the moment you combine it with an image. What really
+ * matters is that it's a sampler type as opposed to an integer type
+ * so the backend knows what to do.
+ *
+ * TODO: Eventually we should consider adding a "bare sampler" type
+ * to glsl_types.
+ */
+ val->type->type = glsl_sampler_type(GLSL_SAMPLER_DIM_2D, false, false,
+ GLSL_TYPE_FLOAT);
+ break;
+
+ case SpvOpTypeOpaque:
+ case SpvOpTypeEvent:
+ case SpvOpTypeDeviceEvent:
+ case SpvOpTypeReserveId:
+ case SpvOpTypeQueue:
+ case SpvOpTypePipe:
+ default:
+ unreachable("Unhandled opcode");
+ }
+
+ vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
+}
+
+static void
+vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
+ val->const_type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
+ val->constant = ralloc(b, nir_constant);
+ switch (opcode) {
+ case SpvOpConstantTrue:
+ assert(val->const_type == glsl_bool_type());
+ val->constant->value.u[0] = NIR_TRUE;
+ break;
+ case SpvOpConstantFalse:
+ assert(val->const_type == glsl_bool_type());
+ val->constant->value.u[0] = NIR_FALSE;
+ break;
+ case SpvOpConstant:
+ assert(glsl_type_is_scalar(val->const_type));
+ val->constant->value.u[0] = w[3];
+ break;
+ case SpvOpConstantComposite: {
+ unsigned elem_count = count - 3;
+ nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
+ for (unsigned i = 0; i < elem_count; i++)
+ elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;
+
+ switch (glsl_get_base_type(val->const_type)) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_BOOL:
+ if (glsl_type_is_matrix(val->const_type)) {
+ unsigned rows = glsl_get_vector_elements(val->const_type);
+ assert(glsl_get_matrix_columns(val->const_type) == elem_count);
+ for (unsigned i = 0; i < elem_count; i++)
+ for (unsigned j = 0; j < rows; j++)
+ val->constant->value.u[rows * i + j] = elems[i]->value.u[j];
+ } else {
+ assert(glsl_type_is_vector(val->const_type));
+ assert(glsl_get_vector_elements(val->const_type) == elem_count);
+ for (unsigned i = 0; i < elem_count; i++)
+ val->constant->value.u[i] = elems[i]->value.u[0];
+ }
+ ralloc_free(elems);
+ break;
+
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_ARRAY:
+ ralloc_steal(val->constant, elems);
+ val->constant->elements = elems;
+ break;
+
+ default:
+ unreachable("Unsupported type for constants");
+ }
+ break;
+ }
+
+ default:
+ unreachable("Unhandled opcode");
+ }
+}
+
+static void
+set_mode_system_value(nir_variable_mode *mode)
+{
+ assert(*mode == nir_var_system_value || *mode == nir_var_shader_in);
+ *mode = nir_var_system_value;
+}
+
+static void
+validate_per_vertex_mode(struct vtn_builder *b, nir_variable_mode mode)
+{
+ switch (b->shader->stage) {
+ case MESA_SHADER_VERTEX:
+ assert(mode == nir_var_shader_out);
+ break;
+ case MESA_SHADER_GEOMETRY:
+ assert(mode == nir_var_shader_out || mode == nir_var_shader_in);
+ break;
+ default:
+ assert(!"Invalid shader stage");
+ }
+}
+
+static void
+vtn_get_builtin_location(struct vtn_builder *b,
+ SpvBuiltIn builtin, int *location,
+ nir_variable_mode *mode)
+{
+ switch (builtin) {
+ case SpvBuiltInPosition:
+ *location = VARYING_SLOT_POS;
+ validate_per_vertex_mode(b, *mode);
+ break;
+ case SpvBuiltInPointSize:
+ *location = VARYING_SLOT_PSIZ;
+ validate_per_vertex_mode(b, *mode);
+ break;
+ case SpvBuiltInClipDistance:
+ *location = VARYING_SLOT_CLIP_DIST0; /* XXX CLIP_DIST1? */
+ validate_per_vertex_mode(b, *mode);
+ break;
+ case SpvBuiltInCullDistance:
+ /* XXX figure this out */
+ unreachable("unhandled builtin");
+ case SpvBuiltInVertexId:
+ /* Vulkan defines VertexID to be zero-based and reserves the new
+ * builtin keyword VertexIndex to indicate the non-zero-based value.
+ */
+ *location = SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInInstanceId:
+ *location = SYSTEM_VALUE_INSTANCE_ID;
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInPrimitiveId:
+ *location = VARYING_SLOT_PRIMITIVE_ID;
+ *mode = nir_var_shader_out;
+ break;
+ case SpvBuiltInInvocationId:
+ *location = SYSTEM_VALUE_INVOCATION_ID;
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInLayer:
+ *location = VARYING_SLOT_LAYER;
+ *mode = nir_var_shader_out;
+ break;
+ case SpvBuiltInTessLevelOuter:
+ case SpvBuiltInTessLevelInner:
+ case SpvBuiltInTessCoord:
+ case SpvBuiltInPatchVertices:
+ unreachable("no tessellation support");
+ case SpvBuiltInFragCoord:
+ *location = VARYING_SLOT_POS;
+ assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+ assert(*mode == nir_var_shader_in);
+ break;
+ case SpvBuiltInPointCoord:
+ *location = VARYING_SLOT_PNTC;
+ assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+ assert(*mode == nir_var_shader_in);
+ break;
+ case SpvBuiltInFrontFacing:
+ *location = VARYING_SLOT_FACE;
+ assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+ assert(*mode == nir_var_shader_in);
+ break;
+ case SpvBuiltInSampleId:
+ *location = SYSTEM_VALUE_SAMPLE_ID;
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInSamplePosition:
+ *location = SYSTEM_VALUE_SAMPLE_POS;
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInSampleMask:
+ *location = SYSTEM_VALUE_SAMPLE_MASK_IN; /* XXX out? */
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInFragDepth:
+ *location = FRAG_RESULT_DEPTH;
+ assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+ assert(*mode == nir_var_shader_out);
+ break;
+ case SpvBuiltInNumWorkgroups:
+ case SpvBuiltInWorkgroupSize:
+ /* these are constants, need to be handled specially */
+ unreachable("unsupported builtin");
+ break;
+ case SpvBuiltInGlobalInvocationId:
+ case SpvBuiltInLocalInvocationIndex:
+ /* these are computed values, need to be handled specially */
+ unreachable("unsupported builtin");
+ case SpvBuiltInWorkgroupId:
+ *location = SYSTEM_VALUE_WORK_GROUP_ID;
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInLocalInvocationId:
+ *location = SYSTEM_VALUE_LOCAL_INVOCATION_ID;
+ set_mode_system_value(mode);
+ break;
+ case SpvBuiltInHelperInvocation:
+ default:
+ unreachable("unsupported builtin");
+ }
+}
+
+static void
+var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
+ const struct vtn_decoration *dec, void *void_var)
+{
+ assert(val->value_type == vtn_value_type_deref);
+ assert(val->deref->deref.child == NULL);
+ assert(val->deref->var == void_var);
+
+ nir_variable *var = void_var;
+ switch (dec->decoration) {
+ case SpvDecorationRelaxedPrecision:
+ break; /* FIXME: Do nothing with this for now. */
+ case SpvDecorationNoPerspective:
+ var->data.interpolation = INTERP_QUALIFIER_NOPERSPECTIVE;
+ break;
+ case SpvDecorationFlat:
+ var->data.interpolation = INTERP_QUALIFIER_FLAT;
+ break;
+ case SpvDecorationCentroid:
+ var->data.centroid = true;
+ break;
+ case SpvDecorationSample:
+ var->data.sample = true;
+ break;
+ case SpvDecorationInvariant:
+ var->data.invariant = true;
+ break;
+ case SpvDecorationConstant:
+ assert(var->constant_initializer != NULL);
+ var->data.read_only = true;
+ break;
+ case SpvDecorationNonWritable:
+ var->data.read_only = true;
+ break;
+ case SpvDecorationLocation:
+ var->data.location = dec->literals[0];
+ break;
+ case SpvDecorationComponent:
+ var->data.location_frac = dec->literals[0];
+ break;
+ case SpvDecorationIndex:
+ var->data.explicit_index = true;
+ var->data.index = dec->literals[0];
+ break;
+ case SpvDecorationBinding:
+ var->data.explicit_binding = true;
+ var->data.binding = dec->literals[0];
+ break;
+ case SpvDecorationDescriptorSet:
+ var->data.descriptor_set = dec->literals[0];
+ break;
+ case SpvDecorationBuiltIn: {
+ SpvBuiltIn builtin = dec->literals[0];
+
+ nir_variable_mode mode = var->data.mode;
+ vtn_get_builtin_location(b, builtin, &var->data.location, &mode);
+ var->data.explicit_location = true;
+ var->data.mode = mode;
+ if (mode == nir_var_shader_in || mode == nir_var_system_value)
+ var->data.read_only = true;
+
+ if (builtin == SpvBuiltInFragCoord || builtin == SpvBuiltInSamplePosition)
+ var->data.origin_upper_left = b->origin_upper_left;
+
+ if (mode == nir_var_shader_out)
+ b->builtins[dec->literals[0]].out = var;
+ else
+ b->builtins[dec->literals[0]].in = var;
+ break;
+ }
+ case SpvDecorationRowMajor:
+ case SpvDecorationColMajor:
+ case SpvDecorationGLSLShared:
+ case SpvDecorationPatch:
+ case SpvDecorationRestrict:
+ case SpvDecorationAliased:
+ case SpvDecorationVolatile:
+ case SpvDecorationCoherent:
+ case SpvDecorationNonReadable:
+ case SpvDecorationUniform:
+ /* This is really nice but we have no use for it right now. */
+ case SpvDecorationCPacked:
+ case SpvDecorationSaturatedConversion:
+ case SpvDecorationStream:
+ case SpvDecorationOffset:
+ case SpvDecorationXfbBuffer:
+ case SpvDecorationFuncParamAttr:
+ case SpvDecorationFPRoundingMode:
+ case SpvDecorationFPFastMathMode:
+ case SpvDecorationLinkageAttributes:
+ case SpvDecorationSpecId:
+ break;
+ default:
+ unreachable("Unhandled variable decoration");
+ }
+}
+
+static nir_variable *
+get_builtin_variable(struct vtn_builder *b,
+ nir_variable_mode mode,
+ const struct glsl_type *type,
+ SpvBuiltIn builtin)
+{
+ nir_variable *var;
+ if (mode == nir_var_shader_out)
+ var = b->builtins[builtin].out;
+ else
+ var = b->builtins[builtin].in;
+
+ if (!var) {
+ int location;
+ vtn_get_builtin_location(b, builtin, &location, &mode);
+
+ var = nir_variable_create(b->shader, mode, type, "builtin");
+
+ var->data.location = location;
+ var->data.explicit_location = true;
+
+ if (builtin == SpvBuiltInFragCoord || builtin == SpvBuiltInSamplePosition)
+ var->data.origin_upper_left = b->origin_upper_left;
+
+ if (mode == nir_var_shader_out)
+ b->builtins[builtin].out = var;
+ else
+ b->builtins[builtin].in = var;
+ }
+
+ return var;
+}
+
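+/* Recursively loads a variable through the given deref chain.  Vectors and
+ * scalars become a single load_var intrinsic; arrays, matrices, and structs
+ * are loaded element by element by temporarily extending the deref chain one
+ * level and recursing.
+ */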
+static struct vtn_ssa_value *
+_vtn_variable_load(struct vtn_builder *b,
+ nir_deref_var *src_deref, nir_deref *src_deref_tail)
+{
+ struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
+ val->type = src_deref_tail->type;
+
+ /* The deref tail may contain a deref to select a component of a vector (in
+ * other words, it might not be an actual tail) so we have to save it away
+ * here since we overwrite it later.
+ */
+ nir_deref *old_child = src_deref_tail->child;
+
+ if (glsl_type_is_vector_or_scalar(val->type)) {
+ /* Terminate the deref chain in case there is one more link to pick
+ * off a component of the vector.
+ */
+ src_deref_tail->child = NULL;
+
+ nir_intrinsic_instr *load =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_var);
+ load->variables[0] =
+ nir_deref_as_var(nir_copy_deref(load, &src_deref->deref));
+ load->num_components = glsl_get_vector_elements(val->type);
+ nir_ssa_dest_init(&load->instr, &load->dest, load->num_components, NULL);
+
+ nir_builder_instr_insert(&b->nb, &load->instr);
+
+ if (src_deref->var->data.mode == nir_var_uniform &&
+ glsl_get_base_type(val->type) == GLSL_TYPE_BOOL) {
+ /* Uniform boolean loads need to be fixed up since they're defined
+ * to be zero/nonzero rather than NIR_FALSE/NIR_TRUE.
+ */
+ val->def = nir_ine(&b->nb, &load->dest.ssa, nir_imm_int(&b->nb, 0));
+ } else {
+ val->def = &load->dest.ssa;
+ }
+ } else if (glsl_get_base_type(val->type) == GLSL_TYPE_ARRAY ||
+ glsl_type_is_matrix(val->type)) {
+ unsigned elems = glsl_get_length(val->type);
+ val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+
+ nir_deref_array *deref = nir_deref_array_create(b);
+ deref->deref_array_type = nir_deref_array_type_direct;
+ deref->deref.type = glsl_get_array_element(val->type);
+ src_deref_tail->child = &deref->deref;
+ for (unsigned i = 0; i < elems; i++) {
+ deref->base_offset = i;
+ val->elems[i] = _vtn_variable_load(b, src_deref, &deref->deref);
+ }
+ } else {
+ assert(glsl_get_base_type(val->type) == GLSL_TYPE_STRUCT);
+ unsigned elems = glsl_get_length(val->type);
+ val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+
+ nir_deref_struct *deref = nir_deref_struct_create(b, 0);
+ src_deref_tail->child = &deref->deref;
+ for (unsigned i = 0; i < elems; i++) {
+ deref->index = i;
+ deref->deref.type = glsl_get_struct_field(val->type, i);
+ val->elems[i] = _vtn_variable_load(b, src_deref, &deref->deref);
+ }
+ }
+
+ src_deref_tail->child = old_child;
+
+ return val;
+}
+
+static void
+_vtn_variable_store(struct vtn_builder *b,
+ nir_deref_var *dest_deref, nir_deref *dest_deref_tail,
+ struct vtn_ssa_value *src)
+{
+ nir_deref *old_child = dest_deref_tail->child;
+
+ if (glsl_type_is_vector_or_scalar(src->type)) {
+ /* Terminate the deref chain in case there is one more link to pick
+ * off a component of the vector.
+ */
+ dest_deref_tail->child = NULL;
+
+ nir_intrinsic_instr *store =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_var);
+ store->variables[0] =
+ nir_deref_as_var(nir_copy_deref(store, &dest_deref->deref));
+ store->num_components = glsl_get_vector_elements(src->type);
+ store->src[0] = nir_src_for_ssa(src->def);
+
+ nir_builder_instr_insert(&b->nb, &store->instr);
+ } else if (glsl_get_base_type(src->type) == GLSL_TYPE_ARRAY ||
+ glsl_type_is_matrix(src->type)) {
+ unsigned elems = glsl_get_length(src->type);
+
+ nir_deref_array *deref = nir_deref_array_create(b);
+ deref->deref_array_type = nir_deref_array_type_direct;
+ deref->deref.type = glsl_get_array_element(src->type);
+ dest_deref_tail->child = &deref->deref;
+ for (unsigned i = 0; i < elems; i++) {
+ deref->base_offset = i;
+ _vtn_variable_store(b, dest_deref, &deref->deref, src->elems[i]);
+ }
+ } else {
+ assert(glsl_get_base_type(src->type) == GLSL_TYPE_STRUCT);
+ unsigned elems = glsl_get_length(src->type);
+
+ nir_deref_struct *deref = nir_deref_struct_create(b, 0);
+ dest_deref_tail->child = &deref->deref;
+ for (unsigned i = 0; i < elems; i++) {
+ deref->index = i;
+ deref->deref.type = glsl_get_struct_field(src->type, i);
+ _vtn_variable_store(b, dest_deref, &deref->deref, src->elems[i]);
+ }
+ }
+
+ dest_deref_tail->child = old_child;
+}
+
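+/* Emits a vulkan_resource_index intrinsic, mapping a (descriptor set,
+ * binding, array index) triple to a single SSA value that later block
+ * loads/stores use to address the bound UBO/SSBO.  A NULL array_index is
+ * treated as zero, which is what non-arrayed bindings want.
+ */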
+static nir_ssa_def *
+nir_vulkan_resource_index(nir_builder *b, unsigned set, unsigned binding,
+ nir_variable_mode mode, nir_ssa_def *array_index)
+{
+ if (array_index == NULL)
+ array_index = nir_imm_int(b, 0);
+
+ nir_intrinsic_instr *instr =
+ nir_intrinsic_instr_create(b->shader,
+ nir_intrinsic_vulkan_resource_index);
+ instr->src[0] = nir_src_for_ssa(array_index);
+ instr->const_index[0] = set;
+ instr->const_index[1] = binding;
+ instr->const_index[2] = mode;
+
+ nir_ssa_dest_init(&instr->instr, &instr->dest, 1, NULL);
+ nir_builder_instr_insert(b, &instr->instr);
+
+ return &instr->dest.ssa;
+}
+
+static struct vtn_ssa_value *
+_vtn_block_load(struct vtn_builder *b, nir_intrinsic_op op,
+ unsigned set, unsigned binding, nir_variable_mode mode,
- load->const_index[0] = offset;
++ nir_ssa_def *index, nir_ssa_def *offset, struct vtn_type *type)
+{
+ struct vtn_ssa_value *val = ralloc(b, struct vtn_ssa_value);
+ val->type = type->type;
+ val->transposed = NULL;
+ if (glsl_type_is_vector_or_scalar(type->type)) {
+ nir_intrinsic_instr *load = nir_intrinsic_instr_create(b->shader, op);
+ load->num_components = glsl_get_vector_elements(type->type);
- case nir_intrinsic_load_ubo_indirect:
- case nir_intrinsic_load_ssbo_indirect:
- load->src[1] = nir_src_for_ssa(indirect);
- /* fall through */
+
+ switch (op) {
- break; /* Nothing to do */
- case nir_intrinsic_load_push_constant_indirect:
- load->src[0] = nir_src_for_ssa(indirect);
+ case nir_intrinsic_load_ubo:
+ case nir_intrinsic_load_ssbo: {
+ nir_ssa_def *res_index = nir_vulkan_resource_index(&b->nb,
+ set, binding,
+ mode, index);
+ load->src[0] = nir_src_for_ssa(res_index);
++ load->src[1] = nir_src_for_ssa(offset);
+ break;
+ }
+
+ case nir_intrinsic_load_push_constant:
- offset + type->offsets[i],
- indirect, type->members[i]);
++ load->src[0] = nir_src_for_ssa(offset);
+ break;
+
+ default:
+ unreachable("Invalid block load intrinsic");
+ }
+
+ nir_ssa_dest_init(&load->instr, &load->dest, load->num_components, NULL);
+ nir_builder_instr_insert(&b->nb, &load->instr);
+ val->def = &load->dest.ssa;
+ } else {
+ unsigned elems = glsl_get_length(type->type);
+ val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+ if (glsl_type_is_struct(type->type)) {
+ for (unsigned i = 0; i < elems; i++) {
++ nir_ssa_def *child_offset =
++ nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, type->offsets[i]));
+ val->elems[i] = _vtn_block_load(b, op, set, binding, mode, index,
- offset + i * type->stride,
- indirect, type->array_element);
++ child_offset, type->members[i]);
+ }
+ } else {
+ for (unsigned i = 0; i < elems; i++) {
++ nir_ssa_def *child_offset =
++ nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * type->stride));
+ val->elems[i] = _vtn_block_load(b, op, set, binding, mode, index,
- nir_ssa_def **index,
- unsigned *offset, nir_ssa_def **indirect)
++ child_offset, type->array_element);
+ }
+ }
+ }
+
+ return val;
+}
+
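+/* Computes the block index and byte offset for a block access.  If the
+ * variable's first deref is an array deref, it selects which block of an
+ * arrayed binding and becomes *index; the rest of the chain up to src_tail is
+ * folded into an SSA byte offset.  For example, for an access chain like
+ * block.m[i].f with Offset(m) = 16, ArrayStride = 32 and Offset(f) = 8, the
+ * walk below builds an offset equivalent to 16 + i * 32 + 8 out of iadd/imul
+ * instructions.
+ */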
+static void
+vtn_block_get_offset(struct vtn_builder *b, nir_deref_var *src,
+ struct vtn_type **type, nir_deref *src_tail,
- *offset = 0;
- *indirect = NULL;
++ nir_ssa_def **index, nir_ssa_def **offset)
+{
+ nir_deref *deref = &src->deref;
+
+ if (deref->child->deref_type == nir_deref_type_array) {
+ deref = deref->child;
+ *type = (*type)->array_element;
+ nir_deref_array *deref_array = nir_deref_as_array(deref);
+ *index = nir_imm_int(&b->nb, deref_array->base_offset);
+
+ if (deref_array->deref_array_type == nir_deref_array_type_indirect)
+ *index = nir_iadd(&b->nb, *index, deref_array->indirect.ssa);
+ } else {
+ *index = nir_imm_int(&b->nb, 0);
+ }
+
- if (deref_array->deref_array_type == nir_deref_array_type_direct) {
- *offset += (*type)->stride * deref_array->base_offset;
- } else {
- nir_ssa_def *off = nir_imul(&b->nb, deref_array->indirect.ssa,
- nir_imm_int(&b->nb, (*type)->stride));
- *indirect = *indirect ? nir_iadd(&b->nb, *indirect, off) : off;
- }
++ *offset = nir_imm_int(&b->nb, 0);
+ while (deref != src_tail) {
+ deref = deref->child;
+ switch (deref->deref_type) {
+ case nir_deref_type_array: {
+ nir_deref_array *deref_array = nir_deref_as_array(deref);
- *offset += (*type)->offsets[deref_struct->index];
++ nir_ssa_def *off = nir_imm_int(&b->nb, deref_array->base_offset);
++
++ if (deref_array->deref_array_type == nir_deref_array_type_indirect)
++ off = nir_iadd(&b->nb, off, deref_array->indirect.ssa);
++
++ off = nir_imul(&b->nb, off, nir_imm_int(&b->nb, (*type)->stride));
++ *offset = nir_iadd(&b->nb, *offset, off);
++
+ *type = (*type)->array_element;
+ break;
+ }
+
+ case nir_deref_type_struct: {
+ nir_deref_struct *deref_struct = nir_deref_as_struct(deref);
- unsigned offset;
- nir_ssa_def *indirect;
- vtn_block_get_offset(b, src, &type, src_tail, &index, &offset, &indirect);
++
++ unsigned elem_off = (*type)->offsets[deref_struct->index];
++ *offset = nir_iadd(&b->nb, *offset, nir_imm_int(&b->nb, elem_off));
++
+ *type = (*type)->members[deref_struct->index];
+ break;
+ }
+
+ default:
+ unreachable("unknown deref type");
+ }
+ }
+}
+
+static struct vtn_ssa_value *
+vtn_block_load(struct vtn_builder *b, nir_deref_var *src,
+ struct vtn_type *type, nir_deref *src_tail)
+{
+ nir_ssa_def *index;
- op = indirect ? nir_intrinsic_load_ubo_indirect
- : nir_intrinsic_load_ubo;
++ nir_ssa_def *offset;
++ vtn_block_get_offset(b, src, &type, src_tail, &index, &offset);
+
+ nir_intrinsic_op op;
+ if (src->var->data.mode == nir_var_uniform) {
+ if (src->var->data.descriptor_set >= 0) {
+ /* UBO load */
+ assert(src->var->data.binding >= 0);
+
- op = indirect ? nir_intrinsic_load_push_constant_indirect
- : nir_intrinsic_load_push_constant;
++ op = nir_intrinsic_load_ubo;
+ } else {
+ /* Push constant load */
+ assert(src->var->data.descriptor_set == -1 &&
+ src->var->data.binding == -1);
+
- op = indirect ? nir_intrinsic_load_ssbo_indirect
- : nir_intrinsic_load_ssbo;
++ op = nir_intrinsic_load_push_constant;
+ }
+ } else {
+ assert(src->var->data.mode == nir_var_shader_storage);
- index, offset, indirect, type);
++ op = nir_intrinsic_load_ssbo;
+ }
+
+ return _vtn_block_load(b, op, src->var->data.descriptor_set,
+ src->var->data.binding, src->var->data.mode,
- nir_variable_mode mode, nir_ssa_def *index, unsigned offset,
- nir_ssa_def *indirect, struct vtn_type *type)
++ index, offset, type);
+}
+
+/*
+ * Gets the NIR-level deref tail, which may have as a child an array deref
+ * selecting which component due to OpAccessChain supporting per-component
+ * indexing in SPIR-V.
+ */
+
+static nir_deref *
+get_deref_tail(nir_deref_var *deref)
+{
+ nir_deref *cur = &deref->deref;
+ while (!glsl_type_is_vector_or_scalar(cur->type) && cur->child)
+ cur = cur->child;
+
+ return cur;
+}
+
+static nir_ssa_def *vtn_vector_extract(struct vtn_builder *b,
+ nir_ssa_def *src, unsigned index);
+
+static nir_ssa_def *vtn_vector_extract_dynamic(struct vtn_builder *b,
+ nir_ssa_def *src,
+ nir_ssa_def *index);
+
+static bool
+variable_is_external_block(nir_variable *var)
+{
+ return var->interface_type &&
+ glsl_type_is_struct(var->interface_type) &&
+ (var->data.mode == nir_var_uniform ||
+ var->data.mode == nir_var_shader_storage);
+}
+
+static struct vtn_ssa_value *
+vtn_variable_load(struct vtn_builder *b, nir_deref_var *src,
+ struct vtn_type *src_type)
+{
+ nir_deref *src_tail = get_deref_tail(src);
+
+ struct vtn_ssa_value *val;
+ if (variable_is_external_block(src->var))
+ val = vtn_block_load(b, src, src_type, src_tail);
+ else
+ val = _vtn_variable_load(b, src, src_tail);
+
+ if (src_tail->child) {
+ nir_deref_array *vec_deref = nir_deref_as_array(src_tail->child);
+ assert(vec_deref->deref.child == NULL);
+ val->type = vec_deref->deref.type;
+ if (vec_deref->deref_array_type == nir_deref_array_type_direct)
+ val->def = vtn_vector_extract(b, val->def, vec_deref->base_offset);
+ else
+ val->def = vtn_vector_extract_dynamic(b, val->def,
+ vec_deref->indirect.ssa);
+ }
+
+ return val;
+}
+
+static void
+_vtn_block_store(struct vtn_builder *b, nir_intrinsic_op op,
+ struct vtn_ssa_value *src, unsigned set, unsigned binding,
- store->const_index[0] = offset;
++ nir_variable_mode mode, nir_ssa_def *index,
++ nir_ssa_def *offset, struct vtn_type *type)
+{
+ assert(src->type == type->type);
+ if (glsl_type_is_vector_or_scalar(type->type)) {
+ nir_intrinsic_instr *store = nir_intrinsic_instr_create(b->shader, op);
+ store->num_components = glsl_get_vector_elements(type->type);
-
- if (op == nir_intrinsic_store_ssbo_indirect)
- store->src[2] = nir_src_for_ssa(indirect);
+ store->const_index[0] = (1 << store->num_components) - 1;
+ store->src[0] = nir_src_for_ssa(src->def);
+
+ nir_ssa_def *res_index = nir_vulkan_resource_index(&b->nb,
+ set, binding,
+ mode, index);
+ store->src[1] = nir_src_for_ssa(res_index);
- index, offset + type->offsets[i], indirect,
- type->members[i]);
++ store->src[2] = nir_src_for_ssa(offset);
+
+ nir_builder_instr_insert(&b->nb, &store->instr);
+ } else {
+ unsigned elems = glsl_get_length(type->type);
+ if (glsl_type_is_struct(type->type)) {
+ for (unsigned i = 0; i < elems; i++) {
++ nir_ssa_def *child_offset =
++ nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, type->offsets[i]));
+ _vtn_block_store(b, op, src->elems[i], set, binding, mode,
- index, offset + i * type->stride, indirect,
- type->array_element);
++ index, child_offset, type->members[i]);
+ }
+ } else {
+ for (unsigned i = 0; i < elems; i++) {
++ nir_ssa_def *child_offset =
++ nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * type->stride));
+ _vtn_block_store(b, op, src->elems[i], set, binding, mode,
- unsigned offset;
- nir_ssa_def *indirect;
- vtn_block_get_offset(b, dest, &type, dest_tail, &index, &offset, &indirect);
++ index, child_offset, type->array_element);
+ }
+ }
+ }
+}
+
+static void
+vtn_block_store(struct vtn_builder *b, struct vtn_ssa_value *src,
+ nir_deref_var *dest, struct vtn_type *type,
+ nir_deref *dest_tail)
+{
+ nir_ssa_def *index;
- nir_intrinsic_op op = indirect ? nir_intrinsic_store_ssbo_indirect
- : nir_intrinsic_store_ssbo;
++ nir_ssa_def *offset;
++ vtn_block_get_offset(b, dest, &type, dest_tail, &index, &offset);
+
- index, offset, indirect, type);
++ nir_intrinsic_op op = nir_intrinsic_store_ssbo;
+
+ return _vtn_block_store(b, op, src, dest->var->data.descriptor_set,
+ dest->var->data.binding, dest->var->data.mode,
- b->shader->num_uniforms = vtn_type_block_size(type);
++ index, offset, type);
+}
+
+static nir_ssa_def * vtn_vector_insert(struct vtn_builder *b,
+ nir_ssa_def *src, nir_ssa_def *insert,
+ unsigned index);
+
+static nir_ssa_def * vtn_vector_insert_dynamic(struct vtn_builder *b,
+ nir_ssa_def *src,
+ nir_ssa_def *insert,
+ nir_ssa_def *index);
+static void
+vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
+ nir_deref_var *dest, struct vtn_type *dest_type)
+{
+ nir_deref *dest_tail = get_deref_tail(dest);
+ if (variable_is_external_block(dest->var)) {
+ assert(dest->var->data.mode == nir_var_shader_storage);
+ vtn_block_store(b, src, dest, dest_type, dest_tail);
+ } else {
+ if (dest_tail->child) {
+ struct vtn_ssa_value *val = _vtn_variable_load(b, dest, dest_tail);
+ nir_deref_array *deref = nir_deref_as_array(dest_tail->child);
+ assert(deref->deref.child == NULL);
+ if (deref->deref_array_type == nir_deref_array_type_direct)
+ val->def = vtn_vector_insert(b, val->def, src->def,
+ deref->base_offset);
+ else
+ val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
+ deref->indirect.ssa);
+ _vtn_variable_store(b, dest, dest_tail, val);
+ } else {
+ _vtn_variable_store(b, dest, dest_tail, src);
+ }
+ }
+}
+
+static void
+vtn_variable_copy(struct vtn_builder *b, nir_deref_var *src,
+ nir_deref_var *dest, struct vtn_type *type)
+{
+ nir_deref *src_tail = get_deref_tail(src);
+
+ if (src_tail->child || src->var->interface_type) {
+ assert(get_deref_tail(dest)->child);
+ struct vtn_ssa_value *val = vtn_variable_load(b, src, type);
+ vtn_variable_store(b, val, dest, type);
+ } else {
+ nir_intrinsic_instr *copy =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_copy_var);
+ copy->variables[0] = nir_deref_as_var(nir_copy_deref(copy, &dest->deref));
+ copy->variables[1] = nir_deref_as_var(nir_copy_deref(copy, &src->deref));
+
+ nir_builder_instr_insert(&b->nb, &copy->instr);
+ }
+}
+
+/* Tries to compute the size of an interface block based on the strides and
+ * offsets that are provided to us in the SPIR-V source.
+ */
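+/* For example, a block declared as
+ *
+ *    layout(std140) uniform Block {
+ *       vec4  a;     // Offset 0
+ *       float b[3];  // Offset 16, ArrayStride 16
+ *       mat4  c;     // Offset 64, MatrixStride 16
+ *    };
+ *
+ * gives field ends of 16, 16 + 3 * 16 = 64, and 64 + 4 * 16 = 128, so the
+ * computed size is 128 bytes.
+ */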
+static unsigned
+vtn_type_block_size(struct vtn_type *type)
+{
+ enum glsl_base_type base_type = glsl_get_base_type(type->type);
+ switch (base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_DOUBLE: {
+ unsigned cols = type->row_major ? glsl_get_vector_elements(type->type) :
+ glsl_get_matrix_columns(type->type);
+ if (cols > 1) {
+ assert(type->stride > 0);
+ return type->stride * cols;
+ } else if (base_type == GLSL_TYPE_DOUBLE) {
+ return glsl_get_vector_elements(type->type) * 8;
+ } else {
+ return glsl_get_vector_elements(type->type) * 4;
+ }
+ }
+
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_INTERFACE: {
+ unsigned size = 0;
+ unsigned num_fields = glsl_get_length(type->type);
+ for (unsigned f = 0; f < num_fields; f++) {
+ unsigned field_end = type->offsets[f] +
+ vtn_type_block_size(type->members[f]);
+ size = MAX2(size, field_end);
+ }
+ return size;
+ }
+
+ case GLSL_TYPE_ARRAY:
+ assert(type->stride > 0);
+ assert(glsl_get_length(type->type) > 0);
+ return type->stride * glsl_get_length(type->type);
+
+ default:
+ assert(!"Invalid block type");
+ return 0;
+ }
+}
+
+static bool
+is_interface_type(struct vtn_type *type)
+{
+ return type->block || type->buffer_block ||
+ glsl_type_is_sampler(type->type) ||
+ glsl_type_is_image(type->type);
+}
+
+static void
+vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpVariable: {
+ struct vtn_type *type =
+ vtn_value(b, w[1], vtn_value_type_type)->type;
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_deref);
+
+ nir_variable *var = rzalloc(b->shader, nir_variable);
+
+ var->type = type->type;
+ var->name = ralloc_strdup(var, val->name);
+
+ struct vtn_type *interface_type;
+ if (is_interface_type(type)) {
+ interface_type = type;
+ } else if (glsl_type_is_array(type->type) &&
+ is_interface_type(type->array_element)) {
+ interface_type = type->array_element;
+ } else {
+ interface_type = NULL;
+ }
+
+ if (interface_type)
+ var->interface_type = interface_type->type;
+
+ switch ((SpvStorageClass)w[3]) {
+ case SpvStorageClassUniform:
+ case SpvStorageClassUniformConstant:
+ if (interface_type && interface_type->buffer_block) {
+ var->data.mode = nir_var_shader_storage;
+ b->shader->info.num_ssbos++;
+ } else {
+ /* UBO's and samplers */
+ var->data.mode = nir_var_uniform;
+ var->data.read_only = true;
+ if (interface_type) {
+ if (glsl_type_is_image(interface_type->type)) {
+ b->shader->info.num_images++;
+ var->data.image.format = interface_type->image_format;
+ } else if (glsl_type_is_sampler(interface_type->type)) {
+ b->shader->info.num_textures++;
+ } else {
+ assert(glsl_type_is_struct(interface_type->type));
+ b->shader->info.num_ubos++;
+ }
+ }
+ }
+ break;
+ case SpvStorageClassPushConstant:
+ assert(interface_type && interface_type->block);
+ var->data.mode = nir_var_uniform;
+ var->data.read_only = true;
+ var->data.descriptor_set = -1;
+ var->data.binding = -1;
+
+ /* We have exactly one push constant block */
+ assert(b->shader->num_uniforms == 0);
++ b->shader->num_uniforms = vtn_type_block_size(type) * 4;
+ break;
+ case SpvStorageClassInput:
+ var->data.mode = nir_var_shader_in;
+ var->data.read_only = true;
+ break;
+ case SpvStorageClassOutput:
+ var->data.mode = nir_var_shader_out;
+ break;
+ case SpvStorageClassPrivate:
+ var->data.mode = nir_var_global;
+ break;
+ case SpvStorageClassFunction:
+ var->data.mode = nir_var_local;
+ break;
+ case SpvStorageClassWorkgroup:
+ case SpvStorageClassCrossWorkgroup:
+ case SpvStorageClassGeneric:
+ case SpvStorageClassAtomicCounter:
+ default:
+ unreachable("Unhandled variable storage class");
+ }
+
+ if (count > 4) {
+ assert(count == 5);
+ var->constant_initializer =
+ vtn_value(b, w[4], vtn_value_type_constant)->constant;
+ }
+
+ val->deref = nir_deref_var_create(b, var);
+ val->deref_type = type;
+
+ /* We handle decorations first because decorations might give us
+ * location information. We use the data.explicit_location field to
+ * note that the location provided is the "final" location. If
+ * data.explicit_location == false, this means that it's relative to
+ * whatever the base location is.
+ */
+ vtn_foreach_decoration(b, val, var_decoration_cb, var);
+
+ if (!var->data.explicit_location) {
+ if (b->execution_model == SpvExecutionModelFragment &&
+ var->data.mode == nir_var_shader_out) {
+ var->data.location += FRAG_RESULT_DATA0;
+ } else if (b->execution_model == SpvExecutionModelVertex &&
+ var->data.mode == nir_var_shader_in) {
+ var->data.location += VERT_ATTRIB_GENERIC0;
+ } else if (var->data.mode == nir_var_shader_in ||
+ var->data.mode == nir_var_shader_out) {
+ var->data.location += VARYING_SLOT_VAR0;
+ }
+ }
+
+ /* Interface block variables aren't actually going to be referenced
+ * by the generated NIR, so we don't put them in the list
+ */
+ if (interface_type && glsl_type_is_struct(interface_type->type))
+ break;
+
+ if (var->data.mode == nir_var_local) {
+ nir_function_impl_add_variable(b->impl, var);
+ } else {
+ nir_shader_add_variable(b->shader, var);
+ }
+
+ break;
+ }
+
+ case SpvOpAccessChain:
+ case SpvOpInBoundsAccessChain: {
+ nir_deref_var *base;
+ struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
+ if (base_val->value_type == vtn_value_type_sampled_image) {
+ /* This is rather insane. SPIR-V allows you to use OpSampledImage
+ * to combine an array of images with a single sampler to get an
+ * array of sampled images that all share the same sampler.
+ * Fortunately, this means that we can more-or-less ignore the
+ * sampler when crawling the access chain, but it does leave us
+ * with this rather awkward little special-case.
+ */
+ base = base_val->sampled_image->image;
+ } else {
+ assert(base_val->value_type == vtn_value_type_deref);
+ base = base_val->deref;
+ }
+
+ nir_deref_var *deref = nir_deref_as_var(nir_copy_deref(b, &base->deref));
+ struct vtn_type *deref_type = vtn_value(b, w[3], vtn_value_type_deref)->deref_type;
+
+ nir_deref *tail = &deref->deref;
+ while (tail->child)
+ tail = tail->child;
+
+ for (unsigned i = 0; i < count - 4; i++) {
+ assert(w[i + 4] < b->value_id_bound);
+ struct vtn_value *idx_val = &b->values[w[i + 4]];
+
+ enum glsl_base_type base_type = glsl_get_base_type(tail->type);
+ switch (base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_ARRAY: {
+ nir_deref_array *deref_arr = nir_deref_array_create(b);
+ if (base_type == GLSL_TYPE_ARRAY ||
+ glsl_type_is_matrix(tail->type)) {
+ deref_type = deref_type->array_element;
+ } else {
+ assert(glsl_type_is_vector(tail->type));
+ deref_type = ralloc(b, struct vtn_type);
+ deref_type->type = glsl_scalar_type(base_type);
+ }
+
+ deref_arr->deref.type = deref_type->type;
+
+ if (idx_val->value_type == vtn_value_type_constant) {
+ unsigned idx = idx_val->constant->value.u[0];
+ deref_arr->deref_array_type = nir_deref_array_type_direct;
+ deref_arr->base_offset = idx;
+ } else {
+ assert(idx_val->value_type == vtn_value_type_ssa);
+ assert(glsl_type_is_scalar(idx_val->ssa->type));
+ deref_arr->deref_array_type = nir_deref_array_type_indirect;
+ deref_arr->base_offset = 0;
+ deref_arr->indirect = nir_src_for_ssa(idx_val->ssa->def);
+ }
+ tail->child = &deref_arr->deref;
+ break;
+ }
+
+ case GLSL_TYPE_STRUCT: {
+ assert(idx_val->value_type == vtn_value_type_constant);
+ unsigned idx = idx_val->constant->value.u[0];
+ deref_type = deref_type->members[idx];
+ nir_deref_struct *deref_struct = nir_deref_struct_create(b, idx);
+ deref_struct->deref.type = deref_type->type;
+ tail->child = &deref_struct->deref;
+ break;
+ }
+ default:
+ unreachable("Invalid type for deref");
+ }
+
+ if (deref_type->is_builtin) {
+            /* If we encounter a builtin, we throw away the rest of the
+ * access chain, jump to the builtin, and keep building.
+ */
+ const struct glsl_type *builtin_type = deref_type->type;
+
+ nir_deref_array *per_vertex_deref = NULL;
+ if (glsl_type_is_array(base->var->type)) {
+ /* This builtin is a per-vertex builtin */
+ assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+ assert(base->var->data.mode == nir_var_shader_in);
+ builtin_type = glsl_array_type(builtin_type,
+ b->shader->info.gs.vertices_in);
+
+ /* The first non-var deref should be an array deref. */
+ assert(deref->deref.child->deref_type ==
+ nir_deref_type_array);
+ per_vertex_deref = nir_deref_as_array(deref->deref.child);
+ }
+
+ nir_variable *builtin = get_builtin_variable(b,
+ base->var->data.mode,
+ builtin_type,
+ deref_type->builtin);
+ deref = nir_deref_var_create(b, builtin);
+
+ if (per_vertex_deref) {
+ /* Since deref chains start at the variable, we can just
+ * steal that link and use it.
+ */
+ deref->deref.child = &per_vertex_deref->deref;
+ per_vertex_deref->deref.child = NULL;
+ per_vertex_deref->deref.type =
+ glsl_get_array_element(builtin_type);
+
+ tail = &per_vertex_deref->deref;
+ } else {
+ tail = &deref->deref;
+ }
+ } else {
+ tail = tail->child;
+ }
+ }
+
+ /* For uniform blocks, we don't resolve the access chain until we
+ * actually access the variable, so we need to keep around the original
+ * type of the variable.
+ */
+ if (variable_is_external_block(base->var))
+ deref_type = vtn_value(b, w[3], vtn_value_type_deref)->deref_type;
+
+ if (base_val->value_type == vtn_value_type_sampled_image) {
+ struct vtn_value *val =
+ vtn_push_value(b, w[2], vtn_value_type_sampled_image);
+ val->sampled_image = ralloc(b, struct vtn_sampled_image);
+ val->sampled_image->image = deref;
+ val->sampled_image->sampler = base_val->sampled_image->sampler;
+ } else {
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_deref);
+ val->deref = deref;
+ val->deref_type = deref_type;
+ }
+
+ break;
+ }
+
+ case SpvOpCopyMemory: {
+ nir_deref_var *dest = vtn_value(b, w[1], vtn_value_type_deref)->deref;
+ nir_deref_var *src = vtn_value(b, w[2], vtn_value_type_deref)->deref;
+ struct vtn_type *type =
+ vtn_value(b, w[1], vtn_value_type_deref)->deref_type;
+
+ vtn_variable_copy(b, src, dest, type);
+ break;
+ }
+
+ case SpvOpLoad: {
+ nir_deref_var *src = vtn_value(b, w[3], vtn_value_type_deref)->deref;
+ struct vtn_type *src_type =
+ vtn_value(b, w[3], vtn_value_type_deref)->deref_type;
+
+ if (src->var->interface_type &&
+ (glsl_type_is_sampler(src->var->interface_type) ||
+ glsl_type_is_image(src->var->interface_type))) {
+ vtn_push_value(b, w[2], vtn_value_type_deref)->deref = src;
+ return;
+ }
+
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ val->ssa = vtn_variable_load(b, src, src_type);
+ break;
+ }
+
+ case SpvOpStore: {
+ nir_deref_var *dest = vtn_value(b, w[1], vtn_value_type_deref)->deref;
+ struct vtn_type *dest_type =
+ vtn_value(b, w[1], vtn_value_type_deref)->deref_type;
+ struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
+ vtn_variable_store(b, src, dest, dest_type);
+ break;
+ }
+
+ case SpvOpCopyMemorySized:
+ case SpvOpArrayLength:
+ default:
+ unreachable("Unhandled opcode");
+ }
+}
+
+static void
+vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ unreachable("Unhandled opcode");
+}
+
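+/* Recursively allocates the vtn_ssa_value tree for a GLSL type: vectors and
+ * scalars get a single (not yet assigned) def, while matrices, arrays, and
+ * structs get one child value per column, element, or member.
+ */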
+static struct vtn_ssa_value *
+vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
+{
+ struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
+ val->type = type;
+
+ if (!glsl_type_is_vector_or_scalar(type)) {
+ unsigned elems = glsl_get_length(type);
+ val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+ for (unsigned i = 0; i < elems; i++) {
+ const struct glsl_type *child_type;
+
+ switch (glsl_get_base_type(type)) {
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_DOUBLE:
+ child_type = glsl_get_column_type(type);
+ break;
+ case GLSL_TYPE_ARRAY:
+ child_type = glsl_get_array_element(type);
+ break;
+ case GLSL_TYPE_STRUCT:
+ child_type = glsl_get_struct_field(type, i);
+ break;
+ default:
+            unreachable("unknown base type");
+ }
+
+ val->elems[i] = vtn_create_ssa_value(b, child_type);
+ }
+ }
+
+ return val;
+}
+
+static nir_tex_src
+vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
+{
+ nir_tex_src src;
+ src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
+ src.src_type = type;
+ return src;
+}
+
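+/* Apart from OpSampledImage (which just pairs an image with a sampler), this
+ * translates the SPIR-V sampling and query opcodes into a nir_tex_instr:
+ * it gathers the coordinate and optional dref sources, picks the base texop,
+ * applies any image operands (bias, lod, gradients, offset, sample index),
+ * and fills in the sampler/texture derefs.
+ */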
+static void
+vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ if (opcode == SpvOpSampledImage) {
+ struct vtn_value *val =
+ vtn_push_value(b, w[2], vtn_value_type_sampled_image);
+ val->sampled_image = ralloc(b, struct vtn_sampled_image);
+ val->sampled_image->image =
+ vtn_value(b, w[3], vtn_value_type_deref)->deref;
+ val->sampled_image->sampler =
+ vtn_value(b, w[4], vtn_value_type_deref)->deref;
+ return;
+ }
+
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+
+ struct vtn_sampled_image sampled;
+ struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
+ if (sampled_val->value_type == vtn_value_type_sampled_image) {
+ sampled = *sampled_val->sampled_image;
+ } else {
+ assert(sampled_val->value_type == vtn_value_type_deref);
+ sampled.image = NULL;
+ sampled.sampler = sampled_val->deref;
+ }
+
+ nir_tex_src srcs[8]; /* 8 should be enough */
+ nir_tex_src *p = srcs;
+
+ unsigned idx = 4;
+
+ unsigned coord_components = 0;
+ switch (opcode) {
+ case SpvOpImageSampleImplicitLod:
+ case SpvOpImageSampleExplicitLod:
+ case SpvOpImageSampleDrefImplicitLod:
+ case SpvOpImageSampleDrefExplicitLod:
+ case SpvOpImageSampleProjImplicitLod:
+ case SpvOpImageSampleProjExplicitLod:
+ case SpvOpImageSampleProjDrefImplicitLod:
+ case SpvOpImageSampleProjDrefExplicitLod:
+ case SpvOpImageFetch:
+ case SpvOpImageGather:
+ case SpvOpImageDrefGather:
+ case SpvOpImageQueryLod: {
+ /* All these types have the coordinate as their first real argument */
+ struct vtn_ssa_value *coord = vtn_ssa_value(b, w[idx++]);
+ coord_components = glsl_get_vector_elements(coord->type);
+ p->src = nir_src_for_ssa(coord->def);
+ p->src_type = nir_tex_src_coord;
+ p++;
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ /* These all have an explicit depth value as their next source */
+ switch (opcode) {
+ case SpvOpImageSampleDrefImplicitLod:
+ case SpvOpImageSampleDrefExplicitLod:
+ case SpvOpImageSampleProjDrefImplicitLod:
+ case SpvOpImageSampleProjDrefExplicitLod:
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparitor);
+ break;
+ default:
+ break;
+ }
+
+ /* Figure out the base texture operation */
+ nir_texop texop;
+ switch (opcode) {
+ case SpvOpImageSampleImplicitLod:
+ case SpvOpImageSampleDrefImplicitLod:
+ case SpvOpImageSampleProjImplicitLod:
+ case SpvOpImageSampleProjDrefImplicitLod:
+ texop = nir_texop_tex;
+ break;
+
+ case SpvOpImageSampleExplicitLod:
+ case SpvOpImageSampleDrefExplicitLod:
+ case SpvOpImageSampleProjExplicitLod:
+ case SpvOpImageSampleProjDrefExplicitLod:
+ texop = nir_texop_txl;
+ break;
+
+ case SpvOpImageFetch:
+ texop = nir_texop_txf;
+ break;
+
+ case SpvOpImageGather:
+ case SpvOpImageDrefGather:
+ texop = nir_texop_tg4;
+ break;
+
+ case SpvOpImageQuerySizeLod:
+ case SpvOpImageQuerySize:
+ texop = nir_texop_txs;
+ break;
+
+ case SpvOpImageQueryLod:
+ texop = nir_texop_lod;
+ break;
+
+ case SpvOpImageQueryLevels:
+ texop = nir_texop_query_levels;
+ break;
+
+ case SpvOpImageQuerySamples:
+ default:
+ unreachable("Unhandled opcode");
+ }
+
+ /* Now we need to handle some number of optional arguments */
+ if (idx < count) {
+ uint32_t operands = w[idx++];
+
+ if (operands & SpvImageOperandsBiasMask) {
+ assert(texop == nir_texop_tex);
+ texop = nir_texop_txb;
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias);
+ }
+
+ if (operands & SpvImageOperandsLodMask) {
+ assert(texop == nir_texop_txl || texop == nir_texop_txf ||
+ texop == nir_texop_txs);
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
+ }
+
+ if (operands & SpvImageOperandsGradMask) {
+ assert(texop == nir_texop_tex);
+ texop = nir_texop_txd;
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);
+ }
+
+ if (operands & SpvImageOperandsOffsetMask ||
+ operands & SpvImageOperandsConstOffsetMask)
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);
+
+ if (operands & SpvImageOperandsConstOffsetsMask)
+ assert(!"Constant offsets to texture gather not yet implemented");
+
+ if (operands & SpvImageOperandsSampleMask) {
+ assert(texop == nir_texop_txf);
+ texop = nir_texop_txf_ms;
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
+ }
+ }
+ /* We should have now consumed exactly all of the arguments */
+ assert(idx == count);
+
+ nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
+
+ const struct glsl_type *sampler_type =
+ nir_deref_tail(&sampled.sampler->deref)->type;
+ instr->sampler_dim = glsl_get_sampler_dim(sampler_type);
+
+ switch (glsl_get_sampler_result_type(sampler_type)) {
+ case GLSL_TYPE_FLOAT: instr->dest_type = nir_type_float; break;
+ case GLSL_TYPE_INT: instr->dest_type = nir_type_int; break;
+ case GLSL_TYPE_UINT: instr->dest_type = nir_type_uint; break;
+ case GLSL_TYPE_BOOL: instr->dest_type = nir_type_bool; break;
+ default:
+ unreachable("Invalid base type for sampler result");
+ }
+
+ instr->op = texop;
+ memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));
+ instr->coord_components = coord_components;
+ instr->is_array = glsl_sampler_type_is_array(sampler_type);
+ instr->is_shadow = glsl_sampler_type_is_shadow(sampler_type);
+
+ instr->sampler =
+ nir_deref_as_var(nir_copy_deref(instr, &sampled.sampler->deref));
+ if (sampled.image) {
+ instr->texture =
+ nir_deref_as_var(nir_copy_deref(instr, &sampled.image->deref));
+ } else {
+ instr->texture = NULL;
+ }
+
+ nir_ssa_dest_init(&instr->instr, &instr->dest, 4, NULL);
+ val->ssa = vtn_create_ssa_value(b, glsl_vector_type(GLSL_TYPE_FLOAT, 4));
+ val->ssa->def = &instr->dest.ssa;
+
+ nir_builder_instr_insert(&b->nb, &instr->instr);
+}
+
+static nir_ssa_def *
+get_image_coord(struct vtn_builder *b, uint32_t value)
+{
+ struct vtn_ssa_value *coord = vtn_ssa_value(b, value);
+
+ /* The image_load_store intrinsics assume a 4-dim coordinate */
+ unsigned dim = glsl_get_vector_elements(coord->type);
+ unsigned swizzle[4];
+ for (unsigned i = 0; i < 4; i++)
+ swizzle[i] = MIN2(i, dim - 1);
+
+ return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
+}
+
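+/* Handles image read/write/atomic opcodes. OpImageTexelPointer just records
+ * the image, coordinate, and sample; the remaining opcodes are emitted as
+ * nir_intrinsic_image_* instructions. IIncrement/IDecrement are lowered to
+ * atomic_add of +1/-1 and ISub to atomic_add of the negated operand.
+ */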
+static void
+vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ /* Just get this one out of the way */
+ if (opcode == SpvOpImageTexelPointer) {
+ struct vtn_value *val =
+ vtn_push_value(b, w[2], vtn_value_type_image_pointer);
+ val->image = ralloc(b, struct vtn_image_pointer);
+
+ val->image->deref = vtn_value(b, w[3], vtn_value_type_deref)->deref;
+ val->image->coord = get_image_coord(b, w[4]);
+ val->image->sample = vtn_ssa_value(b, w[5])->def;
+ return;
+ }
+
+ struct vtn_image_pointer image;
+
+ switch (opcode) {
+ case SpvOpAtomicExchange:
+ case SpvOpAtomicCompareExchange:
+ case SpvOpAtomicCompareExchangeWeak:
+ case SpvOpAtomicIIncrement:
+ case SpvOpAtomicIDecrement:
+ case SpvOpAtomicIAdd:
+ case SpvOpAtomicISub:
+ case SpvOpAtomicSMin:
+ case SpvOpAtomicUMin:
+ case SpvOpAtomicSMax:
+ case SpvOpAtomicUMax:
+ case SpvOpAtomicAnd:
+ case SpvOpAtomicOr:
+ case SpvOpAtomicXor:
+ image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
+ break;
+
+ case SpvOpImageRead:
+ image.deref = vtn_value(b, w[3], vtn_value_type_deref)->deref;
+ image.coord = get_image_coord(b, w[4]);
+
+ if (count > 5 && (w[5] & SpvImageOperandsSampleMask)) {
+ assert(w[5] == SpvImageOperandsSampleMask);
+ image.sample = vtn_ssa_value(b, w[6])->def;
+ } else {
+ image.sample = nir_ssa_undef(&b->nb, 1);
+ }
+ break;
+
+ case SpvOpImageWrite:
+ image.deref = vtn_value(b, w[1], vtn_value_type_deref)->deref;
+ image.coord = get_image_coord(b, w[2]);
+
+ /* texel = w[3] */
+
+ if (count > 4 && (w[4] & SpvImageOperandsSampleMask)) {
+ assert(w[4] == SpvImageOperandsSampleMask);
+ image.sample = vtn_ssa_value(b, w[5])->def;
+ } else {
+ image.sample = nir_ssa_undef(&b->nb, 1);
+      }
+      break;
+
+ default:
+ unreachable("Invalid image opcode");
+ }
+
+ nir_intrinsic_op op;
+ switch (opcode) {
+#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_##N; break;
+ OP(ImageRead, load)
+ OP(ImageWrite, store)
+ OP(AtomicExchange, atomic_exchange)
+ OP(AtomicCompareExchange, atomic_comp_swap)
+ OP(AtomicIIncrement, atomic_add)
+ OP(AtomicIDecrement, atomic_add)
+ OP(AtomicIAdd, atomic_add)
+ OP(AtomicISub, atomic_add)
+ OP(AtomicSMin, atomic_min)
+ OP(AtomicUMin, atomic_min)
+ OP(AtomicSMax, atomic_max)
+ OP(AtomicUMax, atomic_max)
+ OP(AtomicAnd, atomic_and)
+ OP(AtomicOr, atomic_or)
+ OP(AtomicXor, atomic_xor)
+#undef OP
+ default:
+ unreachable("Invalid image opcode");
+ }
+
+ nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
+ intrin->variables[0] =
+ nir_deref_as_var(nir_copy_deref(&intrin->instr, &image.deref->deref));
+ intrin->src[0] = nir_src_for_ssa(image.coord);
+ intrin->src[1] = nir_src_for_ssa(image.sample);
+
+ switch (opcode) {
+ case SpvOpImageRead:
+ break;
+ case SpvOpImageWrite:
+ intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
+ break;
+ case SpvOpAtomicIIncrement:
+ intrin->src[2] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
+ break;
+ case SpvOpAtomicIDecrement:
+ intrin->src[2] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
+ break;
+
+ case SpvOpAtomicExchange:
+ case SpvOpAtomicIAdd:
+ case SpvOpAtomicSMin:
+ case SpvOpAtomicUMin:
+ case SpvOpAtomicSMax:
+ case SpvOpAtomicUMax:
+ case SpvOpAtomicAnd:
+ case SpvOpAtomicOr:
+ case SpvOpAtomicXor:
+ intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
+ break;
+
+ case SpvOpAtomicCompareExchange:
+ intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
+ intrin->src[3] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
+ break;
+
+ case SpvOpAtomicISub:
+ intrin->src[2] = nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
+ break;
+
+ default:
+ unreachable("Invalid image opcode");
+ }
+
+ if (opcode != SpvOpImageWrite) {
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
+ nir_ssa_dest_init(&intrin->instr, &intrin->dest,
+ glsl_get_vector_elements(type->type), NULL);
+ val->ssa = vtn_create_ssa_value(b, type->type);
+ val->ssa->def = &intrin->dest.ssa;
+ }
+
+ nir_builder_instr_insert(&b->nb, &intrin->instr);
+}
+
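+/* Creates a vecN ALU instruction (an fmov for a single component) with an
+ * SSA destination and a full writemask; the caller fills in the sources and
+ * inserts it.
+ */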
+static nir_alu_instr *
+create_vec(void *mem_ctx, unsigned num_components)
+{
+ nir_op op;
+ switch (num_components) {
+ case 1: op = nir_op_fmov; break;
+ case 2: op = nir_op_vec2; break;
+ case 3: op = nir_op_vec3; break;
+ case 4: op = nir_op_vec4; break;
+ default: unreachable("bad vector size");
+ }
+
+ nir_alu_instr *vec = nir_alu_instr_create(mem_ctx, op);
+ nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components, NULL);
+ vec->dest.write_mask = (1 << num_components) - 1;
+
+ return vec;
+}
+
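+/* Returns the cached transpose if the value already has one. Otherwise,
+ * column i of the result is built as a vecN gathering component i of every
+ * source column, and the result remembers src as its own transpose.
+ */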
+static struct vtn_ssa_value *
+vtn_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
+{
+ if (src->transposed)
+ return src->transposed;
+
+ struct vtn_ssa_value *dest =
+ vtn_create_ssa_value(b, glsl_transposed_type(src->type));
+
+ for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
+ nir_alu_instr *vec = create_vec(b, glsl_get_matrix_columns(src->type));
+ if (glsl_type_is_vector_or_scalar(src->type)) {
+ vec->src[0].src = nir_src_for_ssa(src->def);
+ vec->src[0].swizzle[0] = i;
+ } else {
+ for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
+ vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
+ vec->src[j].swizzle[0] = i;
+ }
+ }
+ nir_builder_instr_insert(&b->nb, &vec->instr);
+ dest->elems[i]->def = &vec->dest.dest.ssa;
+ }
+
+ dest->transposed = src;
+
+ return dest;
+}
+
+/*
+ * Normally, column vectors in SPIR-V correspond to a single NIR SSA
+ * definition. But for matrix multiplies, we want to do one routine for
+ * multiplying a matrix by a matrix and then pretend that vectors are matrices
+ * with one column. So we "wrap" these things, and unwrap the result before we
+ * send it off.
+ */
+
+static struct vtn_ssa_value *
+vtn_wrap_matrix(struct vtn_builder *b, struct vtn_ssa_value *val)
+{
+ if (val == NULL)
+ return NULL;
+
+ if (glsl_type_is_matrix(val->type))
+ return val;
+
+ struct vtn_ssa_value *dest = rzalloc(b, struct vtn_ssa_value);
+ dest->type = val->type;
+ dest->elems = ralloc_array(b, struct vtn_ssa_value *, 1);
+ dest->elems[0] = val;
+
+ return dest;
+}
+
+static struct vtn_ssa_value *
+vtn_unwrap_matrix(struct vtn_ssa_value *val)
+{
+ if (glsl_type_is_matrix(val->type))
+ return val;
+
+ return val->elems[0];
+}
+
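+/* Multiplies two (wrapped) matrices. If both operands are cached transposes
+ * we compute transpose(B * A) instead; if only src0's transpose is available
+ * (and the base type is float) we take row-column dot products; otherwise we
+ * fall back to summing scaled columns of src0.
+ */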
+static struct vtn_ssa_value *
+vtn_matrix_multiply(struct vtn_builder *b,
+ struct vtn_ssa_value *_src0, struct vtn_ssa_value *_src1)
+{
+ struct vtn_ssa_value *src0 = vtn_wrap_matrix(b, _src0);
+ struct vtn_ssa_value *src1 = vtn_wrap_matrix(b, _src1);
+ struct vtn_ssa_value *src0_transpose = vtn_wrap_matrix(b, _src0->transposed);
+ struct vtn_ssa_value *src1_transpose = vtn_wrap_matrix(b, _src1->transposed);
+
+ unsigned src0_rows = glsl_get_vector_elements(src0->type);
+ unsigned src0_columns = glsl_get_matrix_columns(src0->type);
+ unsigned src1_columns = glsl_get_matrix_columns(src1->type);
+
+ struct vtn_ssa_value *dest =
+ vtn_create_ssa_value(b, glsl_matrix_type(glsl_get_base_type(src0->type),
+ src0_rows, src1_columns));
+
+ dest = vtn_wrap_matrix(b, dest);
+
+ bool transpose_result = false;
+ if (src0_transpose && src1_transpose) {
+ /* transpose(A) * transpose(B) = transpose(B * A) */
+ src1 = src0_transpose;
+ src0 = src1_transpose;
+ src0_transpose = NULL;
+ src1_transpose = NULL;
+ transpose_result = true;
+ }
+
+ if (src0_transpose && !src1_transpose &&
+ glsl_get_base_type(src0->type) == GLSL_TYPE_FLOAT) {
+ /* We already have the rows of src0 and the columns of src1 available,
+ * so we can just take the dot product of each row with each column to
+ * get the result.
+ */
+
+ for (unsigned i = 0; i < src1_columns; i++) {
+ nir_alu_instr *vec = create_vec(b, src0_rows);
+ for (unsigned j = 0; j < src0_rows; j++) {
+ vec->src[j].src =
+ nir_src_for_ssa(nir_fdot(&b->nb, src0_transpose->elems[j]->def,
+ src1->elems[i]->def));
+ }
+
+ nir_builder_instr_insert(&b->nb, &vec->instr);
+ dest->elems[i]->def = &vec->dest.dest.ssa;
+ }
+ } else {
+ /* We don't handle the case where src1 is transposed but not src0, since
+ * the general case only uses individual components of src1 so the
+ * optimizer should chew through the transpose we emitted for src1.
+ */
+
+ for (unsigned i = 0; i < src1_columns; i++) {
+ /* dest[i] = sum(src0[j] * src1[i][j] for all j) */
+ dest->elems[i]->def =
+ nir_fmul(&b->nb, src0->elems[0]->def,
+ vtn_vector_extract(b, src1->elems[i]->def, 0));
+ for (unsigned j = 1; j < src0_columns; j++) {
+ dest->elems[i]->def =
+ nir_fadd(&b->nb, dest->elems[i]->def,
+ nir_fmul(&b->nb, src0->elems[j]->def,
+ vtn_vector_extract(b,
+ src1->elems[i]->def, j)));
+ }
+ }
+ }
+
+ dest = vtn_unwrap_matrix(dest);
+
+ if (transpose_result)
+ dest = vtn_transpose(b, dest);
+
+ return dest;
+}
+
+static struct vtn_ssa_value *
+vtn_mat_times_scalar(struct vtn_builder *b,
+ struct vtn_ssa_value *mat,
+ nir_ssa_def *scalar)
+{
+ struct vtn_ssa_value *dest = vtn_create_ssa_value(b, mat->type);
+ for (unsigned i = 0; i < glsl_get_matrix_columns(mat->type); i++) {
+ if (glsl_get_base_type(mat->type) == GLSL_TYPE_FLOAT)
+ dest->elems[i]->def = nir_fmul(&b->nb, mat->elems[i]->def, scalar);
+ else
+ dest->elems[i]->def = nir_imul(&b->nb, mat->elems[i]->def, scalar);
+ }
+
+ return dest;
+}
+
+static void
+vtn_handle_matrix_alu(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+
+ switch (opcode) {
+ case SpvOpTranspose: {
+ struct vtn_ssa_value *src = vtn_ssa_value(b, w[3]);
+ val->ssa = vtn_transpose(b, src);
+ break;
+ }
+
+ case SpvOpOuterProduct: {
+ struct vtn_ssa_value *src0 = vtn_ssa_value(b, w[3]);
+ struct vtn_ssa_value *src1 = vtn_ssa_value(b, w[4]);
+
+ val->ssa = vtn_matrix_multiply(b, src0, vtn_transpose(b, src1));
+ break;
+ }
+
+ case SpvOpMatrixTimesScalar: {
+ struct vtn_ssa_value *mat = vtn_ssa_value(b, w[3]);
+ struct vtn_ssa_value *scalar = vtn_ssa_value(b, w[4]);
+
+ if (mat->transposed) {
+ val->ssa = vtn_transpose(b, vtn_mat_times_scalar(b, mat->transposed,
+ scalar->def));
+ } else {
+ val->ssa = vtn_mat_times_scalar(b, mat, scalar->def);
+ }
+ break;
+ }
+
+ case SpvOpVectorTimesMatrix:
+ case SpvOpMatrixTimesVector:
+ case SpvOpMatrixTimesMatrix: {
+ struct vtn_ssa_value *src0 = vtn_ssa_value(b, w[3]);
+ struct vtn_ssa_value *src1 = vtn_ssa_value(b, w[4]);
+
+ val->ssa = vtn_matrix_multiply(b, src0, src1);
+ break;
+ }
+
+ default: unreachable("unknown matrix opcode");
+ }
+}
+
+static void
+vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ const struct glsl_type *type =
+ vtn_value(b, w[1], vtn_value_type_type)->type->type;
+ val->ssa = vtn_create_ssa_value(b, type);
+
+ /* Collect the various SSA sources */
+ unsigned num_inputs = count - 3;
+ nir_ssa_def *src[4];
+ for (unsigned i = 0; i < num_inputs; i++)
+ src[i] = vtn_ssa_value(b, w[i + 3])->def;
+
+ /* Indicates that the first two arguments should be swapped. This is
+ * used for implementing greater-than and less-than-or-equal.
+ */
+ bool swap = false;
+
+ nir_op op;
+ switch (opcode) {
+ /* Basic ALU operations */
+ case SpvOpSNegate: op = nir_op_ineg; break;
+ case SpvOpFNegate: op = nir_op_fneg; break;
+ case SpvOpNot: op = nir_op_inot; break;
+
+ case SpvOpAny:
+ switch (src[0]->num_components) {
+ case 1: op = nir_op_imov; break;
+ case 2: op = nir_op_bany2; break;
+ case 3: op = nir_op_bany3; break;
+ case 4: op = nir_op_bany4; break;
+ }
+ break;
+
+ case SpvOpAll:
+ switch (src[0]->num_components) {
+ case 1: op = nir_op_imov; break;
+ case 2: op = nir_op_ball2; break;
+ case 3: op = nir_op_ball3; break;
+ case 4: op = nir_op_ball4; break;
+ }
+ break;
+
+ case SpvOpIAdd: op = nir_op_iadd; break;
+ case SpvOpFAdd: op = nir_op_fadd; break;
+ case SpvOpISub: op = nir_op_isub; break;
+ case SpvOpFSub: op = nir_op_fsub; break;
+ case SpvOpIMul: op = nir_op_imul; break;
+ case SpvOpFMul: op = nir_op_fmul; break;
+ case SpvOpUDiv: op = nir_op_udiv; break;
+ case SpvOpSDiv: op = nir_op_idiv; break;
+ case SpvOpFDiv: op = nir_op_fdiv; break;
+ case SpvOpUMod: op = nir_op_umod; break;
+ case SpvOpSMod: op = nir_op_umod; break; /* FIXME? */
+ case SpvOpFMod: op = nir_op_fmod; break;
+
+ case SpvOpDot:
+ assert(src[0]->num_components == src[1]->num_components);
+ switch (src[0]->num_components) {
+ case 1: op = nir_op_fmul; break;
+ case 2: op = nir_op_fdot2; break;
+ case 3: op = nir_op_fdot3; break;
+ case 4: op = nir_op_fdot4; break;
+ }
+ break;
+
+ case SpvOpShiftRightLogical: op = nir_op_ushr; break;
+ case SpvOpShiftRightArithmetic: op = nir_op_ishr; break;
+ case SpvOpShiftLeftLogical: op = nir_op_ishl; break;
+ case SpvOpLogicalOr: op = nir_op_ior; break;
+ case SpvOpLogicalEqual: op = nir_op_ieq; break;
+ case SpvOpLogicalNotEqual: op = nir_op_ine; break;
+ case SpvOpLogicalAnd: op = nir_op_iand; break;
+ case SpvOpBitwiseOr: op = nir_op_ior; break;
+ case SpvOpBitwiseXor: op = nir_op_ixor; break;
+ case SpvOpBitwiseAnd: op = nir_op_iand; break;
+ case SpvOpSelect: op = nir_op_bcsel; break;
+ case SpvOpIEqual: op = nir_op_ieq; break;
+
+   /* Comparisons: (TODO: How do we want to handle ordered/unordered?) */
+ case SpvOpFOrdEqual: op = nir_op_feq; break;
+ case SpvOpFUnordEqual: op = nir_op_feq; break;
+ case SpvOpINotEqual: op = nir_op_ine; break;
+ case SpvOpFOrdNotEqual: op = nir_op_fne; break;
+ case SpvOpFUnordNotEqual: op = nir_op_fne; break;
+ case SpvOpULessThan: op = nir_op_ult; break;
+ case SpvOpSLessThan: op = nir_op_ilt; break;
+ case SpvOpFOrdLessThan: op = nir_op_flt; break;
+ case SpvOpFUnordLessThan: op = nir_op_flt; break;
+ case SpvOpUGreaterThan: op = nir_op_ult; swap = true; break;
+ case SpvOpSGreaterThan: op = nir_op_ilt; swap = true; break;
+ case SpvOpFOrdGreaterThan: op = nir_op_flt; swap = true; break;
+ case SpvOpFUnordGreaterThan: op = nir_op_flt; swap = true; break;
+ case SpvOpULessThanEqual: op = nir_op_uge; swap = true; break;
+ case SpvOpSLessThanEqual: op = nir_op_ige; swap = true; break;
+ case SpvOpFOrdLessThanEqual: op = nir_op_fge; swap = true; break;
+ case SpvOpFUnordLessThanEqual: op = nir_op_fge; swap = true; break;
+ case SpvOpUGreaterThanEqual: op = nir_op_uge; break;
+ case SpvOpSGreaterThanEqual: op = nir_op_ige; break;
+ case SpvOpFOrdGreaterThanEqual: op = nir_op_fge; break;
+   case SpvOpFUnordGreaterThanEqual: op = nir_op_fge; break;
+
+ /* Conversions: */
+ case SpvOpConvertFToU: op = nir_op_f2u; break;
+ case SpvOpConvertFToS: op = nir_op_f2i; break;
+ case SpvOpConvertSToF: op = nir_op_i2f; break;
+ case SpvOpConvertUToF: op = nir_op_u2f; break;
+ case SpvOpBitcast: op = nir_op_imov; break;
+ case SpvOpUConvert:
+ case SpvOpSConvert:
+ op = nir_op_imov; /* TODO: NIR is 32-bit only; these are no-ops. */
+ break;
+ case SpvOpFConvert:
+ op = nir_op_fmov;
+ break;
+
+ /* Derivatives: */
+ case SpvOpDPdx: op = nir_op_fddx; break;
+ case SpvOpDPdy: op = nir_op_fddy; break;
+ case SpvOpDPdxFine: op = nir_op_fddx_fine; break;
+ case SpvOpDPdyFine: op = nir_op_fddy_fine; break;
+ case SpvOpDPdxCoarse: op = nir_op_fddx_coarse; break;
+ case SpvOpDPdyCoarse: op = nir_op_fddy_coarse; break;
+ case SpvOpFwidth:
+ val->ssa->def = nir_fadd(&b->nb,
+ nir_fabs(&b->nb, nir_fddx(&b->nb, src[0])),
+                               nir_fabs(&b->nb, nir_fddy(&b->nb, src[0])));
+ return;
+ case SpvOpFwidthFine:
+ val->ssa->def = nir_fadd(&b->nb,
+ nir_fabs(&b->nb, nir_fddx_fine(&b->nb, src[0])),
+                               nir_fabs(&b->nb, nir_fddy_fine(&b->nb, src[0])));
+ return;
+ case SpvOpFwidthCoarse:
+ val->ssa->def = nir_fadd(&b->nb,
+ nir_fabs(&b->nb, nir_fddx_coarse(&b->nb, src[0])),
+                               nir_fabs(&b->nb, nir_fddy_coarse(&b->nb, src[0])));
+ return;
+
+ case SpvOpVectorTimesScalar:
+ /* The builder will take care of splatting for us. */
+ val->ssa->def = nir_fmul(&b->nb, src[0], src[1]);
+ return;
+
+ case SpvOpSRem:
+ case SpvOpFRem:
+ unreachable("No NIR equivalent");
+
+ case SpvOpIsNan:
+ case SpvOpIsInf:
+ case SpvOpIsFinite:
+ case SpvOpIsNormal:
+ case SpvOpSignBitSet:
+ case SpvOpLessOrGreater:
+ case SpvOpOrdered:
+ case SpvOpUnordered:
+ default:
+ unreachable("Unhandled opcode");
+ }
+
+ if (swap) {
+ nir_ssa_def *tmp = src[0];
+ src[0] = src[1];
+ src[1] = tmp;
+ }
+
+ nir_alu_instr *instr = nir_alu_instr_create(b->shader, op);
+ nir_ssa_dest_init(&instr->instr, &instr->dest.dest,
+ glsl_get_vector_elements(type), val->name);
+ instr->dest.write_mask = (1 << glsl_get_vector_elements(type)) - 1;
+ val->ssa->def = &instr->dest.dest.ssa;
+
+ for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++)
+ instr->src[i].src = nir_src_for_ssa(src[i]);
+
+ nir_builder_instr_insert(&b->nb, &instr->instr);
+}
+
+static nir_ssa_def *
+vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
+{
+ unsigned swiz[4] = { index };
+ return nir_swizzle(&b->nb, src, swiz, 1, true);
+}
+
+static nir_ssa_def *
+vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
+ unsigned index)
+{
+ nir_alu_instr *vec = create_vec(b->shader, src->num_components);
+
+ for (unsigned i = 0; i < src->num_components; i++) {
+ if (i == index) {
+ vec->src[i].src = nir_src_for_ssa(insert);
+ } else {
+ vec->src[i].src = nir_src_for_ssa(src);
+ vec->src[i].swizzle[0] = i;
+ }
+ }
+
+ nir_builder_instr_insert(&b->nb, &vec->instr);
+
+ return &vec->dest.dest.ssa;
+}
+
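+/* Implements OpVectorExtractDynamic as a chain of bcsels: start with
+ * component 0 and select component i whenever index == i.
+ */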
+static nir_ssa_def *
+vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
+ nir_ssa_def *index)
+{
+ nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
+ for (unsigned i = 1; i < src->num_components; i++)
+ dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
+ vtn_vector_extract(b, src, i), dest);
+
+ return dest;
+}
+
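+/* Same bcsel-chain approach for OpVectorInsertDynamic: each candidate has
+ * the new value inserted at a different component and index picks which
+ * candidate to keep.
+ */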
+static nir_ssa_def *
+vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
+ nir_ssa_def *insert, nir_ssa_def *index)
+{
+ nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
+ for (unsigned i = 1; i < src->num_components; i++)
+ dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
+ vtn_vector_insert(b, src, insert, i), dest);
+
+ return dest;
+}
+
+static nir_ssa_def *
+vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
+ nir_ssa_def *src0, nir_ssa_def *src1,
+ const uint32_t *indices)
+{
+ nir_alu_instr *vec = create_vec(b->shader, num_components);
+
+ nir_ssa_undef_instr *undef = nir_ssa_undef_instr_create(b->shader, 1);
+ nir_builder_instr_insert(&b->nb, &undef->instr);
+
+ for (unsigned i = 0; i < num_components; i++) {
+ uint32_t index = indices[i];
+ if (index == 0xffffffff) {
+ vec->src[i].src = nir_src_for_ssa(&undef->def);
+ } else if (index < src0->num_components) {
+ vec->src[i].src = nir_src_for_ssa(src0);
+ vec->src[i].swizzle[0] = index;
+ } else {
+ vec->src[i].src = nir_src_for_ssa(src1);
+ vec->src[i].swizzle[0] = index - src0->num_components;
+ }
+ }
+
+ nir_builder_instr_insert(&b->nb, &vec->instr);
+
+ return &vec->dest.dest.ssa;
+}
+
+/*
+ * Concatenates a number of vectors/scalars together to produce a vector
+ */
+static nir_ssa_def *
+vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
+ unsigned num_srcs, nir_ssa_def **srcs)
+{
+ nir_alu_instr *vec = create_vec(b->shader, num_components);
+
+ unsigned dest_idx = 0;
+ for (unsigned i = 0; i < num_srcs; i++) {
+ nir_ssa_def *src = srcs[i];
+ for (unsigned j = 0; j < src->num_components; j++) {
+ vec->src[dest_idx].src = nir_src_for_ssa(src);
+ vec->src[dest_idx].swizzle[0] = j;
+ dest_idx++;
+ }
+ }
+
+ nir_builder_instr_insert(&b->nb, &vec->instr);
+
+ return &vec->dest.dest.ssa;
+}
+
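+/* Makes a structural copy of a vtn_ssa_value. SSA defs are immutable and
+ * therefore shared; only the wrapper tree is duplicated, which is all that
+ * OpCompositeInsert and OpCopyObject need.
+ */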
+static struct vtn_ssa_value *
+vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
+{
+ struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
+ dest->type = src->type;
+
+ if (glsl_type_is_vector_or_scalar(src->type)) {
+ dest->def = src->def;
+ } else {
+ unsigned elems = glsl_get_length(src->type);
+
+ dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
+ for (unsigned i = 0; i < elems; i++)
+ dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
+ }
+
+ return dest;
+}
+
+static struct vtn_ssa_value *
+vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
+ struct vtn_ssa_value *insert, const uint32_t *indices,
+ unsigned num_indices)
+{
+ struct vtn_ssa_value *dest = vtn_composite_copy(b, src);
+
+ struct vtn_ssa_value *cur = dest;
+ unsigned i;
+ for (i = 0; i < num_indices - 1; i++) {
+ cur = cur->elems[indices[i]];
+ }
+
+ if (glsl_type_is_vector_or_scalar(cur->type)) {
+ /* According to the SPIR-V spec, OpCompositeInsert may work down to
+       * the component granularity. In that case, the last index is the
+       * component index at which to insert the scalar into the vector.
+ */
+
+ cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
+ } else {
+ cur->elems[indices[i]] = insert;
+ }
+
+ return dest;
+}
+
+static struct vtn_ssa_value *
+vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
+ const uint32_t *indices, unsigned num_indices)
+{
+ struct vtn_ssa_value *cur = src;
+ for (unsigned i = 0; i < num_indices; i++) {
+ if (glsl_type_is_vector_or_scalar(cur->type)) {
+ assert(i == num_indices - 1);
+ /* According to the SPIR-V spec, OpCompositeExtract may work down to
+          * the component granularity. The last index is the index of the
+          * component to extract from the vector.
+ */
+
+ struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
+ ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
+ ret->def = vtn_vector_extract(b, cur->def, indices[i]);
+ return ret;
+ }
+ }
+
+ return cur;
+}
+
+static void
+vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ const struct glsl_type *type =
+ vtn_value(b, w[1], vtn_value_type_type)->type->type;
+ val->ssa = vtn_create_ssa_value(b, type);
+
+ switch (opcode) {
+ case SpvOpVectorExtractDynamic:
+ val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
+ vtn_ssa_value(b, w[4])->def);
+ break;
+
+ case SpvOpVectorInsertDynamic:
+ val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
+ vtn_ssa_value(b, w[4])->def,
+ vtn_ssa_value(b, w[5])->def);
+ break;
+
+ case SpvOpVectorShuffle:
+ val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
+ vtn_ssa_value(b, w[3])->def,
+ vtn_ssa_value(b, w[4])->def,
+ w + 5);
+ break;
+
+ case SpvOpCompositeConstruct: {
+ unsigned elems = count - 3;
+ if (glsl_type_is_vector_or_scalar(type)) {
+ nir_ssa_def *srcs[4];
+ for (unsigned i = 0; i < elems; i++)
+ srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
+ val->ssa->def =
+ vtn_vector_construct(b, glsl_get_vector_elements(type),
+ elems, srcs);
+ } else {
+ val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
+ for (unsigned i = 0; i < elems; i++)
+ val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
+ }
+ break;
+ }
+ case SpvOpCompositeExtract:
+ val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
+ w + 4, count - 4);
+ break;
+
+ case SpvOpCompositeInsert:
+ val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
+ vtn_ssa_value(b, w[3]),
+ w + 5, count - 5);
+ break;
+
+ case SpvOpCopyObject:
+ val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
+ break;
+
+ default:
+ unreachable("unknown composite operation");
+ }
+}
+
+static void
+vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ nir_intrinsic_op intrinsic_op;
+ switch (opcode) {
+ case SpvOpEmitVertex:
+ case SpvOpEmitStreamVertex:
+ intrinsic_op = nir_intrinsic_emit_vertex;
+ break;
+ case SpvOpEndPrimitive:
+ case SpvOpEndStreamPrimitive:
+ intrinsic_op = nir_intrinsic_end_primitive;
+ break;
+ case SpvOpMemoryBarrier:
+ intrinsic_op = nir_intrinsic_memory_barrier;
+ break;
+ case SpvOpControlBarrier:
+ default:
+ unreachable("unknown barrier instruction");
+ }
+
+ nir_intrinsic_instr *intrin =
+ nir_intrinsic_instr_create(b->shader, intrinsic_op);
+
+ if (opcode == SpvOpEmitStreamVertex || opcode == SpvOpEndStreamPrimitive)
+ intrin->const_index[0] = w[1];
+
+ nir_builder_instr_insert(&b->nb, &intrin->instr);
+}
+
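+/* Phi handling is done in two passes: the first pass (vtn_handle_phi_first_pass,
+ * using the helpers below) creates empty phi nodes so that later instructions
+ * can reference their results, and vtn_handle_phi_second_pass fills in the
+ * sources once every block has been emitted.
+ */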
+static void
+vtn_phi_node_init(struct vtn_builder *b, struct vtn_ssa_value *val)
+{
+ if (glsl_type_is_vector_or_scalar(val->type)) {
+ nir_phi_instr *phi = nir_phi_instr_create(b->shader);
+ nir_ssa_dest_init(&phi->instr, &phi->dest,
+ glsl_get_vector_elements(val->type), NULL);
+ exec_list_make_empty(&phi->srcs);
+ nir_builder_instr_insert(&b->nb, &phi->instr);
+ val->def = &phi->dest.ssa;
+ } else {
+ unsigned elems = glsl_get_length(val->type);
+ for (unsigned i = 0; i < elems; i++)
+ vtn_phi_node_init(b, val->elems[i]);
+ }
+}
+
+static struct vtn_ssa_value *
+vtn_phi_node_create(struct vtn_builder *b, const struct glsl_type *type)
+{
+ struct vtn_ssa_value *val = vtn_create_ssa_value(b, type);
+ vtn_phi_node_init(b, val);
+ return val;
+}
+
+static void
+vtn_handle_phi_first_pass(struct vtn_builder *b, const uint32_t *w)
+{
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+ const struct glsl_type *type =
+ vtn_value(b, w[1], vtn_value_type_type)->type->type;
+ val->ssa = vtn_phi_node_create(b, type);
+}
+
+static void
+vtn_phi_node_add_src(struct vtn_ssa_value *phi, const nir_block *pred,
+ struct vtn_ssa_value *val)
+{
+ assert(phi->type == val->type);
+ if (glsl_type_is_vector_or_scalar(phi->type)) {
+ nir_phi_instr *phi_instr = nir_instr_as_phi(phi->def->parent_instr);
+ nir_phi_src *src = ralloc(phi_instr, nir_phi_src);
+ src->pred = (nir_block *) pred;
+ src->src = nir_src_for_ssa(val->def);
+ exec_list_push_tail(&phi_instr->srcs, &src->node);
+ } else {
+ unsigned elems = glsl_get_length(phi->type);
+ for (unsigned i = 0; i < elems; i++)
+ vtn_phi_node_add_src(phi->elems[i], pred, val->elems[i]);
+ }
+}
+
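+/* Resolves the value feeding a phi from the given NIR predecessor block. If
+ * the block is one of the parents listed in the OpPhi, the matching value is
+ * returned directly; otherwise a new phi is created at the top of that block
+ * and its own sources are resolved recursively from its predecessors.
+ */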
+static struct vtn_ssa_value *
+vtn_get_phi_node_src(struct vtn_builder *b, nir_block *block,
+ const struct glsl_type *type, const uint32_t *w,
+ unsigned count)
+{
+ struct hash_entry *entry = _mesa_hash_table_search(b->block_table, block);
+ if (entry) {
+ struct vtn_block *spv_block = entry->data;
+ for (unsigned off = 4; off < count; off += 2) {
+ if (spv_block == vtn_value(b, w[off], vtn_value_type_block)->block) {
+ return vtn_ssa_value(b, w[off - 1]);
+ }
+ }
+ }
+
+ b->nb.cursor = nir_before_block(block);
+ struct vtn_ssa_value *phi = vtn_phi_node_create(b, type);
+
+ struct set_entry *entry2;
+ set_foreach(block->predecessors, entry2) {
+ nir_block *pred = (nir_block *) entry2->key;
+ struct vtn_ssa_value *val = vtn_get_phi_node_src(b, pred, type, w,
+ count);
+ vtn_phi_node_add_src(phi, pred, val);
+ }
+
+ return phi;
+}
+
+static bool
+vtn_handle_phi_second_pass(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ if (opcode == SpvOpLabel) {
+ b->block = vtn_value(b, w[1], vtn_value_type_block)->block;
+ return true;
+ }
+
+ if (opcode != SpvOpPhi)
+ return true;
+
+ struct vtn_ssa_value *phi = vtn_value(b, w[2], vtn_value_type_ssa)->ssa;
+
+ struct set_entry *entry;
+ set_foreach(b->block->block->predecessors, entry) {
+ nir_block *pred = (nir_block *) entry->key;
+
+ struct vtn_ssa_value *val = vtn_get_phi_node_src(b, pred, phi->type, w,
+ count);
+ vtn_phi_node_add_src(phi, pred, val);
+ }
+
+ return true;
+}
+
+static unsigned
+gl_primitive_from_spv_execution_mode(SpvExecutionMode mode)
+{
+ switch (mode) {
+ case SpvExecutionModeInputPoints:
+ case SpvExecutionModeOutputPoints:
+ return 0; /* GL_POINTS */
+ case SpvExecutionModeInputLines:
+ return 1; /* GL_LINES */
+ case SpvExecutionModeInputLinesAdjacency:
+      return 0x000A; /* GL_LINES_ADJACENCY_ARB */
+ case SpvExecutionModeTriangles:
+ return 4; /* GL_TRIANGLES */
+ case SpvExecutionModeInputTrianglesAdjacency:
+ return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
+ case SpvExecutionModeQuads:
+ return 7; /* GL_QUADS */
+ case SpvExecutionModeIsolines:
+ return 0x8E7A; /* GL_ISOLINES */
+ case SpvExecutionModeOutputLineStrip:
+ return 3; /* GL_LINE_STRIP */
+ case SpvExecutionModeOutputTriangleStrip:
+ return 5; /* GL_TRIANGLE_STRIP */
+ default:
+ assert(!"Invalid primitive type");
+ return 4;
+ }
+}
+
+static unsigned
+vertices_in_from_spv_execution_mode(SpvExecutionMode mode)
+{
+ switch (mode) {
+ case SpvExecutionModeInputPoints:
+ return 1;
+ case SpvExecutionModeInputLines:
+ return 2;
+ case SpvExecutionModeInputLinesAdjacency:
+ return 4;
+ case SpvExecutionModeTriangles:
+ return 3;
+ case SpvExecutionModeInputTrianglesAdjacency:
+ return 6;
+ default:
+ assert(!"Invalid GS input mode");
+ return 0;
+ }
+}
+
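+/* Handles everything before the first OpFunction: capabilities, execution
+ * modes, debug names, decorations, types, constants, and global variables.
+ * Returns false at the first opcode that is not part of the preamble so the
+ * caller can switch passes.
+ */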
+static bool
+vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpSource:
+ case SpvOpSourceExtension:
+ case SpvOpExtension:
+ /* Unhandled, but these are for debug so that's ok. */
+ break;
+
+ case SpvOpCapability:
+ switch ((SpvCapability)w[1]) {
+ case SpvCapabilityMatrix:
+ case SpvCapabilityShader:
+ /* All shaders support these */
+ break;
+ case SpvCapabilityGeometry:
+ assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+ break;
+ default:
+ assert(!"Unsupported capability");
+ }
+ break;
+
+ case SpvOpExtInstImport:
+ vtn_handle_extension(b, opcode, w, count);
+ break;
+
+ case SpvOpMemoryModel:
+ assert(w[1] == SpvAddressingModelLogical);
+ assert(w[2] == SpvMemoryModelGLSL450);
+ break;
+
+ case SpvOpEntryPoint:
+ assert(b->entry_point == NULL);
+ b->entry_point = &b->values[w[2]];
+ b->execution_model = w[1];
+ break;
+
+ case SpvOpExecutionMode:
+ assert(b->entry_point == &b->values[w[1]]);
+
+ SpvExecutionMode mode = w[2];
+ switch(mode) {
+ case SpvExecutionModeOriginUpperLeft:
+ case SpvExecutionModeOriginLowerLeft:
+ b->origin_upper_left = (mode == SpvExecutionModeOriginUpperLeft);
+ break;
+
+ case SpvExecutionModeEarlyFragmentTests:
+ assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+ b->shader->info.fs.early_fragment_tests = true;
+ break;
+
+ case SpvExecutionModeInvocations:
+ assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+ b->shader->info.gs.invocations = w[3];
+ break;
+
+ case SpvExecutionModeDepthReplacing:
+ assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+ b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
+ break;
+ case SpvExecutionModeDepthGreater:
+ assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+ b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
+ break;
+ case SpvExecutionModeDepthLess:
+ assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+ b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
+ break;
+ case SpvExecutionModeDepthUnchanged:
+ assert(b->shader->stage == MESA_SHADER_FRAGMENT);
+ b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
+ break;
+
+ case SpvExecutionModeLocalSize:
+ assert(b->shader->stage == MESA_SHADER_COMPUTE);
+ b->shader->info.cs.local_size[0] = w[3];
+ b->shader->info.cs.local_size[1] = w[4];
+ b->shader->info.cs.local_size[2] = w[5];
+ break;
+ case SpvExecutionModeLocalSizeHint:
+         break; /* Nothing to do with this */
+
+ case SpvExecutionModeOutputVertices:
+ assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+ b->shader->info.gs.vertices_out = w[3];
+ break;
+
+ case SpvExecutionModeInputPoints:
+ case SpvExecutionModeInputLines:
+ case SpvExecutionModeInputLinesAdjacency:
+ case SpvExecutionModeTriangles:
+ case SpvExecutionModeInputTrianglesAdjacency:
+ case SpvExecutionModeQuads:
+ case SpvExecutionModeIsolines:
+ if (b->shader->stage == MESA_SHADER_GEOMETRY) {
+ b->shader->info.gs.vertices_in =
+ vertices_in_from_spv_execution_mode(mode);
+ } else {
+            assert(!"Tessellation shaders not yet supported");
+ }
+ break;
+
+ case SpvExecutionModeOutputPoints:
+ case SpvExecutionModeOutputLineStrip:
+ case SpvExecutionModeOutputTriangleStrip:
+ assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+ b->shader->info.gs.output_primitive =
+ gl_primitive_from_spv_execution_mode(mode);
+ break;
+
+ case SpvExecutionModeSpacingEqual:
+ case SpvExecutionModeSpacingFractionalEven:
+ case SpvExecutionModeSpacingFractionalOdd:
+ case SpvExecutionModeVertexOrderCw:
+ case SpvExecutionModeVertexOrderCcw:
+ case SpvExecutionModePointMode:
+ assert(!"TODO: Add tessellation metadata");
+ break;
+
+ case SpvExecutionModePixelCenterInteger:
+ case SpvExecutionModeXfb:
+ assert(!"Unhandled execution mode");
+ break;
+
+ case SpvExecutionModeVecTypeHint:
+ case SpvExecutionModeContractionOff:
+ break; /* OpenCL */
+ }
+ break;
+
+ case SpvOpString:
+ vtn_push_value(b, w[1], vtn_value_type_string)->str =
+ vtn_string_literal(b, &w[2], count - 2);
+ break;
+
+ case SpvOpName:
+ b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2);
+ break;
+
+ case SpvOpMemberName:
+ /* TODO */
+ break;
+
+ case SpvOpLine:
+ break; /* Ignored for now */
+
+ case SpvOpDecorationGroup:
+ case SpvOpDecorate:
+ case SpvOpMemberDecorate:
+ case SpvOpGroupDecorate:
+ case SpvOpGroupMemberDecorate:
+ vtn_handle_decoration(b, opcode, w, count);
+ break;
+
+ case SpvOpTypeVoid:
+ case SpvOpTypeBool:
+ case SpvOpTypeInt:
+ case SpvOpTypeFloat:
+ case SpvOpTypeVector:
+ case SpvOpTypeMatrix:
+ case SpvOpTypeImage:
+ case SpvOpTypeSampler:
+ case SpvOpTypeSampledImage:
+ case SpvOpTypeArray:
+ case SpvOpTypeRuntimeArray:
+ case SpvOpTypeStruct:
+ case SpvOpTypeOpaque:
+ case SpvOpTypePointer:
+ case SpvOpTypeFunction:
+ case SpvOpTypeEvent:
+ case SpvOpTypeDeviceEvent:
+ case SpvOpTypeReserveId:
+ case SpvOpTypeQueue:
+ case SpvOpTypePipe:
+ vtn_handle_type(b, opcode, w, count);
+ break;
+
+ case SpvOpConstantTrue:
+ case SpvOpConstantFalse:
+ case SpvOpConstant:
+ case SpvOpConstantComposite:
+ case SpvOpConstantSampler:
+ case SpvOpSpecConstantTrue:
+ case SpvOpSpecConstantFalse:
+ case SpvOpSpecConstant:
+ case SpvOpSpecConstantComposite:
+ vtn_handle_constant(b, opcode, w, count);
+ break;
+
+ case SpvOpVariable:
+ vtn_handle_variables(b, opcode, w, count);
+ break;
+
+ default:
+ return false; /* End of preamble */
+ }
+
+ return true;
+}
+
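+/* First CFG pass: creates the nir_function overloads and the vtn_block
+ * structures and records branch and merge information, but emits no
+ * instructions. The actual code is generated later by vtn_walk_blocks.
+ */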
+static bool
+vtn_handle_first_cfg_pass_instruction(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpFunction: {
+ assert(b->func == NULL);
+ b->func = rzalloc(b, struct vtn_function);
+
+ const struct glsl_type *result_type =
+ vtn_value(b, w[1], vtn_value_type_type)->type->type;
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_function);
+ const struct glsl_type *func_type =
+ vtn_value(b, w[4], vtn_value_type_type)->type->type;
+
+ assert(glsl_get_function_return_type(func_type) == result_type);
+
+ nir_function *func =
+ nir_function_create(b->shader, ralloc_strdup(b->shader, val->name));
+
+ nir_function_overload *overload = nir_function_overload_create(func);
+ overload->num_params = glsl_get_length(func_type);
+ overload->params = ralloc_array(overload, nir_parameter,
+ overload->num_params);
+ for (unsigned i = 0; i < overload->num_params; i++) {
+ const struct glsl_function_param *param =
+ glsl_get_function_param(func_type, i);
+ overload->params[i].type = param->type;
+ if (param->in) {
+ if (param->out) {
+ overload->params[i].param_type = nir_parameter_inout;
+ } else {
+ overload->params[i].param_type = nir_parameter_in;
+ }
+ } else {
+ if (param->out) {
+ overload->params[i].param_type = nir_parameter_out;
+ } else {
+ assert(!"Parameter is neither in nor out");
+ }
+ }
+ }
+ b->func->overload = overload;
+ break;
+ }
+
+ case SpvOpFunctionEnd:
+ b->func->end = w;
+ b->func = NULL;
+ break;
+
+ case SpvOpFunctionParameter:
+ break; /* Does nothing */
+
+ case SpvOpLabel: {
+ assert(b->block == NULL);
+ b->block = rzalloc(b, struct vtn_block);
+ b->block->label = w;
+ vtn_push_value(b, w[1], vtn_value_type_block)->block = b->block;
+
+ if (b->func->start_block == NULL) {
+ /* This is the first block encountered for this function. In this
+ * case, we set the start block and add it to the list of
+ * implemented functions that we'll walk later.
+ */
+ b->func->start_block = b->block;
+ exec_list_push_tail(&b->functions, &b->func->node);
+ }
+ break;
+ }
+
+ case SpvOpBranch:
+ case SpvOpBranchConditional:
+ case SpvOpSwitch:
+ case SpvOpKill:
+ case SpvOpReturn:
+ case SpvOpReturnValue:
+ case SpvOpUnreachable:
+ assert(b->block);
+ b->block->branch = w;
+ b->block = NULL;
+ break;
+
+ case SpvOpSelectionMerge:
+ case SpvOpLoopMerge:
+ assert(b->block && b->block->merge_op == SpvOpNop);
+ b->block->merge_op = opcode;
+ b->block->merge_block_id = w[1];
+ break;
+
+ default:
+ /* Continue on as per normal */
+ return true;
+ }
+
+ return true;
+}
+
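+/* Second pass over a function body: dispatches each opcode in a block to the
+ * appropriate handler, which emits NIR through b->nb. Called per block from
+ * vtn_walk_blocks via vtn_foreach_instruction.
+ */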
+static bool
+vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpLabel: {
+ struct vtn_block *block = vtn_value(b, w[1], vtn_value_type_block)->block;
+ assert(block->block == NULL);
+
+ block->block = nir_cursor_current_block(b->nb.cursor);
+ break;
+ }
+
+ case SpvOpLoopMerge:
+ case SpvOpSelectionMerge:
+ /* This is handled by cfg pre-pass and walk_blocks */
+ break;
+
+ case SpvOpUndef:
+ vtn_push_value(b, w[2], vtn_value_type_undef);
+ break;
+
+ case SpvOpExtInst:
+ vtn_handle_extension(b, opcode, w, count);
+ break;
+
+ case SpvOpVariable:
+ case SpvOpLoad:
+ case SpvOpStore:
+ case SpvOpCopyMemory:
+ case SpvOpCopyMemorySized:
+ case SpvOpAccessChain:
+ case SpvOpInBoundsAccessChain:
+ case SpvOpArrayLength:
+ vtn_handle_variables(b, opcode, w, count);
+ break;
+
+ case SpvOpFunctionCall:
+ vtn_handle_function_call(b, opcode, w, count);
+ break;
+
+ case SpvOpSampledImage:
+ case SpvOpImageSampleImplicitLod:
+ case SpvOpImageSampleExplicitLod:
+ case SpvOpImageSampleDrefImplicitLod:
+ case SpvOpImageSampleDrefExplicitLod:
+ case SpvOpImageSampleProjImplicitLod:
+ case SpvOpImageSampleProjExplicitLod:
+ case SpvOpImageSampleProjDrefImplicitLod:
+ case SpvOpImageSampleProjDrefExplicitLod:
+ case SpvOpImageFetch:
+ case SpvOpImageGather:
+ case SpvOpImageDrefGather:
+ case SpvOpImageQuerySizeLod:
+ case SpvOpImageQuerySize:
+ case SpvOpImageQueryLod:
+ case SpvOpImageQueryLevels:
+ case SpvOpImageQuerySamples:
+ vtn_handle_texture(b, opcode, w, count);
+ break;
+
+ case SpvOpImageRead:
+ case SpvOpImageWrite:
+ case SpvOpImageTexelPointer:
+ vtn_handle_image(b, opcode, w, count);
+ break;
+
+ case SpvOpAtomicExchange:
+ case SpvOpAtomicCompareExchange:
+ case SpvOpAtomicCompareExchangeWeak:
+ case SpvOpAtomicIIncrement:
+ case SpvOpAtomicIDecrement:
+ case SpvOpAtomicIAdd:
+ case SpvOpAtomicISub:
+ case SpvOpAtomicSMin:
+ case SpvOpAtomicUMin:
+ case SpvOpAtomicSMax:
+ case SpvOpAtomicUMax:
+ case SpvOpAtomicAnd:
+ case SpvOpAtomicOr:
+ case SpvOpAtomicXor: {
+ struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
+ if (pointer->value_type == vtn_value_type_image_pointer) {
+ vtn_handle_image(b, opcode, w, count);
+ } else {
+ assert(!"Atomic buffers not yet implemented");
+ }
+      break;
+   }
+
+ case SpvOpSNegate:
+ case SpvOpFNegate:
+ case SpvOpNot:
+ case SpvOpAny:
+ case SpvOpAll:
+ case SpvOpConvertFToU:
+ case SpvOpConvertFToS:
+ case SpvOpConvertSToF:
+ case SpvOpConvertUToF:
+ case SpvOpUConvert:
+ case SpvOpSConvert:
+ case SpvOpFConvert:
+ case SpvOpConvertPtrToU:
+ case SpvOpConvertUToPtr:
+ case SpvOpPtrCastToGeneric:
+ case SpvOpGenericCastToPtr:
+ case SpvOpBitcast:
+ case SpvOpIsNan:
+ case SpvOpIsInf:
+ case SpvOpIsFinite:
+ case SpvOpIsNormal:
+ case SpvOpSignBitSet:
+ case SpvOpLessOrGreater:
+ case SpvOpOrdered:
+ case SpvOpUnordered:
+ case SpvOpIAdd:
+ case SpvOpFAdd:
+ case SpvOpISub:
+ case SpvOpFSub:
+ case SpvOpIMul:
+ case SpvOpFMul:
+ case SpvOpUDiv:
+ case SpvOpSDiv:
+ case SpvOpFDiv:
+ case SpvOpUMod:
+ case SpvOpSRem:
+ case SpvOpSMod:
+ case SpvOpFRem:
+ case SpvOpFMod:
+ case SpvOpVectorTimesScalar:
+ case SpvOpDot:
+ case SpvOpShiftRightLogical:
+ case SpvOpShiftRightArithmetic:
+ case SpvOpShiftLeftLogical:
+ case SpvOpLogicalOr:
+ case SpvOpLogicalEqual:
+ case SpvOpLogicalNotEqual:
+ case SpvOpLogicalAnd:
+ case SpvOpBitwiseOr:
+ case SpvOpBitwiseXor:
+ case SpvOpBitwiseAnd:
+ case SpvOpSelect:
+ case SpvOpIEqual:
+ case SpvOpFOrdEqual:
+ case SpvOpFUnordEqual:
+ case SpvOpINotEqual:
+ case SpvOpFOrdNotEqual:
+ case SpvOpFUnordNotEqual:
+ case SpvOpULessThan:
+ case SpvOpSLessThan:
+ case SpvOpFOrdLessThan:
+ case SpvOpFUnordLessThan:
+ case SpvOpUGreaterThan:
+ case SpvOpSGreaterThan:
+ case SpvOpFOrdGreaterThan:
+ case SpvOpFUnordGreaterThan:
+ case SpvOpULessThanEqual:
+ case SpvOpSLessThanEqual:
+ case SpvOpFOrdLessThanEqual:
+ case SpvOpFUnordLessThanEqual:
+ case SpvOpUGreaterThanEqual:
+ case SpvOpSGreaterThanEqual:
+ case SpvOpFOrdGreaterThanEqual:
+ case SpvOpFUnordGreaterThanEqual:
+ case SpvOpDPdx:
+ case SpvOpDPdy:
+ case SpvOpFwidth:
+ case SpvOpDPdxFine:
+ case SpvOpDPdyFine:
+ case SpvOpFwidthFine:
+ case SpvOpDPdxCoarse:
+ case SpvOpDPdyCoarse:
+ case SpvOpFwidthCoarse:
+ vtn_handle_alu(b, opcode, w, count);
+ break;
+
+ case SpvOpTranspose:
+ case SpvOpOuterProduct:
+ case SpvOpMatrixTimesScalar:
+ case SpvOpVectorTimesMatrix:
+ case SpvOpMatrixTimesVector:
+ case SpvOpMatrixTimesMatrix:
+ vtn_handle_matrix_alu(b, opcode, w, count);
+ break;
+
+ case SpvOpVectorExtractDynamic:
+ case SpvOpVectorInsertDynamic:
+ case SpvOpVectorShuffle:
+ case SpvOpCompositeConstruct:
+ case SpvOpCompositeExtract:
+ case SpvOpCompositeInsert:
+ case SpvOpCopyObject:
+ vtn_handle_composite(b, opcode, w, count);
+ break;
+
+ case SpvOpPhi:
+ vtn_handle_phi_first_pass(b, w);
+ break;
+
+ case SpvOpEmitVertex:
+ case SpvOpEndPrimitive:
+ case SpvOpEmitStreamVertex:
+ case SpvOpEndStreamPrimitive:
+ case SpvOpControlBarrier:
+ case SpvOpMemoryBarrier:
+ vtn_handle_barrier(b, opcode, w, count);
+ break;
+
+ default:
+ unreachable("Unhandled opcode");
+ }
+
+ return true;
+}
+
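+/* Walks the SPIR-V block graph starting at 'start' and emits structured NIR
+ * control flow. break_block and cont_block are the merge and continue
+ * targets of the innermost loop, so branches to them become
+ * nir_jump_break/continue; end_block is the merge block of the enclosing if
+ * (NULL for loops and functions).
+ */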
+static void
+vtn_walk_blocks(struct vtn_builder *b, struct vtn_block *start,
+ struct vtn_block *break_block, struct vtn_block *cont_block,
+ struct vtn_block *end_block)
+{
+ struct vtn_block *block = start;
+ while (block != end_block) {
+ if (block->merge_op == SpvOpLoopMerge) {
+ /* This is the jump into a loop. */
+ struct vtn_block *new_cont_block = block;
+ struct vtn_block *new_break_block =
+ vtn_value(b, block->merge_block_id, vtn_value_type_block)->block;
+
+ nir_loop *loop = nir_loop_create(b->shader);
+ nir_cf_node_insert(b->nb.cursor, &loop->cf_node);
+
+         /* Reset the merge_op to prevent infinite recursion */
+ block->merge_op = SpvOpNop;
+
+ b->nb.cursor = nir_after_cf_list(&loop->body);
+ vtn_walk_blocks(b, block, new_break_block, new_cont_block, NULL);
+
+ b->nb.cursor = nir_after_cf_node(&loop->cf_node);
+ block = new_break_block;
+ continue;
+ }
+
+ const uint32_t *w = block->branch;
+ SpvOp branch_op = w[0] & SpvOpCodeMask;
+
+ b->block = block;
+ vtn_foreach_instruction(b, block->label, block->branch,
+ vtn_handle_body_instruction);
+
+ nir_block *cur_block = nir_cursor_current_block(b->nb.cursor);
+ assert(cur_block == block->block);
+ _mesa_hash_table_insert(b->block_table, cur_block, block);
+
+ switch (branch_op) {
+ case SpvOpBranch: {
+ struct vtn_block *branch_block =
+ vtn_value(b, w[1], vtn_value_type_block)->block;
+
+ if (branch_block == break_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_break);
+ nir_builder_instr_insert(&b->nb, &jump->instr);
+
+ return;
+ } else if (branch_block == cont_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_continue);
+ nir_builder_instr_insert(&b->nb, &jump->instr);
+
+ return;
+ } else if (branch_block == end_block) {
+ /* We're branching to the merge block of an if. For loops and
+ * functions, end_block == NULL, so we're done here.
+ */
+ return;
+ } else {
+ /* We're branching to another block, and according to the rules,
+ * we can only branch to another block with one predecessor (so
+ * we're the only one jumping to it) so we can just process it
+ * next.
+ */
+ block = branch_block;
+ continue;
+ }
+ }
+
+ case SpvOpBranchConditional: {
+ /* Gather up the branch blocks */
+ struct vtn_block *then_block =
+ vtn_value(b, w[2], vtn_value_type_block)->block;
+ struct vtn_block *else_block =
+ vtn_value(b, w[3], vtn_value_type_block)->block;
+
+ nir_if *if_stmt = nir_if_create(b->shader);
+ if_stmt->condition = nir_src_for_ssa(vtn_ssa_value(b, w[1])->def);
+ nir_cf_node_insert(b->nb.cursor, &if_stmt->cf_node);
+
+ if (then_block == break_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_break);
+ nir_instr_insert_after_cf_list(&if_stmt->then_list,
+ &jump->instr);
+ block = else_block;
+ } else if (else_block == break_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_break);
+ nir_instr_insert_after_cf_list(&if_stmt->else_list,
+ &jump->instr);
+ block = then_block;
+ } else if (then_block == cont_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_continue);
+ nir_instr_insert_after_cf_list(&if_stmt->then_list,
+ &jump->instr);
+ block = else_block;
+ } else if (else_block == cont_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_continue);
+ nir_instr_insert_after_cf_list(&if_stmt->else_list,
+ &jump->instr);
+ block = then_block;
+ } else {
+ /* According to the rules we're branching to two blocks that don't
+ * have any other predecessors, so we can handle this as a
+ * conventional if.
+ */
+ assert(block->merge_op == SpvOpSelectionMerge);
+ struct vtn_block *merge_block =
+ vtn_value(b, block->merge_block_id, vtn_value_type_block)->block;
+
+ b->nb.cursor = nir_after_cf_list(&if_stmt->then_list);
+ vtn_walk_blocks(b, then_block, break_block, cont_block, merge_block);
+
+ b->nb.cursor = nir_after_cf_list(&if_stmt->else_list);
+ vtn_walk_blocks(b, else_block, break_block, cont_block, merge_block);
+
+ b->nb.cursor = nir_after_cf_node(&if_stmt->cf_node);
+ block = merge_block;
+ continue;
+ }
+
+ /* If we got here then we inserted a predicated break or continue
+ * above and we need to handle the other case. We already set
+ * `block` above to indicate what block to visit after the
+ * predicated break.
+ */
+
+ /* It's possible that the other branch is also a break/continue.
+ * If it is, we handle that here.
+ */
+ if (block == break_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_break);
+ nir_builder_instr_insert(&b->nb, &jump->instr);
+
+ return;
+ } else if (block == cont_block) {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_continue);
+ nir_builder_instr_insert(&b->nb, &jump->instr);
+
+ return;
+ }
+
+ /* If we got here then there was a predicated break/continue but
+ * the other half of the if has stuff in it. `block` was already
+ * set above so there is nothing left for us to do.
+ */
+ continue;
+ }
+
+ case SpvOpReturn: {
+ nir_jump_instr *jump = nir_jump_instr_create(b->shader,
+ nir_jump_return);
+ nir_builder_instr_insert(&b->nb, &jump->instr);
+ return;
+ }
+
+ case SpvOpKill: {
+ nir_intrinsic_instr *discard =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard);
+ nir_builder_instr_insert(&b->nb, &discard->instr);
+ return;
+ }
+
+ case SpvOpSwitch:
+ case SpvOpReturnValue:
+ case SpvOpUnreachable:
+ default:
+ unreachable("Unhandled opcode");
+ }
+ }
+}
+
+nir_shader *
+spirv_to_nir(const uint32_t *words, size_t word_count,
+ gl_shader_stage stage,
+ const nir_shader_compiler_options *options)
+{
+ const uint32_t *word_end = words + word_count;
+
+ /* Handle the SPIR-V header (first 5 dwords) */
+ assert(word_count > 5);
+
+ assert(words[0] == SpvMagicNumber);
+ assert(words[1] >= 0x10000);
+ /* words[2] == generator magic */
+ unsigned value_id_bound = words[3];
+ assert(words[4] == 0);
+
+ words += 5;
+
+ nir_shader *shader = nir_shader_create(NULL, stage, options);
+
+ /* Initialize the vtn_builder object */
+ struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
+ b->shader = shader;
+ b->value_id_bound = value_id_bound;
+ b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
+ exec_list_make_empty(&b->functions);
+
+ /* XXX: We shouldn't need these defaults */
+ if (b->shader->stage == MESA_SHADER_GEOMETRY) {
+ b->shader->info.gs.vertices_in = 3;
+ b->shader->info.gs.output_primitive = 4; /* GL_TRIANGLES */
+ }
+
+ /* Handle all the preamble instructions */
+ words = vtn_foreach_instruction(b, words, word_end,
+ vtn_handle_preamble_instruction);
+
+ /* Do a very quick CFG analysis pass */
+ vtn_foreach_instruction(b, words, word_end,
+ vtn_handle_first_cfg_pass_instruction);
+
+ foreach_list_typed(struct vtn_function, func, node, &b->functions) {
+ b->impl = nir_function_impl_create(func->overload);
+ b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+ b->block_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+ nir_builder_init(&b->nb, b->impl);
+ b->nb.cursor = nir_after_cf_list(&b->impl->body);
+ vtn_walk_blocks(b, func->start_block, NULL, NULL, NULL);
+ vtn_foreach_instruction(b, func->start_block->label, func->end,
+ vtn_handle_phi_second_pass);
+ }
+
+ /* Because we can still have output reads in NIR, we need to lower
+ * outputs to temporaries before we are truly finished.
+ */
+ nir_lower_outputs_to_temporaries(shader);
+
+ ralloc_free(b);
+
+ return shader;
+}
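
For orientation, here is a minimal sketch of how a driver front-end might drive this entry point. The wrapper function is an illustrative assumption and not part of this patch; only the spirv_to_nir() signature and the header path come from the code in this series.

#include "glsl/nir/nir_spirv.h"

/* Hypothetical helper (not part of this patch): compile a SPIR-V fragment
 * shader module to NIR. `spirv` must point at the full module, starting
 * with the magic number; malformed input is caught by the asserts in
 * spirv_to_nir() itself.
 */
static nir_shader *
compile_fragment_spirv(const uint32_t *spirv, size_t word_count,
                       const nir_shader_compiler_options *opts)
{
   return spirv_to_nir(spirv, word_count, MESA_SHADER_FRAGMENT, opts);
}
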
OPT_V(brw_nir_lower_inputs, devinfo, is_scalar);
OPT_V(brw_nir_lower_outputs, is_scalar);
- //nir_assign_var_locations(&nir->uniforms,
- // &nir->num_uniforms,
- // is_scalar ? type_size_scalar : type_size_vec4);
- OPT_V(brw_nir_lower_uniforms, is_scalar);
++ //OPT_V(brw_nir_lower_uniforms, is_scalar);
OPT_V(nir_lower_io, nir_var_all, is_scalar ? type_size_scalar : type_size_vec4);
if (shader_prog) {
--- /dev/null
- uint32_t stride;
-
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "anv_private.h"
+
+/** \file anv_cmd_buffer.c
+ *
+ * This file contains all of the stuff for emitting commands into a command
+ * buffer. This includes implementations of most of the vkCmd*
+ * entrypoints. This file is concerned entirely with state emission and
+ * not with the command buffer data structure itself. As far as this file
+ * is concerned, most of anv_cmd_buffer is magic.
+ */
+
+/* TODO: These are taken from GLES. We should check the Vulkan spec */
+const struct anv_dynamic_state default_dynamic_state = {
+ .viewport = {
+ .count = 0,
+ },
+ .scissor = {
+ .count = 0,
+ },
+ .line_width = 1.0f,
+ .depth_bias = {
+ .bias = 0.0f,
+ .clamp = 0.0f,
+ .slope = 0.0f,
+ },
+ .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
+ .depth_bounds = {
+ .min = 0.0f,
+ .max = 1.0f,
+ },
+ .stencil_compare_mask = {
+ .front = ~0u,
+ .back = ~0u,
+ },
+ .stencil_write_mask = {
+ .front = ~0u,
+ .back = ~0u,
+ },
+ .stencil_reference = {
+ .front = 0u,
+ .back = 0u,
+ },
+};
+
+void
+anv_dynamic_state_copy(struct anv_dynamic_state *dest,
+ const struct anv_dynamic_state *src,
+ uint32_t copy_mask)
+{
+ if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
+ dest->viewport.count = src->viewport.count;
+ typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
+ src->viewport.count);
+ }
+
+ if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
+ dest->scissor.count = src->scissor.count;
+ typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
+ src->scissor.count);
+ }
+
+ if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
+ dest->line_width = src->line_width;
+
+ if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
+ dest->depth_bias = src->depth_bias;
+
+ if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
+ typed_memcpy(dest->blend_constants, src->blend_constants, 4);
+
+ if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
+ dest->depth_bounds = src->depth_bounds;
+
+ if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
+ dest->stencil_compare_mask = src->stencil_compare_mask;
+
+ if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
+ dest->stencil_write_mask = src->stencil_write_mask;
+
+ if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
+ dest->stencil_reference = src->stencil_reference;
+}
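
As a usage sketch, a caller can copy just a subset of the dynamic state by masking the bits it cares about. The helper below is hypothetical, but the fields and the mask convention match the code above and the CmdBindPipeline path later in this patch.

/* Hypothetical helper: pull only viewport and scissor state out of a
 * pipeline's baked-in dynamic state, leaving everything else untouched.
 */
static void
copy_viewport_scissor(struct anv_cmd_buffer *cmd_buffer,
                      const struct anv_pipeline *pipeline)
{
   const uint32_t mask = (1 << VK_DYNAMIC_STATE_VIEWPORT) |
                         (1 << VK_DYNAMIC_STATE_SCISSOR);

   anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
                          &pipeline->dynamic_state, mask);
}
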
+
+static void
+anv_cmd_state_init(struct anv_cmd_state *state)
+{
+ memset(&state->descriptors, 0, sizeof(state->descriptors));
+ memset(&state->push_constants, 0, sizeof(state->push_constants));
+
+ state->dirty = ~0;
+ state->vb_dirty = 0;
+ state->descriptors_dirty = 0;
+ state->push_constants_dirty = 0;
+ state->pipeline = NULL;
+ state->restart_index = UINT32_MAX;
+ state->dynamic = default_dynamic_state;
+
+ state->gen7.index_buffer = NULL;
+}
+
+static VkResult
+anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
+ gl_shader_stage stage, uint32_t size)
+{
+ struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];
+
+ if (*ptr == NULL) {
+ *ptr = anv_alloc(&cmd_buffer->pool->alloc, size, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (*ptr == NULL)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ } else if ((*ptr)->size < size) {
+ *ptr = anv_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (*ptr == NULL)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+ (*ptr)->size = size;
+
+ return VK_SUCCESS;
+}
+
+#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
+ anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
+ (offsetof(struct anv_push_constants, field) + \
+ sizeof(cmd_buffer->state.push_constants[0]->field)))
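
To make the sizing explicit, the macro simply expands to an ordinary call; for the `dynamic` field used by later callers in this patch it is roughly equivalent to the sketch below.

/* Sketch of the expansion for the `dynamic` field: it reserves space for
 * everything up to and including that member of anv_push_constants.
 */
anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage,
   offsetof(struct anv_push_constants, dynamic) +
   sizeof(cmd_buffer->state.push_constants[0]->dynamic));
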
+
+static VkResult anv_create_cmd_buffer(
+ struct anv_device * device,
+ struct anv_cmd_pool * pool,
+ VkCommandBufferLevel level,
+ VkCommandBuffer* pCommandBuffer)
+{
+ struct anv_cmd_buffer *cmd_buffer;
+ VkResult result;
+
+ cmd_buffer = anv_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (cmd_buffer == NULL)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
+ cmd_buffer->device = device;
+ cmd_buffer->pool = pool;
+
+ result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
+ if (result != VK_SUCCESS)
+ goto fail;
+
+ anv_state_stream_init(&cmd_buffer->surface_state_stream,
+ &device->surface_state_block_pool);
+ anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
+ &device->dynamic_state_block_pool);
+
+ cmd_buffer->level = level;
+ cmd_buffer->usage_flags = 0;
+
+ anv_cmd_state_init(&cmd_buffer->state);
+
+ if (pool) {
+ list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
+ } else {
+ /* Init the pool_link so we can safely call list_del when we destroy
+ * the command buffer
+ */
+ list_inithead(&cmd_buffer->pool_link);
+ }
+
+ *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);
+
+ return VK_SUCCESS;
+
+ fail:
+ anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
+
+ return result;
+}
+
+VkResult anv_AllocateCommandBuffers(
+ VkDevice _device,
+ const VkCommandBufferAllocateInfo* pAllocateInfo,
+ VkCommandBuffer* pCommandBuffers)
+{
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);
+
+ VkResult result = VK_SUCCESS;
+ uint32_t i;
+
+ for (i = 0; i < pAllocateInfo->bufferCount; i++) {
+ result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
+ &pCommandBuffers[i]);
+ if (result != VK_SUCCESS)
+ break;
+ }
+
+ if (result != VK_SUCCESS)
+ anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
+ i, pCommandBuffers);
+
+ return result;
+}
+
+static void
+anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
+{
+ list_del(&cmd_buffer->pool_link);
+
+ anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);
+
+ anv_state_stream_finish(&cmd_buffer->surface_state_stream);
+ anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
+
+ anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
+}
+
+void anv_FreeCommandBuffers(
+ VkDevice device,
+ VkCommandPool commandPool,
+ uint32_t commandBufferCount,
+ const VkCommandBuffer* pCommandBuffers)
+{
+ for (uint32_t i = 0; i < commandBufferCount; i++) {
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
+
+ anv_cmd_buffer_destroy(cmd_buffer);
+ }
+}
+
+VkResult anv_ResetCommandBuffer(
+ VkCommandBuffer commandBuffer,
+ VkCommandBufferResetFlags flags)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
+
+ anv_cmd_state_init(&cmd_buffer->state);
+
+ return VK_SUCCESS;
+}
+
+void
+anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
+{
+ switch (cmd_buffer->device->info.gen) {
+ case 7:
+ if (cmd_buffer->device->info.is_haswell)
+ return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
+ else
+ return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
+ case 8:
+ return gen8_cmd_buffer_emit_state_base_address(cmd_buffer);
+ case 9:
+ return gen9_cmd_buffer_emit_state_base_address(cmd_buffer);
+ default:
+ unreachable("unsupported gen\n");
+ }
+}
+
+VkResult anv_BeginCommandBuffer(
+ VkCommandBuffer commandBuffer,
+ const VkCommandBufferBeginInfo* pBeginInfo)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
+
+ cmd_buffer->usage_flags = pBeginInfo->flags;
+
+ if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
+ cmd_buffer->state.framebuffer =
+ anv_framebuffer_from_handle(pBeginInfo->framebuffer);
+ cmd_buffer->state.pass =
+ anv_render_pass_from_handle(pBeginInfo->renderPass);
+
+ struct anv_subpass *subpass =
+ &cmd_buffer->state.pass->subpasses[pBeginInfo->subpass];
+
+ anv_cmd_buffer_begin_subpass(cmd_buffer, subpass);
+ }
+
+ anv_cmd_buffer_emit_state_base_address(cmd_buffer);
+ cmd_buffer->state.current_pipeline = UINT32_MAX;
+
+ return VK_SUCCESS;
+}
+
+VkResult anv_EndCommandBuffer(
+ VkCommandBuffer commandBuffer)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct anv_device *device = cmd_buffer->device;
+
+ anv_cmd_buffer_end_batch_buffer(cmd_buffer);
+
+ if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
+ /* The algorithm used to compute the validate list is not thread-safe as
+ * it uses the bo->index field. We have to lock the device around it.
+ * Fortunately, the chances for contention here are probably very low.
+ */
+ pthread_mutex_lock(&device->mutex);
+ anv_cmd_buffer_prepare_execbuf(cmd_buffer);
+ pthread_mutex_unlock(&device->mutex);
+ }
+
+ return VK_SUCCESS;
+}
+
+void anv_CmdBindPipeline(
+ VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipeline _pipeline)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
+
+ switch (pipelineBindPoint) {
+ case VK_PIPELINE_BIND_POINT_COMPUTE:
+ cmd_buffer->state.compute_pipeline = pipeline;
+ cmd_buffer->state.compute_dirty |= ANV_CMD_DIRTY_PIPELINE;
+ cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
+ break;
+
+ case VK_PIPELINE_BIND_POINT_GRAPHICS:
+ cmd_buffer->state.pipeline = pipeline;
+ cmd_buffer->state.vb_dirty |= pipeline->vb_used;
+ cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
+ cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
+
+ /* Apply the dynamic state from the pipeline */
+ cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
+ anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
+ &pipeline->dynamic_state,
+ pipeline->dynamic_state_mask);
+ break;
+
+ default:
+ assert(!"invalid bind point");
+ break;
+ }
+}
+
+void anv_CmdSetViewport(
+ VkCommandBuffer commandBuffer,
+ uint32_t viewportCount,
+ const VkViewport* pViewports)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ cmd_buffer->state.dynamic.viewport.count = viewportCount;
+ memcpy(cmd_buffer->state.dynamic.viewport.viewports,
+ pViewports, viewportCount * sizeof(*pViewports));
+
+ cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
+}
+
+void anv_CmdSetScissor(
+ VkCommandBuffer commandBuffer,
+ uint32_t scissorCount,
+ const VkRect2D* pScissors)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ cmd_buffer->state.dynamic.scissor.count = scissorCount;
+ memcpy(cmd_buffer->state.dynamic.scissor.scissors,
+ pScissors, scissorCount * sizeof(*pScissors));
+
+ cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
+}
+
+void anv_CmdSetLineWidth(
+ VkCommandBuffer commandBuffer,
+ float lineWidth)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ cmd_buffer->state.dynamic.line_width = lineWidth;
+ cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
+}
+
+void anv_CmdSetDepthBias(
+ VkCommandBuffer commandBuffer,
+ float depthBiasConstantFactor,
+ float depthBiasClamp,
+ float depthBiasSlopeFactor)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
+ cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
+ cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;
+
+ cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
+}
+
+void anv_CmdSetBlendConstants(
+ VkCommandBuffer commandBuffer,
+ const float blendConstants[4])
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ memcpy(cmd_buffer->state.dynamic.blend_constants,
+ blendConstants, sizeof(float) * 4);
+
+ cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
+}
+
+void anv_CmdSetDepthBounds(
+ VkCommandBuffer commandBuffer,
+ float minDepthBounds,
+ float maxDepthBounds)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
+ cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;
+
+ cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
+}
+
+void anv_CmdSetStencilCompareMask(
+ VkCommandBuffer commandBuffer,
+ VkStencilFaceFlags faceMask,
+ uint32_t compareMask)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
+ cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
+ if (faceMask & VK_STENCIL_FACE_BACK_BIT)
+ cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;
+
+ cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
+}
+
+void anv_CmdSetStencilWriteMask(
+ VkCommandBuffer commandBuffer,
+ VkStencilFaceFlags faceMask,
+ uint32_t writeMask)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
+ cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
+ if (faceMask & VK_STENCIL_FACE_BACK_BIT)
+ cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;
+
+ cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
+}
+
+void anv_CmdSetStencilReference(
+ VkCommandBuffer commandBuffer,
+ VkStencilFaceFlags faceMask,
+ uint32_t reference)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
+ cmd_buffer->state.dynamic.stencil_reference.front = reference;
+ if (faceMask & VK_STENCIL_FACE_BACK_BIT)
+ cmd_buffer->state.dynamic.stencil_reference.back = reference;
+
+ cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
+}
+
+void anv_CmdBindDescriptorSets(
+ VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout _layout,
+ uint32_t firstSet,
+ uint32_t descriptorSetCount,
+ const VkDescriptorSet* pDescriptorSets,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* pDynamicOffsets)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
+ struct anv_descriptor_set_layout *set_layout;
+
+ assert(firstSet + descriptorSetCount < MAX_SETS);
+
+ uint32_t dynamic_slot = 0;
+ for (uint32_t i = 0; i < descriptorSetCount; i++) {
+ ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
+ set_layout = layout->set[firstSet + i].layout;
+
+ if (cmd_buffer->state.descriptors[firstSet + i] != set) {
+ cmd_buffer->state.descriptors[firstSet + i] = set;
+ cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;
+ }
+
+ if (set_layout->dynamic_offset_count > 0) {
+ anv_foreach_stage(s, set_layout->shader_stages) {
+ anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, s, dynamic);
+
+ struct anv_push_constants *push =
+ cmd_buffer->state.push_constants[s];
+
+ unsigned d = layout->set[firstSet + i].dynamic_offset_start;
+ const uint32_t *offsets = pDynamicOffsets + dynamic_slot;
+ struct anv_descriptor *desc = set->descriptors;
+
+ for (unsigned b = 0; b < set_layout->binding_count; b++) {
+ if (set_layout->binding[b].dynamic_offset_index < 0)
+ continue;
+
+ unsigned array_size = set_layout->binding[b].array_size;
+ for (unsigned j = 0; j < array_size; j++) {
+ push->dynamic[d].offset = *(offsets++);
+ push->dynamic[d].range = (desc++)->range;
+ d++;
+ }
+ }
+ }
+ cmd_buffer->state.push_constants_dirty |= set_layout->shader_stages;
+ }
+ }
+}
+
+void anv_CmdBindVertexBuffers(
+ VkCommandBuffer commandBuffer,
+ uint32_t startBinding,
+ uint32_t bindingCount,
+ const VkBuffer* pBuffers,
+ const VkDeviceSize* pOffsets)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;
+
+ /* We have to defer setting up vertex buffers since we need the buffer
+ * stride from the pipeline. */
+
+ assert(startBinding + bindingCount < MAX_VBS);
+ for (uint32_t i = 0; i < bindingCount; i++) {
+ vb[startBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
+ vb[startBinding + i].offset = pOffsets[i];
+ cmd_buffer->state.vb_dirty |= 1 << (startBinding + i);
+ }
+}
+
+static void
+add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
+ struct anv_state state, struct anv_bo *bo, uint32_t offset)
+{
+ /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8 and
+ * 9 for gen8+. We only write the first dword for gen8+ here and rely on
+ * the initial state to set the high bits to 0. */
+
+ const uint32_t dword = cmd_buffer->device->info.gen < 8 ? 1 : 8;
+
+ anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
+ state.offset + dword * 4, bo, offset);
+}
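
A quick worked example of the relocation offset above (the addresses are purely illustrative):

/* For a surface state allocated at state.offset == 0x100:
 *
 *    gen7:  0x100 + 1 * 4 = 0x104   (SURFACE_STATE dword 1)
 *    gen8+: 0x100 + 8 * 4 = 0x120   (SURFACE_STATE dword 8, low 32 bits;
 *                                    dword 9 stays 0 from the initial state)
 */
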
+
+static void
+fill_descriptor_buffer_surface_state(struct anv_device *device, void *state,
+ gl_shader_stage stage,
+ VkDescriptorType type,
+ uint32_t offset, uint32_t range)
+{
+ VkFormat format;
- if (device->instance->physicalDevice.compiler->scalar_stage[stage]) {
- stride = 4;
- } else {
- stride = 16;
- }
+ switch (type) {
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- stride = 1;
+ format = VK_FORMAT_R32G32B32A32_SFLOAT;
+ break;
+
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- offset, range, stride);
+ format = VK_FORMAT_UNDEFINED;
+ break;
+
+ default:
+ unreachable("Invalid descriptor type");
+ }
+
+ anv_fill_buffer_surface_state(device, state,
+ anv_format_for_vk_format(format),
- (local_id_dwords + prog_data->nr_params) * sizeof(gl_constant_value);
++ offset, range, 1);
+}
+
+VkResult
+anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
+ gl_shader_stage stage,
+ struct anv_state *bt_state)
+{
+ struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
+ struct anv_subpass *subpass = cmd_buffer->state.subpass;
+ struct anv_pipeline_layout *layout;
+ uint32_t color_count, bias, state_offset;
+
+ if (stage == MESA_SHADER_COMPUTE)
+ layout = cmd_buffer->state.compute_pipeline->layout;
+ else
+ layout = cmd_buffer->state.pipeline->layout;
+
+ if (stage == MESA_SHADER_FRAGMENT) {
+ bias = MAX_RTS;
+ color_count = subpass->color_count;
+ } else {
+ bias = 0;
+ color_count = 0;
+ }
+
+ /* This is a little awkward: layout can be NULL but we still have to
+ * allocate and set a binding table for the PS stage for render
+ * targets. */
+ uint32_t surface_count = layout ? layout->stage[stage].surface_count : 0;
+
+ if (color_count + surface_count == 0)
+ return VK_SUCCESS;
+
+ *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
+ bias + surface_count,
+ &state_offset);
+ uint32_t *bt_map = bt_state->map;
+
+ if (bt_state->map == NULL)
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+ for (uint32_t a = 0; a < color_count; a++) {
+ const struct anv_image_view *iview =
+ fb->attachments[subpass->color_attachments[a]];
+
+ bt_map[a] = iview->color_rt_surface_state.offset + state_offset;
+ add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
+ iview->bo, iview->offset);
+ }
+
+ if (layout == NULL)
+ goto out;
+
+ if (layout->stage[stage].image_count > 0) {
+ VkResult result =
+ anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
+ if (result != VK_SUCCESS)
+ return result;
+
+ cmd_buffer->state.push_constants_dirty |= 1 << stage;
+ }
+
+ uint32_t image = 0;
+ for (uint32_t s = 0; s < layout->stage[stage].surface_count; s++) {
+ struct anv_pipeline_binding *binding =
+ &layout->stage[stage].surface_to_descriptor[s];
+ struct anv_descriptor_set *set =
+ cmd_buffer->state.descriptors[binding->set];
+ struct anv_descriptor *desc = &set->descriptors[binding->offset];
+
+ struct anv_state surface_state;
+ struct anv_bo *bo;
+ uint32_t bo_offset;
+
+ switch (desc->type) {
+ case VK_DESCRIPTOR_TYPE_SAMPLER:
+ /* Nothing for us to do here */
+ continue;
+
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
+ bo = desc->buffer->bo;
+ bo_offset = desc->buffer->offset + desc->offset;
+
+ surface_state =
+ anv_cmd_buffer_alloc_surface_state(cmd_buffer);
+
+ fill_descriptor_buffer_surface_state(cmd_buffer->device,
+ surface_state.map,
+ stage, desc->type,
+ bo_offset, desc->range);
+
+ if (!cmd_buffer->device->info.has_llc)
+ anv_state_clflush(surface_state);
+
+ break;
+ }
+
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+ surface_state = desc->image_view->nonrt_surface_state;
+ bo = desc->image_view->bo;
+ bo_offset = desc->image_view->offset;
+ break;
+
+ case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
+ surface_state = desc->image_view->storage_surface_state;
+ bo = desc->image_view->bo;
+ bo_offset = desc->image_view->offset;
+
+ struct brw_image_param *image_param =
+ &cmd_buffer->state.push_constants[stage]->images[image++];
+
+ anv_image_view_fill_image_param(cmd_buffer->device, desc->image_view,
+ image_param);
+ image_param->surface_idx = bias + s;
+ break;
+ }
+
+ case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+ assert(!"Unsupported descriptor type");
+ break;
+
+ default:
+ assert(!"Invalid descriptor type");
+ continue;
+ }
+
+ bt_map[bias + s] = surface_state.offset + state_offset;
+ add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
+ }
+ assert(image == layout->stage[stage].image_count);
+
+ out:
+ if (!cmd_buffer->device->info.has_llc)
+ anv_state_clflush(*bt_state);
+
+ return VK_SUCCESS;
+}
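
The resulting binding table layout for a fragment shader looks roughly like the sketch below, derived from the loops above (indices are illustrative):

/*    bt_map[0 .. color_count)         render-target surface states
 *    bt_map[MAX_RTS .. MAX_RTS + n)   descriptor surfaces, in the order of
 *                                     surface_to_descriptor[]
 *
 * Non-fragment stages use bias == 0, so their descriptor surfaces start
 * at bt_map[0].
 */
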
+
+VkResult
+anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
+ gl_shader_stage stage, struct anv_state *state)
+{
+ struct anv_pipeline_layout *layout;
+ uint32_t sampler_count;
+
+ if (stage == MESA_SHADER_COMPUTE)
+ layout = cmd_buffer->state.compute_pipeline->layout;
+ else
+ layout = cmd_buffer->state.pipeline->layout;
+
+ sampler_count = layout ? layout->stage[stage].sampler_count : 0;
+ if (sampler_count == 0)
+ return VK_SUCCESS;
+
+ uint32_t size = sampler_count * 16;
+ *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);
+
+ if (state->map == NULL)
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+ for (uint32_t s = 0; s < layout->stage[stage].sampler_count; s++) {
+ struct anv_pipeline_binding *binding =
+ &layout->stage[stage].sampler_to_descriptor[s];
+ struct anv_descriptor_set *set =
+ cmd_buffer->state.descriptors[binding->set];
+ struct anv_descriptor *desc = &set->descriptors[binding->offset];
+
+ if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
+ desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
+ continue;
+
+ struct anv_sampler *sampler = desc->sampler;
+
+ /* This can happen if we have an unfilled slot since TYPE_SAMPLER
+ * happens to be zero.
+ */
+ if (sampler == NULL)
+ continue;
+
+ memcpy(state->map + (s * 16),
+ sampler->state, sizeof(sampler->state));
+ }
+
+ if (!cmd_buffer->device->info.has_llc)
+ anv_state_clflush(*state);
+
+ return VK_SUCCESS;
+}
+
+struct anv_state
+anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
+ const void *data, uint32_t size, uint32_t alignment)
+{
+ struct anv_state state;
+
+ state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
+ memcpy(state.map, data, size);
+
+ if (!cmd_buffer->device->info.has_llc)
+ anv_state_clflush(state);
+
+ VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));
+
+ return state;
+}
+
+struct anv_state
+anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
+ uint32_t *a, uint32_t *b,
+ uint32_t dwords, uint32_t alignment)
+{
+ struct anv_state state;
+ uint32_t *p;
+
+ state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
+ dwords * 4, alignment);
+ p = state.map;
+ for (uint32_t i = 0; i < dwords; i++)
+ p[i] = a[i] | b[i];
+
+ if (!cmd_buffer->device->info.has_llc)
+ anv_state_clflush(state);
+
+ VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));
+
+ return state;
+}
+
+void
+anv_cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer,
+ struct anv_subpass *subpass)
+{
+ switch (cmd_buffer->device->info.gen) {
+ case 7:
+ gen7_cmd_buffer_begin_subpass(cmd_buffer, subpass);
+ break;
+ case 8:
+ gen8_cmd_buffer_begin_subpass(cmd_buffer, subpass);
+ break;
+ case 9:
+ gen9_cmd_buffer_begin_subpass(cmd_buffer, subpass);
+ break;
+ default:
+ unreachable("unsupported gen\n");
+ }
+}
+
+void anv_CmdSetEvent(
+ VkCommandBuffer commandBuffer,
+ VkEvent event,
+ VkPipelineStageFlags stageMask)
+{
+ stub();
+}
+
+void anv_CmdResetEvent(
+ VkCommandBuffer commandBuffer,
+ VkEvent event,
+ VkPipelineStageFlags stageMask)
+{
+ stub();
+}
+
+void anv_CmdWaitEvents(
+ VkCommandBuffer commandBuffer,
+ uint32_t eventCount,
+ const VkEvent* pEvents,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags destStageMask,
+ uint32_t memBarrierCount,
+ const void* const* ppMemBarriers)
+{
+ stub();
+}
+
+struct anv_state
+anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
+ gl_shader_stage stage)
+{
+ struct anv_push_constants *data =
+ cmd_buffer->state.push_constants[stage];
+ struct brw_stage_prog_data *prog_data =
+ cmd_buffer->state.pipeline->prog_data[stage];
+
+ /* If we don't actually have any push constants, bail. */
+ if (data == NULL || prog_data->nr_params == 0)
+ return (struct anv_state) { .offset = 0 };
+
+ struct anv_state state =
+ anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
+ prog_data->nr_params * sizeof(float),
+ 32 /* bottom 5 bits MBZ */);
+
+ /* Walk through the param array and fill the buffer with data */
+ uint32_t *u32_map = state.map;
+ for (unsigned i = 0; i < prog_data->nr_params; i++) {
+ uint32_t offset = (uintptr_t)prog_data->param[i];
+ u32_map[i] = *(uint32_t *)((uint8_t *)data + offset);
+ }
+
+ if (!cmd_buffer->device->info.has_llc)
+ anv_state_clflush(state);
+
+ return state;
+}
+
+struct anv_state
+anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
+{
+ struct anv_push_constants *data =
+ cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
+ struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;
+ const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
+
+ const unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
+ const unsigned push_constant_data_size =
++ (local_id_dwords + prog_data->nr_params) * sizeof(union gl_constant_value *);
+ const unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
+ const unsigned param_aligned_count =
+ reg_aligned_constant_size / sizeof(uint32_t);
+
+ /* If we don't actually have any push constants, bail. */
+ if (reg_aligned_constant_size == 0)
+ return (struct anv_state) { .offset = 0 };
+
+ const unsigned threads = pipeline->cs_thread_width_max;
+ const unsigned total_push_constants_size =
+ reg_aligned_constant_size * threads;
+ struct anv_state state =
+ anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
+ total_push_constants_size,
+ 32 /* bottom 5 bits MBZ */);
+
+ /* Walk through the param array and fill the buffer with data */
+ uint32_t *u32_map = state.map;
+
+ brw_cs_fill_local_id_payload(cs_prog_data, u32_map, threads,
+ reg_aligned_constant_size);
+
+ /* Setup uniform data for the first thread */
+ for (unsigned i = 0; i < prog_data->nr_params; i++) {
+ uint32_t offset = (uintptr_t)prog_data->param[i];
+ u32_map[local_id_dwords + i] = *(uint32_t *)((uint8_t *)data + offset);
+ }
+
+ /* Copy uniform data from the first thread to every other thread */
+ const size_t uniform_data_size = prog_data->nr_params * sizeof(uint32_t);
+ for (unsigned t = 1; t < threads; t++) {
+ memcpy(&u32_map[t * param_aligned_count + local_id_dwords],
+ &u32_map[local_id_dwords],
+ uniform_data_size);
+ }
+
+ if (!cmd_buffer->device->info.has_llc)
+ anv_state_clflush(state);
+
+ return state;
+}
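
As a sanity check on the sizing math above, a worked example; the numbers are illustrative and it assumes each uniform slot is 4 bytes (one uint32/float constant):

/* local_invocation_id_regs == 1, nr_params == 6, cs_thread_width_max == 8:
 *
 *    local_id_dwords           = 1 * 8          = 8
 *    push_constant_data_size   = (8 + 6) * 4    = 56 bytes
 *    reg_aligned_constant_size = ALIGN(56, 32)  = 64 bytes
 *    param_aligned_count       = 64 / 4         = 16 dwords per thread
 *    total_push_constants_size = 64 * 8         = 512 bytes
 *
 * i.e. one register-aligned copy of the local IDs plus uniforms per thread.
 */
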
+
+void anv_CmdPushConstants(
+ VkCommandBuffer commandBuffer,
+ VkPipelineLayout layout,
+ VkShaderStageFlags stageFlags,
+ uint32_t offset,
+ uint32_t size,
+ const void* pValues)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ anv_foreach_stage(stage, stageFlags) {
+ anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, client_data);
+
+ memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset,
+ pValues, size);
+ }
+
+ cmd_buffer->state.push_constants_dirty |= stageFlags;
+}
+
+void anv_CmdExecuteCommands(
+ VkCommandBuffer commandBuffer,
+ uint32_t commandBuffersCount,
+ const VkCommandBuffer* pCmdBuffers)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);
+
+ assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
+
+ anv_assert(primary->state.subpass == &primary->state.pass->subpasses[0]);
+
+ for (uint32_t i = 0; i < commandBuffersCount; i++) {
+ ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);
+
+ assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
+
+ anv_cmd_buffer_add_secondary(primary, secondary);
+ }
+}
+
+VkResult anv_CreateCommandPool(
+ VkDevice _device,
+ const VkCommandPoolCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkCommandPool* pCmdPool)
+{
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ struct anv_cmd_pool *pool;
+
+ pool = anv_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (pool == NULL)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ if (pAllocator)
+ pool->alloc = *pAllocator;
+ else
+ pool->alloc = device->alloc;
+
+ list_inithead(&pool->cmd_buffers);
+
+ *pCmdPool = anv_cmd_pool_to_handle(pool);
+
+ return VK_SUCCESS;
+}
+
+void anv_DestroyCommandPool(
+ VkDevice _device,
+ VkCommandPool commandPool,
+ const VkAllocationCallbacks* pAllocator)
+{
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
+
+ anv_ResetCommandPool(_device, commandPool, 0);
+
+ anv_free2(&device->alloc, pAllocator, pool);
+}
+
+VkResult anv_ResetCommandPool(
+ VkDevice device,
+ VkCommandPool commandPool,
+ VkCommandPoolResetFlags flags)
+{
+ ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
+
+ list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
+ &pool->cmd_buffers, pool_link) {
+ anv_cmd_buffer_destroy(cmd_buffer);
+ }
+
+ return VK_SUCCESS;
+}
+
+/**
+ * Return NULL if the current subpass has no depthstencil attachment.
+ */
+const struct anv_image_view *
+anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
+{
+ const struct anv_subpass *subpass = cmd_buffer->state.subpass;
+ const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
+
+ if (subpass->depth_stencil_attachment == VK_ATTACHMENT_UNUSED)
+ return NULL;
+
+ const struct anv_image_view *iview =
+ fb->attachments[subpass->depth_stencil_attachment];
+
+ assert(anv_format_is_depth_or_stencil(iview->format));
+
+ return iview;
+}
--- /dev/null
- case nir_intrinsic_load_ubo_indirect:
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "anv_nir.h"
+#include "glsl/nir/nir_builder.h"
+
+struct apply_dynamic_offsets_state {
+ nir_shader *shader;
+ nir_builder builder;
+
+ struct anv_pipeline_layout *layout;
+
+ uint32_t indices_start;
+};
+
+static bool
+apply_dynamic_offsets_block(nir_block *block, void *void_state)
+{
+ struct apply_dynamic_offsets_state *state = void_state;
+ struct anv_descriptor_set_layout *set_layout;
+
+ nir_builder *b = &state->builder;
+
+ nir_foreach_instr_safe(block, instr) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+
+ unsigned block_idx_src;
+ switch (intrin->intrinsic) {
+ case nir_intrinsic_load_ubo:
- case nir_intrinsic_load_ssbo_indirect:
+ case nir_intrinsic_load_ssbo:
- case nir_intrinsic_store_ssbo_indirect:
+ block_idx_src = 0;
+ break;
+ case nir_intrinsic_store_ssbo:
- int indirect_src;
- switch (intrin->intrinsic) {
- case nir_intrinsic_load_ubo_indirect:
- case nir_intrinsic_load_ssbo_indirect:
- indirect_src = 1;
- break;
- case nir_intrinsic_store_ssbo_indirect:
- indirect_src = 2;
- break;
- default:
- indirect_src = -1;
- break;
- }
-
+ block_idx_src = 1;
+ break;
+ default:
+ continue; /* the loop */
+ }
+
+ nir_instr *res_instr = intrin->src[block_idx_src].ssa->parent_instr;
+ assert(res_instr->type == nir_instr_type_intrinsic);
+ nir_intrinsic_instr *res_intrin = nir_instr_as_intrinsic(res_instr);
+ assert(res_intrin->intrinsic == nir_intrinsic_vulkan_resource_index);
+
+ unsigned set = res_intrin->const_index[0];
+ unsigned binding = res_intrin->const_index[1];
+
+ set_layout = state->layout->set[set].layout;
+ if (set_layout->binding[binding].dynamic_offset_index < 0)
+ continue;
+
+ b->cursor = nir_before_instr(&intrin->instr);
+
- nir_const_value *const_arr_idx =
- nir_src_as_const_value(res_intrin->src[0]);
-
- nir_intrinsic_op offset_load_op;
- if (const_arr_idx)
- offset_load_op = nir_intrinsic_load_uniform;
- else
- offset_load_op = nir_intrinsic_load_uniform_indirect;
-
+ /* First, we need to generate the uniform load for the buffer offset */
+ uint32_t index = state->layout->set[set].dynamic_offset_start +
+ set_layout->binding[binding].dynamic_offset_index;
+
- nir_intrinsic_instr_create(state->shader, offset_load_op);
+ nir_intrinsic_instr *offset_load =
- offset_load->const_index[0] = state->indices_start + index * 2;
-
- if (const_arr_idx) {
- offset_load->const_index[1] = const_arr_idx->u[0] * 2;
- } else {
- offset_load->const_index[1] = 0;
- offset_load->src[0] = nir_src_for_ssa(
- nir_imul(b, nir_ssa_for_src(b, res_intrin->src[0], 1),
- nir_imm_int(b, 2)));
- }
++ nir_intrinsic_instr_create(state->shader, nir_intrinsic_load_uniform);
+ offset_load->num_components = 2;
- /* We calculate the full offset and don't bother with the base
- * offset. We need the full offset for the predicate anyway.
- */
- nir_ssa_def *rel_offset = nir_imm_int(b, intrin->const_index[0]);
- if (indirect_src >= 0) {
- assert(intrin->src[indirect_src].is_ssa);
- rel_offset = nir_iadd(b, intrin->src[indirect_src].ssa, rel_offset);
- }
- nir_ssa_def *global_offset = nir_iadd(b, rel_offset,
- &offset_load->dest.ssa);
-
- /* Now we replace the load/store intrinsic */
-
- nir_intrinsic_op indirect_op;
- switch (intrin->intrinsic) {
- case nir_intrinsic_load_ubo:
- indirect_op = nir_intrinsic_load_ubo_indirect;
- break;
- case nir_intrinsic_load_ssbo:
- indirect_op = nir_intrinsic_load_ssbo_indirect;
- break;
- case nir_intrinsic_store_ssbo:
- indirect_op = nir_intrinsic_store_ssbo_indirect;
- break;
- default:
- unreachable("Invalid direct load/store intrinsic");
- }
-
- nir_intrinsic_instr *copy =
- nir_intrinsic_instr_create(state->shader, indirect_op);
- copy->num_components = intrin->num_components;
-
- /* The indirect is always the last source */
- indirect_src = nir_intrinsic_infos[indirect_op].num_srcs - 1;
-
- for (unsigned i = 0; i < (unsigned)indirect_src; i++)
- nir_src_copy(©->src[i], &intrin->src[i], ©->instr);
-
- copy->src[indirect_src] = nir_src_for_ssa(global_offset);
- nir_ssa_dest_init(©->instr, ©->dest,
- intrin->dest.ssa.num_components,
- intrin->dest.ssa.name);
++ offset_load->const_index[0] = state->indices_start + index * 8;
++ offset_load->src[0] = nir_src_for_ssa(nir_imul(b, res_intrin->src[0].ssa,
++ nir_imm_int(b, 8)));
+
+ nir_ssa_dest_init(&offset_load->instr, &offset_load->dest, 2, NULL);
+ nir_builder_instr_insert(b, &offset_load->instr);
+
- nir_ssa_def *pred = nir_fge(b, nir_channel(b, &offset_load->dest.ssa, 1),
- rel_offset);
++ nir_src *offset_src = nir_get_io_offset_src(intrin);
++ nir_ssa_def *new_offset = nir_iadd(b, offset_src->ssa,
++ &offset_load->dest.ssa);
+
+ /* To avoid out-of-bounds access, we predicate the access on the dynamic range */
- nir_instr_insert_after_cf_list(&if_stmt->then_list, ©->instr);
++ nir_ssa_def *pred = nir_uge(b, nir_channel(b, &offset_load->dest.ssa, 1),
++ offset_src->ssa);
+ nir_if *if_stmt = nir_if_create(b->shader);
+ if_stmt->condition = nir_src_for_ssa(pred);
+ nir_cf_node_insert(b->cursor, &if_stmt->cf_node);
+
- if (indirect_op != nir_intrinsic_store_ssbo) {
++ nir_instr_remove(&intrin->instr);
++ *offset_src = nir_src_for_ssa(new_offset);
++ nir_instr_insert_after_cf_list(&if_stmt->then_list, &intrin->instr);
+
- src1->src = nir_src_for_ssa(©->dest.ssa);
++ if (intrin->intrinsic != nir_intrinsic_store_ssbo) {
+ /* It's a load, we need a phi node */
+ nir_phi_instr *phi = nir_phi_instr_create(b->shader);
+ nir_ssa_dest_init(&phi->instr, &phi->dest,
+ intrin->num_components, NULL);
+
+ nir_phi_src *src1 = ralloc(phi, nir_phi_src);
+ struct exec_node *tnode = exec_list_get_tail(&if_stmt->then_list);
+ src1->pred = exec_node_data(nir_block, tnode, cf_node.node);
- nir_instr_insert_after_cf(&if_stmt->cf_node, &phi->instr);
-
++ src1->src = nir_src_for_ssa(&intrin->dest.ssa);
+ exec_list_push_tail(&phi->srcs, &src1->node);
+
+ b->cursor = nir_after_cf_list(&if_stmt->else_list);
+ nir_ssa_def *zero = nir_build_imm(b, intrin->num_components,
+ (nir_const_value) { .u = { 0, 0, 0, 0 } });
+
+ nir_phi_src *src2 = ralloc(phi, nir_phi_src);
+ struct exec_node *enode = exec_list_get_tail(&if_stmt->else_list);
+ src2->pred = exec_node_data(nir_block, enode, cf_node.node);
+ src2->src = nir_src_for_ssa(zero);
+ exec_list_push_tail(&phi->srcs, &src2->node);
+
- }
+ assert(intrin->dest.is_ssa);
+ nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
+ nir_src_for_ssa(&phi->dest.ssa));
- nir_instr_remove(&intrin->instr);
+
- (const gl_constant_value *)&null_data->dynamic[i].offset;
++ nir_instr_insert_after_cf(&if_stmt->cf_node, &phi->instr);
++ }
+ }
+
+ return true;
+}
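
Putting the pieces together, the pass conceptually rewrites each dynamically-offset block access into the shape below. This is illustrative pseudocode of the generated NIR, not literal driver code.

/* Pseudocode for a lowered load (dyn.x = dynamic offset, dyn.y = range):
 *
 *    dyn = load_uniform(indices_start + index * 8)
 *    if (dyn.y >= base_offset)
 *       result = load_ssbo(block_index, base_offset + dyn.x)
 *    else
 *       result = 0
 *
 * Stores are predicated the same way but need no phi/zero merge.
 */
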
+
+void
+anv_nir_apply_dynamic_offsets(struct anv_pipeline *pipeline,
+ nir_shader *shader,
+ struct brw_stage_prog_data *prog_data)
+{
+ struct apply_dynamic_offsets_state state = {
+ .shader = shader,
+ .layout = pipeline->layout,
+ .indices_start = shader->num_uniforms,
+ };
+
+ if (!state.layout || !state.layout->stage[shader->stage].has_dynamic_offsets)
+ return;
+
+ nir_foreach_overload(shader, overload) {
+ if (overload->impl) {
+ nir_builder_init(&state.builder, overload->impl);
+ nir_foreach_block(overload->impl, apply_dynamic_offsets_block, &state);
+ nir_metadata_preserve(overload->impl, nir_metadata_block_index |
+ nir_metadata_dominance);
+ }
+ }
+
+ struct anv_push_constants *null_data = NULL;
+ for (unsigned i = 0; i < MAX_DYNAMIC_BUFFERS; i++) {
+ prog_data->param[i * 2 + shader->num_uniforms] =
- (const gl_constant_value *)&null_data->dynamic[i].range;
++ (const union gl_constant_value *)&null_data->dynamic[i].offset;
+ prog_data->param[i * 2 + 1 + shader->num_uniforms] =
- shader->num_uniforms += MAX_DYNAMIC_BUFFERS * 2;
++ (const union gl_constant_value *)&null_data->dynamic[i].range;
+ }
+
++ shader->num_uniforms += MAX_DYNAMIC_BUFFERS * 8;
+}
--- /dev/null
- setup_vec4_uniform_value(const gl_constant_value **params,
- const gl_constant_value *values,
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "anv_nir.h"
++#include "program/prog_parameter.h"
+#include "glsl/nir/nir_builder.h"
+
+struct apply_pipeline_layout_state {
+ nir_shader *shader;
+ nir_builder builder;
+
+ const struct anv_pipeline_layout *layout;
+
+ bool progress;
+};
+
+static uint32_t
+get_surface_index(unsigned set, unsigned binding,
+ struct apply_pipeline_layout_state *state)
+{
+ assert(set < state->layout->num_sets);
+ struct anv_descriptor_set_layout *set_layout =
+ state->layout->set[set].layout;
+
+ gl_shader_stage stage = state->shader->stage;
+
+ assert(binding < set_layout->binding_count);
+
+ assert(set_layout->binding[binding].stage[stage].surface_index >= 0);
+
+ uint32_t surface_index =
+ state->layout->set[set].stage[stage].surface_start +
+ set_layout->binding[binding].stage[stage].surface_index;
+
+ assert(surface_index < state->layout->stage[stage].surface_count);
+
+ return surface_index;
+}
+
+static uint32_t
+get_sampler_index(unsigned set, unsigned binding, nir_texop tex_op,
+ struct apply_pipeline_layout_state *state)
+{
+ assert(set < state->layout->num_sets);
+ struct anv_descriptor_set_layout *set_layout =
+ state->layout->set[set].layout;
+
+ assert(binding < set_layout->binding_count);
+
+ gl_shader_stage stage = state->shader->stage;
+
+ if (set_layout->binding[binding].stage[stage].sampler_index < 0) {
+ assert(tex_op == nir_texop_txf);
+ return 0;
+ }
+
+ uint32_t sampler_index =
+ state->layout->set[set].stage[stage].sampler_start +
+ set_layout->binding[binding].stage[stage].sampler_index;
+
+ assert(sampler_index < state->layout->stage[stage].sampler_count);
+
+ return sampler_index;
+}
+
+static uint32_t
+get_image_index(unsigned set, unsigned binding,
+ struct apply_pipeline_layout_state *state)
+{
+ assert(set < state->layout->num_sets);
+ struct anv_descriptor_set_layout *set_layout =
+ state->layout->set[set].layout;
+
+ assert(binding < set_layout->binding_count);
+
+ gl_shader_stage stage = state->shader->stage;
+
+ assert(set_layout->binding[binding].stage[stage].image_index >= 0);
+
+ uint32_t image_index =
+ state->layout->set[set].stage[stage].image_start +
+ set_layout->binding[binding].stage[stage].image_index;
+
+ assert(image_index < state->layout->stage[stage].image_count);
+
+ return image_index;
+}
+
+static void
+lower_res_index_intrinsic(nir_intrinsic_instr *intrin,
+ struct apply_pipeline_layout_state *state)
+{
+ nir_builder *b = &state->builder;
+
+ b->cursor = nir_before_instr(&intrin->instr);
+
+ uint32_t set = intrin->const_index[0];
+ uint32_t binding = intrin->const_index[1];
+
+ uint32_t surface_index = get_surface_index(set, binding, state);
+
+ nir_const_value *const_block_idx =
+ nir_src_as_const_value(intrin->src[0]);
+
+ nir_ssa_def *block_index;
+ if (const_block_idx) {
+ block_index = nir_imm_int(b, surface_index + const_block_idx->u[0]);
+ } else {
+ block_index = nir_iadd(b, nir_imm_int(b, surface_index),
+ nir_ssa_for_src(b, intrin->src[0], 1));
+ }
+
+ assert(intrin->dest.is_ssa);
+ nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(block_index));
+ nir_instr_remove(&intrin->instr);
+}
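
In effect, the lowering above flattens a (set, binding, array index) triple into a single flat surface index. A conceptual sketch (the names are illustrative):

/*    vulkan_resource_index(set, binding, i)
 *       becomes  surface_start(set, binding) + i
 *
 * where surface_start() is the get_surface_index() value computed from the
 * pipeline layout, and a constant i folds into a single immediate.
 */
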
+
+static void
+lower_tex_deref(nir_tex_instr *tex, nir_deref_var *deref,
+ unsigned *const_index, nir_tex_src_type src_type,
+ struct apply_pipeline_layout_state *state)
+{
+ if (deref->deref.child) {
+ assert(deref->deref.child->deref_type == nir_deref_type_array);
+ nir_deref_array *deref_array = nir_deref_as_array(deref->deref.child);
+
+ *const_index += deref_array->base_offset;
+
+ if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
+ nir_tex_src *new_srcs = rzalloc_array(tex, nir_tex_src,
+ tex->num_srcs + 1);
+
+ for (unsigned i = 0; i < tex->num_srcs; i++) {
+ new_srcs[i].src_type = tex->src[i].src_type;
+ nir_instr_move_src(&tex->instr, &new_srcs[i].src, &tex->src[i].src);
+ }
+
+ ralloc_free(tex->src);
+ tex->src = new_srcs;
+
+ /* Now we can go ahead and move the source over to being a
+ * first-class texture source.
+ */
+ tex->src[tex->num_srcs].src_type = src_type;
+ tex->num_srcs++;
+ assert(deref_array->indirect.is_ssa);
+ nir_instr_rewrite_src(&tex->instr, &tex->src[tex->num_srcs - 1].src,
+ deref_array->indirect);
+ }
+ }
+}
+
+static void
+cleanup_tex_deref(nir_tex_instr *tex, nir_deref_var *deref)
+{
+ if (deref->deref.child == NULL)
+ return;
+
+ nir_deref_array *deref_array = nir_deref_as_array(deref->deref.child);
+
+ if (deref_array->deref_array_type != nir_deref_array_type_indirect)
+ return;
+
+ nir_instr_rewrite_src(&tex->instr, &deref_array->indirect, NIR_SRC_INIT);
+}
+
+static void
+lower_tex(nir_tex_instr *tex, struct apply_pipeline_layout_state *state)
+{
+ /* No one should have come by and lowered it already */
+ assert(tex->sampler);
+
+ nir_deref_var *tex_deref = tex->texture ? tex->texture : tex->sampler;
+ tex->texture_index =
+ get_surface_index(tex_deref->var->data.descriptor_set,
+ tex_deref->var->data.binding, state);
+ lower_tex_deref(tex, tex_deref, &tex->texture_index,
+ nir_tex_src_texture_offset, state);
+
+ tex->sampler_index =
+ get_sampler_index(tex->sampler->var->data.descriptor_set,
+ tex->sampler->var->data.binding, tex->op, state);
+ lower_tex_deref(tex, tex->sampler, &tex->sampler_index,
+ nir_tex_src_sampler_offset, state);
+
+ if (tex->texture)
+ cleanup_tex_deref(tex, tex->texture);
+ cleanup_tex_deref(tex, tex->sampler);
+ tex->texture = NULL;
+ tex->sampler = NULL;
+}
+
+static bool
+apply_pipeline_layout_block(nir_block *block, void *void_state)
+{
+ struct apply_pipeline_layout_state *state = void_state;
+
+ nir_foreach_instr_safe(block, instr) {
+ switch (instr->type) {
+ case nir_instr_type_intrinsic: {
+ nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+ if (intrin->intrinsic == nir_intrinsic_vulkan_resource_index) {
+ lower_res_index_intrinsic(intrin, state);
+ state->progress = true;
+ }
+ break;
+ }
+ case nir_instr_type_tex:
+ lower_tex(nir_instr_as_tex(instr), state);
+ /* All texture instructions need lowering */
+ state->progress = true;
+ break;
+ default:
+ continue;
+ }
+ }
+
+ return true;
+}
+
+static void
- (const gl_constant_value *)&image_param->surface_idx, 1);
++setup_vec4_uniform_value(const union gl_constant_value **params,
++ const union gl_constant_value *values,
+ unsigned n)
+{
+ static const gl_constant_value zero = { 0 };
+
+ for (unsigned i = 0; i < n; ++i)
+ params[i] = &values[i];
+
+ for (unsigned i = n; i < 4; ++i)
+ params[i] = &zero;
+}
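
For example, each image parameter pushed below occupies whole vec4 slots; a scalar such as surface_idx is padded out with pointers to the shared zero constant:

/*    setup_vec4_uniform_value(param, &image_param->surface_idx, 1)
 *       fills the slot as { &surface_idx, &zero, &zero, &zero }
 */
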
+
+bool
+anv_nir_apply_pipeline_layout(nir_shader *shader,
+ struct brw_stage_prog_data *prog_data,
+ const struct anv_pipeline_layout *layout)
+{
+ struct apply_pipeline_layout_state state = {
+ .shader = shader,
+ .layout = layout,
+ };
+
+ nir_foreach_overload(shader, overload) {
+ if (overload->impl) {
+ nir_builder_init(&state.builder, overload->impl);
+ nir_foreach_block(overload->impl, apply_pipeline_layout_block, &state);
+ nir_metadata_preserve(overload->impl, nir_metadata_block_index |
+ nir_metadata_dominance);
+ }
+ }
+
+ if (layout->stage[shader->stage].image_count > 0) {
+ nir_foreach_variable(var, &shader->uniforms) {
+ if (glsl_type_is_image(var->type) ||
+ (glsl_type_is_array(var->type) &&
+ glsl_type_is_image(glsl_get_array_element(var->type)))) {
+ /* Images are represented as uniform push constants and the actual
+ * information required for reading/writing to/from the image is
+ * stored in the uniform.
+ */
+ unsigned image_index = get_image_index(var->data.descriptor_set,
+ var->data.binding, &state);
+
+ var->data.driver_location = shader->num_uniforms +
+ image_index * BRW_IMAGE_PARAM_SIZE;
+ }
+ }
+
+ struct anv_push_constants *null_data = NULL;
+ const gl_constant_value **param = prog_data->param + shader->num_uniforms;
+ const struct brw_image_param *image_param = null_data->images;
+ for (uint32_t i = 0; i < layout->stage[shader->stage].image_count; i++) {
+ setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SURFACE_IDX_OFFSET,
- (const gl_constant_value *)&image_param->surface_idx, 1);
++ (const union gl_constant_value *)&image_param->surface_idx, 1);
+ setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_OFFSET_OFFSET,
- (const gl_constant_value *)image_param->offset, 2);
++ (const union gl_constant_value *)image_param->offset, 2);
+ setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SIZE_OFFSET,
- (const gl_constant_value *)image_param->size, 3);
++ (const union gl_constant_value *)image_param->size, 3);
+ setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_STRIDE_OFFSET,
- (const gl_constant_value *)image_param->stride, 4);
++ (const union gl_constant_value *)image_param->stride, 4);
+ setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_TILING_OFFSET,
- (const gl_constant_value *)image_param->tiling, 3);
++ (const union gl_constant_value *)image_param->tiling, 3);
+ setup_vec4_uniform_value(param + BRW_IMAGE_PARAM_SWIZZLING_OFFSET,
- (const gl_constant_value *)image_param->swizzling, 2);
++ (const union gl_constant_value *)image_param->swizzling, 2);
+
+ param += BRW_IMAGE_PARAM_SIZE;
+ image_param ++;
+ }
+
+ shader->num_uniforms += layout->stage[shader->stage].image_count *
+ BRW_IMAGE_PARAM_SIZE;
+ }
+
+ return state.progress;
+}
--- /dev/null
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "anv_private.h"
+#include "brw_nir.h"
+#include "anv_nir.h"
+#include "glsl/nir/nir_spirv.h"
+
+/* Needed for SWIZZLE macros */
+#include "program/prog_instruction.h"
+
+// Shader functions
+
+VkResult anv_CreateShaderModule(
+ VkDevice _device,
+ const VkShaderModuleCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkShaderModule* pShaderModule)
+{
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ struct anv_shader_module *module;
+
+ assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
+ assert(pCreateInfo->flags == 0);
+
+ module = anv_alloc2(&device->alloc, pAllocator,
+ sizeof(*module) + pCreateInfo->codeSize, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (module == NULL)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ module->nir = NULL;
+ module->size = pCreateInfo->codeSize;
+ memcpy(module->data, pCreateInfo->pCode, module->size);
+
+ *pShaderModule = anv_shader_module_to_handle(module);
+
+ return VK_SUCCESS;
+}
+
+void anv_DestroyShaderModule(
+ VkDevice _device,
+ VkShaderModule _module,
+ const VkAllocationCallbacks* pAllocator)
+{
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_shader_module, module, _module);
+
+ anv_free2(&device->alloc, pAllocator, module);
+}
+
+#define SPIR_V_MAGIC_NUMBER 0x07230203
+
+/* Eventually, this will become part of anv_CreateShader. Unfortunately,
+ * we can't do that yet because we don't have the ability to copy nir.
+ */
+static nir_shader *
+anv_shader_compile_to_nir(struct anv_device *device,
+ struct anv_shader_module *module,
+ const char *entrypoint_name,
+ gl_shader_stage stage)
+{
+ if (strcmp(entrypoint_name, "main") != 0) {
+ anv_finishme("Multiple shaders per module not really supported");
+ }
+
+ const struct brw_compiler *compiler =
+ device->instance->physicalDevice.compiler;
+ const nir_shader_compiler_options *nir_options =
+ compiler->glsl_compiler_options[stage].NirOptions;
+
+ nir_shader *nir;
+ if (module->nir) {
+ /* Some things such as our meta clear/blit code will give us a NIR
+ * shader directly. In that case, we just ignore the SPIR-V entirely
+ * and use the NIR shader as-is. */
+ nir = module->nir;
+ nir->options = nir_options;
+ } else {
+ uint32_t *spirv = (uint32_t *) module->data;
+ assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
+ assert(module->size % 4 == 0);
+
+ nir = spirv_to_nir(spirv, module->size / 4, stage, nir_options);
+ }
+ nir_validate_shader(nir);
+
+ /* Vulkan uses the separate-shader linking model */
+ nir->info.separate_shader = true;
+
+ /* Make sure the provided shader has exactly one entrypoint and that the
+ * name matches the name that came in from the VkShader.
+ */
+ nir_function_impl *entrypoint = NULL;
+ nir_foreach_overload(nir, overload) {
+ if (strcmp(entrypoint_name, overload->function->name) == 0 &&
+ overload->impl) {
+ assert(entrypoint == NULL);
+ entrypoint = overload->impl;
+ }
+ }
+ assert(entrypoint != NULL);
+
+ nir = brw_preprocess_nir(nir, compiler->scalar_stage[stage]);
+
+ nir_shader_gather_info(nir, entrypoint);
+
+ return nir;
+}
+
+VkResult anv_CreatePipelineCache(
+ VkDevice device,
+ const VkPipelineCacheCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkPipelineCache* pPipelineCache)
+{
+ *pPipelineCache = (VkPipelineCache)1;
+
+ stub_return(VK_SUCCESS);
+}
+
+void anv_DestroyPipelineCache(
+ VkDevice _device,
+ VkPipelineCache _cache,
+ const VkAllocationCallbacks* pAllocator)
+{
+}
+
+VkResult anv_GetPipelineCacheData(
+ VkDevice device,
+ VkPipelineCache pipelineCache,
+ size_t* pDataSize,
+ void* pData)
+{
+ *pDataSize = 0;
+ stub_return(VK_SUCCESS);
+}
+
+VkResult anv_MergePipelineCaches(
+ VkDevice device,
+ VkPipelineCache destCache,
+ uint32_t srcCacheCount,
+ const VkPipelineCache* pSrcCaches)
+{
+ stub_return(VK_SUCCESS);
+}
+
+void anv_DestroyPipeline(
+ VkDevice _device,
+ VkPipeline _pipeline,
+ const VkAllocationCallbacks* pAllocator)
+{
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
+
+ anv_reloc_list_finish(&pipeline->batch_relocs,
+ pAllocator ? pAllocator : &device->alloc);
+ anv_state_stream_finish(&pipeline->program_stream);
+ if (pipeline->blend_state.map)
+ anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
+ anv_free2(&device->alloc, pAllocator, pipeline);
+}
+
+static const uint32_t vk_to_gen_primitive_type[] = {
+ [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
+ [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
+ [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
+ [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
+ [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
+ [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
+ [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
+ [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
+ [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
+ [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
+/* [VK_PRIMITIVE_TOPOLOGY_PATCH_LIST] = _3DPRIM_PATCHLIST_1 */
+};
+
+static void
+populate_sampler_prog_key(const struct brw_device_info *devinfo,
+ struct brw_sampler_prog_key_data *key)
+{
+ /* XXX: Handle texture swizzle on HSW- */
+ for (int i = 0; i < MAX_SAMPLERS; i++) {
+ /* Assume color sampler, no swizzling. (Works for BDW+) */
+ key->swizzles[i] = SWIZZLE_XYZW;
+ }
+}
+
+static void
+populate_vs_prog_key(const struct brw_device_info *devinfo,
+ struct brw_vs_prog_key *key)
+{
+ memset(key, 0, sizeof(*key));
+
+ populate_sampler_prog_key(devinfo, &key->tex);
+
+ /* XXX: Handle vertex input work-arounds */
+
+ /* XXX: Handle sampler_prog_key */
+}
+
+static void
+populate_gs_prog_key(const struct brw_device_info *devinfo,
+ struct brw_gs_prog_key *key)
+{
+ memset(key, 0, sizeof(*key));
+
+ populate_sampler_prog_key(devinfo, &key->tex);
+}
+
+static void
+populate_wm_prog_key(const struct brw_device_info *devinfo,
+ const VkGraphicsPipelineCreateInfo *info,
+ struct brw_wm_prog_key *key)
+{
+ ANV_FROM_HANDLE(anv_render_pass, render_pass, info->renderPass);
+
+ memset(key, 0, sizeof(*key));
+
+ populate_sampler_prog_key(devinfo, &key->tex);
+
+ /* TODO: Fill out key->input_slots_valid */
+
+ /* Vulkan doesn't specify a default */
+ key->high_quality_derivatives = false;
+
+ /* XXX Vulkan doesn't appear to specify */
+ key->clamp_fragment_color = false;
+
+ /* Vulkan always specifies upper-left coordinates */
+ key->drawable_height = 0;
+ key->render_to_fbo = false;
+
+ key->nr_color_regions = render_pass->subpasses[info->subpass].color_count;
+
+ key->replicate_alpha = key->nr_color_regions > 1 &&
+ info->pMultisampleState &&
+ info->pMultisampleState->alphaToCoverageEnable;
+
+ if (info->pMultisampleState && info->pMultisampleState->rasterizationSamples > 1) {
+ /* We should probably pull this out of the shader, but it's fairly
+ * harmless to compute it and then let dead-code take care of it.
+ */
+ key->persample_shading = info->pMultisampleState->sampleShadingEnable;
+ if (key->persample_shading)
+ key->persample_2x = info->pMultisampleState->rasterizationSamples == 2;
+
+ key->compute_pos_offset = info->pMultisampleState->sampleShadingEnable;
+ key->compute_sample_id = info->pMultisampleState->sampleShadingEnable;
+ }
+}
+
+static void
+populate_cs_prog_key(const struct brw_device_info *devinfo,
+ struct brw_cs_prog_key *key)
+{
+ memset(key, 0, sizeof(*key));
+
+ populate_sampler_prog_key(devinfo, &key->tex);
+}
+
+static nir_shader *
+anv_pipeline_compile(struct anv_pipeline *pipeline,
+ struct anv_shader_module *module,
+ const char *entrypoint,
+ gl_shader_stage stage,
+ struct brw_stage_prog_data *prog_data)
+{
+ const struct brw_compiler *compiler =
+ pipeline->device->instance->physicalDevice.compiler;
+
+ nir_shader *nir = anv_shader_compile_to_nir(pipeline->device,
+ module, entrypoint, stage);
+ if (nir == NULL)
+ return NULL;
+
+ anv_nir_lower_push_constants(nir, compiler->scalar_stage[stage]);
+
+ /* Figure out the number of parameters */
+ prog_data->nr_params = 0;
+
+ if (nir->num_uniforms > 0) {
+ /* If the shader uses any push constants at all, we'll just give
+ * them the maximum possible number
+ */
+ prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
+ }
+
+ if (pipeline->layout && pipeline->layout->stage[stage].has_dynamic_offsets)
+ prog_data->nr_params += MAX_DYNAMIC_BUFFERS * 2;
+
+ if (pipeline->layout && pipeline->layout->stage[stage].image_count > 0)
+ prog_data->nr_params += pipeline->layout->stage[stage].image_count *
+ BRW_IMAGE_PARAM_SIZE;
+
+ if (prog_data->nr_params > 0) {
+ /* XXX: I think we're leaking this */
- prog_data->param = (const gl_constant_value **)
- malloc(prog_data->nr_params * sizeof(gl_constant_value *));
++ prog_data->param = (const union gl_constant_value **)
++ malloc(prog_data->nr_params * sizeof(union gl_constant_value *));
+
+ /* We now set the param values to be offsets into an
+ * anv_push_constants structure. Since the compiler doesn't
+ * actually dereference any of the gl_constant_value pointers in the
+ * params array, it doesn't really matter what we put here.
+ */
+ struct anv_push_constants *null_data = NULL;
+ if (nir->num_uniforms > 0) {
+ /* Fill out the push constants section of the param array */
+ for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++)
- prog_data->param[i] = (const gl_constant_value *)
++ prog_data->param[i] = (const union gl_constant_value *)
+ &null_data->client_data[i * sizeof(float)];
+ }
+ }
+
+ /* Set up dynamic offsets */
+ anv_nir_apply_dynamic_offsets(pipeline, nir, prog_data);
+
+ /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
+ if (pipeline->layout)
+ anv_nir_apply_pipeline_layout(nir, prog_data, pipeline->layout);
+
+ /* All binding table offsets provided by apply_pipeline_layout() are
+ * relative to the start of the binding table (plus MAX_RTS for the
+ * fragment shader).
+ */
+ unsigned bias = stage == MESA_SHADER_FRAGMENT ? MAX_RTS : 0;
+ prog_data->binding_table.size_bytes = 0;
+ prog_data->binding_table.texture_start = bias;
+ prog_data->binding_table.ubo_start = bias;
+ prog_data->binding_table.ssbo_start = bias;
+ prog_data->binding_table.image_start = bias;
+
+ /* Finish the optimization and compilation process */
+ nir = brw_lower_nir(nir, &pipeline->device->info, NULL,
+ compiler->scalar_stage[stage]);
+
+ /* nir_lower_io will only handle the push constants; we need to set this
+ * to the full number of possible uniforms.
+ */
- nir->num_uniforms = prog_data->nr_params;
++ nir->num_uniforms = prog_data->nr_params * 4;
+
+ return nir;
+}
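
The null_data arithmetic in anv_pipeline_compile above never dereferences the NULL pointer; each param entry simply ends up holding the byte offset of a push-constant slot. A minimal standalone illustration of why those "pointers" are really offsets, using an invented struct in place of anv_push_constants:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout standing in for anv_push_constants; only the fact
 * that its fields sit at fixed offsets matters here. */
struct fake_push_constants {
   uint32_t dynamic_offsets[8];
   uint8_t  client_data[128];
};

int main(void)
{
   struct fake_push_constants *null_data = NULL;

   for (unsigned i = 0; i < 4; i++) {
      /* Same idiom as the driver: nothing is read through the pointer,
       * the address arithmetic just yields the field's byte offset
       * (the value offsetof() would report). */
      uintptr_t as_offset =
         (uintptr_t)&null_data->client_data[i * sizeof(float)];
      size_t expected = offsetof(struct fake_push_constants, client_data) +
                        i * sizeof(float);

      printf("param %u: offset %zu (offsetof says %zu)\n",
             i, (size_t)as_offset, expected);
   }

   return 0;
}
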
+
+static uint32_t
+anv_pipeline_upload_kernel(struct anv_pipeline *pipeline,
+ const void *data, size_t size)
+{
+ struct anv_state state =
+ anv_state_stream_alloc(&pipeline->program_stream, size, 64);
+
+ assert(size < pipeline->program_stream.block_pool->block_size);
+
+ memcpy(state.map, data, size);
+
+ if (!pipeline->device->info.has_llc)
+ anv_state_clflush(state);
+
+ return state.offset;
+}
+
+static void
+anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline,
+ gl_shader_stage stage,
+ struct brw_stage_prog_data *prog_data)
+{
+ struct brw_device_info *devinfo = &pipeline->device->info;
+ uint32_t max_threads[] = {
+ [MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
+ [MESA_SHADER_TESS_CTRL] = 0,
+ [MESA_SHADER_TESS_EVAL] = 0,
+ [MESA_SHADER_GEOMETRY] = devinfo->max_gs_threads,
+ [MESA_SHADER_FRAGMENT] = devinfo->max_wm_threads,
+ [MESA_SHADER_COMPUTE] = devinfo->max_cs_threads,
+ };
+
+ pipeline->prog_data[stage] = prog_data;
+ pipeline->active_stages |= mesa_to_vk_shader_stage(stage);
+ pipeline->scratch_start[stage] = pipeline->total_scratch;
+ pipeline->total_scratch =
+ align_u32(pipeline->total_scratch, 1024) +
+ prog_data->total_scratch * max_threads[stage];
+}
+
+static VkResult
+anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
+ const VkGraphicsPipelineCreateInfo *info,
+ struct anv_shader_module *module,
+ const char *entrypoint)
+{
+ const struct brw_compiler *compiler =
+ pipeline->device->instance->physicalDevice.compiler;
+ struct brw_vs_prog_data *prog_data = &pipeline->vs_prog_data;
+ struct brw_vs_prog_key key;
+
+ populate_vs_prog_key(&pipeline->device->info, &key);
+
+ /* TODO: Look up shader in cache */
+
+ memset(prog_data, 0, sizeof(*prog_data));
+
+ nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
+ MESA_SHADER_VERTEX,
+ &prog_data->base.base);
+ if (nir == NULL)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ void *mem_ctx = ralloc_context(NULL);
+
+ if (module->nir == NULL)
+ ralloc_steal(mem_ctx, nir);
+
+ prog_data->inputs_read = nir->info.inputs_read;
+ pipeline->writes_point_size = nir->info.outputs_written & VARYING_SLOT_PSIZ;
+
+ brw_compute_vue_map(&pipeline->device->info,
+ &prog_data->base.vue_map,
+ nir->info.outputs_written,
+ nir->info.separate_shader);
+
+ unsigned code_size;
+ const unsigned *shader_code =
+ brw_compile_vs(compiler, NULL, mem_ctx, &key, prog_data, nir,
+ NULL, false, -1, &code_size, NULL);
+ if (shader_code == NULL) {
+ ralloc_free(mem_ctx);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ const uint32_t offset =
+ anv_pipeline_upload_kernel(pipeline, shader_code, code_size);
+ if (prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8) {
+ pipeline->vs_simd8 = offset;
+ pipeline->vs_vec4 = NO_KERNEL;
+ } else {
+ pipeline->vs_simd8 = NO_KERNEL;
+ pipeline->vs_vec4 = offset;
+ }
+
+ ralloc_free(mem_ctx);
+
+ anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX,
+ &prog_data->base.base);
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
+ const VkGraphicsPipelineCreateInfo *info,
+ struct anv_shader_module *module,
+ const char *entrypoint)
+{
+ const struct brw_compiler *compiler =
+ pipeline->device->instance->physicalDevice.compiler;
+ struct brw_gs_prog_data *prog_data = &pipeline->gs_prog_data;
+ struct brw_gs_prog_key key;
+
+ populate_gs_prog_key(&pipeline->device->info, &key);
+
+ /* TODO: Look up shader in cache */
+
+ memset(prog_data, 0, sizeof(*prog_data));
+
+ nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
+ MESA_SHADER_GEOMETRY,
+ &prog_data->base.base);
+ if (nir == NULL)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ void *mem_ctx = ralloc_context(NULL);
+
+ if (module->nir == NULL)
+ ralloc_steal(mem_ctx, nir);
+
+ brw_compute_vue_map(&pipeline->device->info,
+ &prog_data->base.vue_map,
+ nir->info.outputs_written,
+ nir->info.separate_shader);
+
+ unsigned code_size;
+ const unsigned *shader_code =
+ brw_compile_gs(compiler, NULL, mem_ctx, &key, prog_data, nir,
+ NULL, -1, &code_size, NULL);
+ if (shader_code == NULL) {
+ ralloc_free(mem_ctx);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ /* TODO: SIMD8 GS */
+ pipeline->gs_vec4 =
+ anv_pipeline_upload_kernel(pipeline, shader_code, code_size);
+ pipeline->gs_vertex_count = nir->info.gs.vertices_in;
+
+ ralloc_free(mem_ctx);
+
+ anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY,
+ &prog_data->base.base);
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
+ const VkGraphicsPipelineCreateInfo *info,
+ struct anv_shader_module *module,
+ const char *entrypoint)
+{
+ const struct brw_compiler *compiler =
+ pipeline->device->instance->physicalDevice.compiler;
+ struct brw_wm_prog_data *prog_data = &pipeline->wm_prog_data;
+ struct brw_wm_prog_key key;
+
+ populate_wm_prog_key(&pipeline->device->info, info, &key);
+
+ if (pipeline->use_repclear)
+ key.nr_color_regions = 1;
+
+ /* TODO: Look up shader in cache */
+
+ memset(prog_data, 0, sizeof(*prog_data));
+
+ prog_data->binding_table.render_target_start = 0;
+
+ nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
+ MESA_SHADER_FRAGMENT,
+ &prog_data->base);
+ if (nir == NULL)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ void *mem_ctx = ralloc_context(NULL);
+
+ if (module->nir == NULL)
+ ralloc_steal(mem_ctx, nir);
+
+ unsigned code_size;
+ const unsigned *shader_code =
+ brw_compile_fs(compiler, NULL, mem_ctx, &key, prog_data, nir,
+ NULL, -1, -1, pipeline->use_repclear, &code_size, NULL);
+ if (shader_code == NULL) {
+ ralloc_free(mem_ctx);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ uint32_t offset = anv_pipeline_upload_kernel(pipeline,
+ shader_code, code_size);
+ if (prog_data->no_8)
+ pipeline->ps_simd8 = NO_KERNEL;
+ else
+ pipeline->ps_simd8 = offset;
+
+ if (prog_data->no_8 || prog_data->prog_offset_16) {
+ pipeline->ps_simd16 = offset + prog_data->prog_offset_16;
+ } else {
+ pipeline->ps_simd16 = NO_KERNEL;
+ }
+
+ pipeline->ps_ksp2 = 0;
+ pipeline->ps_grf_start2 = 0;
+ if (pipeline->ps_simd8 != NO_KERNEL) {
+ pipeline->ps_ksp0 = pipeline->ps_simd8;
+ pipeline->ps_grf_start0 = prog_data->base.dispatch_grf_start_reg;
+ if (pipeline->ps_simd16 != NO_KERNEL) {
+ pipeline->ps_ksp2 = pipeline->ps_simd16;
+ pipeline->ps_grf_start2 = prog_data->dispatch_grf_start_reg_16;
+ }
+ } else if (pipeline->ps_simd16 != NO_KERNEL) {
+ pipeline->ps_ksp0 = pipeline->ps_simd16;
+ pipeline->ps_grf_start0 = prog_data->dispatch_grf_start_reg_16;
+ }
+
+ ralloc_free(mem_ctx);
+
+ anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT,
+ &prog_data->base);
+
+ return VK_SUCCESS;
+}
+
+VkResult
+anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
+ const VkComputePipelineCreateInfo *info,
+ struct anv_shader_module *module,
+ const char *entrypoint)
+{
+ const struct brw_compiler *compiler =
+ pipeline->device->instance->physicalDevice.compiler;
+ struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
+ struct brw_cs_prog_key key;
+
+ populate_cs_prog_key(&pipeline->device->info, &key);
+
+ /* TODO: Look up shader in cache */
+
+ memset(prog_data, 0, sizeof(*prog_data));
+
+ nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
+ MESA_SHADER_COMPUTE,
+ &prog_data->base);
+ if (nir == NULL)
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+ void *mem_ctx = ralloc_context(NULL);
+
+ if (module->nir == NULL)
+ ralloc_steal(mem_ctx, nir);
+
+ unsigned code_size;
+ const unsigned *shader_code =
+ brw_compile_cs(compiler, NULL, mem_ctx, &key, prog_data, nir,
+ -1, &code_size, NULL);
+ if (shader_code == NULL) {
+ ralloc_free(mem_ctx);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+
+ pipeline->cs_simd = anv_pipeline_upload_kernel(pipeline,
+ shader_code, code_size);
+ ralloc_free(mem_ctx);
+
+ anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE,
+ &prog_data->base);
+
+ return VK_SUCCESS;
+}
+
+static const int gen8_push_size = 32 * 1024;
+
+static void
+gen7_compute_urb_partition(struct anv_pipeline *pipeline)
+{
+ const struct brw_device_info *devinfo = &pipeline->device->info;
+ bool vs_present = pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT;
+ unsigned vs_size = vs_present ? pipeline->vs_prog_data.base.urb_entry_size : 1;
+ unsigned vs_entry_size_bytes = vs_size * 64;
+ bool gs_present = pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT;
+ unsigned gs_size = gs_present ? pipeline->gs_prog_data.base.urb_entry_size : 1;
+ unsigned gs_entry_size_bytes = gs_size * 64;
+
+ /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
+ *
+ * VS Number of URB Entries must be divisible by 8 if the VS URB Entry
+ * Allocation Size is less than 9 512-bit URB entries.
+ *
+ * Similar text exists for GS.
+ */
+ unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
+ unsigned gs_granularity = (gs_size < 9) ? 8 : 1;
+
+ /* URB allocations must be done in 8k chunks. */
+ unsigned chunk_size_bytes = 8192;
+
+ /* Determine the size of the URB in chunks. */
+ unsigned urb_chunks = devinfo->urb.size * 1024 / chunk_size_bytes;
+
+ /* Reserve space for push constants */
+ unsigned push_constant_bytes = gen8_push_size;
+ unsigned push_constant_chunks =
+ push_constant_bytes / chunk_size_bytes;
+
+ /* Initially, assign each stage the minimum amount of URB space it needs,
+ * and make a note of how much additional space it "wants" (the amount of
+ * additional space it could actually make use of).
+ */
+
+ /* VS has a lower limit on the number of URB entries */
+ unsigned vs_chunks =
+ ALIGN(devinfo->urb.min_vs_entries * vs_entry_size_bytes,
+ chunk_size_bytes) / chunk_size_bytes;
+ unsigned vs_wants =
+ ALIGN(devinfo->urb.max_vs_entries * vs_entry_size_bytes,
+ chunk_size_bytes) / chunk_size_bytes - vs_chunks;
+
+ unsigned gs_chunks = 0;
+ unsigned gs_wants = 0;
+ if (gs_present) {
+ /* There are two constraints on the minimum amount of URB space we can
+ * allocate:
+ *
+ * (1) We need room for at least 2 URB entries, since we always operate
+ * the GS in DUAL_OBJECT mode.
+ *
+ * (2) We can't allocate less than nr_gs_entries_granularity.
+ */
+ gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
+ chunk_size_bytes) / chunk_size_bytes;
+ gs_wants =
+ ALIGN(devinfo->urb.max_gs_entries * gs_entry_size_bytes,
+ chunk_size_bytes) / chunk_size_bytes - gs_chunks;
+ }
+
+ /* There should always be enough URB space to satisfy the minimum
+ * requirements of each stage.
+ */
+ unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
+ assert(total_needs <= urb_chunks);
+
+ /* Mete out remaining space (if any) in proportion to "wants". */
+ unsigned total_wants = vs_wants + gs_wants;
+ unsigned remaining_space = urb_chunks - total_needs;
+ if (remaining_space > total_wants)
+ remaining_space = total_wants;
+ if (remaining_space > 0) {
+ unsigned vs_additional = (unsigned)
+ round(vs_wants * (((double) remaining_space) / total_wants));
+ vs_chunks += vs_additional;
+ remaining_space -= vs_additional;
+ gs_chunks += remaining_space;
+ }
+
+ /* Sanity check that we haven't over-allocated. */
+ assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);
+
+ /* Finally, compute the number of entries that can fit in the space
+ * allocated to each stage.
+ */
+ unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
+ unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;
+
+ /* Since we rounded up when computing *_wants, this may be slightly more
+ * than the maximum allowed amount, so correct for that.
+ */
+ nr_vs_entries = MIN2(nr_vs_entries, devinfo->urb.max_vs_entries);
+ nr_gs_entries = MIN2(nr_gs_entries, devinfo->urb.max_gs_entries);
+
+ /* Ensure that we program a multiple of the granularity. */
+ nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
+ nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);
+
+ /* Finally, sanity check to make sure we have at least the minimum number
+ * of entries needed for each stage.
+ */
+ assert(nr_vs_entries >= devinfo->urb.min_vs_entries);
+ if (gs_present)
+ assert(nr_gs_entries >= 2);
+
+ /* Lay out the URB in the following order:
+ * - push constants
+ * - VS
+ * - GS
+ */
+ pipeline->urb.vs_start = push_constant_chunks;
+ pipeline->urb.vs_size = vs_size;
+ pipeline->urb.nr_vs_entries = nr_vs_entries;
+
+ pipeline->urb.gs_start = push_constant_chunks + vs_chunks;
+ pipeline->urb.gs_size = gs_size;
+ pipeline->urb.nr_gs_entries = nr_gs_entries;
+}
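
The needs/wants split in gen7_compute_urb_partition is plain integer arithmetic on 8 kB chunks. Below is a standalone walk-through with made-up device limits (a 192 kB URB, 128-byte VS entries, no GS); the numbers are illustrative only and not taken from any real gen:

#include <stdio.h>

#define ALIGN(v, a)         (((v) + (a) - 1) / (a) * (a))
#define MIN2(a, b)          ((a) < (b) ? (a) : (b))
#define ROUND_DOWN_TO(v, a) ((v) / (a) * (a))

int main(void)
{
   /* Illustrative device limits, not real hardware values. */
   const unsigned urb_size_kb    = 192;
   const unsigned min_vs_entries = 64;
   const unsigned max_vs_entries = 2560;
   const unsigned vs_size        = 2;            /* 512-bit units */
   const unsigned vs_entry_bytes = vs_size * 64;
   const unsigned vs_granularity = (vs_size < 9) ? 8 : 1;

   const unsigned chunk_bytes = 8192;
   const unsigned urb_chunks  = urb_size_kb * 1024 / chunk_bytes;
   const unsigned push_chunks = 32 * 1024 / chunk_bytes;

   /* Minimum the VS needs, and how much more it could use ("wants"). */
   unsigned vs_chunks =
      ALIGN(min_vs_entries * vs_entry_bytes, chunk_bytes) / chunk_bytes;
   unsigned vs_wants =
      ALIGN(max_vs_entries * vs_entry_bytes, chunk_bytes) / chunk_bytes -
      vs_chunks;

   /* With no GS, the VS soaks up whatever is left after push constants. */
   unsigned remaining = urb_chunks - push_chunks - vs_chunks;
   if (remaining > vs_wants)
      remaining = vs_wants;
   vs_chunks += remaining;

   unsigned nr_vs_entries = vs_chunks * chunk_bytes / vs_entry_bytes;
   nr_vs_entries = MIN2(nr_vs_entries, max_vs_entries);
   nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);

   printf("URB: %u chunks, push constants take %u, VS gets %u chunks "
          "=> %u VS entries\n",
          urb_chunks, push_chunks, vs_chunks, nr_vs_entries);
   return 0;
}
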
+
+static void
+anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline,
+ const VkGraphicsPipelineCreateInfo *pCreateInfo)
+{
+ anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
+ ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
+ struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
+
+ pipeline->dynamic_state = default_dynamic_state;
+
+ if (pCreateInfo->pDynamicState) {
+ /* Remove all of the states that are marked as dynamic */
+ uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
+ for (uint32_t s = 0; s < count; s++)
+ states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
+ }
+
+ struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;
+
+ dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
+ if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
+ typed_memcpy(dynamic->viewport.viewports,
+ pCreateInfo->pViewportState->pViewports,
+ pCreateInfo->pViewportState->viewportCount);
+ }
+
+ dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
+ if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
+ typed_memcpy(dynamic->scissor.scissors,
+ pCreateInfo->pViewportState->pScissors,
+ pCreateInfo->pViewportState->scissorCount);
+ }
+
+ if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
+ assert(pCreateInfo->pRasterizationState);
+ dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
+ }
+
+ if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
+ assert(pCreateInfo->pRasterizationState);
+ dynamic->depth_bias.bias =
+ pCreateInfo->pRasterizationState->depthBiasConstantFactor;
+ dynamic->depth_bias.clamp =
+ pCreateInfo->pRasterizationState->depthBiasClamp;
+ dynamic->depth_bias.slope =
+ pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
+ }
+
+ if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS)) {
+ assert(pCreateInfo->pColorBlendState);
+ typed_memcpy(dynamic->blend_constants,
+ pCreateInfo->pColorBlendState->blendConstants, 4);
+ }
+
+ /* If there is no depthstencil attachment, then don't read
+ * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
+ * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
+ * no need to override the depthstencil defaults in
+ * anv_pipeline::dynamic_state when there is no depthstencil attachment.
+ *
+ * From the Vulkan spec (20 Oct 2015, git-aa308cb):
+ *
+ * pDepthStencilState [...] may only be NULL if renderPass and subpass
+ * specify a subpass that has no depth/stencil attachment.
+ */
+ if (subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) {
+ if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
+ assert(pCreateInfo->pDepthStencilState);
+ dynamic->depth_bounds.min =
+ pCreateInfo->pDepthStencilState->minDepthBounds;
+ dynamic->depth_bounds.max =
+ pCreateInfo->pDepthStencilState->maxDepthBounds;
+ }
+
+ if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
+ assert(pCreateInfo->pDepthStencilState);
+ dynamic->stencil_compare_mask.front =
+ pCreateInfo->pDepthStencilState->front.compareMask;
+ dynamic->stencil_compare_mask.back =
+ pCreateInfo->pDepthStencilState->back.compareMask;
+ }
+
+ if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
+ assert(pCreateInfo->pDepthStencilState);
+ dynamic->stencil_write_mask.front =
+ pCreateInfo->pDepthStencilState->front.writeMask;
+ dynamic->stencil_write_mask.back =
+ pCreateInfo->pDepthStencilState->back.writeMask;
+ }
+
+ if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
+ assert(pCreateInfo->pDepthStencilState);
+ dynamic->stencil_reference.front =
+ pCreateInfo->pDepthStencilState->front.reference;
+ dynamic->stencil_reference.back =
+ pCreateInfo->pDepthStencilState->back.reference;
+ }
+ }
+
+ pipeline->dynamic_state_mask = states;
+}
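
The dynamic-state handling above is a straightforward bitmask filter: start from "every state is baked into the pipeline", clear a bit for each entry in pDynamicStates, and copy a value out of the create info only while its bit is still set. A small standalone version of that filtering, with a few invented state IDs standing in for the VkDynamicState enum:

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for a few VkDynamicState values. */
enum { STATE_VIEWPORT, STATE_SCISSOR, STATE_LINE_WIDTH, STATE_COUNT };

int main(void)
{
   uint32_t states = (1u << STATE_COUNT) - 1;   /* everything static */

   /* Pretend the application listed line width as dynamic. */
   const unsigned dynamic_states[] = { STATE_LINE_WIDTH };
   for (unsigned s = 0;
        s < sizeof(dynamic_states) / sizeof(dynamic_states[0]); s++)
      states &= ~(1u << dynamic_states[s]);

   /* Only bake in values whose bits survived the filter. */
   if (states & (1u << STATE_VIEWPORT))
      printf("viewport baked into the pipeline\n");
   if (states & (1u << STATE_LINE_WIDTH))
      printf("line width baked into the pipeline\n");
   else
      printf("line width left dynamic (set at record time)\n");

   return 0;
}
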
+
+static void
+anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
+{
+ struct anv_render_pass *renderpass = NULL;
+ struct anv_subpass *subpass = NULL;
+
+ /* Assert that all required members of VkGraphicsPipelineCreateInfo are
+ * present, as explained by the Vulkan spec (20 Oct 2015, git-aa308cb), Section
+ * 4.2 Graphics Pipeline.
+ */
+ assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
+
+ renderpass = anv_render_pass_from_handle(info->renderPass);
+ assert(renderpass);
+
+ if (renderpass != &anv_meta_dummy_renderpass) {
+ assert(info->subpass < renderpass->subpass_count);
+ subpass = &renderpass->subpasses[info->subpass];
+ }
+
+ assert(info->stageCount >= 1);
+ assert(info->pVertexInputState);
+ assert(info->pInputAssemblyState);
+ assert(info->pViewportState);
+ assert(info->pRasterizationState);
+ assert(info->pMultisampleState);
+
+ if (subpass && subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED)
+ assert(info->pDepthStencilState);
+
+ if (subpass && subpass->color_count > 0)
+ assert(info->pColorBlendState);
+
+ for (uint32_t i = 0; i < info->stageCount; ++i) {
+ switch (info->pStages[i].stage) {
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
+ assert(info->pTessellationState);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+VkResult
+anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
+ const VkGraphicsPipelineCreateInfo *pCreateInfo,
+ const struct anv_graphics_pipeline_create_info *extra,
+ const VkAllocationCallbacks *alloc)
+{
+ anv_validate {
+ anv_pipeline_validate_create_info(pCreateInfo);
+ }
+
+ if (alloc == NULL)
+ alloc = &device->alloc;
+
+ pipeline->device = device;
+ pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);
+
+ anv_reloc_list_init(&pipeline->batch_relocs, alloc);
+ /* TODO: Handle allocation fail */
+
+ pipeline->batch.alloc = alloc;
+ pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
+ pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
+ pipeline->batch.relocs = &pipeline->batch_relocs;
+
+ anv_state_stream_init(&pipeline->program_stream,
+ &device->instruction_block_pool);
+
+ anv_pipeline_init_dynamic_state(pipeline, pCreateInfo);
+
+ if (pCreateInfo->pTessellationState)
+ anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO");
+ if (pCreateInfo->pMultisampleState &&
+ pCreateInfo->pMultisampleState->rasterizationSamples > 1)
+ anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO");
+
+ pipeline->use_repclear = extra && extra->use_repclear;
+ pipeline->writes_point_size = false;
+
+ /* When we free the pipeline, we detect stages based on the NULL status
+ * of various prog_data pointers. Make them NULL by default.
+ */
+ memset(pipeline->prog_data, 0, sizeof(pipeline->prog_data));
+ memset(pipeline->scratch_start, 0, sizeof(pipeline->scratch_start));
+
+ pipeline->vs_simd8 = NO_KERNEL;
+ pipeline->vs_vec4 = NO_KERNEL;
+ pipeline->gs_vec4 = NO_KERNEL;
+
+ pipeline->active_stages = 0;
+ pipeline->total_scratch = 0;
+
+ for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
+ ANV_FROM_HANDLE(anv_shader_module, module,
+ pCreateInfo->pStages[i].module);
+ const char *entrypoint = pCreateInfo->pStages[i].pName;
+
+ switch (pCreateInfo->pStages[i].stage) {
+ case VK_SHADER_STAGE_VERTEX_BIT:
+ anv_pipeline_compile_vs(pipeline, pCreateInfo, module, entrypoint);
+ break;
+ case VK_SHADER_STAGE_GEOMETRY_BIT:
+ anv_pipeline_compile_gs(pipeline, pCreateInfo, module, entrypoint);
+ break;
+ case VK_SHADER_STAGE_FRAGMENT_BIT:
+ anv_pipeline_compile_fs(pipeline, pCreateInfo, module, entrypoint);
+ break;
+ default:
+ anv_finishme("Unsupported shader stage");
+ }
+ }
+
+ if (!(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT)) {
+ /* Vertex is only optional if disable_vs is set */
+ assert(extra->disable_vs);
+ memset(&pipeline->vs_prog_data, 0, sizeof(pipeline->vs_prog_data));
+ }
+
+ gen7_compute_urb_partition(pipeline);
+
+ const VkPipelineVertexInputStateCreateInfo *vi_info =
+ pCreateInfo->pVertexInputState;
+ pipeline->vb_used = 0;
+ for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
+ const VkVertexInputBindingDescription *desc =
+ &vi_info->pVertexBindingDescriptions[i];
+
+ pipeline->vb_used |= 1 << desc->binding;
+ pipeline->binding_stride[desc->binding] = desc->stride;
+
+ /* Step rate is programmed per vertex element (attribute), not
+ * binding. Set up a map of which bindings step per instance, for
+ * reference by vertex element setup. */
+ switch (desc->inputRate) {
+ default:
+ case VK_VERTEX_INPUT_RATE_VERTEX:
+ pipeline->instancing_enable[desc->binding] = false;
+ break;
+ case VK_VERTEX_INPUT_RATE_INSTANCE:
+ pipeline->instancing_enable[desc->binding] = true;
+ break;
+ }
+ }
+
+ const VkPipelineInputAssemblyStateCreateInfo *ia_info =
+ pCreateInfo->pInputAssemblyState;
+ pipeline->primitive_restart = ia_info->primitiveRestartEnable;
+ pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
+
+ if (extra && extra->use_rectlist)
+ pipeline->topology = _3DPRIM_RECTLIST;
+
+ return VK_SUCCESS;
+}
+
+VkResult
+anv_graphics_pipeline_create(
+ VkDevice _device,
+ const VkGraphicsPipelineCreateInfo *pCreateInfo,
+ const struct anv_graphics_pipeline_create_info *extra,
+ const VkAllocationCallbacks *pAllocator,
+ VkPipeline *pPipeline)
+{
+ ANV_FROM_HANDLE(anv_device, device, _device);
+
+ switch (device->info.gen) {
+ case 7:
+ if (device->info.is_haswell)
+ return gen75_graphics_pipeline_create(_device, pCreateInfo, extra, pAllocator, pPipeline);
+ else
+ return gen7_graphics_pipeline_create(_device, pCreateInfo, extra, pAllocator, pPipeline);
+ case 8:
+ return gen8_graphics_pipeline_create(_device, pCreateInfo, extra, pAllocator, pPipeline);
+ case 9:
+ return gen9_graphics_pipeline_create(_device, pCreateInfo, extra, pAllocator, pPipeline);
+ default:
+ unreachable("unsupported gen\n");
+ }
+}
+
+VkResult anv_CreateGraphicsPipelines(
+ VkDevice _device,
+ VkPipelineCache pipelineCache,
+ uint32_t count,
+ const VkGraphicsPipelineCreateInfo* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator,
+ VkPipeline* pPipelines)
+{
+ VkResult result = VK_SUCCESS;
+
+ unsigned i = 0;
+ for (; i < count; i++) {
+ result = anv_graphics_pipeline_create(_device, &pCreateInfos[i],
+ NULL, pAllocator, &pPipelines[i]);
+ if (result != VK_SUCCESS) {
+ for (unsigned j = 0; j < i; j++) {
+ anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
+ }
+
+ return result;
+ }
+ }
+
+ return VK_SUCCESS;
+}
+
+static VkResult anv_compute_pipeline_create(
+ VkDevice _device,
+ const VkComputePipelineCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkPipeline* pPipeline)
+{
+ ANV_FROM_HANDLE(anv_device, device, _device);
+
+ switch (device->info.gen) {
+ case 7:
+ if (device->info.is_haswell)
+ return gen75_compute_pipeline_create(_device, pCreateInfo, pAllocator, pPipeline);
+ else
+ return gen7_compute_pipeline_create(_device, pCreateInfo, pAllocator, pPipeline);
+ case 8:
+ return gen8_compute_pipeline_create(_device, pCreateInfo, pAllocator, pPipeline);
+ case 9:
+ return gen9_compute_pipeline_create(_device, pCreateInfo, pAllocator, pPipeline);
+ default:
+ unreachable("unsupported gen\n");
+ }
+}
+
+VkResult anv_CreateComputePipelines(
+ VkDevice _device,
+ VkPipelineCache pipelineCache,
+ uint32_t count,
+ const VkComputePipelineCreateInfo* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator,
+ VkPipeline* pPipelines)
+{
+ VkResult result = VK_SUCCESS;
+
+ unsigned i = 0;
+ for (; i < count; i++) {
+ result = anv_compute_pipeline_create(_device, &pCreateInfos[i],
+ pAllocator, &pPipelines[i]);
+ if (result != VK_SUCCESS) {
+ for (unsigned j = 0; j < i; j++) {
+ anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
+ }
+
+ return result;
+ }
+ }
+
+ return VK_SUCCESS;
+}
--- /dev/null
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#include "anv_private.h"
+
+#include "gen8_pack.h"
+#include "gen9_pack.h"
+
+static void
+cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
+{
+ static const uint32_t push_constant_opcodes[] = {
+ [MESA_SHADER_VERTEX] = 21,
+ [MESA_SHADER_TESS_CTRL] = 25, /* HS */
+ [MESA_SHADER_TESS_EVAL] = 26, /* DS */
+ [MESA_SHADER_GEOMETRY] = 22,
+ [MESA_SHADER_FRAGMENT] = 23,
+ [MESA_SHADER_COMPUTE] = 0,
+ };
+
+ VkShaderStageFlags flushed = 0;
+
+ anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
+ if (stage == MESA_SHADER_COMPUTE)
+ continue;
+
+ struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);
+
+ if (state.offset == 0)
+ continue;
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS),
+ ._3DCommandSubOpcode = push_constant_opcodes[stage],
+ .ConstantBody = {
+ .PointerToConstantBuffer0 = { .offset = state.offset },
+ .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
+ });
+
+ flushed |= mesa_to_vk_shader_stage(stage);
+ }
+
+ cmd_buffer->state.push_constants_dirty &= ~flushed;
+}
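
The ConstantBuffer0ReadLength programmed above is in 256-bit (32-byte) units, so the push-constant allocation size in bytes is divided with round-up. A quick sketch of that conversion for a few sample sizes:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
   /* A few example push-constant allocation sizes in bytes. */
   const unsigned sizes[] = { 16, 32, 100, 128 };

   for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
      /* 3DSTATE_CONSTANT_* read lengths count 256-bit (32-byte) units. */
      printf("%3u bytes -> ConstantBuffer0ReadLength = %u\n",
             sizes[i], DIV_ROUND_UP(sizes[i], 32));
   }
   return 0;
}
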
+
+#if ANV_GEN == 8
+static void
+emit_viewport_state(struct anv_cmd_buffer *cmd_buffer,
+ uint32_t count, const VkViewport *viewports)
+{
+ struct anv_state sf_clip_state =
+ anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 64, 64);
+ struct anv_state cc_state =
+ anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);
+
+ for (uint32_t i = 0; i < count; i++) {
+ const VkViewport *vp = &viewports[i];
+
+ /* The gen7 state struct has just the matrix and guardband fields, the
+ * gen8 struct adds the min/max viewport fields. */
+ struct GENX(SF_CLIP_VIEWPORT) sf_clip_viewport = {
+ .ViewportMatrixElementm00 = vp->width / 2,
+ .ViewportMatrixElementm11 = vp->height / 2,
+ .ViewportMatrixElementm22 = (vp->maxDepth - vp->minDepth) / 2,
+ .ViewportMatrixElementm30 = vp->x + vp->width / 2,
+ .ViewportMatrixElementm31 = vp->y + vp->height / 2,
+ .ViewportMatrixElementm32 = (vp->maxDepth + vp->minDepth) / 2,
+ .XMinClipGuardband = -1.0f,
+ .XMaxClipGuardband = 1.0f,
+ .YMinClipGuardband = -1.0f,
+ .YMaxClipGuardband = 1.0f,
+ .XMinViewPort = vp->x,
+ .XMaxViewPort = vp->x + vp->width - 1,
+ .YMinViewPort = vp->y,
+ .YMaxViewPort = vp->y + vp->height - 1,
+ };
+
+ struct GENX(CC_VIEWPORT) cc_viewport = {
+ .MinimumDepth = vp->minDepth,
+ .MaximumDepth = vp->maxDepth
+ };
+
+ GENX(SF_CLIP_VIEWPORT_pack)(NULL, sf_clip_state.map + i * 64,
+ &sf_clip_viewport);
+ GENX(CC_VIEWPORT_pack)(NULL, cc_state.map + i * 32, &cc_viewport);
+ }
+
+ if (!cmd_buffer->device->info.has_llc) {
+ anv_state_clflush(sf_clip_state);
+ anv_state_clflush(cc_state);
+ }
+
+ anv_batch_emit(&cmd_buffer->batch,
+ GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC),
+ .CCViewportPointer = cc_state.offset);
+ anv_batch_emit(&cmd_buffer->batch,
+ GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP),
+ .SFClipViewportPointer = sf_clip_state.offset);
+}
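
The SF_CLIP_VIEWPORT elements written above are a scale-plus-offset encoding of the viewport transform: m00/m11/m22 scale a clip-space coordinate and m30/m31/m32 recenter it, the /2 form mapping a [-1,1] source range onto the viewport rectangle and depth range. A standalone check of that mapping for one made-up 1920x1080 viewport, using a local struct that mirrors the VkViewport fields:

#include <stdio.h>

struct viewport { float x, y, width, height, minDepth, maxDepth; };

int main(void)
{
   const struct viewport vp = { 0.0f, 0.0f, 1920.0f, 1080.0f, 0.0f, 1.0f };

   /* Same terms as SF_CLIP_VIEWPORT m00/m11/m22 and m30/m31/m32. */
   const float m00 = vp.width / 2,  m30 = vp.x + vp.width / 2;
   const float m11 = vp.height / 2, m31 = vp.y + vp.height / 2;
   const float m22 = (vp.maxDepth - vp.minDepth) / 2;
   const float m32 = (vp.maxDepth + vp.minDepth) / 2;

   /* Map a few clip-space corners through c' = c * scale + offset. */
   const float clip[][3] = { { -1, -1, -1 }, { 1, 1, 1 }, { 0, 0, 0 } };
   for (int i = 0; i < 3; i++) {
      printf("clip(% .1f, % .1f, % .1f) -> screen(%.1f, %.1f), depth %.2f\n",
             clip[i][0], clip[i][1], clip[i][2],
             clip[i][0] * m00 + m30,
             clip[i][1] * m11 + m31,
             clip[i][2] * m22 + m32);
   }
   return 0;
}
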
+
+void
+gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer)
+{
+ if (cmd_buffer->state.dynamic.viewport.count > 0) {
+ emit_viewport_state(cmd_buffer, cmd_buffer->state.dynamic.viewport.count,
+ cmd_buffer->state.dynamic.viewport.viewports);
+ } else {
+ /* If viewport count is 0, this is taken to mean "use the default" */
+ emit_viewport_state(cmd_buffer, 1,
+ &(VkViewport) {
+ .x = 0.0f,
+ .y = 0.0f,
+ .width = cmd_buffer->state.framebuffer->width,
+ .height = cmd_buffer->state.framebuffer->height,
+ .minDepth = 0.0f,
+ .maxDepth = 1.0f,
+ });
+ }
+}
+#endif
+
+static void
+cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer)
+{
+ struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
+ uint32_t *p;
+
+ uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;
+
+ assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);
+
+ if (cmd_buffer->state.current_pipeline != _3D) {
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
+#if ANV_GEN >= 9
+ .MaskBits = 3,
+#endif
+ .PipelineSelection = _3D);
+ cmd_buffer->state.current_pipeline = _3D;
+ }
+
+ if (vb_emit) {
+ const uint32_t num_buffers = __builtin_popcount(vb_emit);
+ const uint32_t num_dwords = 1 + num_buffers * 4;
+
+ p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
+ GENX(3DSTATE_VERTEX_BUFFERS));
+ uint32_t vb, i = 0;
+ for_each_bit(vb, vb_emit) {
+ struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
+ uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;
+
+ struct GENX(VERTEX_BUFFER_STATE) state = {
+ .VertexBufferIndex = vb,
+ .MemoryObjectControlState = GENX(MOCS),
+ .AddressModifyEnable = true,
+ .BufferPitch = pipeline->binding_stride[vb],
+ .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
+ .BufferSize = buffer->size - offset
+ };
+
+ GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
+ i++;
+ }
+ }
+
+ if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
+ /* If somebody compiled a pipeline after starting a command buffer the
+ * scratch bo may have grown since we started this cmd buffer (and
+ * emitted STATE_BASE_ADDRESS). If we're binding that pipeline now,
+ * reemit STATE_BASE_ADDRESS so that we use the bigger scratch bo. */
+ if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
+ anv_cmd_buffer_emit_state_base_address(cmd_buffer);
+
+ anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
+ }
+
+#if ANV_GEN >= 9
+ /* On SKL+ the new constants don't take effect until the next corresponding
+ * 3DSTATE_BINDING_TABLE_POINTER_* command is parsed so we need to ensure
+ * that is sent. As it is, we re-emit binding tables but we could hold on
+ * to the offset of the most recent binding table and only re-emit the
+ * 3DSTATE_BINDING_TABLE_POINTER_* command.
+ */
+ cmd_buffer->state.descriptors_dirty |=
+ cmd_buffer->state.push_constants_dirty &
+ cmd_buffer->state.pipeline->active_stages;
+#endif
+
+ if (cmd_buffer->state.descriptors_dirty)
+ gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);
+
+ if (cmd_buffer->state.push_constants_dirty)
+ cmd_buffer_flush_push_constants(cmd_buffer);
+
+ if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
+ gen8_cmd_buffer_emit_viewport(cmd_buffer);
+
+ if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
+ gen7_cmd_buffer_emit_scissor(cmd_buffer);
+
+ if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
+ ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)) {
+ uint32_t sf_dw[GENX(3DSTATE_SF_length)];
+ struct GENX(3DSTATE_SF) sf = {
+ GENX(3DSTATE_SF_header),
+ .LineWidth = cmd_buffer->state.dynamic.line_width,
+ };
+ GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);
+ /* FIXME: gen9.fs */
+ anv_batch_emit_merge(&cmd_buffer->batch, sf_dw, pipeline->gen8.sf);
+ }
+
+ if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
+ ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)){
+ bool enable_bias = cmd_buffer->state.dynamic.depth_bias.bias != 0.0f ||
+ cmd_buffer->state.dynamic.depth_bias.slope != 0.0f;
+
+ uint32_t raster_dw[GENX(3DSTATE_RASTER_length)];
+ struct GENX(3DSTATE_RASTER) raster = {
+ GENX(3DSTATE_RASTER_header),
+ .GlobalDepthOffsetEnableSolid = enable_bias,
+ .GlobalDepthOffsetEnableWireframe = enable_bias,
+ .GlobalDepthOffsetEnablePoint = enable_bias,
+ .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
+ .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope,
+ .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp
+ };
+ GENX(3DSTATE_RASTER_pack)(NULL, raster_dw, &raster);
+ anv_batch_emit_merge(&cmd_buffer->batch, raster_dw,
+ pipeline->gen8.raster);
+ }
+
+ /* Stencil reference values moved from COLOR_CALC_STATE in gen8 to
+ * 3DSTATE_WM_DEPTH_STENCIL in gen9. That means the dirty bits get split
+ * across different state packets for gen8 and gen9. We handle that by
+ * using a big old #if switch here.
+ */
+#if ANV_GEN == 8
+ if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
+ ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
+ struct anv_state cc_state =
+ anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
+ GEN8_COLOR_CALC_STATE_length, 64);
+ struct GEN8_COLOR_CALC_STATE cc = {
+ .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
+ .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
+ .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
+ .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
+ .StencilReferenceValue =
+ cmd_buffer->state.dynamic.stencil_reference.front,
+ .BackFaceStencilReferenceValue =
+ cmd_buffer->state.dynamic.stencil_reference.back,
+ };
+ GEN8_COLOR_CALC_STATE_pack(NULL, cc_state.map, &cc);
+
+ if (!cmd_buffer->device->info.has_llc)
+ anv_state_clflush(cc_state);
+
+ anv_batch_emit(&cmd_buffer->batch,
+ GEN8_3DSTATE_CC_STATE_POINTERS,
+ .ColorCalcStatePointer = cc_state.offset,
+ .ColorCalcStatePointerValid = true);
+ }
+
+ if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
+ ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
+ ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
+ uint32_t wm_depth_stencil_dw[GEN8_3DSTATE_WM_DEPTH_STENCIL_length];
+
+ struct GEN8_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = {
+ GEN8_3DSTATE_WM_DEPTH_STENCIL_header,
+
+ /* Is this what we need to do? */
+ .StencilBufferWriteEnable =
+ cmd_buffer->state.dynamic.stencil_write_mask.front != 0,
+
+ .StencilTestMask =
+ cmd_buffer->state.dynamic.stencil_compare_mask.front & 0xff,
+ .StencilWriteMask =
+ cmd_buffer->state.dynamic.stencil_write_mask.front & 0xff,
+
+ .BackfaceStencilTestMask =
+ cmd_buffer->state.dynamic.stencil_compare_mask.back & 0xff,
+ .BackfaceStencilWriteMask =
+ cmd_buffer->state.dynamic.stencil_write_mask.back & 0xff,
+ };
+ GEN8_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, wm_depth_stencil_dw,
+ &wm_depth_stencil);
+
+ anv_batch_emit_merge(&cmd_buffer->batch, wm_depth_stencil_dw,
+ pipeline->gen8.wm_depth_stencil);
+ }
+#else
+ if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
+ struct anv_state cc_state =
+ anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
+ GEN9_COLOR_CALC_STATE_length, 64);
+ struct GEN9_COLOR_CALC_STATE cc = {
+ .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
+ .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
+ .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
+ .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
+ };
+ GEN9_COLOR_CALC_STATE_pack(NULL, cc_state.map, &cc);
+
+ if (!cmd_buffer->device->info.has_llc)
+ anv_state_clflush(cc_state);
+
+ anv_batch_emit(&cmd_buffer->batch,
+ GEN9_3DSTATE_CC_STATE_POINTERS,
+ .ColorCalcStatePointer = cc_state.offset,
+ .ColorCalcStatePointerValid = true);
+ }
+
+ if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
+ ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
+ ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
+ ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
+ uint32_t dwords[GEN9_3DSTATE_WM_DEPTH_STENCIL_length];
+ struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
+ struct GEN9_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil = {
+ GEN9_3DSTATE_WM_DEPTH_STENCIL_header,
+
+ .StencilBufferWriteEnable = d->stencil_write_mask.front != 0,
+
+ .StencilTestMask = d->stencil_compare_mask.front & 0xff,
+ .StencilWriteMask = d->stencil_write_mask.front & 0xff,
+
+ .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
+ .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,
+
+ .StencilReferenceValue = d->stencil_reference.front,
+ .BackfaceStencilReferenceValue = d->stencil_reference.back
+ };
+ GEN9_3DSTATE_WM_DEPTH_STENCIL_pack(NULL, dwords, &wm_depth_stencil);
+
+ anv_batch_emit_merge(&cmd_buffer->batch, dwords,
+ pipeline->gen9.wm_depth_stencil);
+ }
+#endif
+
+ if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
+ ANV_CMD_DIRTY_INDEX_BUFFER)) {
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_VF),
+ .IndexedDrawCutIndexEnable = pipeline->primitive_restart,
+ .CutIndex = cmd_buffer->state.restart_index,
+ );
+ }
+
+ cmd_buffer->state.vb_dirty &= ~vb_emit;
+ cmd_buffer->state.dirty = 0;
+}
+
+void genX(CmdDraw)(
+ VkCommandBuffer commandBuffer,
+ uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstVertex,
+ uint32_t firstInstance)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ cmd_buffer_flush_state(cmd_buffer);
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
+ .VertexAccessType = SEQUENTIAL,
+ .VertexCountPerInstance = vertexCount,
+ .StartVertexLocation = firstVertex,
+ .InstanceCount = instanceCount,
+ .StartInstanceLocation = firstInstance,
+ .BaseVertexLocation = 0);
+}
+
+void genX(CmdDrawIndexed)(
+ VkCommandBuffer commandBuffer,
+ uint32_t indexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t vertexOffset,
+ uint32_t firstInstance)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ cmd_buffer_flush_state(cmd_buffer);
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
+ .VertexAccessType = RANDOM,
+ .VertexCountPerInstance = indexCount,
+ .StartVertexLocation = firstIndex,
+ .InstanceCount = instanceCount,
+ .StartInstanceLocation = firstInstance,
+ .BaseVertexLocation = vertexOffset);
+}
+
+static void
+emit_lrm(struct anv_batch *batch,
+ uint32_t reg, struct anv_bo *bo, uint32_t offset)
+{
+ anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
+ .RegisterAddress = reg,
+ .MemoryAddress = { bo, offset });
+}
+
+static void
+emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
+{
+ anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM),
+ .RegisterOffset = reg,
+ .DataDWord = imm);
+}
+
+/* Auto-Draw / Indirect Registers */
+#define GEN7_3DPRIM_END_OFFSET 0x2420
+#define GEN7_3DPRIM_START_VERTEX 0x2430
+#define GEN7_3DPRIM_VERTEX_COUNT 0x2434
+#define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
+#define GEN7_3DPRIM_START_INSTANCE 0x243C
+#define GEN7_3DPRIM_BASE_VERTEX 0x2440
+
+void genX(CmdDrawIndirect)(
+ VkCommandBuffer commandBuffer,
+ VkBuffer _buffer,
+ VkDeviceSize offset,
+ uint32_t drawCount,
+ uint32_t stride)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
+ struct anv_bo *bo = buffer->bo;
+ uint32_t bo_offset = buffer->offset + offset;
+
+ cmd_buffer_flush_state(cmd_buffer);
+
+ emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
+ emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
+ emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
+ emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
+ emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
+ .IndirectParameterEnable = true,
+ .VertexAccessType = SEQUENTIAL);
+}
+
+void genX(CmdBindIndexBuffer)(
+ VkCommandBuffer commandBuffer,
+ VkBuffer _buffer,
+ VkDeviceSize offset,
+ VkIndexType indexType)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
+
+ static const uint32_t vk_to_gen_index_type[] = {
+ [VK_INDEX_TYPE_UINT16] = INDEX_WORD,
+ [VK_INDEX_TYPE_UINT32] = INDEX_DWORD,
+ };
+
+ static const uint32_t restart_index_for_type[] = {
+ [VK_INDEX_TYPE_UINT16] = UINT16_MAX,
+ [VK_INDEX_TYPE_UINT32] = UINT32_MAX,
+ };
+
+ cmd_buffer->state.restart_index = restart_index_for_type[indexType];
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER),
+ .IndexFormat = vk_to_gen_index_type[indexType],
+ .MemoryObjectControlState = GENX(MOCS),
+ .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
+ .BufferSize = buffer->size - offset);
+
+ cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
+}
+
+static VkResult
+flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
+{
+ struct anv_device *device = cmd_buffer->device;
+ struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ struct anv_state surfaces = { 0, }, samplers = { 0, };
+ VkResult result;
+
+ result = anv_cmd_buffer_emit_samplers(cmd_buffer,
+ MESA_SHADER_COMPUTE, &samplers);
+ if (result != VK_SUCCESS)
+ return result;
+ result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
+ MESA_SHADER_COMPUTE, &surfaces);
+ if (result != VK_SUCCESS)
+ return result;
+
+ struct anv_state push_state = anv_cmd_buffer_cs_push_constants(cmd_buffer);
+
+ const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;
+ const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
+
+ unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
+ unsigned push_constant_data_size =
- (prog_data->nr_params + local_id_dwords) * sizeof(gl_constant_value);
++ (prog_data->nr_params + local_id_dwords) * sizeof(union gl_constant_value *);
+ unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
+ unsigned push_constant_regs = reg_aligned_constant_size / 32;
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD),
+ .CURBETotalDataLength = push_state.alloc_size,
+ .CURBEDataStartAddress = push_state.offset);
+
+ struct anv_state state =
+ anv_state_pool_emit(&device->dynamic_state_pool,
+ GENX(INTERFACE_DESCRIPTOR_DATA), 64,
+ .KernelStartPointer = pipeline->cs_simd,
+ .KernelStartPointerHigh = 0,
+ .BindingTablePointer = surfaces.offset,
+ .BindingTableEntryCount = 0,
+ .SamplerStatePointer = samplers.offset,
+ .SamplerCount = 0,
+ .ConstantIndirectURBEntryReadLength = push_constant_regs,
+ .ConstantURBEntryReadOffset = 0,
+ .NumberofThreadsinGPGPUThreadGroup = 0);
+
+ uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
+ anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD),
+ .InterfaceDescriptorTotalLength = size,
+ .InterfaceDescriptorDataStartAddress = state.offset);
+
+ return VK_SUCCESS;
+}
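
The CURBE sizing in flush_compute_descriptor_set works in 32-byte registers: the parameter count plus the per-thread local-ID dwords is converted to bytes, aligned up to a whole register, and then re-expressed as a register count. A worked standalone version of that arithmetic with made-up counts, treating each slot as one 32-bit constant:

#include <stdio.h>

#define ALIGN(v, a) (((v) + (a) - 1) / (a) * (a))

int main(void)
{
   /* Illustrative values: 12 uniform params, local IDs in 2 registers. */
   const unsigned nr_params                = 12;
   const unsigned local_invocation_id_regs = 2;
   const unsigned local_id_dwords          = local_invocation_id_regs * 8;

   /* Each parameter slot holds one 32-bit constant. */
   unsigned push_constant_data_size = (nr_params + local_id_dwords) * 4;
   unsigned reg_aligned_size        = ALIGN(push_constant_data_size, 32);
   unsigned push_constant_regs      = reg_aligned_size / 32;

   printf("%u params + %u id dwords = %u bytes -> %u aligned -> %u regs\n",
          nr_params, local_id_dwords, push_constant_data_size,
          reg_aligned_size, push_constant_regs);
   return 0;
}
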
+
+static void
+cmd_buffer_flush_compute_state(struct anv_cmd_buffer *cmd_buffer)
+{
+ struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ VkResult result;
+
+ assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);
+
+ if (cmd_buffer->state.current_pipeline != GPGPU) {
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT),
+#if ANV_GEN >= 9
+ .MaskBits = 3,
+#endif
+ .PipelineSelection = GPGPU);
+ cmd_buffer->state.current_pipeline = GPGPU;
+ }
+
+ if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
+ anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
+
+ if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
+ (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
+ result = flush_compute_descriptor_set(cmd_buffer);
+ assert(result == VK_SUCCESS);
+ cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
+ }
+
+ cmd_buffer->state.compute_dirty = 0;
+}
+
+void genX(CmdDrawIndexedIndirect)(
+ VkCommandBuffer commandBuffer,
+ VkBuffer _buffer,
+ VkDeviceSize offset,
+ uint32_t drawCount,
+ uint32_t stride)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
+ struct anv_bo *bo = buffer->bo;
+ uint32_t bo_offset = buffer->offset + offset;
+
+ cmd_buffer_flush_state(cmd_buffer);
+
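+   /* The indirect buffer holds a VkDrawIndexedIndirectCommand (indexCount,
+    * instanceCount, firstIndex, vertexOffset, firstInstance); load each
+    * field into the corresponding 3DPRIM register for the 3DPRIMITIVE below.
+    */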
+ emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
+ emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
+ emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
+ emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
+ emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE),
+ .IndirectParameterEnable = true,
+ .VertexAccessType = RANDOM);
+}
+
+void genX(CmdDispatch)(
+ VkCommandBuffer commandBuffer,
+ uint32_t x,
+ uint32_t y,
+ uint32_t z)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
+
+ cmd_buffer_flush_compute_state(cmd_buffer);
+
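+   /* SIMDSize is encoded as 0 for SIMD8, 1 for SIMD16 and 2 for SIMD32, so
+    * dividing the dispatch width by 16 yields the right value.
+    */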
+ anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER),
+ .SIMDSize = prog_data->simd_size / 16,
+ .ThreadDepthCounterMaximum = 0,
+ .ThreadHeightCounterMaximum = 0,
+ .ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1,
+ .ThreadGroupIDXDimension = x,
+ .ThreadGroupIDYDimension = y,
+ .ThreadGroupIDZDimension = z,
+ .RightExecutionMask = pipeline->cs_right_mask,
+ .BottomExecutionMask = 0xffffffff);
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH));
+}
+
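+/* MMIO registers from which GPGPU_WALKER fetches the thread-group counts
+ * when IndirectParameterEnable is set.
+ */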
+#define GPGPU_DISPATCHDIMX 0x2500
+#define GPGPU_DISPATCHDIMY 0x2504
+#define GPGPU_DISPATCHDIMZ 0x2508
+
+void genX(CmdDispatchIndirect)(
+ VkCommandBuffer commandBuffer,
+ VkBuffer _buffer,
+ VkDeviceSize offset)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
+ struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
+ struct anv_bo *bo = buffer->bo;
+ uint32_t bo_offset = buffer->offset + offset;
+
+ cmd_buffer_flush_compute_state(cmd_buffer);
+
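+   /* The indirect buffer holds a VkDispatchIndirectCommand: the x, y and z
+    * thread-group counts as three consecutive dwords.
+    */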
+ emit_lrm(&cmd_buffer->batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
+ emit_lrm(&cmd_buffer->batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
+ emit_lrm(&cmd_buffer->batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER),
+ .IndirectParameterEnable = true,
+ .SIMDSize = prog_data->simd_size / 16,
+ .ThreadDepthCounterMaximum = 0,
+ .ThreadHeightCounterMaximum = 0,
+ .ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1,
+ .RightExecutionMask = pipeline->cs_right_mask,
+ .BottomExecutionMask = 0xffffffff);
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH));
+}
+
+static void
+cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
+{
+ const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
+ const struct anv_image_view *iview =
+ anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
+ const struct anv_image *image = iview ? iview->image : NULL;
+ const bool has_depth = iview && iview->format->depth_format;
+ const bool has_stencil = iview && iview->format->has_stencil;
+
+ /* FIXME: Implement the PMA stall W/A */
+ /* FIXME: Width and Height are wrong */
+
+ /* Emit 3DSTATE_DEPTH_BUFFER */
+ if (has_depth) {
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER),
+ .SurfaceType = SURFTYPE_2D,
+ .DepthWriteEnable = iview->format->depth_format,
+ .StencilWriteEnable = has_stencil,
+ .HierarchicalDepthBufferEnable = false,
+ .SurfaceFormat = iview->format->depth_format,
+ .SurfacePitch = image->depth_surface.isl.row_pitch - 1,
+ .SurfaceBaseAddress = {
+ .bo = image->bo,
+ .offset = image->depth_surface.offset,
+ },
+ .Height = fb->height - 1,
+ .Width = fb->width - 1,
+ .LOD = 0,
+ .Depth = 1 - 1,
+ .MinimumArrayElement = 0,
+ .DepthBufferObjectControlState = GENX(MOCS),
+ .RenderTargetViewExtent = 1 - 1,
+ .SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->depth_surface.isl) >> 2);
+ } else {
+ /* Even when no depth buffer is present, the hardware requires that
+ * 3DSTATE_DEPTH_BUFFER be programmed correctly. The Broadwell PRM says:
+ *
+ * If a null depth buffer is bound, the driver must instead bind depth as:
+ * 3DSTATE_DEPTH.SurfaceType = SURFTYPE_2D
+ * 3DSTATE_DEPTH.Width = 1
+ * 3DSTATE_DEPTH.Height = 1
+    *       3DSTATE_DEPTH.SurfaceFormat = D16_UNORM
+ * 3DSTATE_DEPTH.SurfaceBaseAddress = 0
+ * 3DSTATE_DEPTH.HierarchicalDepthBufferEnable = 0
+ * 3DSTATE_WM_DEPTH_STENCIL.DepthTestEnable = 0
+ * 3DSTATE_WM_DEPTH_STENCIL.DepthBufferWriteEnable = 0
+ *
+    * The PRM is wrong, though. The width and height must be programmed to
+    * the actual framebuffer's width and height, even when neither a depth
+    * buffer nor a stencil buffer is present.
+ */
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER),
+ .SurfaceType = SURFTYPE_2D,
+ .SurfaceFormat = D16_UNORM,
+ .Width = fb->width - 1,
+ .Height = fb->height - 1,
+ .StencilWriteEnable = has_stencil);
+ }
+
+ /* Emit 3DSTATE_STENCIL_BUFFER */
+ if (has_stencil) {
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER),
+ .StencilBufferEnable = true,
+ .StencilBufferObjectControlState = GENX(MOCS),
+
+ /* Stencil buffers have strange pitch. The PRM says:
+ *
+ * The pitch must be set to 2x the value computed based on width,
+ * as the stencil buffer is stored with two rows interleaved.
+ */
+ .SurfacePitch = 2 * image->stencil_surface.isl.row_pitch - 1,
+
+ .SurfaceBaseAddress = {
+ .bo = image->bo,
+ .offset = image->offset + image->stencil_surface.offset,
+ },
+ .SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->stencil_surface.isl) >> 2);
+ } else {
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER));
+ }
+
+   /* Disable hierarchical depth buffers. */
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER));
+
+ /* Clear the clear params. */
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS));
+}
+
+void
+genX(cmd_buffer_begin_subpass)(struct anv_cmd_buffer *cmd_buffer,
+ struct anv_subpass *subpass)
+{
+ cmd_buffer->state.subpass = subpass;
+
+ cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
+
+ cmd_buffer_emit_depth_stencil(cmd_buffer);
+}
+
+void genX(CmdBeginRenderPass)(
+ VkCommandBuffer commandBuffer,
+ const VkRenderPassBeginInfo* pRenderPassBegin,
+ VkSubpassContents contents)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
+ ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
+
+ cmd_buffer->state.framebuffer = framebuffer;
+ cmd_buffer->state.pass = pass;
+
+ const VkRect2D *render_area = &pRenderPassBegin->renderArea;
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DRAWING_RECTANGLE),
+ .ClippedDrawingRectangleYMin = render_area->offset.y,
+ .ClippedDrawingRectangleXMin = render_area->offset.x,
+ .ClippedDrawingRectangleYMax =
+ render_area->offset.y + render_area->extent.height - 1,
+ .ClippedDrawingRectangleXMax =
+ render_area->offset.x + render_area->extent.width - 1,
+ .DrawingRectangleOriginY = 0,
+ .DrawingRectangleOriginX = 0);
+
+ anv_cmd_buffer_clear_attachments(cmd_buffer, pass,
+ pRenderPassBegin->pClearValues);
+
+ genX(cmd_buffer_begin_subpass)(cmd_buffer, pass->subpasses);
+}
+
+void genX(CmdNextSubpass)(
+ VkCommandBuffer commandBuffer,
+ VkSubpassContents contents)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
+
+ genX(cmd_buffer_begin_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);
+}
+
+void genX(CmdEndRenderPass)(
+ VkCommandBuffer commandBuffer)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+
+ /* Emit a flushing pipe control at the end of a pass. This is kind of a
+ * hack but it ensures that render targets always actually get written.
+ * Eventually, we should do flushing based on image format transitions
+ * or something of that nature.
+ */
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
+ .PostSyncOperation = NoWrite,
+ .RenderTargetCacheFlushEnable = true,
+ .InstructionCacheInvalidateEnable = true,
+ .DepthCacheFlushEnable = true,
+ .VFCacheInvalidationEnable = true,
+ .TextureCacheInvalidationEnable = true,
+ .CommandStreamerStallEnable = true);
+}
+
+static void
+emit_ps_depth_count(struct anv_batch *batch,
+ struct anv_bo *bo, uint32_t offset)
+{
+ anv_batch_emit(batch, GENX(PIPE_CONTROL),
+ .DestinationAddressType = DAT_PPGTT,
+ .PostSyncOperation = WritePSDepthCount,
+ .Address = { bo, offset }); /* FIXME: This is only lower 32 bits */
+}
+
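+/* Occlusion queries work by taking a PS depth count snapshot at the start
+ * and at the end of the query; the two values land in the first and second
+ * halves of the slot and CmdCopyQueryPoolResults subtracts them to produce
+ * the final count.
+ */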
+void genX(CmdBeginQuery)(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t entry,
+ VkQueryControlFlags flags)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
+
+ switch (pool->type) {
+ case VK_QUERY_TYPE_OCCLUSION:
+ emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
+ entry * sizeof(struct anv_query_pool_slot));
+ break;
+
+ case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+ default:
+ unreachable("");
+ }
+}
+
+void genX(CmdEndQuery)(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t entry)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
+
+ switch (pool->type) {
+ case VK_QUERY_TYPE_OCCLUSION:
+ emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
+ entry * sizeof(struct anv_query_pool_slot) + 8);
+ break;
+
+ case VK_QUERY_TYPE_PIPELINE_STATISTICS:
+ default:
+ unreachable("");
+ }
+}
+
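+/* MMIO address of the render command streamer's TIMESTAMP register. */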
+#define TIMESTAMP 0x2358
+
+void genX(CmdWriteTimestamp)(
+ VkCommandBuffer commandBuffer,
+ VkPipelineStageFlagBits pipelineStage,
+ VkQueryPool queryPool,
+ uint32_t entry)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
+
+ assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);
+
+ switch (pipelineStage) {
+ case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
+ anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
+ .RegisterAddress = TIMESTAMP,
+ .MemoryAddress = { &pool->bo, entry * 8 });
+ anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
+ .RegisterAddress = TIMESTAMP + 4,
+ .MemoryAddress = { &pool->bo, entry * 8 + 4 });
+ break;
+
+ default:
+ /* Everything else is bottom-of-pipe */
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
+ .DestinationAddressType = DAT_PPGTT,
+ .PostSyncOperation = WriteTimestamp,
+ .Address = /* FIXME: This is only lower 32 bits */
+ { &pool->bo, entry * 8 });
+ break;
+ }
+}
+
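+/* Each MI_MATH ALU instruction is a single dword: opcode in bits 31:20,
+ * operand1 in bits 19:10 and operand2 in bits 9:0.
+ */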
+#define alu_opcode(v) __gen_field((v), 20, 31)
+#define alu_operand1(v) __gen_field((v), 10, 19)
+#define alu_operand2(v) __gen_field((v), 0, 9)
+#define alu(opcode, operand1, operand2) \
+ alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)
+
+#define OPCODE_NOOP 0x000
+#define OPCODE_LOAD 0x080
+#define OPCODE_LOADINV 0x480
+#define OPCODE_LOAD0 0x081
+#define OPCODE_LOAD1 0x481
+#define OPCODE_ADD 0x100
+#define OPCODE_SUB 0x101
+#define OPCODE_AND 0x102
+#define OPCODE_OR 0x103
+#define OPCODE_XOR 0x104
+#define OPCODE_STORE 0x180
+#define OPCODE_STOREINV 0x580
+
+#define OPERAND_R0 0x00
+#define OPERAND_R1 0x01
+#define OPERAND_R2 0x02
+#define OPERAND_R3 0x03
+#define OPERAND_R4 0x04
+#define OPERAND_SRCA 0x20
+#define OPERAND_SRCB 0x21
+#define OPERAND_ACCU 0x31
+#define OPERAND_ZF 0x32
+#define OPERAND_CF 0x33
+
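+/* The command streamer's general purpose registers are 64 bits wide and
+ * start at MMIO address 0x2600.
+ */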
+#define CS_GPR(n) (0x2600 + (n) * 8)
+
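+/* MI_LOAD_REGISTER_MEM loads a single dword, so a 64-bit GPR is filled with
+ * two back-to-back 32-bit loads.
+ */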
+static void
+emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
+ struct anv_bo *bo, uint32_t offset)
+{
+ anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
+ .RegisterAddress = reg,
+ .MemoryAddress = { bo, offset });
+ anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM),
+ .RegisterAddress = reg + 4,
+ .MemoryAddress = { bo, offset + 4 });
+}
+
+void genX(CmdCopyQueryPoolResults)(
+ VkCommandBuffer commandBuffer,
+ VkQueryPool queryPool,
+ uint32_t startQuery,
+ uint32_t queryCount,
+ VkBuffer destBuffer,
+ VkDeviceSize destOffset,
+ VkDeviceSize destStride,
+ VkQueryResultFlags flags)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
+ ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
+ uint32_t slot_offset, dst_offset;
+
+ if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
+      /* Where is the availability info supposed to go? */
+ anv_finishme("VK_QUERY_RESULT_WITH_AVAILABILITY_BIT");
+ return;
+ }
+
+ assert(pool->type == VK_QUERY_TYPE_OCCLUSION);
+
+ /* FIXME: If we're not waiting, should we just do this on the CPU? */
+ if (flags & VK_QUERY_RESULT_WAIT_BIT)
+ anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL),
+ .CommandStreamerStallEnable = true,
+ .StallAtPixelScoreboard = true);
+
+ dst_offset = buffer->offset + destOffset;
+ for (uint32_t i = 0; i < queryCount; i++) {
+
+ slot_offset = (startQuery + i) * sizeof(struct anv_query_pool_slot);
+
+ emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0), &pool->bo, slot_offset);
+ emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(1), &pool->bo, slot_offset + 8);
+
+      /* FIXME: We need to clamp the result for 32-bit results. */
+
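+      /* The MI_MATH program below computes R2 = R1 - R0: the PS depth count
+       * at the end of the query minus the count at the beginning.
+       */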
+ uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
+ dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
+ dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
+ dw[3] = alu(OPCODE_SUB, 0, 0);
+ dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
+
+ anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
+ .RegisterAddress = CS_GPR(2),
+ /* FIXME: This is only lower 32 bits */
+ .MemoryAddress = { buffer->bo, dst_offset });
+
+ if (flags & VK_QUERY_RESULT_64_BIT)
+ anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM),
+ .RegisterAddress = CS_GPR(2) + 4,
+ /* FIXME: This is only lower 32 bits */
+ .MemoryAddress = { buffer->bo, dst_offset + 4 });
+
+ dst_offset += destStride;
+ }
+}