X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fcompiler%2Fspirv%2Fspirv_to_nir.c;h=d3ad2d13ed86cc4c25e73332490dbdaab11478b4;hb=b831b8d2e1ec00f11207343e131d74e53fe2c4a5;hp=de2cebc3f2d3cf61137253499099925250a908ba;hpb=9adfa695ac144cfd7c9bb9d0c63d280861f3add4;p=mesa.git

diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index de2cebc3f2d..d3ad2d13ed8 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -31,6 +31,14 @@
 #include "nir/nir_constant_expressions.h"
 #include "spirv_info.h"
 
+struct spec_constant_value {
+   bool is_double;
+   union {
+      uint32_t data32;
+      uint64_t data64;
+   };
+};
+
 void
 _vtn_warn(const char *file, int line, const char *msg, ...)
 {
@@ -96,16 +104,18 @@ vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
    switch (glsl_get_base_type(type)) {
    case GLSL_TYPE_INT:
    case GLSL_TYPE_UINT:
+   case GLSL_TYPE_INT64:
+   case GLSL_TYPE_UINT64:
    case GLSL_TYPE_BOOL:
    case GLSL_TYPE_FLOAT:
-   case GLSL_TYPE_DOUBLE:
+   case GLSL_TYPE_DOUBLE: {
+      int bit_size = glsl_get_bit_size(type);
       if (glsl_type_is_vector_or_scalar(type)) {
          unsigned num_components = glsl_get_vector_elements(val->type);
          nir_load_const_instr *load =
-            nir_load_const_instr_create(b->shader, num_components, 32);
+            nir_load_const_instr_create(b->shader, num_components, bit_size);
 
-         for (unsigned i = 0; i < num_components; i++)
-            load->value.u32[i] = constant->value.u[i];
+         load->value = constant->values[0];
 
          nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
          val->def = &load->def;
@@ -119,10 +129,9 @@ vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
             struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
             col_val->type = glsl_get_column_type(val->type);
             nir_load_const_instr *load =
-               nir_load_const_instr_create(b->shader, rows, 32);
+               nir_load_const_instr_create(b->shader, rows, bit_size);
 
-            for (unsigned j = 0; j < rows; j++)
-               load->value.u32[j] = constant->value.u[rows * i + j];
+            load->value = constant->values[i];
 
             nir_instr_insert_before_cf_list(&b->impl->body, &load->instr);
             col_val->def = &load->def;
@@ -131,6 +140,7 @@ vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
          }
       }
       break;
+   }
 
    case GLSL_TYPE_ARRAY: {
       unsigned elems = glsl_get_length(val->type);
@@ -412,6 +422,8 @@ vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
    switch (glsl_get_base_type(src->type)) {
    case GLSL_TYPE_INT:
    case GLSL_TYPE_UINT:
+   case GLSL_TYPE_INT64:
+   case GLSL_TYPE_UINT64:
    case GLSL_TYPE_BOOL:
    case GLSL_TYPE_FLOAT:
    case GLSL_TYPE_DOUBLE:
@@ -517,7 +529,6 @@ struct_member_decoration_cb(struct vtn_builder *b,
       break;
 
    case SpvDecorationPatch:
-      vtn_warn("Tessellation not yet supported");
       break;
 
    case SpvDecorationSpecId:
@@ -551,9 +562,12 @@ struct_member_decoration_cb(struct vtn_builder *b,
    case SpvDecorationFPRoundingMode:
    case SpvDecorationFPFastMathMode:
    case SpvDecorationAlignment:
-      vtn_warn("Decoraiton only allowed for CL-style kernels: %s",
+      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
       break;
+
+   default:
+      unreachable("Unhandled decoration");
    }
 }
 
@@ -602,7 +616,7 @@ type_decoration_cb(struct vtn_builder *b,
    case SpvDecorationOffset:
    case SpvDecorationXfbBuffer:
    case SpvDecorationXfbStride:
-      vtn_warn("Decoraiton only allowed for struct members: %s",
+      vtn_warn("Decoration only allowed for struct members: %s",
              spirv_decoration_to_string(dec->decoration));
       break;
 
@@ -618,7 +632,7 @@
    case SpvDecorationLinkageAttributes:
    case SpvDecorationNoContraction:
    case SpvDecorationInputAttachmentIndex:
-      vtn_warn("Decoraiton not allowed on types: %s",
+      vtn_warn("Decoration not allowed on types: %s",
              spirv_decoration_to_string(dec->decoration));
       break;
 
@@ -628,9 +642,12 @@ type_decoration_cb(struct vtn_builder *b,
    case SpvDecorationFPRoundingMode:
    case SpvDecorationFPFastMathMode:
    case SpvDecorationAlignment:
-      vtn_warn("Decoraiton only allowed for CL-style kernels: %s",
+      vtn_warn("Decoration only allowed for CL-style kernels: %s",
              spirv_decoration_to_string(dec->decoration));
       break;
+
+   default:
+      unreachable("Unhandled decoration");
    }
 }
 
@@ -702,13 +719,19 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
       val->type->type = glsl_bool_type();
       break;
    case SpvOpTypeInt: {
+      int bit_size = w[2];
       const bool signedness = w[3];
-      val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
+      if (bit_size == 64)
+         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
+      else
+         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
       break;
    }
-   case SpvOpTypeFloat:
-      val->type->type = glsl_float_type();
+   case SpvOpTypeFloat: {
+      int bit_size = w[2];
+      val->type->type = bit_size == 64 ? glsl_double_type() : glsl_float_type();
       break;
+   }
 
    case SpvOpTypeVector: {
       struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
@@ -752,7 +775,7 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
          length = 0;
       } else {
          length =
-            vtn_value(b, w[3], vtn_value_type_constant)->constant->value.u[0];
+            vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0].u32[0];
       }
 
       val->type->type = glsl_array_type(array_element->type, length);
@@ -845,8 +868,12 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
          val->type->access_qualifier = SpvAccessQualifierReadWrite;
 
       if (multisampled) {
-         assert(dim == GLSL_SAMPLER_DIM_2D);
-         dim = GLSL_SAMPLER_DIM_MS;
+         if (dim == GLSL_SAMPLER_DIM_2D)
+            dim = GLSL_SAMPLER_DIM_MS;
+         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
+            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
+         else
+            assert(!"Unsupported multisampled image type");
       }
 
       val->type->image_format = translate_image_format(format);
@@ -855,7 +882,6 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
          val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
                                              glsl_get_base_type(sampled_type));
       } else if (sampled == 2) {
-         assert((dim == GLSL_SAMPLER_DIM_SUBPASS) || format);
          assert(!is_shadow);
          val->type->type = glsl_image_type(dim, is_array,
                                            glsl_get_base_type(sampled_type));
@@ -899,6 +925,8 @@ vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
    switch (glsl_get_base_type(type)) {
    case GLSL_TYPE_INT:
    case GLSL_TYPE_UINT:
+   case GLSL_TYPE_INT64:
+   case GLSL_TYPE_UINT64:
    case GLSL_TYPE_BOOL:
    case GLSL_TYPE_FLOAT:
    case GLSL_TYPE_DOUBLE:
@@ -932,7 +960,7 @@ vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
 }
 
 static void
-spec_constant_deocoration_cb(struct vtn_builder *b, struct vtn_value *v,
+spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
                              int member, const struct vtn_decoration *dec,
                              void *data)
 {
@@ -940,11 +968,14 @@ spec_constant_deocoration_cb(struct vtn_builder *b, struct vtn_value *v,
    if (dec->decoration != SpvDecorationSpecId)
       return;
 
-   uint32_t *const_value = data;
+   struct spec_constant_value *const_value = data;
 
    for (unsigned i = 0; i < b->num_specializations; i++) {
       if (b->specializations[i].id == dec->literals[0]) {
-         *const_value = b->specializations[i].data;
+         if (const_value->is_double)
+            const_value->data64 = b->specializations[i].data64;
+         else
+            const_value->data32 = b->specializations[i].data32;
          return;
       }
    }
@@ -954,8 +985,22 @@ static uint32_t
 get_specialization(struct vtn_builder *b, struct vtn_value *val,
                    uint32_t const_value)
 {
-   vtn_foreach_decoration(b, val, spec_constant_deocoration_cb, &const_value);
-   return const_value;
+   struct spec_constant_value data;
+   data.is_double = false;
+   data.data32 = const_value;
+   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
+   return data.data32;
+}
+
+static uint64_t
+get_specialization64(struct vtn_builder *b, struct vtn_value *val,
+                     uint64_t const_value)
+{
+   struct spec_constant_value data;
+   data.is_double = true;
+   data.data64 = const_value;
+   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
+   return data.data64;
 }
 
 static void
@@ -972,9 +1017,9 @@ handle_workgroup_size_decoration_cb(struct vtn_builder *b,
 
    assert(val->const_type == glsl_vector_type(GLSL_TYPE_UINT, 3));
 
-   b->shader->info.cs.local_size[0] = val->constant->value.u[0];
-   b->shader->info.cs.local_size[1] = val->constant->value.u[1];
-   b->shader->info.cs.local_size[2] = val->constant->value.u[2];
+   b->shader->info->cs.local_size[0] = val->constant->values[0].u32[0];
+   b->shader->info->cs.local_size[1] = val->constant->values[0].u32[1];
+   b->shader->info->cs.local_size[2] = val->constant->values[0].u32[2];
 }
 
 static void
@@ -987,11 +1032,11 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
    switch (opcode) {
    case SpvOpConstantTrue:
       assert(val->const_type == glsl_bool_type());
-      val->constant->value.u[0] = NIR_TRUE;
+      val->constant->values[0].u32[0] = NIR_TRUE;
       break;
    case SpvOpConstantFalse:
       assert(val->const_type == glsl_bool_type());
-      val->constant->value.u[0] = NIR_FALSE;
+      val->constant->values[0].u32[0] = NIR_FALSE;
      break;
 
   case SpvOpSpecConstantTrue:
@@ -999,18 +1044,33 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
       assert(val->const_type == glsl_bool_type());
       uint32_t int_val =
          get_specialization(b, val, (opcode == SpvOpSpecConstantTrue));
-      val->constant->value.u[0] = int_val ? NIR_TRUE : NIR_FALSE;
+      val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;
       break;
    }
 
-   case SpvOpConstant:
+   case SpvOpConstant: {
       assert(glsl_type_is_scalar(val->const_type));
-      val->constant->value.u[0] = w[3];
+      int bit_size = glsl_get_bit_size(val->const_type);
+      if (bit_size == 64) {
+         val->constant->values->u32[0] = w[3];
+         val->constant->values->u32[1] = w[4];
+      } else {
+         assert(bit_size == 32);
+         val->constant->values->u32[0] = w[3];
+      }
       break;
-   case SpvOpSpecConstant:
+   }
+   case SpvOpSpecConstant: {
       assert(glsl_type_is_scalar(val->const_type));
-      val->constant->value.u[0] = get_specialization(b, val, w[3]);
+      val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
+      int bit_size = glsl_get_bit_size(val->const_type);
+      if (bit_size == 64)
+         val->constant->values[0].u64[0] =
+            get_specialization64(b, val, vtn_u64_literal(&w[3]));
+      else
+         val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
       break;
+   }
    case SpvOpSpecConstantComposite:
    case SpvOpConstantComposite: {
       unsigned elem_count = count - 3;
@@ -1021,23 +1081,31 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
       switch (glsl_get_base_type(val->const_type)) {
       case GLSL_TYPE_UINT:
       case GLSL_TYPE_INT:
+      case GLSL_TYPE_UINT64:
+      case GLSL_TYPE_INT64:
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_BOOL:
+      case GLSL_TYPE_DOUBLE: {
+         int bit_size = glsl_get_bit_size(val->const_type);
         if (glsl_type_is_matrix(val->const_type)) {
-            unsigned rows = glsl_get_vector_elements(val->const_type);
            assert(glsl_get_matrix_columns(val->const_type) == elem_count);
            for (unsigned i = 0; i < elem_count; i++)
-               for (unsigned j = 0; j < rows; j++)
-                  val->constant->value.u[rows * i + j] = elems[i]->value.u[j];
+               val->constant->values[i] = elems[i]->values[0];
         } else {
            assert(glsl_type_is_vector(val->const_type));
            assert(glsl_get_vector_elements(val->const_type) == elem_count);
-            for (unsigned i = 0; i < elem_count; i++)
-               val->constant->value.u[i] = elems[i]->value.u[0];
+            for (unsigned i = 0; i < elem_count; i++) {
+               if (bit_size == 64) {
+                  val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
+               } else {
+                  assert(bit_size == 32);
+                  val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
+               }
+            }
         }
         ralloc_free(elems);
         break;
-
+      }
      case GLSL_TYPE_STRUCT:
      case GLSL_TYPE_ARRAY:
         ralloc_steal(val->constant, elems);
@@ -1055,23 +1123,75 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
       SpvOp opcode = get_specialization(b, val, w[3]);
       switch (opcode) {
       case SpvOpVectorShuffle: {
-         struct vtn_value *v0 = vtn_value(b, w[4], vtn_value_type_constant);
-         struct vtn_value *v1 = vtn_value(b, w[5], vtn_value_type_constant);
-         unsigned len0 = glsl_get_vector_elements(v0->const_type);
-         unsigned len1 = glsl_get_vector_elements(v1->const_type);
-
-         uint32_t u[8];
-         for (unsigned i = 0; i < len0; i++)
-            u[i] = v0->constant->value.u[i];
-         for (unsigned i = 0; i < len1; i++)
-            u[len0 + i] = v1->constant->value.u[i];
-
-         for (unsigned i = 0; i < count - 6; i++) {
-            uint32_t comp = w[i + 6];
-            if (comp == (uint32_t)-1) {
-               val->constant->value.u[i] = 0xdeadbeef;
-            } else {
-               val->constant->value.u[i] = u[comp];
+         struct vtn_value *v0 = &b->values[w[4]];
+         struct vtn_value *v1 = &b->values[w[5]];
+
+         assert(v0->value_type == vtn_value_type_constant ||
+                v0->value_type == vtn_value_type_undef);
+         assert(v1->value_type == vtn_value_type_constant ||
+                v1->value_type == vtn_value_type_undef);
+
+         unsigned len0 = v0->value_type == vtn_value_type_constant ?
+            glsl_get_vector_elements(v0->const_type) :
+            glsl_get_vector_elements(v0->type->type);
+         unsigned len1 = v1->value_type == vtn_value_type_constant ?
+            glsl_get_vector_elements(v1->const_type) :
+            glsl_get_vector_elements(v1->type->type);
+
+         assert(len0 + len1 < 16);
+
+         unsigned bit_size = glsl_get_bit_size(val->const_type);
+         unsigned bit_size0 = v0->value_type == vtn_value_type_constant ?
+            glsl_get_bit_size(v0->const_type) :
+            glsl_get_bit_size(v0->type->type);
+         unsigned bit_size1 = v1->value_type == vtn_value_type_constant ?
+            glsl_get_bit_size(v1->const_type) :
+            glsl_get_bit_size(v1->type->type);
+
+         assert(bit_size == bit_size0 && bit_size == bit_size1);
+         (void)bit_size0; (void)bit_size1;
+
+         if (bit_size == 64) {
+            uint64_t u64[8];
+            if (v0->value_type == vtn_value_type_constant) {
+               for (unsigned i = 0; i < len0; i++)
+                  u64[i] = v0->constant->values[0].u64[i];
+            }
+            if (v1->value_type == vtn_value_type_constant) {
+               for (unsigned i = 0; i < len1; i++)
+                  u64[len0 + i] = v1->constant->values[0].u64[i];
+            }
+
+            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
+               uint32_t comp = w[i + 6];
+               /* If component is not used, set the value to a known constant
+                * to detect if it is wrongly used.
+                */
+               if (comp == (uint32_t)-1)
+                  val->constant->values[0].u64[j] = 0xdeadbeefdeadbeef;
+               else
+                  val->constant->values[0].u64[j] = u64[comp];
+            }
+         } else {
+            uint32_t u32[8];
+            if (v0->value_type == vtn_value_type_constant) {
+               for (unsigned i = 0; i < len0; i++)
+                  u32[i] = v0->constant->values[0].u32[i];
+            }
+            if (v1->value_type == vtn_value_type_constant) {
+               for (unsigned i = 0; i < len1; i++)
+                  u32[len0 + i] = v1->constant->values[0].u32[i];
+            }
+
+            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
+               uint32_t comp = w[i + 6];
+               /* If component is not used, set the value to a known constant
+                * to detect if it is wrongly used.
+                */
+               if (comp == (uint32_t)-1)
+                  val->constant->values[0].u32[j] = 0xdeadbeef;
+               else
+                  val->constant->values[0].u32[j] = u32[comp];
             }
          }
          break;
@@ -1095,23 +1215,26 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
       }
 
       int elem = -1;
+      int col = 0;
       const struct glsl_type *type = comp->const_type;
       for (unsigned i = deref_start; i < count; i++) {
          switch (glsl_get_base_type(type)) {
          case GLSL_TYPE_UINT:
          case GLSL_TYPE_INT:
+         case GLSL_TYPE_UINT64:
+         case GLSL_TYPE_INT64:
          case GLSL_TYPE_FLOAT:
+         case GLSL_TYPE_DOUBLE:
         case GLSL_TYPE_BOOL:
            /* If we hit this granularity, we're picking off an element */
-            if (elem < 0)
-               elem = 0;
-
            if (glsl_type_is_matrix(type)) {
-               elem += w[i] * glsl_get_vector_elements(type);
+               assert(col == 0 && elem == -1);
+               col = w[i];
+               elem = 0;
               type = glsl_get_column_type(type);
            } else {
-               assert(glsl_type_is_vector(type));
-               elem += w[i];
+               assert(elem <= 0 && glsl_type_is_vector(type));
+               elem = w[i];
               type = glsl_scalar_type(glsl_get_base_type(type));
            }
            continue;
@@ -1136,8 +1259,14 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
             val->constant = *c;
          } else {
             unsigned num_components = glsl_get_vector_elements(type);
+            unsigned bit_size = glsl_get_bit_size(type);
             for (unsigned i = 0; i < num_components; i++)
-               val->constant->value.u[i] = (*c)->value.u[elem + i];
+               if (bit_size == 64) {
+                  val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
+               } else {
+                  assert(bit_size == 32);
+                  val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
+               }
          }
       } else {
          struct vtn_value *insert =
@@ -1147,8 +1276,14 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
             *c = insert->constant;
          } else {
             unsigned num_components = glsl_get_vector_elements(type);
+            unsigned bit_size = glsl_get_bit_size(type);
             for (unsigned i = 0; i < num_components; i++)
-               (*c)->value.u[elem + i] = insert->constant->value.u[i];
+               if (bit_size == 64) {
+                  (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
+               } else {
+                  assert(bit_size == 32);
+                  (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
+               }
          }
       }
       break;
@@ -1156,7 +1291,9 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
 
       default: {
          bool swap;
-         nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap);
+         nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->const_type);
+         nir_alu_type src_alu_type = dst_alu_type;
+         nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap, src_alu_type, dst_alu_type);
 
          unsigned num_components = glsl_get_vector_elements(val->const_type);
         unsigned bit_size =
@@ -1170,16 +1307,11 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
 
             unsigned j = swap ? 1 - i : i;
             assert(bit_size == 32);
-            for (unsigned k = 0; k < num_components; k++)
-               src[j].u32[k] = c->value.u[k];
+            src[j] = c->values[0];
         }
 
-         nir_const_value res = nir_eval_const_opcode(op, num_components,
-                                                     bit_size, src);
-
-         for (unsigned k = 0; k < num_components; k++)
-            val->constant->value.u[k] = res.u32[k];
-
+         val->constant->values[0] =
+            nir_eval_const_opcode(op, num_components, bit_size, src);
         break;
      } /* default */
      }
@@ -1215,7 +1347,7 @@ vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
       struct vtn_value *arg = vtn_untyped_value(b, arg_id);
       if (arg->value_type == vtn_value_type_access_chain) {
          nir_deref_var *d = vtn_access_chain_to_deref(b, arg->access_chain);
-         call->params[i] = nir_deref_as_var(nir_copy_deref(call, &d->deref));
+         call->params[i] = nir_deref_var_clone(d, call);
       } else {
          struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);
 
@@ -1260,6 +1392,8 @@ vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
    switch (glsl_get_base_type(type)) {
    case GLSL_TYPE_INT:
    case GLSL_TYPE_UINT:
+   case GLSL_TYPE_INT64:
+   case GLSL_TYPE_UINT64:
    case GLSL_TYPE_BOOL:
    case GLSL_TYPE_FLOAT:
    case GLSL_TYPE_DOUBLE:
@@ -1434,7 +1568,8 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
          coord_components++;
 
       coord = vtn_ssa_value(b, w[idx++])->def;
-      p->src = nir_src_for_ssa(coord);
+      p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
+                                            (1 << coord_components) - 1));
       p->src_type = nir_tex_src_coord;
       p++;
       break;
@@ -1469,13 +1604,13 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
    case SpvOpImageSampleProjDrefExplicitLod:
    case SpvOpImageDrefGather:
       /* These all have an explicit depth value as their next source */
-      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparitor);
+      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
       break;
 
    case SpvOpImageGather:
       /* This has a component as its next source */
       gather_component =
-         vtn_value(b, w[idx++], vtn_value_type_constant)->constant->value.u[0];
+         vtn_value(b, w[idx++], vtn_value_type_constant)->constant->values[0].u32[0];
       break;
 
    default:
@@ -1487,6 +1622,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
       (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
 
    /* Now we need to handle some number of optional arguments */
+   const struct vtn_ssa_value *gather_offsets = NULL;
    if (idx < count) {
       uint32_t operands = w[idx++];
 
@@ -1513,8 +1649,10 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
           operands & SpvImageOperandsConstOffsetMask)
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);
 
-      if (operands & SpvImageOperandsConstOffsetsMask)
-         assert(!"Constant offsets to texture gather not yet implemented");
+      if (operands & SpvImageOperandsConstOffsetsMask) {
+         gather_offsets = vtn_ssa_value(b, w[idx++]);
+         (*p++) = (nir_tex_src){};
+      }
 
       if (operands & SpvImageOperandsSampleMask) {
         assert(texop == nir_texop_txf_ms);
@@ -1548,20 +1686,23 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
    }
 
    nir_deref_var *sampler = vtn_access_chain_to_deref(b, sampled.sampler);
+   nir_deref_var *texture;
    if (sampled.image) {
       nir_deref_var *image = vtn_access_chain_to_deref(b, sampled.image);
-      instr->texture = nir_deref_as_var(nir_copy_deref(instr, &image->deref));
+      texture = image;
    } else {
-      instr->texture = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));
+      texture = sampler;
    }
 
+   instr->texture = nir_deref_var_clone(texture, instr);
+
    switch (instr->op) {
    case nir_texop_tex:
    case nir_texop_txb:
    case nir_texop_txl:
    case nir_texop_txd:
       /* These operations require a sampler */
-      instr->sampler = nir_deref_as_var(nir_copy_deref(instr, &sampler->deref));
+      instr->sampler = nir_deref_var_clone(sampler, instr);
       break;
    case nir_texop_txf:
    case nir_texop_txf_ms:
@@ -1584,10 +1725,65 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
    assert(glsl_get_vector_elements(ret_type->type) ==
           nir_tex_instr_dest_size(instr));
 
+   nir_ssa_def *def;
+   nir_instr *instruction;
+   if (gather_offsets) {
+      assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY);
+      assert(glsl_get_length(gather_offsets->type) == 4);
+      nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL};
+
+      /* Copy the current instruction 4x */
+      for (uint32_t i = 1; i < 4; i++) {
+         instrs[i] = nir_tex_instr_create(b->shader, instr->num_srcs);
+         instrs[i]->op = instr->op;
+         instrs[i]->coord_components = instr->coord_components;
+         instrs[i]->sampler_dim = instr->sampler_dim;
+         instrs[i]->is_array = instr->is_array;
+         instrs[i]->is_shadow = instr->is_shadow;
+         instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
+         instrs[i]->component = instr->component;
+         instrs[i]->dest_type = instr->dest_type;
+         instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]);
+         instrs[i]->sampler = NULL;
+
+         memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));
+
+         nir_ssa_dest_init(&instrs[i]->instr, &instrs[i]->dest,
+                           nir_tex_instr_dest_size(instr), 32, NULL);
+      }
+
+      /* Fill in the last argument with the offset from the passed in offsets
+       * and insert the instruction into the stream.
+       */
+      for (uint32_t i = 0; i < 4; i++) {
+         nir_tex_src src;
+         src.src = nir_src_for_ssa(gather_offsets->elems[i]->def);
+         src.src_type = nir_tex_src_offset;
+         instrs[i]->src[instrs[i]->num_srcs - 1] = src;
+         nir_builder_instr_insert(&b->nb, &instrs[i]->instr);
+      }
+
+      /* Combine the results of the 4 instructions by taking their .w
+       * components
+       */
+      nir_alu_instr *vec4 = nir_alu_instr_create(b->shader, nir_op_vec4);
+      nir_ssa_dest_init(&vec4->instr, &vec4->dest.dest, 4, 32, NULL);
+      vec4->dest.write_mask = 0xf;
+      for (uint32_t i = 0; i < 4; i++) {
+         vec4->src[i].src = nir_src_for_ssa(&instrs[i]->dest.ssa);
+         vec4->src[i].swizzle[0] = 3;
+      }
+      def = &vec4->dest.dest.ssa;
+      instruction = &vec4->instr;
+   } else {
+      def = &instr->dest.ssa;
+      instruction = &instr->instr;
+   }
+
    val->ssa = vtn_create_ssa_value(b, ret_type->type);
-   val->ssa->def = &instr->dest.ssa;
+   val->ssa->def = def;
 
-   nir_builder_instr_insert(&b->nb, &instr->instr);
+   nir_builder_instr_insert(&b->nb, instruction);
 }
 
 static void
@@ -1754,8 +1950,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
    nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
 
    nir_deref_var *image_deref = vtn_access_chain_to_deref(b, image.image);
-   intrin->variables[0] =
-      nir_deref_as_var(nir_copy_deref(&intrin->instr, &image_deref->deref));
+   intrin->variables[0] = nir_deref_var_clone(image_deref, intrin);
 
    /* ImageQuerySize doesn't take any extra parameters */
    if (opcode != SpvOpImageQuerySize) {
@@ -1803,17 +1998,21 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
    if (opcode != SpvOpImageWrite) {
       struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
       struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
-      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 4, 32, NULL);
+
+      unsigned dest_components =
+         nir_intrinsic_infos[intrin->intrinsic].dest_components;
+      if (intrin->intrinsic == nir_intrinsic_image_size) {
+         dest_components = intrin->num_components =
+            glsl_get_vector_elements(type->type);
+      }
+
+      nir_ssa_dest_init(&intrin->instr, &intrin->dest,
+                        dest_components, 32, NULL);
 
       nir_builder_instr_insert(&b->nb, &intrin->instr);
 
-      /* The image intrinsics always return 4 channels but we may not want
-       * that many. Emit a mov to trim it down.
-       */
-      unsigned swiz[4] = {0, 1, 2, 3};
       val->ssa = vtn_create_ssa_value(b, type->type);
-      val->ssa->def = nir_swizzle(&b->nb, &intrin->dest.ssa, swiz,
-                                  glsl_get_vector_elements(type->type), false);
+      val->ssa->def = &intrin->dest.ssa;
    } else {
       nir_builder_instr_insert(&b->nb, &intrin->instr);
    }
@@ -1914,10 +2113,10 @@ vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
 
    if (chain->var->mode == vtn_variable_mode_workgroup) {
       struct vtn_type *type = chain->var->type;
-      nir_deref *deref = &vtn_access_chain_to_deref(b, chain)->deref;
+      nir_deref_var *deref = vtn_access_chain_to_deref(b, chain);
       nir_intrinsic_op op = get_shared_nir_atomic_op(opcode);
       atomic = nir_intrinsic_instr_create(b->nb.shader, op);
-      atomic->variables[0] = nir_deref_as_var(nir_copy_deref(atomic, deref));
+      atomic->variables[0] = nir_deref_var_clone(deref, atomic);
 
       switch (opcode) {
       case SpvOpAtomicLoad:
@@ -2155,9 +2354,17 @@ vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
    nir_alu_instr *vec = create_vec(b->shader, num_components,
                                    srcs[0]->bit_size);
 
+   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
+    *
+    *    "When constructing a vector, there must be at least two Constituent
+    *    operands."
+    */
+   assert(num_srcs >= 2);
+
    unsigned dest_idx = 0;
    for (unsigned i = 0; i < num_srcs; i++) {
       nir_ssa_def *src = srcs[i];
+      assert(dest_idx + src->num_components <= num_components);
       for (unsigned j = 0; j < src->num_components; j++) {
          vec->src[dest_idx].src = nir_src_for_ssa(src);
          vec->src[dest_idx].swizzle[0] = j;
@@ -2165,6 +2372,13 @@ vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
       }
    }
 
+   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
+    *
+    *    "When constructing a vector, the total number of components in all
+    *    the operands must equal the number of components in Result Type."
+    */
+   assert(dest_idx == num_components);
+
    nir_builder_instr_insert(&b->nb, &vec->instr);
 
    return &vec->dest.dest.ssa;
@@ -2408,6 +2622,12 @@ stage_for_execution_model(SpvExecutionModel model)
    }
 }
 
+#define spv_check_supported(name, cap) do {                 \
+      if (!(b->ext && b->ext->name))                        \
+         vtn_warn("Unsupported SPIR-V capability: %s",      \
+                  spirv_capability_to_string(cap));         \
+   } while(0)
+
 static bool
 vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
                                 const uint32_t *w, unsigned count)
@@ -2443,38 +2663,38 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
       case SpvCapabilityInterpolationFunction:
       case SpvCapabilityMultiViewport:
       case SpvCapabilitySampleRateShading:
-         break;
-
       case SpvCapabilityClipDistance:
       case SpvCapabilityCullDistance:
+      case SpvCapabilityInputAttachment:
+      case SpvCapabilityImageGatherExtended:
+      case SpvCapabilityStorageImageExtendedFormats:
+         break;
+
       case SpvCapabilityGeometryStreams:
-      case SpvCapabilityTessellation:
-      case SpvCapabilityTessellationPointSize:
       case SpvCapabilityLinkage:
       case SpvCapabilityVector16:
       case SpvCapabilityFloat16Buffer:
       case SpvCapabilityFloat16:
-      case SpvCapabilityFloat64:
-      case SpvCapabilityInt64:
       case SpvCapabilityInt64Atomics:
       case SpvCapabilityAtomicStorage:
       case SpvCapabilityInt16:
-      case SpvCapabilityImageGatherExtended:
       case SpvCapabilityStorageImageMultisample:
       case SpvCapabilityImageCubeArray:
       case SpvCapabilityInt8:
-      case SpvCapabilityInputAttachment:
       case SpvCapabilitySparseResidency:
       case SpvCapabilityMinLod:
-      case SpvCapabilityImageMSArray:
-      case SpvCapabilityStorageImageExtendedFormats:
       case SpvCapabilityTransformFeedback:
-      case SpvCapabilityStorageImageReadWithoutFormat:
-      case SpvCapabilityStorageImageWriteWithoutFormat:
          vtn_warn("Unsupported SPIR-V capability: %s",
                   spirv_capability_to_string(cap));
         break;
 
+      case SpvCapabilityFloat64:
+         spv_check_supported(float64, cap);
+         break;
+      case SpvCapabilityInt64:
+         spv_check_supported(int64, cap);
+         break;
+
       case SpvCapabilityAddresses:
       case SpvCapabilityKernel:
       case SpvCapabilityImageBasic:
@@ -2488,6 +2708,30 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
         vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;
+
+      case SpvCapabilityImageMSArray:
+         spv_check_supported(image_ms_array, cap);
+         break;
+
+      case SpvCapabilityTessellation:
+      case SpvCapabilityTessellationPointSize:
+         spv_check_supported(tessellation, cap);
+         break;
+
+      case SpvCapabilityDrawParameters:
+         spv_check_supported(draw_parameters, cap);
+         break;
+
+      case SpvCapabilityStorageImageReadWithoutFormat:
+         spv_check_supported(image_read_without_format, cap);
+         break;
+
+      case SpvCapabilityStorageImageWriteWithoutFormat:
+         spv_check_supported(image_write_without_format, cap);
+         break;
+
+      default:
+         unreachable("Unhandled capability");
       }
       break;
    }
@@ -2560,43 +2804,48 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
 
    case SpvExecutionModeEarlyFragmentTests:
       assert(b->shader->stage == MESA_SHADER_FRAGMENT);
-      b->shader->info.fs.early_fragment_tests = true;
+      b->shader->info->fs.early_fragment_tests = true;
       break;
 
    case SpvExecutionModeInvocations:
       assert(b->shader->stage == MESA_SHADER_GEOMETRY);
-      b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
+      b->shader->info->gs.invocations = MAX2(1, mode->literals[0]);
       break;
 
    case SpvExecutionModeDepthReplacing:
       assert(b->shader->stage == MESA_SHADER_FRAGMENT);
-      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
+      b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
       break;
    case SpvExecutionModeDepthGreater:
       assert(b->shader->stage == MESA_SHADER_FRAGMENT);
-      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
+      b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
       break;
    case SpvExecutionModeDepthLess:
       assert(b->shader->stage == MESA_SHADER_FRAGMENT);
-      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
+      b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
       break;
   case SpvExecutionModeDepthUnchanged:
       assert(b->shader->stage == MESA_SHADER_FRAGMENT);
-      b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
+      b->shader->info->fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
      break;
 
    case SpvExecutionModeLocalSize:
       assert(b->shader->stage == MESA_SHADER_COMPUTE);
-      b->shader->info.cs.local_size[0] = mode->literals[0];
-      b->shader->info.cs.local_size[1] = mode->literals[1];
-      b->shader->info.cs.local_size[2] = mode->literals[2];
+      b->shader->info->cs.local_size[0] = mode->literals[0];
+      b->shader->info->cs.local_size[1] = mode->literals[1];
+      b->shader->info->cs.local_size[2] = mode->literals[2];
       break;
    case SpvExecutionModeLocalSizeHint:
       break; /* Nothing to do with this */
 
    case SpvExecutionModeOutputVertices:
-      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
-      b->shader->info.gs.vertices_out = mode->literals[0];
+      if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
+          b->shader->stage == MESA_SHADER_TESS_EVAL) {
+         b->shader->info->tess.tcs_vertices_out = mode->literals[0];
+      } else {
+         assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+         b->shader->info->gs.vertices_out = mode->literals[0];
+      }
       break;
 
    case SpvExecutionModeInputPoints:
@@ -2606,11 +2855,14 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
    case SpvExecutionModeInputTrianglesAdjacency:
    case SpvExecutionModeQuads:
    case SpvExecutionModeIsolines:
-      if (b->shader->stage == MESA_SHADER_GEOMETRY) {
-         b->shader->info.gs.vertices_in =
-            vertices_in_from_spv_execution_mode(mode->exec_mode);
+      if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
+          b->shader->stage == MESA_SHADER_TESS_EVAL) {
+         b->shader->info->tess.primitive_mode =
+            gl_primitive_from_spv_execution_mode(mode->exec_mode);
       } else {
-         assert(!"Tesselation shaders not yet supported");
+         assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+         b->shader->info->gs.vertices_in =
+            vertices_in_from_spv_execution_mode(mode->exec_mode);
       }
       break;
 
@@ -2618,17 +2870,44 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
    case SpvExecutionModeOutputLineStrip:
    case SpvExecutionModeOutputTriangleStrip:
       assert(b->shader->stage == MESA_SHADER_GEOMETRY);
-      b->shader->info.gs.output_primitive =
+      b->shader->info->gs.output_primitive =
          gl_primitive_from_spv_execution_mode(mode->exec_mode);
       break;
 
    case SpvExecutionModeSpacingEqual:
+      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+             b->shader->stage == MESA_SHADER_TESS_EVAL);
+      b->shader->info->tess.spacing = TESS_SPACING_EQUAL;
+      break;
    case SpvExecutionModeSpacingFractionalEven:
+      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+             b->shader->stage == MESA_SHADER_TESS_EVAL);
+      b->shader->info->tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
+      break;
    case SpvExecutionModeSpacingFractionalOdd:
+      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+             b->shader->stage == MESA_SHADER_TESS_EVAL);
+      b->shader->info->tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
+      break;
    case SpvExecutionModeVertexOrderCw:
+      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+             b->shader->stage == MESA_SHADER_TESS_EVAL);
+      /* Vulkan's notion of CCW seems to match the hardware backends,
+       * but be the opposite of OpenGL. Currently NIR follows GL semantics,
+       * so we set it backwards here.
+       */
+      b->shader->info->tess.ccw = true;
+      break;
    case SpvExecutionModeVertexOrderCcw:
+      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+             b->shader->stage == MESA_SHADER_TESS_EVAL);
+      /* Backwards; see above */
+      b->shader->info->tess.ccw = false;
+      break;
    case SpvExecutionModePointMode:
-      assert(!"TODO: Add tessellation metadata");
+      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+             b->shader->stage == MESA_SHADER_TESS_EVAL);
+      b->shader->info->tess.point_mode = true;
       break;
 
    case SpvExecutionModePixelCenterInteger:
@@ -2642,6 +2921,9 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
    case SpvExecutionModeVecTypeHint:
    case SpvExecutionModeContractionOff:
       break; /* OpenCL */
+
+   default:
+      unreachable("Unhandled execution mode");
    }
 }
 
@@ -2707,6 +2989,7 @@ vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
       vtn_handle_constant(b, opcode, w, count);
       break;
 
+   case SpvOpUndef:
    case SpvOpVariable:
       vtn_handle_variables(b, opcode, w, count);
       break;
@@ -2962,6 +3245,7 @@ nir_function *
 spirv_to_nir(const uint32_t *words, size_t word_count,
              struct nir_spirv_specialization *spec, unsigned num_spec,
              gl_shader_stage stage, const char *entry_point_name,
+             const struct nir_spirv_supported_extensions *ext,
              const nir_shader_compiler_options *options)
 {
    const uint32_t *word_end = words + word_count;
@@ -2984,6 +3268,7 @@ spirv_to_nir(const uint32_t *words, size_t word_count,
    exec_list_make_empty(&b->functions);
    b->entry_point_stage = stage;
    b->entry_point_name = entry_point_name;
+   b->ext = ext;
 
    /* Handle all the preamble instructions */
    words = vtn_foreach_instruction(b, words, word_end,
@@ -2995,10 +3280,10 @@ spirv_to_nir(const uint32_t *words, size_t word_count,
       return NULL;
    }
 
-   b->shader = nir_shader_create(NULL, stage, options);
+   b->shader = nir_shader_create(NULL, stage, options, NULL);
 
    /* Set shader info defaults */
-   b->shader->info.gs.invocations = 1;
+   b->shader->info->gs.invocations = 1;
 
    /* Parse execution modes */
    vtn_foreach_execution_mode(b, b->entry_point,