X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fcompiler%2Fspirv%2Fspirv_to_nir.c;h=d3ad2d13ed86cc4c25e73332490dbdaab11478b4;hb=7d41bf8d7b4a094bdfa725d68053ab21a1365ad5;hp=9dc93e20202a240cd49bd10e875151bd3261aedf;hpb=d1bbe2c94e218545714b22da31fa38710acf36e4;p=mesa.git

diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index 9dc93e20202..d3ad2d13ed8 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -31,6 +31,14 @@
 #include "nir/nir_constant_expressions.h"
 #include "spirv_info.h"
 
+struct spec_constant_value {
+   bool is_double;
+   union {
+      uint32_t data32;
+      uint64_t data64;
+   };
+};
+
 void
 _vtn_warn(const char *file, int line, const char *msg, ...)
 {
@@ -96,13 +104,16 @@ vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
    switch (glsl_get_base_type(type)) {
    case GLSL_TYPE_INT:
    case GLSL_TYPE_UINT:
+   case GLSL_TYPE_INT64:
+   case GLSL_TYPE_UINT64:
    case GLSL_TYPE_BOOL:
    case GLSL_TYPE_FLOAT:
-   case GLSL_TYPE_DOUBLE:
+   case GLSL_TYPE_DOUBLE: {
+      int bit_size = glsl_get_bit_size(type);
       if (glsl_type_is_vector_or_scalar(type)) {
          unsigned num_components = glsl_get_vector_elements(val->type);
          nir_load_const_instr *load =
-            nir_load_const_instr_create(b->shader, num_components, 32);
+            nir_load_const_instr_create(b->shader, num_components, bit_size);
 
          load->value = constant->values[0];
 
@@ -118,7 +129,7 @@ vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
          struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
          col_val->type = glsl_get_column_type(val->type);
          nir_load_const_instr *load =
-            nir_load_const_instr_create(b->shader, rows, 32);
+            nir_load_const_instr_create(b->shader, rows, bit_size);
 
          load->value = constant->values[i];
 
@@ -129,6 +140,7 @@ vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
          }
       }
       break;
+   }
 
    case GLSL_TYPE_ARRAY: {
       unsigned elems = glsl_get_length(val->type);
@@ -410,6 +422,8 @@ vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
    switch (glsl_get_base_type(src->type)) {
    case GLSL_TYPE_INT:
    case GLSL_TYPE_UINT:
+   case GLSL_TYPE_INT64:
+   case GLSL_TYPE_UINT64:
    case GLSL_TYPE_BOOL:
    case GLSL_TYPE_FLOAT:
    case GLSL_TYPE_DOUBLE:
@@ -515,7 +529,6 @@ struct_member_decoration_cb(struct vtn_builder *b,
       break;
 
    case SpvDecorationPatch:
-      vtn_warn("Tessellation not yet supported");
       break;
 
    case SpvDecorationSpecId:
@@ -549,9 +562,12 @@ struct_member_decoration_cb(struct vtn_builder *b,
    case SpvDecorationFPRoundingMode:
    case SpvDecorationFPFastMathMode:
    case SpvDecorationAlignment:
-      vtn_warn("Decoraiton only allowed for CL-style kernels: %s",
+      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));
       break;
+
+   default:
+      unreachable("Unhandled decoration");
    }
 }
 
@@ -600,7 +616,7 @@ type_decoration_cb(struct vtn_builder *b,
    case SpvDecorationOffset:
    case SpvDecorationXfbBuffer:
    case SpvDecorationXfbStride:
-      vtn_warn("Decoraiton only allowed for struct members: %s",
+      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));
       break;
 
@@ -616,7 +632,7 @@ type_decoration_cb(struct vtn_builder *b,
    case SpvDecorationLinkageAttributes:
    case SpvDecorationNoContraction:
    case SpvDecorationInputAttachmentIndex:
-      vtn_warn("Decoraiton not allowed on types: %s",
+      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));
       break;
 
@@ -626,9 +642,12 @@ type_decoration_cb(struct vtn_builder *b,
    case SpvDecorationFPRoundingMode:
    case SpvDecorationFPFastMathMode:
    case SpvDecorationAlignment:
-      vtn_warn("Decoraiton only allowed for CL-style kernels: %s",
+      vtn_warn("Decoration only allowed for CL-style kernels: %s",
              spirv_decoration_to_string(dec->decoration));
       break;
+
+   default:
+      unreachable("Unhandled decoration");
    }
 }
 
@@ -700,13 +719,19 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
       val->type->type = glsl_bool_type();
       break;
    case SpvOpTypeInt: {
+      int bit_size = w[2];
       const bool signedness = w[3];
-      val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
+      if (bit_size == 64)
+         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
+      else
+         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
       break;
    }
-   case SpvOpTypeFloat:
-      val->type->type = glsl_float_type();
+   case SpvOpTypeFloat: {
+      int bit_size = w[2];
+      val->type->type = bit_size == 64 ? glsl_double_type() : glsl_float_type();
       break;
+   }
 
    case SpvOpTypeVector: {
       struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
@@ -843,8 +868,12 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
       val->type->access_qualifier = SpvAccessQualifierReadWrite;
 
    if (multisampled) {
-      assert(dim == GLSL_SAMPLER_DIM_2D);
-      dim = GLSL_SAMPLER_DIM_MS;
+      if (dim == GLSL_SAMPLER_DIM_2D)
+         dim = GLSL_SAMPLER_DIM_MS;
+      else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
+         dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
+      else
+         assert(!"Unsupported multisampled image type");
    }
 
    val->type->image_format = translate_image_format(format);
@@ -853,7 +882,6 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
       val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
                                           glsl_get_base_type(sampled_type));
    } else if (sampled == 2) {
-      assert((dim == GLSL_SAMPLER_DIM_SUBPASS) || format);
       assert(!is_shadow);
       val->type->type = glsl_image_type(dim, is_array,
                                         glsl_get_base_type(sampled_type));
@@ -897,6 +925,8 @@ vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
    switch (glsl_get_base_type(type)) {
    case GLSL_TYPE_INT:
    case GLSL_TYPE_UINT:
+   case GLSL_TYPE_INT64:
+   case GLSL_TYPE_UINT64:
    case GLSL_TYPE_BOOL:
    case GLSL_TYPE_FLOAT:
    case GLSL_TYPE_DOUBLE:
@@ -938,11 +968,14 @@ spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
    if (dec->decoration != SpvDecorationSpecId)
       return;
 
-   uint32_t *const_value = data;
+   struct spec_constant_value *const_value = data;
 
    for (unsigned i = 0; i < b->num_specializations; i++) {
       if (b->specializations[i].id == dec->literals[0]) {
-         *const_value = b->specializations[i].data;
+         if (const_value->is_double)
+            const_value->data64 = b->specializations[i].data64;
+         else
+            const_value->data32 = b->specializations[i].data32;
          return;
       }
    }
@@ -952,8 +985,22 @@ static uint32_t
 get_specialization(struct vtn_builder *b, struct vtn_value *val,
                    uint32_t const_value)
 {
-   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &const_value);
-   return const_value;
+   struct spec_constant_value data;
+   data.is_double = false;
+   data.data32 = const_value;
+   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
+   return data.data32;
+}
+
+static uint64_t
+get_specialization64(struct vtn_builder *b, struct vtn_value *val,
+                     uint64_t const_value)
+{
+   struct spec_constant_value data;
+   data.is_double = true;
+   data.data64 = const_value;
+   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
+   return data.data64;
 }
 
 static void
@@ -1001,14 +1048,29 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
       break;
    }
 
-   case SpvOpConstant:
+   case SpvOpConstant: {
       assert(glsl_type_is_scalar(val->const_type));
-      val->constant->values[0].u32[0] = w[3];
+      int bit_size = glsl_get_bit_size(val->const_type);
+      if (bit_size == 64) {
+         val->constant->values->u32[0] = w[3];
+         val->constant->values->u32[1] = w[4];
+      } else {
+         assert(bit_size == 32);
+         val->constant->values->u32[0] = w[3];
+      }
       break;
-   case SpvOpSpecConstant:
+   }
+   case SpvOpSpecConstant: {
       assert(glsl_type_is_scalar(val->const_type));
-      val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
+      int bit_size = glsl_get_bit_size(val->const_type);
+      if (bit_size == 64)
+         val->constant->values[0].u64[0] =
+            get_specialization64(b, val, vtn_u64_literal(&w[3]));
+      else
+         val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
       break;
+   }
    case SpvOpSpecConstantComposite:
    case SpvOpConstantComposite: {
       unsigned elem_count = count - 3;
@@ -1019,8 +1081,12 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
       switch (glsl_get_base_type(val->const_type)) {
       case GLSL_TYPE_UINT:
       case GLSL_TYPE_INT:
+      case GLSL_TYPE_UINT64:
+      case GLSL_TYPE_INT64:
       case GLSL_TYPE_FLOAT:
       case GLSL_TYPE_BOOL:
+      case GLSL_TYPE_DOUBLE: {
+         int bit_size = glsl_get_bit_size(val->const_type);
          if (glsl_type_is_matrix(val->const_type)) {
             assert(glsl_get_matrix_columns(val->const_type) == elem_count);
             for (unsigned i = 0; i < elem_count; i++)
@@ -1028,12 +1094,18 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
          } else {
             assert(glsl_type_is_vector(val->const_type));
             assert(glsl_get_vector_elements(val->const_type) == elem_count);
-            for (unsigned i = 0; i < elem_count; i++)
-               val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
+            for (unsigned i = 0; i < elem_count; i++) {
+               if (bit_size == 64) {
+                  val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
+               } else {
+                  assert(bit_size == 32);
+                  val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
+               }
+            }
          }
          ralloc_free(elems);
          break;
-
+      }
       case GLSL_TYPE_STRUCT:
       case GLSL_TYPE_ARRAY:
          ralloc_steal(val->constant, elems);
@@ -1051,23 +1123,75 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
       SpvOp opcode = get_specialization(b, val, w[3]);
       switch (opcode) {
       case SpvOpVectorShuffle: {
-         struct vtn_value *v0 = vtn_value(b, w[4], vtn_value_type_constant);
-         struct vtn_value *v1 = vtn_value(b, w[5], vtn_value_type_constant);
-         unsigned len0 = glsl_get_vector_elements(v0->const_type);
-         unsigned len1 = glsl_get_vector_elements(v1->const_type);
-
-         uint32_t u[8];
-         for (unsigned i = 0; i < len0; i++)
-            u[i] = v0->constant->values[0].u32[i];
-         for (unsigned i = 0; i < len1; i++)
-            u[len0 + i] = v1->constant->values[0].u32[i];
-
-         for (unsigned i = 0; i < count - 6; i++) {
-            uint32_t comp = w[i + 6];
-            if (comp == (uint32_t)-1) {
-               val->constant->values[0].u32[i] = 0xdeadbeef;
-            } else {
-               val->constant->values[0].u32[i] = u[comp];
+         struct vtn_value *v0 = &b->values[w[4]];
+         struct vtn_value *v1 = &b->values[w[5]];
+
+         assert(v0->value_type == vtn_value_type_constant ||
+                v0->value_type == vtn_value_type_undef);
+         assert(v1->value_type == vtn_value_type_constant ||
+                v1->value_type == vtn_value_type_undef);
+
+         unsigned len0 = v0->value_type == vtn_value_type_constant ?
+            glsl_get_vector_elements(v0->const_type) :
+            glsl_get_vector_elements(v0->type->type);
+         unsigned len1 = v1->value_type == vtn_value_type_constant ?
+            glsl_get_vector_elements(v1->const_type) :
+            glsl_get_vector_elements(v1->type->type);
+
+         assert(len0 + len1 < 16);
+
+         unsigned bit_size = glsl_get_bit_size(val->const_type);
+         unsigned bit_size0 = v0->value_type == vtn_value_type_constant ?
+            glsl_get_bit_size(v0->const_type) :
+            glsl_get_bit_size(v0->type->type);
+         unsigned bit_size1 = v1->value_type == vtn_value_type_constant ?
+            glsl_get_bit_size(v1->const_type) :
+            glsl_get_bit_size(v1->type->type);
+
+         assert(bit_size == bit_size0 && bit_size == bit_size1);
+         (void)bit_size0; (void)bit_size1;
+
+         if (bit_size == 64) {
+            uint64_t u64[8];
+            if (v0->value_type == vtn_value_type_constant) {
+               for (unsigned i = 0; i < len0; i++)
+                  u64[i] = v0->constant->values[0].u64[i];
+            }
+            if (v1->value_type == vtn_value_type_constant) {
+               for (unsigned i = 0; i < len1; i++)
+                  u64[len0 + i] = v1->constant->values[0].u64[i];
+            }
+
+            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
+               uint32_t comp = w[i + 6];
+               /* If component is not used, set the value to a known constant
+                * to detect if it is wrongly used.
+                */
+               if (comp == (uint32_t)-1)
+                  val->constant->values[0].u64[j] = 0xdeadbeefdeadbeef;
+               else
+                  val->constant->values[0].u64[j] = u64[comp];
+            }
+         } else {
+            uint32_t u32[8];
+            if (v0->value_type == vtn_value_type_constant) {
+               for (unsigned i = 0; i < len0; i++)
+                  u32[i] = v0->constant->values[0].u32[i];
+            }
+            if (v1->value_type == vtn_value_type_constant) {
+               for (unsigned i = 0; i < len1; i++)
+                  u32[len0 + i] = v1->constant->values[0].u32[i];
+            }
+
+            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
+               uint32_t comp = w[i + 6];
+               /* If component is not used, set the value to a known constant
+                * to detect if it is wrongly used.
+                */
+               if (comp == (uint32_t)-1)
+                  val->constant->values[0].u32[j] = 0xdeadbeef;
+               else
+                  val->constant->values[0].u32[j] = u32[comp];
             }
          }
          break;
@@ -1097,7 +1221,10 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
             switch (glsl_get_base_type(type)) {
             case GLSL_TYPE_UINT:
             case GLSL_TYPE_INT:
+            case GLSL_TYPE_UINT64:
+            case GLSL_TYPE_INT64:
             case GLSL_TYPE_FLOAT:
+            case GLSL_TYPE_DOUBLE:
             case GLSL_TYPE_BOOL:
                /* If we hit this granularity, we're picking off an element */
                if (glsl_type_is_matrix(type)) {
@@ -1132,8 +1259,14 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
               val->constant = *c;
            } else {
               unsigned num_components = glsl_get_vector_elements(type);
+              unsigned bit_size = glsl_get_bit_size(type);
               for (unsigned i = 0; i < num_components; i++)
-                 val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
+                 if (bit_size == 64) {
+                    val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
+                 } else {
+                    assert(bit_size == 32);
+                    val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
+                 }
            }
         } else {
            struct vtn_value *insert =
@@ -1143,8 +1276,14 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
               *c = insert->constant;
            } else {
               unsigned num_components = glsl_get_vector_elements(type);
+              unsigned bit_size = glsl_get_bit_size(type);
               for (unsigned i = 0; i < num_components; i++)
-                 (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
+                 if (bit_size == 64) {
+                    (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
+                 } else {
+                    assert(bit_size == 32);
+                    (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
+                 }
            }
         }
         break;
@@ -1152,7 +1291,9 @@ vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
 
       default: {
          bool swap;
-         nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap);
+         nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->const_type);
+         nir_alu_type src_alu_type = dst_alu_type;
+         nir_op op = vtn_nir_alu_op_for_spirv_opcode(opcode, &swap, src_alu_type, dst_alu_type);
 
          unsigned num_components = glsl_get_vector_elements(val->const_type);
          unsigned bit_size =
@@ -1251,6 +1392,8 @@ vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
    switch (glsl_get_base_type(type)) {
    case GLSL_TYPE_INT:
    case GLSL_TYPE_UINT:
+   case GLSL_TYPE_INT64:
+   case GLSL_TYPE_UINT64:
    case GLSL_TYPE_BOOL:
    case GLSL_TYPE_FLOAT:
    case GLSL_TYPE_DOUBLE:
@@ -1425,7 +1568,8 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
          coord_components++;
 
       coord = vtn_ssa_value(b, w[idx++])->def;
-      p->src = nir_src_for_ssa(coord);
+      p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
+                                            (1 << coord_components) - 1));
       p->src_type = nir_tex_src_coord;
       p++;
       break;
@@ -1854,17 +1998,21 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
    if (opcode != SpvOpImageWrite) {
       struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
       struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
-      nir_ssa_dest_init(&intrin->instr, &intrin->dest, 4, 32, NULL);
+
+      unsigned dest_components =
+         nir_intrinsic_infos[intrin->intrinsic].dest_components;
+      if (intrin->intrinsic == nir_intrinsic_image_size) {
+         dest_components = intrin->num_components =
+            glsl_get_vector_elements(type->type);
+      }
+
+      nir_ssa_dest_init(&intrin->instr, &intrin->dest,
+                        dest_components, 32, NULL);
 
       nir_builder_instr_insert(&b->nb, &intrin->instr);
 
-      /* The image intrinsics always return 4 channels but we may not want
-       * that many.  Emit a mov to trim it down.
-       */
-      unsigned swiz[4] = {0, 1, 2, 3};
       val->ssa = vtn_create_ssa_value(b, type->type);
-      val->ssa->def = nir_swizzle(&b->nb, &intrin->dest.ssa, swiz,
-                                  glsl_get_vector_elements(type->type), false);
+      val->ssa->def = &intrin->dest.ssa;
    } else {
       nir_builder_instr_insert(&b->nb, &intrin->instr);
    }
@@ -2206,9 +2354,17 @@ vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
    nir_alu_instr *vec = create_vec(b->shader, num_components,
                                    srcs[0]->bit_size);
 
+   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
+    *
+    *    "When constructing a vector, there must be at least two Constituent
+    *    operands."
+    */
+   assert(num_srcs >= 2);
+
    unsigned dest_idx = 0;
    for (unsigned i = 0; i < num_srcs; i++) {
       nir_ssa_def *src = srcs[i];
+      assert(dest_idx + src->num_components <= num_components);
       for (unsigned j = 0; j < src->num_components; j++) {
          vec->src[dest_idx].src = nir_src_for_ssa(src);
          vec->src[dest_idx].swizzle[0] = j;
@@ -2216,6 +2372,13 @@ vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
       }
    }
 
+   /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
+    *
+    *    "When constructing a vector, the total number of components in all
+    *    the operands must equal the number of components in Result Type."
+    */
+   assert(dest_idx == num_components);
+
    nir_builder_instr_insert(&b->nb, &vec->instr);
 
    return &vec->dest.dest.ssa;
@@ -2508,14 +2671,10 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
          break;
 
       case SpvCapabilityGeometryStreams:
-      case SpvCapabilityTessellation:
-      case SpvCapabilityTessellationPointSize:
      case SpvCapabilityLinkage:
      case SpvCapabilityVector16:
      case SpvCapabilityFloat16Buffer:
      case SpvCapabilityFloat16:
-      case SpvCapabilityFloat64:
-      case SpvCapabilityInt64:
      case SpvCapabilityInt64Atomics:
      case SpvCapabilityAtomicStorage:
      case SpvCapabilityInt16:
@@ -2525,12 +2684,17 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
      case SpvCapabilitySparseResidency:
      case SpvCapabilityMinLod:
      case SpvCapabilityTransformFeedback:
-      case SpvCapabilityStorageImageReadWithoutFormat:
-      case SpvCapabilityStorageImageWriteWithoutFormat:
         vtn_warn("Unsupported SPIR-V capability: %s",
                  spirv_capability_to_string(cap));
         break;
 
+      case SpvCapabilityFloat64:
+         spv_check_supported(float64, cap);
+         break;
+      case SpvCapabilityInt64:
+         spv_check_supported(int64, cap);
+         break;
+
      case SpvCapabilityAddresses:
      case SpvCapabilityKernel:
      case SpvCapabilityImageBasic:
@@ -2548,6 +2712,26 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
      case SpvCapabilityImageMSArray:
         spv_check_supported(image_ms_array, cap);
         break;
+
+      case SpvCapabilityTessellation:
+      case SpvCapabilityTessellationPointSize:
+         spv_check_supported(tessellation, cap);
+         break;
+
+      case SpvCapabilityDrawParameters:
+         spv_check_supported(draw_parameters, cap);
+         break;
+
+      case SpvCapabilityStorageImageReadWithoutFormat:
+         spv_check_supported(image_read_without_format, cap);
+         break;
+
+      case SpvCapabilityStorageImageWriteWithoutFormat:
+         spv_check_supported(image_write_without_format, cap);
+         break;
+
+      default:
+         unreachable("Unhandled capability");
      }
      break;
   }
@@ -2655,8 +2839,13 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
      break; /* Nothing to do with this */
 
   case SpvExecutionModeOutputVertices:
-      assert(b->shader->stage == MESA_SHADER_GEOMETRY);
-      b->shader->info->gs.vertices_out = mode->literals[0];
+      if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
+          b->shader->stage == MESA_SHADER_TESS_EVAL) {
+         b->shader->info->tess.tcs_vertices_out = mode->literals[0];
+      } else {
+         assert(b->shader->stage == MESA_SHADER_GEOMETRY);
+         b->shader->info->gs.vertices_out = mode->literals[0];
+      }
      break;
 
   case SpvExecutionModeInputPoints:
@@ -2666,11 +2855,14 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
   case SpvExecutionModeInputTrianglesAdjacency:
   case SpvExecutionModeQuads:
   case SpvExecutionModeIsolines:
-      if (b->shader->stage == MESA_SHADER_GEOMETRY) {
+      if (b->shader->stage == MESA_SHADER_TESS_CTRL ||
+          b->shader->stage == MESA_SHADER_TESS_EVAL) {
+         b->shader->info->tess.primitive_mode =
+            gl_primitive_from_spv_execution_mode(mode->exec_mode);
+      } else {
+         assert(b->shader->stage == MESA_SHADER_GEOMETRY);
         b->shader->info->gs.vertices_in =
            vertices_in_from_spv_execution_mode(mode->exec_mode);
-      } else {
-         assert(!"Tesselation shaders not yet supported");
      }
      break;
 
@@ -2683,12 +2875,39 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
      break;
 
   case SpvExecutionModeSpacingEqual:
+      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+             b->shader->stage == MESA_SHADER_TESS_EVAL);
+      b->shader->info->tess.spacing = TESS_SPACING_EQUAL;
+      break;
   case SpvExecutionModeSpacingFractionalEven:
+      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+             b->shader->stage == MESA_SHADER_TESS_EVAL);
+      b->shader->info->tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
+      break;
   case SpvExecutionModeSpacingFractionalOdd:
+      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+             b->shader->stage == MESA_SHADER_TESS_EVAL);
+      b->shader->info->tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
+      break;
   case SpvExecutionModeVertexOrderCw:
+      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+             b->shader->stage == MESA_SHADER_TESS_EVAL);
+      /* Vulkan's notion of CCW seems to match the hardware backends,
+       * but be the opposite of OpenGL.  Currently NIR follows GL semantics,
+       * so we set it backwards here.
+       */
+      b->shader->info->tess.ccw = true;
+      break;
   case SpvExecutionModeVertexOrderCcw:
+      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+             b->shader->stage == MESA_SHADER_TESS_EVAL);
+      /* Backwards; see above */
+      b->shader->info->tess.ccw = false;
+      break;
   case SpvExecutionModePointMode:
-      assert(!"TODO: Add tessellation metadata");
+      assert(b->shader->stage == MESA_SHADER_TESS_CTRL ||
+             b->shader->stage == MESA_SHADER_TESS_EVAL);
+      b->shader->info->tess.point_mode = true;
      break;
 
   case SpvExecutionModePixelCenterInteger:
@@ -2702,6 +2921,9 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
   case SpvExecutionModeVecTypeHint:
   case SpvExecutionModeContractionOff:
      break; /* OpenCL */
+
+   default:
+      unreachable("Unhandled execution mode");
   }
 }
 
@@ -2767,6 +2989,7 @@ vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
      vtn_handle_constant(b, opcode, w, count);
      break;
 
+   case SpvOpUndef:
   case SpvOpVariable:
      vtn_handle_variables(b, opcode, w, count);
      break;
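
Note on the 64-bit constant hunks (SpvOpConstant / SpvOpSpecConstant): they
rely on SPIR-V's encoding of wide literals, where a 64-bit literal occupies
two consecutive 32-bit instruction words, low-order word first. The diff
calls vtn_u64_literal() for this; that helper is defined outside the hunks
shown here, so the sketch below (with an assumed name) only illustrates the
word order it has to implement:

   #include <stdint.h>

   /* Assemble a 64-bit SPIR-V literal from two instruction words:
    * w[0] supplies bits 0..31 and w[1] supplies bits 32..63.
    */
   static inline uint64_t
   u64_from_spirv_words(const uint32_t *w)
   {
      return (uint64_t)w[0] | ((uint64_t)w[1] << 32);
   }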
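
Note on the vtn_handle_texture() hunk: the coordinate source is now trimmed
with nir_channels() and the mask (1 << coord_components) - 1, so only the
channels that are actual coordinates reach the texture instruction. The mask
arithmetic as a standalone sketch (helper name assumed, not part of the diff):

   /* Bitmask selecting the low n channels of a NIR vector; bit i selects
    * component i.  n == 2 -> 0x3 (xy), n == 3 -> 0x7 (xyz), n == 4 -> 0xf.
    */
   static inline unsigned
   low_channel_mask(unsigned n)
   {
      return (1u << n) - 1;
   }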