diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index a35dbfc0a7a..747d1a0cdcc 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -310,6 +310,66 @@ vtn_push_nir_ssa(struct vtn_builder *b, uint32_t value_id, nir_ssa_def *def)
    return vtn_push_ssa_value(b, value_id, ssa);
 }
 
+static nir_deref_instr *
+vtn_get_image(struct vtn_builder *b, uint32_t value_id)
+{
+   struct vtn_type *type = vtn_get_value_type(b, value_id);
+   vtn_assert(type->base_type == vtn_base_type_image);
+   return nir_build_deref_cast(&b->nb, vtn_get_nir_ssa(b, value_id),
+                               nir_var_uniform, type->glsl_image, 0);
+}
+
+static void
+vtn_push_image(struct vtn_builder *b, uint32_t value_id,
+               nir_deref_instr *deref)
+{
+   struct vtn_type *type = vtn_get_value_type(b, value_id);
+   vtn_assert(type->base_type == vtn_base_type_image);
+   vtn_push_nir_ssa(b, value_id, &deref->dest.ssa);
+}
+
+static nir_deref_instr *
+vtn_get_sampler(struct vtn_builder *b, uint32_t value_id)
+{
+   struct vtn_type *type = vtn_get_value_type(b, value_id);
+   vtn_assert(type->base_type == vtn_base_type_sampler);
+   return nir_build_deref_cast(&b->nb, vtn_get_nir_ssa(b, value_id),
+                               nir_var_uniform, glsl_bare_sampler_type(), 0);
+}
+
+nir_ssa_def *
+vtn_sampled_image_to_nir_ssa(struct vtn_builder *b,
+                             struct vtn_sampled_image si)
+{
+   return nir_vec2(&b->nb, &si.image->dest.ssa, &si.sampler->dest.ssa);
+}
+
+static void
+vtn_push_sampled_image(struct vtn_builder *b, uint32_t value_id,
+                       struct vtn_sampled_image si)
+{
+   struct vtn_type *type = vtn_get_value_type(b, value_id);
+   vtn_assert(type->base_type == vtn_base_type_sampled_image);
+   vtn_push_nir_ssa(b, value_id, vtn_sampled_image_to_nir_ssa(b, si));
+}
+
+static struct vtn_sampled_image
+vtn_get_sampled_image(struct vtn_builder *b, uint32_t value_id)
+{
+   struct vtn_type *type = vtn_get_value_type(b, value_id);
+   vtn_assert(type->base_type == vtn_base_type_sampled_image);
+   nir_ssa_def *si_vec2 = vtn_get_nir_ssa(b, value_id);
+
+   struct vtn_sampled_image si = { NULL, };
+   si.image = nir_build_deref_cast(&b->nb, nir_channel(&b->nb, si_vec2, 0),
+                                   nir_var_uniform,
+                                   type->image->glsl_image, 0);
+   si.sampler = nir_build_deref_cast(&b->nb, nir_channel(&b->nb, si_vec2, 1),
+                                     nir_var_uniform,
+                                     glsl_bare_sampler_type(), 0);
+   return si;
+}
+
 static char *
 vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                    unsigned word_count, unsigned *words_used)
@@ -715,6 +775,33 @@ wrap_type_in_array(const struct glsl_type *type,
                        glsl_get_explicit_stride(array_type));
 }
 
+static bool
+vtn_type_needs_explicit_layout(struct vtn_builder *b, enum vtn_variable_mode mode)
+{
+   /* For OpenCL we never want to strip the info from the types, and it makes
+    * type comparisons easier in later stages.
+    */
+   if (b->options->environment == NIR_SPIRV_OPENCL)
+      return true;
+
+   switch (mode) {
+   case vtn_variable_mode_input:
+   case vtn_variable_mode_output:
+      /* Layout decorations are kept because we need offsets for XFB arrays
+       * of blocks.
+       */
+      return b->shader->info.has_transform_feedback_varyings;
+
+   case vtn_variable_mode_ssbo:
+   case vtn_variable_mode_phys_ssbo:
+   case vtn_variable_mode_ubo:
+      return true;
+
+   default:
+      return false;
+   }
+}
+
 const struct glsl_type *
 vtn_type_get_nir_type(struct vtn_builder *b, struct vtn_type *type,
                       enum vtn_variable_mode mode)
@@ -726,6 +813,66 @@ vtn_type_get_nir_type(struct vtn_builder *b, struct vtn_type *type,
       return wrap_type_in_array(glsl_atomic_uint_type(), type->type);
    }
 
+   if (mode == vtn_variable_mode_uniform) {
+      switch (type->base_type) {
+      case vtn_base_type_array: {
+         const struct glsl_type *elem_type =
+            vtn_type_get_nir_type(b, type->array_element, mode);
+
+         return glsl_array_type(elem_type, type->length,
+                                glsl_get_explicit_stride(type->type));
+      }
+
+      case vtn_base_type_struct: {
+         bool need_new_struct = false;
+         const uint32_t num_fields = type->length;
+         NIR_VLA(struct glsl_struct_field, fields, num_fields);
+         for (unsigned i = 0; i < num_fields; i++) {
+            fields[i] = *glsl_get_struct_field_data(type->type, i);
+            const struct glsl_type *field_nir_type =
+               vtn_type_get_nir_type(b, type->members[i], mode);
+            if (fields[i].type != field_nir_type) {
+               fields[i].type = field_nir_type;
+               need_new_struct = true;
+            }
+         }
+         if (need_new_struct) {
+            if (glsl_type_is_interface(type->type)) {
+               return glsl_interface_type(fields, num_fields,
+                                          /* packing */ 0, false,
+                                          glsl_get_type_name(type->type));
+            } else {
+               return glsl_struct_type(fields, num_fields,
+                                       glsl_get_type_name(type->type),
+                                       glsl_struct_type_is_packed(type->type));
+            }
+         } else {
+            /* No changes, just pass it on */
+            return type->type;
+         }
+      }
+
+      case vtn_base_type_image:
+         return type->glsl_image;
+
+      case vtn_base_type_sampler:
+         return glsl_bare_sampler_type();
+
+      case vtn_base_type_sampled_image:
+         return type->image->glsl_image;
+
+      default:
+         return type->type;
+      }
+   }
+
+   /* Layout decorations are allowed but ignored in certain conditions,
+    * to allow SPIR-V generators to perform type deduplication. Discard
+    * unnecessary ones when passing to NIR.
+    */
+   if (!vtn_type_needs_explicit_layout(b, mode))
+      return glsl_get_bare_type(type->type);
+
    return type->type;
 }
 
@@ -1413,6 +1560,14 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
 
    case SpvOpTypeImage: {
       val->type->base_type = vtn_base_type_image;
 
+      /* Images are represented in NIR as a scalar SSA value that is the
+       * result of a deref instruction. An OpLoad on an OpTypeImage pointer
+       * from UniformConstant memory just takes the NIR deref from the pointer
+       * and turns it into an SSA value.
+       */
+      val->type->type = nir_address_format_to_glsl_type(
+         vtn_mode_to_address_format(b, vtn_variable_mode_function));
+
       const struct vtn_type *sampled_type = vtn_get_type(b, w[2]);
       vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
                   glsl_get_bit_size(sampled_type->type) != 32,
@@ -1459,30 +1614,50 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
       enum glsl_base_type sampled_base_type =
          glsl_get_base_type(sampled_type->type);
       if (sampled == 1) {
-         val->type->type = glsl_sampler_type(dim, false, is_array,
-                                             sampled_base_type);
+         val->type->glsl_image = glsl_sampler_type(dim, false, is_array,
+                                                   sampled_base_type);
       } else if (sampled == 2) {
-         val->type->type = glsl_image_type(dim, is_array, sampled_base_type);
+         val->type->glsl_image = glsl_image_type(dim, is_array,
+                                                 sampled_base_type);
       } else {
          vtn_fail("We need to know if the image will be sampled");
       }
       break;
    }
 
-   case SpvOpTypeSampledImage:
+   case SpvOpTypeSampledImage: {
       val->type->base_type = vtn_base_type_sampled_image;
       val->type->image = vtn_get_type(b, w[2]);
-      val->type->type = val->type->image->type;
+
+      /* Sampled images are represented in NIR as a vec2 SSA value where each
+       * component is the result of a deref instruction. The first component
+       * is the image and the second is the sampler. An OpLoad on an
+       * OpTypeSampledImage pointer from UniformConstant memory just takes
+       * the NIR deref from the pointer and duplicates it to both vector
+       * components.
+       */
+      nir_address_format addr_format =
+         vtn_mode_to_address_format(b, vtn_variable_mode_function);
+      assert(nir_address_format_num_components(addr_format) == 1);
+      unsigned bit_size = nir_address_format_bit_size(addr_format);
+      assert(bit_size == 32 || bit_size == 64);
+
+      enum glsl_base_type base_type =
+         bit_size == 32 ? GLSL_TYPE_UINT : GLSL_TYPE_UINT64;
+      val->type->type = glsl_vector_type(base_type, 2);
       break;
+   }
 
    case SpvOpTypeSampler:
-      /* The actual sampler type here doesn't really matter. It gets
-       * thrown away the moment you combine it with an image. What really
-       * matters is that it's a sampler type as opposed to an integer type
-       * so the backend knows what to do.
-       */
       val->type->base_type = vtn_base_type_sampler;
-      val->type->type = glsl_bare_sampler_type();
+
+      /* Samplers are represented in NIR as a scalar SSA value that is the
+       * result of a deref instruction. An OpLoad on an OpTypeSampler pointer
+       * from UniformConstant memory just takes the NIR deref from the pointer
+       * and turns it into an SSA value.
+       */
+      val->type->type = nir_address_format_to_glsl_type(
+         vtn_mode_to_address_format(b, vtn_variable_mode_function));
       break;
 
    case SpvOpTypeOpaque:
@@ -2285,68 +2460,37 @@ non_uniform_decoration_cb(struct vtn_builder *b,
    }
 }
 
-
 static void
 vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
 {
-   if (opcode == SpvOpSampledImage) {
-      struct vtn_value *val =
-         vtn_push_value(b, w[2], vtn_value_type_sampled_image);
-      val->sampled_image = ralloc(b, struct vtn_sampled_image);
-
-      /* It seems valid to use OpSampledImage with OpUndef instead of
-       * OpTypeImage or OpTypeSampler.
-       */
-      if (vtn_untyped_value(b, w[3])->value_type == vtn_value_type_undef) {
-         val->sampled_image->image = NULL;
-      } else {
-         val->sampled_image->image =
-            vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
-      }
+   struct vtn_type *ret_type = vtn_get_type(b, w[1]);
 
-      if (vtn_untyped_value(b, w[4])->value_type == vtn_value_type_undef) {
-         val->sampled_image->sampler = NULL;
-      } else {
-         val->sampled_image->sampler =
-            vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
-      }
+   if (opcode == SpvOpSampledImage) {
+      struct vtn_sampled_image si = {
+         .image = vtn_get_image(b, w[3]),
+         .sampler = vtn_get_sampler(b, w[4]),
+      };
+      vtn_push_sampled_image(b, w[2], si);
       return;
    } else if (opcode == SpvOpImage) {
-      struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
-      if (src_val->value_type == vtn_value_type_sampled_image) {
-         vtn_push_pointer(b, w[2], src_val->sampled_image->image);
-      } else {
-         vtn_assert(src_val->value_type == vtn_value_type_pointer);
-         vtn_push_pointer(b, w[2], src_val->pointer);
-      }
+      struct vtn_sampled_image si = vtn_get_sampled_image(b, w[3]);
+      vtn_push_image(b, w[2], si.image);
       return;
    }
 
-   struct vtn_type *ret_type = vtn_get_type(b, w[1]);
-
-   struct vtn_pointer *image = NULL, *sampler = NULL;
+   nir_deref_instr *image = NULL, *sampler = NULL;
    struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
-   if (sampled_val->value_type == vtn_value_type_sampled_image) {
-      image = sampled_val->sampled_image->image;
-      sampler = sampled_val->sampled_image->sampler;
+   if (sampled_val->type->base_type == vtn_base_type_sampled_image) {
+      struct vtn_sampled_image si = vtn_get_sampled_image(b, w[3]);
+      image = si.image;
+      sampler = si.sampler;
    } else {
-      vtn_assert(sampled_val->value_type == vtn_value_type_pointer);
-      image = sampled_val->pointer;
-   }
-
-   if (!image) {
-      vtn_push_value(b, w[2], vtn_value_type_undef);
-      return;
+      image = vtn_get_image(b, w[3]);
    }
 
-   nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image);
-   nir_deref_instr *sampler_deref =
-      sampler ? vtn_pointer_to_deref(b, sampler) : NULL;
-
-   const struct glsl_type *image_type = sampled_val->type->type;
-   const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
-   const bool is_array = glsl_sampler_type_is_array(image_type);
+   const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image->type);
+   const bool is_array = glsl_sampler_type_is_array(image->type);
    nir_alu_type dest_type = nir_type_invalid;
 
    /* Figure out the base texture operation */
@@ -2415,7 +2559,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
    nir_tex_src srcs[10]; /* 10 should be enough */
    nir_tex_src *p = srcs;
 
-   p->src = nir_src_for_ssa(&image_deref->dest.ssa);
+   p->src = nir_src_for_ssa(&image->dest.ssa);
    p->src_type = nir_tex_src_texture_deref;
    p++;
 
@@ -2429,7 +2573,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
       vtn_fail_if(sampler == NULL,
                   "%s requires an image of type OpTypeSampledImage",
                   spirv_op_to_string(opcode));
-      p->src = nir_src_for_ssa(&sampler_deref->dest.ssa);
+      p->src = nir_src_for_ssa(&sampler->dest.ssa);
      p->src_type = nir_tex_src_sampler_deref;
      p++;
      break;
@@ -2648,7 +2792,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
 
    /* for non-query ops, get dest_type from sampler type */
    if (dest_type == nir_type_invalid) {
-      switch (glsl_get_sampler_result_type(image_type)) {
+      switch (glsl_get_sampler_result_type(image->type)) {
       case GLSL_TYPE_FLOAT: dest_type = nir_type_float; break;
       case GLSL_TYPE_INT: dest_type = nir_type_int; break;
       case GLSL_TYPE_UINT: dest_type = nir_type_uint; break;
@@ -2778,7 +2922,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
          vtn_push_value(b, w[2], vtn_value_type_image_pointer);
       val->image = ralloc(b, struct vtn_image_pointer);
 
-      val->image->image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
+      val->image->image = vtn_nir_deref(b, w[3]);
       val->image->coord = get_image_coord(b, w[4]);
       val->image->sample = vtn_get_nir_ssa(b, w[5]);
       val->image->lod = nir_imm_int(&b->nb, 0);
@@ -2789,6 +2933,8 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
    SpvScope scope = SpvScopeInvocation;
    SpvMemorySemanticsMask semantics = 0;
 
+   enum gl_access_qualifier access = 0;
+
    struct vtn_value *res_val;
    switch (opcode) {
    case SpvOpAtomicExchange:
@@ -2811,6 +2957,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
       image = *res_val->image;
       scope = vtn_constant_uint(b, w[4]);
       semantics = vtn_constant_uint(b, w[5]);
+      access |= ACCESS_COHERENT;
       break;
 
    case SpvOpAtomicStore:
@@ -2818,19 +2965,20 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
       image = *res_val->image;
       scope = vtn_constant_uint(b, w[2]);
       semantics = vtn_constant_uint(b, w[3]);
+      access |= ACCESS_COHERENT;
       break;
 
    case SpvOpImageQuerySize:
-      res_val = vtn_value(b, w[3], vtn_value_type_pointer);
-      image.image = res_val->pointer;
+      res_val = vtn_untyped_value(b, w[3]);
+      image.image = vtn_get_image(b, w[3]);
       image.coord = NULL;
       image.sample = NULL;
       image.lod = NULL;
       break;
 
    case SpvOpImageRead: {
-      res_val = vtn_value(b, w[3], vtn_value_type_pointer);
-      image.image = res_val->pointer;
+      res_val = vtn_untyped_value(b, w[3]);
+      image.image = vtn_get_image(b, w[3]);
       image.coord = get_image_coord(b, w[4]);
 
       const SpvImageOperandsMask operands =
@@ -2867,8 +3015,8 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
    }
 
    case SpvOpImageWrite: {
-      res_val = vtn_value(b, w[1], vtn_value_type_pointer);
-      image.image = res_val->pointer;
+      res_val = vtn_untyped_value(b, w[1]);
+      image.image = vtn_get_image(b, w[1]);
       image.coord = get_image_coord(b, w[2]);
 
       /* texel = w[3] */
 
@@ -2940,8 +3088,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
 
    nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
 
-   nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image.image);
-   intrin->src[0] = nir_src_for_ssa(&image_deref->dest.ssa);
+   intrin->src[0] = nir_src_for_ssa(&image.image->dest.ssa);
 
    /* ImageQuerySize doesn't take any extra parameters */
    if (opcode != SpvOpImageQuerySize) {
@@ -2965,7 +3112,6 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
     * chains to find the NonUniform decoration. It's either right there or we
     * can assume it doesn't exist.
     */
-   enum gl_access_qualifier access = 0;
    vtn_foreach_decoration(b, res_val, non_uniform_decoration_cb, &access);
    nir_intrinsic_set_access(intrin, access);
 
@@ -3233,6 +3379,8 @@ vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
       nir_intrinsic_op op = get_ssbo_nir_atomic_op(b, opcode);
       atomic = nir_intrinsic_instr_create(b->nb.shader, op);
 
+      nir_intrinsic_set_access(atomic, ACCESS_COHERENT);
+
       int src = 0;
       switch (opcode) {
       case SpvOpAtomicLoad:
@@ -3284,6 +3432,9 @@ vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
       atomic = nir_intrinsic_instr_create(b->nb.shader, op);
       atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
 
+      if (ptr->mode != vtn_variable_mode_workgroup)
+         nir_intrinsic_set_access(atomic, ACCESS_COHERENT);
+
       switch (opcode) {
       case SpvOpAtomicLoad:
          atomic->num_components = glsl_get_vector_elements(deref_type);
@@ -4374,14 +4525,7 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
       b->shader->info.cs.local_size[2] = mode->operands[2];
       break;
 
-   case SpvExecutionModeLocalSizeId:
-      b->shader->info.cs.local_size[0] = vtn_constant_uint(b, mode->operands[0]);
-      b->shader->info.cs.local_size[1] = vtn_constant_uint(b, mode->operands[1]);
-      b->shader->info.cs.local_size[2] = vtn_constant_uint(b, mode->operands[2]);
-      break;
-
    case SpvExecutionModeLocalSizeHint:
-   case SpvExecutionModeLocalSizeHintId:
       break; /* Nothing to do with this */
 
    case SpvExecutionModeOutputVertices:
@@ -4511,8 +4655,60 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
    case SpvExecutionModeDenormFlushToZero:
    case SpvExecutionModeSignedZeroInfNanPreserve:
    case SpvExecutionModeRoundingModeRTE:
-   case SpvExecutionModeRoundingModeRTZ:
-      /* Already handled in vtn_handle_rounding_mode_in_execution_mode() */
+   case SpvExecutionModeRoundingModeRTZ: {
+      unsigned execution_mode = 0;
+      switch (mode->exec_mode) {
+      case SpvExecutionModeDenormPreserve:
+         switch (mode->operands[0]) {
+         case 16: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP16; break;
+         case 32: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP32; break;
+         case 64: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP64; break;
+         default: vtn_fail("Floating point type not supported");
+         }
+         break;
+      case SpvExecutionModeDenormFlushToZero:
+         switch (mode->operands[0]) {
+         case 16: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16; break;
+         case 32: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32; break;
+         case 64: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64; break;
+         default: vtn_fail("Floating point type not supported");
+         }
+         break;
+      case SpvExecutionModeSignedZeroInfNanPreserve:
+         switch (mode->operands[0]) {
+         case 16: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16; break;
+         case 32: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32; break;
+         case 64: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64; break;
+         default: vtn_fail("Floating point type not supported");
+         }
+         break;
+      case SpvExecutionModeRoundingModeRTE:
+         switch (mode->operands[0]) {
+         case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16; break;
+         case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32; break;
+         case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64; break;
+         default: vtn_fail("Floating point type not supported");
+         }
+         break;
+      case SpvExecutionModeRoundingModeRTZ:
+         switch (mode->operands[0]) {
+         case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16; break;
+         case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32; break;
+         case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64; break;
+         default: vtn_fail("Floating point type not supported");
+         }
+         break;
+      default:
+         break;
+      }
+
+      b->shader->info.float_controls_execution_mode |= execution_mode;
+      break;
+   }
+
+   case SpvExecutionModeLocalSizeId:
+   case SpvExecutionModeLocalSizeHintId:
+      /* Handled later by vtn_handle_execution_mode_id(). */
       break;
 
    default:
@@ -4523,60 +4719,28 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
 }
 
 static void
-vtn_handle_rounding_mode_in_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
-                                           const struct vtn_decoration *mode, void *data)
+vtn_handle_execution_mode_id(struct vtn_builder *b, struct vtn_value *entry_point,
+                             const struct vtn_decoration *mode, UNUSED void *data)
 {
-   vtn_assert(b->entry_point == entry_point);
-   unsigned execution_mode = 0;
+   vtn_assert(b->entry_point == entry_point);
 
-   switch(mode->exec_mode) {
-   case SpvExecutionModeDenormPreserve:
-      switch (mode->operands[0]) {
-      case 16: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP16; break;
-      case 32: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP32; break;
-      case 64: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP64; break;
-      default: vtn_fail("Floating point type not supported");
-      }
-      break;
-   case SpvExecutionModeDenormFlushToZero:
-      switch (mode->operands[0]) {
-      case 16: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16; break;
-      case 32: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32; break;
-      case 64: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64; break;
-      default: vtn_fail("Floating point type not supported");
-      }
-      break;
-   case SpvExecutionModeSignedZeroInfNanPreserve:
-      switch (mode->operands[0]) {
-      case 16: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16; break;
-      case 32: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32; break;
-      case 64: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64; break;
-      default: vtn_fail("Floating point type not supported");
-      }
-      break;
-   case SpvExecutionModeRoundingModeRTE:
-      switch (mode->operands[0]) {
-      case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16; break;
-      case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32; break;
-      case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64; break;
-      default: vtn_fail("Floating point type not supported");
-      }
+   switch (mode->exec_mode) {
+   case SpvExecutionModeLocalSizeId:
+      b->shader->info.cs.local_size[0] = vtn_constant_uint(b, mode->operands[0]);
+      b->shader->info.cs.local_size[1] = vtn_constant_uint(b, mode->operands[1]);
+      b->shader->info.cs.local_size[2] = vtn_constant_uint(b, mode->operands[2]);
       break;
-   case SpvExecutionModeRoundingModeRTZ:
-      switch (mode->operands[0]) {
-      case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16; break;
-      case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32; break;
-      case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64; break;
-      default: vtn_fail("Floating point type not supported");
-      }
+
+   case SpvExecutionModeLocalSizeHintId:
+      /* Nothing to do with this hint. */
       break;
 
    default:
+      /* Nothing to do. Literal execution modes are already handled by
+       * vtn_handle_execution_mode(). */
       break;
    }
-
-   b->shader->info.float_controls_execution_mode |= execution_mode;
 }
 
 static bool
@@ -4865,12 +5029,12 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
       break;
 
    case SpvOpImageQuerySize: {
-      struct vtn_pointer *image =
-         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
-      if (glsl_type_is_image(image->type->type)) {
+      struct vtn_type *image_type = vtn_get_value_type(b, w[3]);
+      vtn_assert(image_type->base_type == vtn_base_type_image);
+      if (glsl_type_is_image(image_type->glsl_image)) {
         vtn_handle_image(b, opcode, w, count);
      } else {
-         vtn_assert(glsl_type_is_sampler(image->type->type));
+         vtn_assert(glsl_type_is_sampler(image_type->glsl_image));
         vtn_handle_texture(b, opcode, w, count);
      }
      break;
@@ -5244,12 +5408,6 @@ vtn_create_builder(const uint32_t *words, size_t word_count,
    uint16_t generator_id = words[2] >> 16;
    uint16_t generator_version = words[2];
 
-   /* The first GLSLang version bump actually 1.5 years after #179 was fixed
-    * but this should at least let us shut the workaround off for modern
-    * versions of GLSLang.
-    */
-   b->wa_glslang_179 = (generator_id == 8 && generator_version == 1);
-
    /* In GLSLang commit 8297936dd6eb3, their handling of barrier() was fixed
     * to provide correct memory semantics on compute shader barrier()
     * commands. Prior to that, we need to fix them up ourselves. This
@@ -5377,12 +5535,9 @@ spirv_to_nir(const uint32_t *words, size_t word_count,
    if (stage == MESA_SHADER_GEOMETRY)
       b->shader->info.gs.invocations = 1;
 
-   /* Parse rounding mode execution modes. This has to happen earlier than
-    * other changes in the execution modes since they can affect, for example,
-    * the result of the floating point constants.
-    */
+   /* Parse execution modes. */
    vtn_foreach_execution_mode(b, b->entry_point,
-                              vtn_handle_rounding_mode_in_execution_mode, NULL);
+                              vtn_handle_execution_mode, NULL);
 
    b->specializations = spec;
    b->num_specializations = num_spec;
@@ -5391,9 +5546,11 @@ spirv_to_nir(const uint32_t *words, size_t word_count,
    words = vtn_foreach_instruction(b, words, word_end,
                                    vtn_handle_variable_or_type_instruction);
 
-   /* Parse execution modes */
+   /* Parse execution modes that depend on IDs. Must happen after constants
+    * have been parsed.
+    */
    vtn_foreach_execution_mode(b, b->entry_point,
-                              vtn_handle_execution_mode, NULL);
+                              vtn_handle_execution_mode_id, NULL);
 
    if (b->workgroup_size_builtin) {
       vtn_assert(b->workgroup_size_builtin->type->type ==
@@ -5437,6 +5594,9 @@ spirv_to_nir(const uint32_t *words, size_t word_count,
    if (entry_point->num_params && b->shader->info.stage == MESA_SHADER_KERNEL)
      entry_point = vtn_emit_kernel_entry_point_wrapper(b, entry_point);
 
+   /* structurize the CFG */
+   nir_lower_goto_ifs(b->shader);
+
    entry_point->is_entrypoint = true;
 
    /* When multiple shader stages exist in the same SPIR-V module, we
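
Editor's note: the following is a minimal standalone sketch, not part of the patch, of the vec2 packing that vtn_sampled_image_to_nir_ssa() and vtn_get_sampled_image() implement above. The handle_t typedef and both helper names are hypothetical stand-ins; in the real code each component is a NIR deref SSA value, extracted with nir_channel() and cast back with nir_build_deref_cast().

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t handle_t;        /* stand-in for one deref SSA value */

struct sampled_image_vec2 {       /* stand-in for the NIR vec2 value */
   handle_t comp[2];              /* comp[0] = image, comp[1] = sampler */
};

/* Mirrors nir_vec2(&b->nb, &si.image->dest.ssa, &si.sampler->dest.ssa) */
static struct sampled_image_vec2
pack_sampled_image(handle_t image, handle_t sampler)
{
   return (struct sampled_image_vec2){ .comp = { image, sampler } };
}

/* Mirrors the two nir_channel() + nir_build_deref_cast() calls in
 * vtn_get_sampled_image(): component 0 yields the image, component 1
 * the sampler. */
static void
unpack_sampled_image(struct sampled_image_vec2 v,
                     handle_t *image, handle_t *sampler)
{
   *image = v.comp[0];
   *sampler = v.comp[1];
}

int
main(void)
{
   /* OpSampledImage packs; texture ops and OpImage unpack. */
   struct sampled_image_vec2 v = pack_sampled_image(7, 42);
   handle_t img, smp;
   unpack_sampled_image(v, &img, &smp);
   assert(img == 7 && smp == 42);
   printf("image=%" PRIu32 " sampler=%" PRIu32 "\n", img, smp);
   return 0;
}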
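The split of execution-mode handling into two passes can be distilled the same way. Again a sketch only: the constants table and both handler names below are hypothetical, standing in for vtn_constant_uint() and the vtn_handle_execution_mode()/vtn_handle_execution_mode_id() callbacks. The point is that LocalSizeId operands are SPIR-V result IDs, so they can only be resolved after the module's constant instructions have been parsed, which is why the ID pass runs second.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical id -> value table, filled in by "constant parsing";
 * in vtn this is what vtn_constant_uint() consults. */
static uint32_t constants[16];

static uint32_t local_size[3] = { 1, 1, 1 };

/* Pass 1: literal operands (SpvExecutionModeLocalSize) are usable
 * immediately, before any constants exist. */
static void
handle_local_size_literal(const uint32_t operands[3])
{
   for (int i = 0; i < 3; i++)
      local_size[i] = operands[i];
}

/* Pass 2: ID operands (SpvExecutionModeLocalSizeId) are looked up
 * once the constants are known. */
static void
handle_local_size_id(const uint32_t operand_ids[3])
{
   for (int i = 0; i < 3; i++)
      local_size[i] = constants[operand_ids[i]];
}

int
main(void)
{
   handle_local_size_literal((const uint32_t[3]){ 4, 4, 1 });

   /* ...constant parsing happens between the two passes... */
   constants[7] = 8;
   constants[8] = 8;
   constants[9] = 1;

   handle_local_size_id((const uint32_t[3]){ 7, 8, 9 });
   assert(local_size[0] == 8 && local_size[1] == 8 && local_size[2] == 1);
   printf("local size: %" PRIu32 " x %" PRIu32 " x %" PRIu32 "\n",
          local_size[0], local_size[1], local_size[2]);
   return 0;
}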