X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;ds=sidebyside;f=src%2Fcompiler%2Fspirv%2Fvtn_variables.c;h=712c3db741574b16c65fdbbd52be93550a7c182b;hb=857b9c50276141c874c0dba2475afe73ce62a013;hp=e4013e940acb13b6e449ca97a453366f4db238f8;hpb=14a12b771d0a380defacafe5825362af77ff21bd;p=mesa.git

diff --git a/src/compiler/spirv/vtn_variables.c b/src/compiler/spirv/vtn_variables.c
index e4013e940ac..712c3db7415 100644
--- a/src/compiler/spirv/vtn_variables.c
+++ b/src/compiler/spirv/vtn_variables.c
@@ -124,6 +124,18 @@ vtn_mode_uses_ssa_offset(struct vtn_builder *b,
           mode == vtn_variable_mode_push_constant;
 }
 
+static bool
+vtn_mode_is_cross_invocation(struct vtn_builder *b,
+                             enum vtn_variable_mode mode)
+{
+   return mode == vtn_variable_mode_ssbo ||
+          mode == vtn_variable_mode_ubo ||
+          mode == vtn_variable_mode_phys_ssbo ||
+          mode == vtn_variable_mode_push_constant ||
+          mode == vtn_variable_mode_workgroup ||
+          mode == vtn_variable_mode_cross_workgroup;
+}
+
 static bool
 vtn_pointer_is_external_block(struct vtn_builder *b,
                               struct vtn_pointer *ptr)
@@ -1074,11 +1086,11 @@ _vtn_variable_load_store(struct vtn_builder *b, bool load,
    if (glsl_type_is_vector_or_scalar(ptr->type->type)) {
       /* We hit a vector or scalar; go ahead and emit the load[s] */
       nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
-      if (vtn_pointer_is_external_block(b, ptr)) {
-         /* If it's external, we call nir_load/store_deref directly.  The
-          * vtn_local_load/store helpers are too clever and do magic to
-          * avoid array derefs of vectors.  That magic is both less
-          * efficient than the direct load/store and, in the case of
+      if (vtn_mode_is_cross_invocation(b, ptr->mode)) {
+         /* If it's cross-invocation, we call nir_load/store_deref
+          * directly.  The vtn_local_load/store helpers are too clever and
+          * do magic to avoid array derefs of vectors.  That magic is both
+          * less efficient than the direct load/store and, in the case of
           * stores, is broken because it creates a race condition if two
           * threads are writing to different components of the same vector
           * due to the load+insert+store it uses to emulate the array
@@ -1155,7 +1167,8 @@ static void
 _vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                    struct vtn_pointer *src)
 {
-   vtn_assert(src->type->type == dest->type->type);
+   vtn_assert(glsl_get_bare_type(src->type->type) ==
+              glsl_get_bare_type(dest->type->type));
    enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
    switch (base_type) {
    case GLSL_TYPE_UINT:
@@ -1384,6 +1397,10 @@ vtn_get_builtin_location(struct vtn_builder *b,
       *location = SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX;
       set_mode_system_value(b, mode);
       break;
+   case SpvBuiltInGlobalOffset:
+      *location = SYSTEM_VALUE_BASE_GLOBAL_INVOCATION_ID;
+      set_mode_system_value(b, mode);
+      break;
    case SpvBuiltInBaseVertex:
       /* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
        * semantic as Vulkan BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
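
The race described in the rewritten comment above is easiest to see spelled out. A minimal hypothetical C sketch of the load+insert+store emulation and its failure mode (the vec4 type, shared_vec, and the helper are illustrative, not code from this patch):

typedef struct { float comp[4]; } vec4;

static vec4 shared_vec;   /* stand-in for a vec4 in workgroup memory */

static void store_component_emulated(int i, float x)
{
   vec4 tmp = shared_vec;   /* load the WHOLE vector  */
   tmp.comp[i] = x;         /* insert one component   */
   shared_vec = tmp;        /* store the WHOLE vector */
}

/* If invocation A runs store_component_emulated(0, a) while invocation
 * B runs store_component_emulated(1, b), B may load shared_vec before
 * A's store lands and then write back a stale component 0, losing A's
 * update.  A direct nir_store_deref with a one-bit writemask touches
 * only component i, so writes to different components can never
 * clobber each other -- hence the direct path for every mode in which
 * two invocations can observe the same memory. */
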
@@ -1423,8 +1440,13 @@ vtn_get_builtin_location(struct vtn_builder *b,
       set_mode_system_value(b, mode);
       break;
    case SpvBuiltInViewIndex:
-      *location = SYSTEM_VALUE_VIEW_INDEX;
-      set_mode_system_value(b, mode);
+      if (b->options && b->options->view_index_is_input) {
+         *location = VARYING_SLOT_VIEW_INDEX;
+         vtn_assert(*mode == nir_var_shader_in);
+      } else {
+         *location = SYSTEM_VALUE_VIEW_INDEX;
+         set_mode_system_value(b, mode);
+      }
       break;
    case SpvBuiltInSubgroupEqMask:
       *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK,
@@ -2197,10 +2219,6 @@ vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
       /* Create the variable normally */
       var->var = rzalloc(b->shader, nir_variable);
       var->var->name = ralloc_strdup(var->var, val->name);
-      /* Workgroup variables don't have any explicit layout but some
-       * layouts may have leaked through due to type deduplication in the
-       * SPIR-V.
-       */
       var->var->type = vtn_type_get_nir_type(b, var->type, var->mode);
       var->var->data.mode = nir_var_mem_shared;
       break;
@@ -2253,11 +2271,6 @@ vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
 
       var->var = rzalloc(b->shader, nir_variable);
       var->var->name = ralloc_strdup(var->var, val->name);
-      /* In Vulkan, shader I/O variables don't have any explicit layout but
-       * some layouts may have leaked through due to type deduplication in
-       * the SPIR-V.  We do, however, keep the layouts in the variable's
-       * interface_type because we need offsets for XFB arrays of blocks.
-       */
       var->var->type = vtn_type_get_nir_type(b, var->type, var->mode);
       var->var->data.mode = nir_mode;
       var->var->data.patch = var->patch;
@@ -2341,7 +2354,8 @@ vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
 
    if (var->mode == vtn_variable_mode_uniform ||
        var->mode == vtn_variable_mode_ubo ||
-       var->mode == vtn_variable_mode_ssbo) {
+       var->mode == vtn_variable_mode_ssbo ||
+       var->mode == vtn_variable_mode_atomic_counter) {
       /* XXX: We still need the binding information in the nir_variable
        * for these.  We should fix that.
        */
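
Whether ViewIndex becomes a varying input or a system value is now a per-consumer choice. A hypothetical caller-side sketch (assuming the view_index_is_input flag lives in struct spirv_to_nir_options, as the b->options use above implies; other fields elided):

/* Ask the SPIR-V front-end to treat SpvBuiltInViewIndex as an ordinary
 * shader input at VARYING_SLOT_VIEW_INDEX instead of lowering it to
 * SYSTEM_VALUE_VIEW_INDEX.  Illustrative only. */
const struct spirv_to_nir_options spirv_options = {
   .view_index_is_input = true,
};
/* ... &spirv_options is then passed as the options argument to
 * spirv_to_nir() for, e.g., a fragment stage, where the
 * vtn_assert(*mode == nir_var_shader_in) above holds. */
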
@@ -2433,6 +2447,37 @@ nir_sloppy_bitcast(nir_builder *b, nir_ssa_def *val,
    return nir_shrink_zero_pad_vec(b, val, num_components);
 }
 
+static bool
+vtn_get_mem_operands(struct vtn_builder *b, const uint32_t *w, unsigned count,
+                     unsigned *idx, SpvMemoryAccessMask *access, unsigned *alignment,
+                     SpvScope *dest_scope, SpvScope *src_scope)
+{
+   *access = 0;
+   *alignment = 0;
+   if (*idx >= count)
+      return false;
+
+   *access = w[(*idx)++];
+   if (*access & SpvMemoryAccessAlignedMask) {
+      vtn_assert(*idx < count);
+      *alignment = w[(*idx)++];
+   }
+
+   if (*access & SpvMemoryAccessMakePointerAvailableMask) {
+      vtn_assert(*idx < count);
+      vtn_assert(dest_scope);
+      *dest_scope = vtn_constant_uint(b, w[(*idx)++]);
+   }
+
+   if (*access & SpvMemoryAccessMakePointerVisibleMask) {
+      vtn_assert(*idx < count);
+      vtn_assert(src_scope);
+      *src_scope = vtn_constant_uint(b, w[(*idx)++]);
+   }
+
+   return true;
+}
+
 void
 vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
@@ -2472,6 +2517,31 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
       break;
    }
 
+   case SpvOpConstantSampler: {
+      /* Synthesize a pointer-to-sampler type, create a variable of that type,
+       * and give the variable a constant initializer with the sampler params */
+      struct vtn_type *sampler_type = vtn_value(b, w[1], vtn_value_type_type)->type;
+      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);
+
+      struct vtn_type *ptr_type = rzalloc(b, struct vtn_type);
+      ptr_type->base_type = vtn_base_type_pointer;
+      ptr_type->deref = sampler_type;
+      ptr_type->storage_class = SpvStorageClassUniform;
+
+      ptr_type->type = nir_address_format_to_glsl_type(
+         vtn_mode_to_address_format(b, vtn_variable_mode_function));
+
+      vtn_create_variable(b, val, ptr_type, ptr_type->storage_class, NULL, NULL);
+
+      nir_variable *nir_var = val->pointer->var->var;
+      nir_var->data.sampler.is_inline_sampler = true;
+      nir_var->data.sampler.addressing_mode = w[3];
+      nir_var->data.sampler.normalized_coordinates = w[4];
+      nir_var->data.sampler.filter_mode = w[5];
+
+      break;
+   }
+
    case SpvOpAccessChain:
    case SpvOpPtrAccessChain:
    case SpvOpInBoundsAccessChain:
@@ -2520,20 +2590,15 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
 
       vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);
 
-      if (count > 4) {
-         unsigned idx = 5;
-         SpvMemoryAccessMask access = w[4];
-         if (access & SpvMemoryAccessAlignedMask)
-            idx++;
-
-         if (access & SpvMemoryAccessMakePointerVisibleMask) {
-            SpvMemorySemanticsMask semantics =
-               SpvMemorySemanticsMakeVisibleMask |
-               vtn_storage_class_to_memory_semantics(src->ptr_type->storage_class);
-
-            SpvScope scope = vtn_constant_uint(b, w[idx]);
-            vtn_emit_memory_barrier(b, scope, semantics);
-         }
+      unsigned idx = 4, alignment;
+      SpvMemoryAccessMask access;
+      SpvScope scope;
+      vtn_get_mem_operands(b, w, count, &idx, &access, &alignment, NULL, &scope);
+      if (access & SpvMemoryAccessMakePointerVisibleMask) {
+         SpvMemorySemanticsMask semantics =
+            SpvMemorySemanticsMakeVisibleMask |
+            vtn_storage_class_to_memory_semantics(src->ptr_type->storage_class);
+         vtn_emit_memory_barrier(b, scope, semantics);
       }
 
       vtn_push_ssa_value(b, w[2], vtn_variable_load(b, src));
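
vtn_get_mem_operands() replaces the per-opcode open-coded parsing with one walk over the optional memory-operand words, in the order SPIR-V defines them. A worked example of the word layout it consumes for the OpLoad case above (instruction and word values hypothetical):

/*    %val = OpLoad %f32 %ptr Aligned|MakePointerVisible 4 %scope
 *
 * arrives as (w[0] holds the opcode and word count):
 *
 *    w[1] = result type <id>
 *    w[2] = result <id>
 *    w[3] = pointer <id>
 *    w[4] = SpvMemoryAccessAlignedMask |
 *           SpvMemoryAccessMakePointerVisibleMask
 *    w[5] = 4            -- alignment literal, consumed because
 *                           Aligned is set
 *    w[6] = <id> of the scope constant, consumed because
 *                           MakePointerVisible is set (src_scope)
 *
 * Starting at idx = 4, vtn_get_mem_operands() consumes w[4..6], and
 * the SpvOpLoad case then emits the MakeVisible barrier before the
 * load itself is performed.
 */
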
@@ -2570,23 +2635,19 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
 
       vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);
 
+      unsigned idx = 3, alignment;
+      SpvMemoryAccessMask access;
+      SpvScope scope;
+      vtn_get_mem_operands(b, w, count, &idx, &access, &alignment, &scope, NULL);
+
       struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
       vtn_variable_store(b, src, dest);
 
-      if (count > 3) {
-         unsigned idx = 4;
-         SpvMemoryAccessMask access = w[3];
-
-         if (access & SpvMemoryAccessAlignedMask)
-            idx++;
-
-         if (access & SpvMemoryAccessMakePointerAvailableMask) {
-            SpvMemorySemanticsMask semantics =
-               SpvMemorySemanticsMakeAvailableMask |
-               vtn_storage_class_to_memory_semantics(dest->ptr_type->storage_class);
-            SpvScope scope = vtn_constant_uint(b, w[idx]);
-            vtn_emit_memory_barrier(b, scope, semantics);
-         }
+      if (access & SpvMemoryAccessMakePointerAvailableMask) {
+         SpvMemorySemanticsMask semantics =
+            SpvMemorySemanticsMakeAvailableMask |
+            vtn_storage_class_to_memory_semantics(dest->ptr_type->storage_class);
+         vtn_emit_memory_barrier(b, scope, semantics);
       }
       break;
    }
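
The store path is the mirror image of the load path: the operands are parsed up front, but the barrier that publishes the write is emitted only after vtn_variable_store(). A hypothetical OpStore that exercises it (assembly and word values illustrative):

/*    OpStore %ptr %val MakePointerAvailable|NonPrivatePointer %scope
 *
 *    w[1] = pointer <id>
 *    w[2] = object <id>
 *    w[3] = SpvMemoryAccessMakePointerAvailableMask |
 *           SpvMemoryAccessNonPrivatePointerMask
 *    w[4] = <id> of the scope constant (dest_scope)
 *
 * vtn_get_mem_operands() starts at idx = 3 here because OpStore has no
 * result-type/result-id words.  NonPrivatePointer carries no extra
 * operand words, so only w[3] and w[4] are consumed, and the
 * MakeAvailable barrier is emitted after the store so the write is
 * complete before it is made available to other invocations.
 */
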