X-Git-Url: https://git.libre-soc.org/?p=mesa.git;a=blobdiff_plain;f=src%2Fcompiler%2Fspirv%2Fvtn_private.h;h=8ee4f7be70b8d7ca4788e56e273270988f815bda;hp=cac4d45864b5c8c05e47557958f3a55d4272e205;hb=467b90fcc46efdd5ce64a12937fedf507d0242ec;hpb=16dfdeefc8469c9bb8770bab763fd943b62f875d

diff --git a/src/compiler/spirv/vtn_private.h b/src/compiler/spirv/vtn_private.h
index cac4d45864b..8ee4f7be70b 100644
--- a/src/compiler/spirv/vtn_private.h
+++ b/src/compiler/spirv/vtn_private.h
@@ -28,6 +28,8 @@
 #ifndef _VTN_PRIVATE_H_
 #define _VTN_PRIVATE_H_
 
+#include <setjmp.h>
+
 #include "nir/nir.h"
 #include "nir/nir_builder.h"
 #include "util/u_dynarray.h"
@@ -49,6 +51,60 @@ void _vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
               const char *fmt, ...) PRINTFLIKE(4, 5);
 #define vtn_warn(...) _vtn_warn(b, __FILE__, __LINE__, __VA_ARGS__)
 
+void _vtn_err(struct vtn_builder *b, const char *file, unsigned line,
+              const char *fmt, ...) PRINTFLIKE(4, 5);
+#define vtn_err(...) _vtn_err(b, __FILE__, __LINE__, __VA_ARGS__)
+
+/** Fail SPIR-V parsing
+ *
+ * This function logs an error and then bails out of the shader compile using
+ * longjmp. This being safe relies on three things:
+ *
+ *  1) We must guarantee that setjmp is called after allocating the builder
+ *     and setting up b->debug (so that logging works) but before any errors
+ *     have a chance to occur.
+ *
+ *  2) While doing the SPIR-V -> NIR conversion, we need to be careful to
+ *     ensure that all heap allocations happen through ralloc and are parented
+ *     to the builder. This way they will get properly cleaned up on error.
+ *
+ *  3) We must ensure that _vtn_fail is never called while a mutex lock or a
+ *     reference to any other resource is held, with the exception of ralloc
+ *     objects which are parented to the builder.
+ *
+ * So long as these three things continue to hold, we can easily longjmp back
+ * to spirv_to_nir(), clean up the builder, and return NULL.
+ */
+NORETURN void
+_vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
+          const char *fmt, ...) PRINTFLIKE(4, 5);
+
+#define vtn_fail(...) _vtn_fail(b, __FILE__, __LINE__, __VA_ARGS__)
+
+/** Fail if the given expression evaluates to true */
+#define vtn_fail_if(expr, ...) \
+   do { \
+      if (unlikely(expr)) \
+         vtn_fail(__VA_ARGS__); \
+   } while (0)
+
+#define _vtn_fail_with(t, msg, v) \
+   vtn_fail("%s: %s (%u)\n", msg, spirv_ ## t ## _to_string(v), v)
+
+#define vtn_fail_with_decoration(msg, v) _vtn_fail_with(decoration, msg, v)
+#define vtn_fail_with_opcode(msg, v) _vtn_fail_with(op, msg, v)
+
+/** Assert that a condition is true and, if it isn't, vtn_fail
+ *
+ * This macro is transitional only and should not be used in new code. Use
+ * vtn_fail_if and provide a real message instead.
+ */
+#define vtn_assert(expr) \
+   do { \
+      if (!likely(expr)) \
+         vtn_fail("%s", #expr); \
+   } while (0)
+
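The failure contract above is easiest to see from the caller's side. Below is a minimal sketch of the pattern, not the actual spirv_to_nir() (which does considerably more setup); it only illustrates how the setjmp/longjmp contract is meant to be used. b->fail_jump is the jmp_buf added to struct vtn_builder later in this patch.

   /* Illustrative sketch only.  setjmp() returns 0 on the initial call; a
    * vtn_fail() anywhere during parsing longjmps back here with a non-zero
    * value. */
   static nir_shader *
   spirv_to_nir_sketch(struct vtn_builder *b)
   {
      if (setjmp(b->fail_jump)) {
         /* We arrived here via longjmp from _vtn_fail.  Because every
          * allocation during parsing was ralloc'd against the builder,
          * freeing the builder releases all intermediate state. */
         ralloc_free(b);
         return NULL;
      }

      /* ... SPIR-V -> NIR conversion; any vtn_fail() unwinds to the
       * setjmp above ... */
      return b->shader;
   }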
 enum vtn_value_type {
    vtn_value_type_invalid = 0,
    vtn_value_type_undef,
@@ -62,15 +118,16 @@ enum vtn_value_type {
    vtn_value_type_ssa,
    vtn_value_type_extension,
    vtn_value_type_image_pointer,
-   vtn_value_type_sampled_image,
 };
 
 enum vtn_branch_type {
    vtn_branch_type_none,
+   vtn_branch_type_if_merge,
    vtn_branch_type_switch_break,
    vtn_branch_type_switch_fallthrough,
    vtn_branch_type_loop_break,
    vtn_branch_type_loop_continue,
+   vtn_branch_type_loop_back_edge,
    vtn_branch_type_discard,
    vtn_branch_type_return,
 };
@@ -79,11 +136,14 @@ enum vtn_cf_node_type {
    vtn_cf_node_type_block,
    vtn_cf_node_type_if,
    vtn_cf_node_type_loop,
+   vtn_cf_node_type_case,
    vtn_cf_node_type_switch,
+   vtn_cf_node_type_function,
 };
 
 struct vtn_cf_node {
    struct list_head link;
+   struct vtn_cf_node *parent;
    enum vtn_cf_node_type type;
 };
 
@@ -98,6 +158,10 @@ struct vtn_loop {
     */
    struct list_head cont_body;
 
+   struct vtn_block *header_block;
+   struct vtn_block *cont_block;
+   struct vtn_block *break_block;
+
    SpvLoopControlMask control;
 };
 
@@ -112,16 +176,18 @@ struct vtn_if {
    enum vtn_branch_type else_type;
    struct list_head else_body;
 
+   struct vtn_block *merge_block;
+
    SpvSelectionControlMask control;
 };
 
 struct vtn_case {
-   struct list_head link;
+   struct vtn_cf_node node;
 
-   struct list_head body;
+   struct vtn_block *block;
 
-   /* The block that starts this case */
-   struct vtn_block *start_block;
+   enum vtn_branch_type type;
+   struct list_head body;
 
    /* The fallthrough case, if any */
    struct vtn_case *fallthrough;
@@ -142,6 +208,8 @@ struct vtn_switch {
    uint32_t selector;
 
    struct list_head cases;
+
+   struct vtn_block *break_block;
 };
 
 struct vtn_block {
@@ -158,6 +226,14 @@ struct vtn_block {
 
    enum vtn_branch_type branch_type;
 
+   /* The CF node for which this is a merge target
+    *
+    * The SPIR-V spec requires that any given block can be the merge target
+    * for at most one merge instruction. If this block is a merge target,
+    * this points back to the CF node containing that merge instruction.
+    */
+   struct vtn_cf_node *merge_cf_node;
+
    /** Points to the loop that this block starts (if it starts a loop) */
    struct vtn_loop *loop;
 
@@ -169,7 +245,9 @@ struct vtn_block {
 };
 
 struct vtn_function {
-   struct exec_node node;
+   struct vtn_cf_node node;
+
+   struct vtn_type *type;
 
    bool referenced;
    bool emitted;
@@ -184,13 +262,33 @@ struct vtn_function {
    SpvFunctionControlMask control;
 };
 
-typedef bool (*vtn_instruction_handler)(struct vtn_builder *, uint32_t,
+#define VTN_DECL_CF_NODE_CAST(_type)               \
+static inline struct vtn_##_type *                 \
+vtn_cf_node_as_##_type(struct vtn_cf_node *node)   \
+{                                                  \
+   assert(node->type == vtn_cf_node_type_##_type); \
+   return (struct vtn_##_type *)node;              \
+}
+
+VTN_DECL_CF_NODE_CAST(block)
+VTN_DECL_CF_NODE_CAST(loop)
+VTN_DECL_CF_NODE_CAST(if)
+VTN_DECL_CF_NODE_CAST(case)
+VTN_DECL_CF_NODE_CAST(switch)
+VTN_DECL_CF_NODE_CAST(function)
+
+#define vtn_foreach_cf_node(node, cf_list) \
+   list_for_each_entry(struct vtn_cf_node, node, cf_list, link)
+
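The casts and iterator generated above are the intended way to walk the new CF lists. A hypothetical walker, purely for illustration (the function is not part of this patch):

   /* Illustrative only: count the blocks in a CF list (e.g. a loop body)
    * using vtn_foreach_cf_node and a VTN_DECL_CF_NODE_CAST helper. */
   static unsigned
   count_blocks_sketch(struct list_head *cf_list)
   {
      unsigned num_blocks = 0;
      vtn_foreach_cf_node(node, cf_list) {
         if (node->type == vtn_cf_node_type_block) {
            struct vtn_block *block = vtn_cf_node_as_block(node);
            (void)block;  /* a real pass would do something with it */
            num_blocks++;
         }
      }
      return num_blocks;
   }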
+typedef bool (*vtn_instruction_handler)(struct vtn_builder *, SpvOp,
                                         const uint32_t *, unsigned);
 
 void vtn_build_cfg(struct vtn_builder *b, const uint32_t *words,
                    const uint32_t *end);
 void vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
                        vtn_instruction_handler instruction_handler);
+void vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
+                              const uint32_t *w, unsigned count);
 
 const uint32_t *
 vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
@@ -221,6 +319,7 @@ enum vtn_base_type {
    vtn_base_type_pointer,
    vtn_base_type_image,
    vtn_base_type_sampler,
+   vtn_base_type_sampled_image,
    vtn_base_type_function,
 };
 
@@ -229,15 +328,21 @@ struct vtn_type {
 
    const struct glsl_type *type;
 
-   /* The value that declares this type. Used for finding decorations */
-   struct vtn_value *val;
+   /* The SPIR-V id of the given type. */
+   uint32_t id;
 
-   /* Specifies the length of complex types. */
+   /* Specifies the length of complex types.
+    *
+    * For Workgroup pointers, this is the size of the referenced type.
+    */
    unsigned length;
 
    /* for arrays, matrices and pointers, the array stride */
    unsigned stride;
 
+   /* Access qualifiers */
+   enum gl_access_qualifier access;
+
    union {
       /* Members for scalar, vector, and array-like types */
      struct {
@@ -274,6 +379,13 @@ struct vtn_type {
          * (i.e. a block that contains only builtins).
          */
         bool builtin_block:1;
+
+         /* For structs and unions, set when the members are packed, i.e.
+          * when the requested minimum member alignment is 0.
+          *
+          * Set by the CPacked and Alignment decorations in kernels.
+          */
+         bool packed:1;
       };
 
       /* Members for pointer types */
      struct {
@@ -283,12 +395,18 @@ struct vtn_type {
 
         /* Storage class for pointers */
        SpvStorageClass storage_class;
+
+         /* Required alignment for pointers */
+         uint32_t align;
      };
 
      /* Members for image types */
      struct {
-         /* For images, indicates whether it's sampled or storage */
-         bool sampled;
+         /* GLSL image type for this type. This is not to be confused with
+          * vtn_type::type which is actually going to be the GLSL type for a
+          * pointer to an image, likely a uint32_t.
+          */
+         const struct glsl_type *glsl_image;
 
         /* Image format for image_load_store type images */
        unsigned image_format;
@@ -297,6 +415,12 @@ struct vtn_type {
        SpvAccessQualifier access_qualifier;
      };
 
+      /* Members for sampled image types */
+      struct {
+         /* For sampled images, the image type */
+         struct vtn_type *image;
+      };
+
      /* Members for function types */
      struct {
         /* For functions, the vtn_type for each parameter */
@@ -308,6 +432,13 @@ struct vtn_type {
    };
 };
 
+bool vtn_type_contains_block(struct vtn_builder *b, struct vtn_type *type);
+
+bool vtn_types_compatible(struct vtn_builder *b,
+                          struct vtn_type *t1, struct vtn_type *t2);
+
+struct vtn_type *vtn_type_without_array(struct vtn_type *type);
+
 struct vtn_variable;
 
 enum vtn_access_mode {
@@ -317,7 +448,7 @@ enum vtn_access_mode {
 
 struct vtn_access_link {
    enum vtn_access_mode mode;
-   uint32_t id;
+   int64_t id;
 };
 
 struct vtn_access_chain {
@@ -328,6 +459,9 @@ struct vtn_access_chain {
    */
   bool ptr_as_array;
 
+   /* Access qualifiers */
+   enum gl_access_qualifier access;
+
   /** Struct elements and array offsets.
    *
    * This is an array of 1 so that it can conveniently be created on the
@@ -337,17 +471,19 @@
 };
 
 enum vtn_variable_mode {
-   vtn_variable_mode_local,
-   vtn_variable_mode_global,
-   vtn_variable_mode_param,
+   vtn_variable_mode_function,
+   vtn_variable_mode_private,
+   vtn_variable_mode_uniform,
+   vtn_variable_mode_atomic_counter,
    vtn_variable_mode_ubo,
    vtn_variable_mode_ssbo,
+   vtn_variable_mode_phys_ssbo,
    vtn_variable_mode_push_constant,
-   vtn_variable_mode_image,
-   vtn_variable_mode_sampler,
    vtn_variable_mode_workgroup,
+   vtn_variable_mode_cross_workgroup,
    vtn_variable_mode_input,
    vtn_variable_mode_output,
+   vtn_variable_mode_image,
 };
 
 struct vtn_pointer {
 
    /** The referenced variable, if known
     *
     * This field may be NULL if the pointer uses a (block_index, offset) pair
-    * instead of an access chain.
+    * instead of an access chain or if the access chain starts at a deref.
     */
    struct vtn_variable *var;
 
-   /** An access chain describing how to get from var to the referenced data
-    *
-    * This field may be NULL if the pointer references the entire variable or
-    * if a (block_index, offset) pair is used instead of an access chain.
-    */
-   struct vtn_access_chain *chain;
+   /** The NIR deref corresponding to this pointer */
+   nir_deref_instr *deref;
 
    /** A (block_index, offset) pair representing a UBO or SSBO position. */
    struct nir_ssa_def *block_index;
    struct nir_ssa_def *offset;
+
+   /* Access qualifiers */
+   enum gl_access_qualifier access;
 };
 
-static inline bool
-vtn_pointer_uses_ssa_offset(struct vtn_pointer *ptr)
+bool vtn_mode_uses_ssa_offset(struct vtn_builder *b,
+                              enum vtn_variable_mode mode);
+
+static inline bool vtn_pointer_uses_ssa_offset(struct vtn_builder *b,
+                                               struct vtn_pointer *ptr)
 {
-   return ptr->mode == vtn_variable_mode_ubo ||
-          ptr->mode == vtn_variable_mode_ssbo;
+   return vtn_mode_uses_ssa_offset(b, ptr->mode);
 }
+
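How a pointer gets lowered follows directly from the helper above: UBO/SSBO-class pointers are addressed as a (block_index, offset) pair, everything else becomes a NIR deref chain. A sketch of the dispatch, purely for illustration (vtn_pointer_to_deref is declared further down in this header):

   /* Sketch, not code from this patch: choosing a lowering strategy for a
    * vtn_pointer. */
   static void
   lower_pointer_sketch(struct vtn_builder *b, struct vtn_pointer *ptr)
   {
      if (vtn_pointer_uses_ssa_offset(b, ptr)) {
         /* Offset path: emit UBO/SSBO load/store intrinsics addressed by
          * ptr->block_index and ptr->offset. */
      } else {
         /* Deref path: materialize a nir_deref_instr chain and use NIR's
          * deref-based load/store machinery. */
         nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
         (void)deref;
      }
   }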
 struct vtn_variable {
    enum vtn_variable_mode mode;
 
@@ -398,11 +536,20 @@ struct vtn_variable {
 
    unsigned descriptor_set;
    unsigned binding;
+   bool explicit_binding;
+   unsigned offset;
    unsigned input_attachment_index;
    bool patch;
 
    nir_variable *var;
-   nir_variable **members;
+
+   /* If the variable is a struct with a location set on it, then this will
+    * be stored here. This will be used to calculate locations for members
+    * that don't have their own explicit location.
+    */
+   int base_location;
+
+   int shared_location;
 
    /**
     * In some early released versions of GLSLang, it implemented all function
@@ -417,35 +564,32 @@ struct vtn_variable {
     * hack at some point in the future.
     */
    struct vtn_pointer *copy_prop_sampler;
+
+   /* Access qualifiers. */
+   enum gl_access_qualifier access;
 };
 
+const struct glsl_type *
+vtn_type_get_nir_type(struct vtn_builder *b, struct vtn_type *type,
+                      enum vtn_variable_mode mode);
+
 struct vtn_image_pointer {
-   struct vtn_pointer *image;
+   nir_deref_instr *image;
    nir_ssa_def *coord;
    nir_ssa_def *sample;
-};
-
-struct vtn_sampled_image {
-   struct vtn_type *type;
-   struct vtn_pointer *image; /* Image or array of images */
-   struct vtn_pointer *sampler; /* Sampler */
+   nir_ssa_def *lod;
 };
 
 struct vtn_value {
    enum vtn_value_type value_type;
    const char *name;
    struct vtn_decoration *decoration;
+   struct vtn_type *type;
    union {
-      void *ptr;
       char *str;
-      struct vtn_type *type;
-      struct {
-         nir_constant *constant;
-         const struct glsl_type *const_type;
-      };
+      nir_constant *constant;
       struct vtn_pointer *pointer;
      struct vtn_image_pointer *image;
-      struct vtn_sampled_image *sampled_image;
      struct vtn_function *func;
      struct vtn_block *block;
      struct vtn_ssa_value *ssa;
@@ -466,7 +610,7 @@ struct vtn_decoration {
    */
   int scope;
 
-   const uint32_t *literals;
+   const uint32_t *operands;
   struct vtn_value *group;
 
   union {
@@ -478,10 +622,14 @@ struct vtn_decoration {
 struct vtn_builder {
    nir_builder nb;
 
+   /* Used by vtn_fail to jump back to the beginning of SPIR-V compilation */
+   jmp_buf fail_jump;
+
    const uint32_t *spirv;
+   size_t spirv_word_count;
 
    nir_shader *shader;
-   const struct spirv_to_nir_options *options;
+   struct spirv_to_nir_options *options;
    struct vtn_block *block;
 
    /* Current offset, file, line, and column. Useful for debugging. Set
@@ -511,19 +659,31 @@ struct vtn_builder {
    unsigned value_id_bound;
    struct vtn_value *values;
 
+   /* True if we need to fix up CS OpControlBarrier */
+   bool wa_glslang_cs_barrier;
+
    gl_shader_stage entry_point_stage;
    const char *entry_point_name;
    struct vtn_value *entry_point;
-   bool origin_upper_left;
-   bool pixel_center_integer;
+   struct vtn_value *workgroup_size_builtin;
+   bool variable_pointers;
 
    struct vtn_function *func;
-   struct exec_list functions;
+   struct list_head functions;
 
    /* Current function parameter index */
    unsigned func_param_idx;
 
    bool has_loop_continue;
+
+   /* false by default, set to true by the ContractionOff execution mode */
+   bool exact;
+
+   /* when a physical memory model is chosen */
+   bool physical_ptrs;
+
+   /* memory model specified by OpMemoryModel */
+   unsigned mem_model;
 };
 
 nir_ssa_def *
@@ -533,49 +693,123 @@ vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
                      struct vtn_type *ptr_type);
 
 static inline struct vtn_value *
+vtn_untyped_value(struct vtn_builder *b, uint32_t value_id)
+{
+   vtn_fail_if(value_id >= b->value_id_bound,
+               "SPIR-V id %u is out-of-bounds", value_id);
+   return &b->values[value_id];
+}
+
+/* Consider not using this function directly and instead use
+ * vtn_push_ssa_value/vtn_push_pointer so that applying decorations is
+ * handled by common code.
+ */
+static inline struct vtn_value *
 vtn_push_value(struct vtn_builder *b, uint32_t value_id,
                enum vtn_value_type value_type)
 {
-   assert(value_id < b->value_id_bound);
-   assert(b->values[value_id].value_type == vtn_value_type_invalid);
+   struct vtn_value *val = vtn_untyped_value(b, value_id);
 
-   b->values[value_id].value_type = value_type;
+   vtn_fail_if(value_type == vtn_value_type_ssa,
+               "Do not call vtn_push_value for value_type_ssa. Use "
+               "vtn_push_ssa_value instead.");
+
+   vtn_fail_if(val->value_type != vtn_value_type_invalid,
+               "SPIR-V id %u has already been written by another instruction",
+               value_id);
+
+   val->value_type = value_type;
 
    return &b->values[value_id];
 }
 
 static inline struct vtn_value *
-vtn_push_ssa(struct vtn_builder *b, uint32_t value_id,
-             struct vtn_type *type, struct vtn_ssa_value *ssa)
+vtn_value(struct vtn_builder *b, uint32_t value_id,
+          enum vtn_value_type value_type)
 {
-   struct vtn_value *val;
-   if (type->base_type == vtn_base_type_pointer) {
-      val = vtn_push_value(b, value_id, vtn_value_type_pointer);
-      val->pointer = vtn_pointer_from_ssa(b, ssa->def, type);
-   } else {
-      val = vtn_push_value(b, value_id, vtn_value_type_ssa);
-      val->ssa = ssa;
-   }
+   struct vtn_value *val = vtn_untyped_value(b, value_id);
+   vtn_fail_if(val->value_type != value_type,
+               "SPIR-V id %u is the wrong kind of value", value_id);
    return val;
 }
 
-static inline struct vtn_value *
-vtn_untyped_value(struct vtn_builder *b, uint32_t value_id)
+bool
+vtn_set_instruction_result_type(struct vtn_builder *b, SpvOp opcode,
+                                const uint32_t *w, unsigned count);
+
+static inline uint64_t
+vtn_constant_uint(struct vtn_builder *b, uint32_t value_id)
 {
-   assert(value_id < b->value_id_bound);
-   return &b->values[value_id];
+   struct vtn_value *val = vtn_value(b, value_id, vtn_value_type_constant);
+
+   vtn_fail_if(val->type->base_type != vtn_base_type_scalar ||
+               !glsl_type_is_integer(val->type->type),
+               "Expected id %u to be an integer constant", value_id);
+
+   switch (glsl_get_bit_size(val->type->type)) {
+   case 8:  return val->constant->values[0].u8;
+   case 16: return val->constant->values[0].u16;
+   case 32: return val->constant->values[0].u32;
+   case 64: return val->constant->values[0].u64;
+   default: unreachable("Invalid bit size");
+   }
 }
 
-static inline struct vtn_value *
-vtn_value(struct vtn_builder *b, uint32_t value_id,
-          enum vtn_value_type value_type)
+static inline int64_t
+vtn_constant_int(struct vtn_builder *b, uint32_t value_id)
+{
+   struct vtn_value *val = vtn_value(b, value_id, vtn_value_type_constant);
+
+   vtn_fail_if(val->type->base_type != vtn_base_type_scalar ||
+               !glsl_type_is_integer(val->type->type),
+               "Expected id %u to be an integer constant", value_id);
+
+   switch (glsl_get_bit_size(val->type->type)) {
+   case 8:  return val->constant->values[0].i8;
+   case 16: return val->constant->values[0].i16;
+   case 32: return val->constant->values[0].i32;
+   case 64: return val->constant->values[0].i64;
+   default: unreachable("Invalid bit size");
+   }
+}
+
+static inline struct vtn_type *
+vtn_get_value_type(struct vtn_builder *b, uint32_t value_id)
 {
    struct vtn_value *val = vtn_untyped_value(b, value_id);
-   assert(val->value_type == value_type);
-   return val;
+   vtn_fail_if(val->type == NULL, "Value %u does not have a type", value_id);
+   return val->type;
+}
+
+static inline struct vtn_type *
+vtn_get_type(struct vtn_builder *b, uint32_t value_id)
+{
+   return vtn_value(b, value_id, vtn_value_type_type)->type;
 }
 
 struct vtn_ssa_value *vtn_ssa_value(struct vtn_builder *b, uint32_t value_id);
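As a usage example for the constant accessors above, consider a type handler (hypothetical fragment; b and w are the handler's builder and word stream, and for OpTypeArray w[2]/w[3] are the Element Type and Length ids):

   /* Hypothetical handler fragment: OpTypeArray requires its Length
    * operand to be an integer constant, so vtn_constant_uint() both
    * validates the id and extracts the value at any bit size. */
   struct vtn_type *elem_type = vtn_get_type(b, w[2]);
   unsigned array_len = vtn_constant_uint(b, w[3]);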
+struct vtn_value *vtn_push_ssa_value(struct vtn_builder *b, uint32_t value_id,
+                                     struct vtn_ssa_value *ssa);
+
+nir_ssa_def *vtn_get_nir_ssa(struct vtn_builder *b, uint32_t value_id);
+struct vtn_value *vtn_push_nir_ssa(struct vtn_builder *b, uint32_t value_id,
+                                   nir_ssa_def *def);
+
+struct vtn_value *vtn_push_pointer(struct vtn_builder *b,
+                                   uint32_t value_id,
+                                   struct vtn_pointer *ptr);
+
+struct vtn_sampled_image {
+   nir_deref_instr *image;
+   nir_deref_instr *sampler;
+};
+
+nir_ssa_def *vtn_sampled_image_to_nir_ssa(struct vtn_builder *b,
+                                          struct vtn_sampled_image si);
+
+void
+vtn_copy_value(struct vtn_builder *b, uint32_t src_value_id,
+               uint32_t dst_value_id);
 
 struct vtn_ssa_value *vtn_create_ssa_value(struct vtn_builder *b,
                                            const struct glsl_type *type);
@@ -583,31 +817,21 @@ struct vtn_ssa_value *vtn_create_ssa_value(struct vtn_builder *b,
 struct vtn_ssa_value *vtn_ssa_transpose(struct vtn_builder *b,
                                         struct vtn_ssa_value *src);
 
-nir_ssa_def *vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src,
-                                unsigned index);
-nir_ssa_def *vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
-                                        nir_ssa_def *index);
-nir_ssa_def *vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src,
-                               nir_ssa_def *insert, unsigned index);
-nir_ssa_def *vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
-                                       nir_ssa_def *insert, nir_ssa_def *index);
+nir_deref_instr *vtn_nir_deref(struct vtn_builder *b, uint32_t id);
 
-nir_deref_var *vtn_nir_deref(struct vtn_builder *b, uint32_t id);
-
-struct vtn_pointer *vtn_pointer_for_variable(struct vtn_builder *b,
-                                             struct vtn_variable *var,
-                                             struct vtn_type *ptr_type);
-
-nir_deref_var *vtn_pointer_to_deref(struct vtn_builder *b,
-                                    struct vtn_pointer *ptr);
+nir_deref_instr *vtn_pointer_to_deref(struct vtn_builder *b,
+                                      struct vtn_pointer *ptr);
 nir_ssa_def *
 vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
-                      nir_ssa_def **index_out, unsigned *end_idx_out);
+                      nir_ssa_def **index_out);
 
-struct vtn_ssa_value *vtn_local_load(struct vtn_builder *b, nir_deref_var *src);
+struct vtn_ssa_value *
+vtn_local_load(struct vtn_builder *b, nir_deref_instr *src,
+               enum gl_access_qualifier access);
 
 void vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
-                     nir_deref_var *dest);
+                     nir_deref_instr *dest,
+                     enum gl_access_qualifier access);
 
 struct vtn_ssa_value *
 vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src);
@@ -636,19 +860,73 @@ typedef void (*vtn_execution_mode_foreach_cb)(struct vtn_builder *,
 void vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                                 vtn_execution_mode_foreach_cb cb, void *data);
 
-nir_op vtn_nir_alu_op_for_spirv_opcode(SpvOp opcode, bool *swap,
-                                       nir_alu_type src, nir_alu_type dst);
+nir_op vtn_nir_alu_op_for_spirv_opcode(struct vtn_builder *b,
+                                       SpvOp opcode, bool *swap,
+                                       unsigned src_bit_size, unsigned dst_bit_size);
 
 void vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count);
 
+void vtn_handle_bitcast(struct vtn_builder *b, const uint32_t *w,
+                        unsigned count);
+
+void vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
+                         const uint32_t *w, unsigned count);
+
-bool vtn_handle_glsl450_instruction(struct vtn_builder *b, uint32_t ext_opcode,
+bool vtn_handle_glsl450_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                     const uint32_t *words, unsigned count);
 
+bool vtn_handle_opencl_instruction(struct vtn_builder *b, SpvOp ext_opcode,
+                                   const uint32_t *words, unsigned count);
+
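Handlers for extended instruction sets, like the GLSL.std.450 and OpenCL entry points above, all share one shape. A hypothetical handler, just to show the convention (the opcode and its behavior are invented; for OpExtInst, w[2] is the result id and w[5] the first operand):

   /* Hypothetical extended-instruction handler; returning true keeps
    * parsing going, while vtn_fail() aborts the compile. */
   static bool
   vtn_handle_my_ext_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                 const uint32_t *w, unsigned count)
   {
      switch ((uint32_t)ext_opcode) {
      case 1: /* invented opcode: pass the first operand through */
         vtn_push_nir_ssa(b, w[2], vtn_get_nir_ssa(b, w[5]));
         return true;
      default:
         vtn_fail("Unhandled extended instruction %u", (uint32_t)ext_opcode);
      }
   }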
+struct vtn_builder*
+vtn_create_builder(const uint32_t *words, size_t word_count,
+                   gl_shader_stage stage, const char *entry_point_name,
+                   const struct spirv_to_nir_options *options);
+
+void vtn_handle_entry_point(struct vtn_builder *b, const uint32_t *w,
+                            unsigned count);
+
+void vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
+                           const uint32_t *w, unsigned count);
+
+enum vtn_variable_mode vtn_storage_class_to_mode(struct vtn_builder *b,
+                                                 SpvStorageClass class,
+                                                 struct vtn_type *interface_type,
+                                                 nir_variable_mode *nir_mode_out);
+
+nir_address_format vtn_mode_to_address_format(struct vtn_builder *b,
+                                              enum vtn_variable_mode);
+
+static inline uint32_t
+vtn_align_u32(uint32_t v, uint32_t a)
+{
+   assert(a != 0 && a == (a & -((int32_t) a)));
+   return (v + a - 1) & ~(a - 1);
+}
+
 static inline uint64_t
 vtn_u64_literal(const uint32_t *w)
 {
    return (uint64_t)w[1] << 32 | w[0];
 }
 
+bool vtn_handle_amd_gcn_shader_instruction(struct vtn_builder *b, SpvOp ext_opcode,
+                                           const uint32_t *words, unsigned count);
+
+bool vtn_handle_amd_shader_ballot_instruction(struct vtn_builder *b, SpvOp ext_opcode,
+                                              const uint32_t *w, unsigned count);
+
+bool vtn_handle_amd_shader_trinary_minmax_instruction(struct vtn_builder *b, SpvOp ext_opcode,
+                                                      const uint32_t *words, unsigned count);
+
+bool vtn_handle_amd_shader_explicit_vertex_parameter_instruction(struct vtn_builder *b,
+                                                                 SpvOp ext_opcode,
+                                                                 const uint32_t *words,
+                                                                 unsigned count);
+
+SpvMemorySemanticsMask vtn_storage_class_to_memory_semantics(SpvStorageClass sc);
+
+void vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
+                             SpvMemorySemanticsMask semantics);
+
 #endif /* _VTN_PRIVATE_H_ */
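Two of the small helpers above are easy to sanity-check with a worked example (illustrative only, not part of the patch):

   /* vtn_align_u32 rounds v up to a power-of-two alignment a; the assert
    * in it enforces that a is a non-zero power of two. */
   assert(vtn_align_u32(13, 8) == 16);   /* 13 rounded up to a multiple of 8 */
   assert(vtn_align_u32(16, 8) == 16);   /* already aligned: unchanged */

   /* vtn_u64_literal assembles a 64-bit SPIR-V literal from two 32-bit
    * words, low word first, as the SPIR-V encoding specifies. */
   const uint32_t w[2] = { 0x89abcdefu, 0x01234567u };
   assert(vtn_u64_literal(w) == 0x0123456789abcdefull);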