diff --git a/src/compiler/spirv/vtn_private.h b/src/compiler/spirv/vtn_private.h
index 07e3311db2e..787dfdb244e 100644
--- a/src/compiler/spirv/vtn_private.h
+++ b/src/compiler/spirv/vtn_private.h
@@ -88,6 +88,12 @@ _vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
       vtn_fail(__VA_ARGS__); \
    } while (0)
 
+#define _vtn_fail_with(t, msg, v) \
+   vtn_fail("%s: %s (%u)\n", msg, spirv_ ## t ## _to_string(v), v)
+
+#define vtn_fail_with_decoration(msg, v) _vtn_fail_with(decoration, msg, v)
+#define vtn_fail_with_opcode(msg, v) _vtn_fail_with(op, msg, v)
+
 /** Assert that a condition is true and, if it isn't, vtn_fail
  *
  * This macro is transitional only and should not be used in new code.  Use
@@ -117,10 +123,12 @@ enum vtn_value_type {
 
 enum vtn_branch_type {
    vtn_branch_type_none,
+   vtn_branch_type_if_merge,
    vtn_branch_type_switch_break,
    vtn_branch_type_switch_fallthrough,
    vtn_branch_type_loop_break,
    vtn_branch_type_loop_continue,
+   vtn_branch_type_loop_back_edge,
    vtn_branch_type_discard,
    vtn_branch_type_return,
 };
@@ -129,11 +137,14 @@
 enum vtn_cf_node_type {
    vtn_cf_node_type_block,
    vtn_cf_node_type_if,
    vtn_cf_node_type_loop,
+   vtn_cf_node_type_case,
    vtn_cf_node_type_switch,
+   vtn_cf_node_type_function,
 };
 
 struct vtn_cf_node {
    struct list_head link;
+   struct vtn_cf_node *parent;
    enum vtn_cf_node_type type;
 };
@@ -148,6 +159,10 @@ struct vtn_loop {
     */
    struct list_head cont_body;
 
+   struct vtn_block *header_block;
+   struct vtn_block *cont_block;
+   struct vtn_block *break_block;
+
    SpvLoopControlMask control;
 };
 
@@ -162,17 +177,17 @@ struct vtn_if {
    enum vtn_branch_type else_type;
    struct list_head else_body;
 
+   struct vtn_block *merge_block;
+
    SpvSelectionControlMask control;
 };
 
 struct vtn_case {
-   struct list_head link;
+   struct vtn_cf_node node;
+
    enum vtn_branch_type type;
    struct list_head body;
 
-   /* The block that starts this case */
-   struct vtn_block *start_block;
-
    /* The fallthrough case, if any */
    struct vtn_case *fallthrough;
 
@@ -192,6 +207,8 @@ struct vtn_switch {
    uint32_t selector;
 
    struct list_head cases;
+
+   struct vtn_block *break_block;
 };
 
 struct vtn_block {
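The _vtn_fail_with helper above pastes its first argument into one of the generated spirv_*_to_string lookup functions, so vtn_fail_with_opcode and vtn_fail_with_decoration report both the symbolic name and the raw value of the offending operand. A minimal sketch of the intended call sites, assuming a hypothetical handler function (only the two macros and the spirv_*_to_string helpers come from the tree):

    /* Hypothetical handler: shows how the new failure helpers are meant to
     * be called.  vtn_fail_with_opcode("Unhandled opcode", opcode) expands to
     * vtn_fail("%s: %s (%u)\n", "Unhandled opcode",
     *          spirv_op_to_string(opcode), opcode). */
    static void
    check_example(struct vtn_builder *b, SpvOp opcode, SpvDecoration dec)
    {
       if (opcode != SpvOpNop)
          vtn_fail_with_opcode("Unhandled opcode", opcode);
       if (dec != SpvDecorationBuiltIn)
          vtn_fail_with_decoration("Unsupported decoration", dec);
    }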
@@ -208,6 +225,14 @@
 
    enum vtn_branch_type branch_type;
 
+   /* The CF node for which this is a merge target
+    *
+    * The SPIR-V spec requires that any given block can be the merge target
+    * for at most one merge instruction.  If this block is a merge target,
+    * this points back to the CF node containing that merge instruction.
+    */
+   struct vtn_cf_node *merge_cf_node;
+
    /** Points to the loop that this block starts (if it starts a loop) */
    struct vtn_loop *loop;
 
@@ -219,7 +244,7 @@
 };
 
 struct vtn_function {
-   struct exec_node node;
+   struct vtn_cf_node node;
 
    struct vtn_type *type;
 
@@ -236,6 +261,24 @@ struct vtn_function {
    SpvFunctionControlMask control;
 };
 
+#define VTN_DECL_CF_NODE_CAST(_type) \
+static inline struct vtn_##_type * \
+vtn_cf_node_as_##_type(struct vtn_cf_node *node) \
+{ \
+   assert(node->type == vtn_cf_node_type_##_type); \
+   return (struct vtn_##_type *)node; \
+}
+
+VTN_DECL_CF_NODE_CAST(block)
+VTN_DECL_CF_NODE_CAST(loop)
+VTN_DECL_CF_NODE_CAST(if)
+VTN_DECL_CF_NODE_CAST(case)
+VTN_DECL_CF_NODE_CAST(switch)
+VTN_DECL_CF_NODE_CAST(function)
+
+#define vtn_foreach_cf_node(node, cf_list) \
+   list_for_each_entry(struct vtn_cf_node, node, cf_list, link)
+
 typedef bool (*vtn_instruction_handler)(struct vtn_builder *, SpvOp,
                                         const uint32_t *, unsigned);
 
@@ -263,6 +306,9 @@ struct vtn_ssa_value {
    struct vtn_ssa_value *transposed;
 
    const struct glsl_type *type;
+
+   /* Access qualifiers */
+   enum gl_access_qualifier access;
 };
 
 enum vtn_base_type {
@@ -335,6 +381,13 @@ struct vtn_type {
     * (i.e. a block that contains only builtins).
     */
    bool builtin_block:1;
+
+   /* True if the members of structs and unions are tightly packed, i.e.
+    * have no implicit padding.
+    *
+    * Set by the CPacked and Alignment decorations in kernels.
+    */
+   bool packed:1;
 };
 
 /* Members for pointer types */
@@ -383,6 +436,8 @@ bool vtn_type_contains_block(struct vtn_builder *b, struct vtn_type *type);
 bool vtn_types_compatible(struct vtn_builder *b,
                           struct vtn_type *t1, struct vtn_type *t2);
 
+struct vtn_type *vtn_type_without_array(struct vtn_type *type);
+
 struct vtn_variable;
 
 enum vtn_access_mode {
@@ -403,6 +458,9 @@ struct vtn_access_chain {
     */
    bool ptr_as_array;
 
+   /* Access qualifiers */
+   enum gl_access_qualifier access;
+
    /** Struct elements and array offsets.
     *
     * This is an array of 1 so that it can conveniently be created on the
@@ -423,6 +481,7 @@ enum vtn_variable_mode {
    vtn_variable_mode_cross_workgroup,
    vtn_variable_mode_input,
    vtn_variable_mode_output,
+   vtn_variable_mode_image,
 };
 
 struct vtn_pointer {
@@ -458,8 +517,15 @@ struct vtn_pointer {
    enum gl_access_qualifier access;
 };
 
-bool vtn_pointer_uses_ssa_offset(struct vtn_builder *b,
-                                 struct vtn_pointer *ptr);
+bool vtn_mode_uses_ssa_offset(struct vtn_builder *b,
+                              enum vtn_variable_mode mode);
+
+static inline bool vtn_pointer_uses_ssa_offset(struct vtn_builder *b,
+                                               struct vtn_pointer *ptr)
+{
+   return vtn_mode_uses_ssa_offset(b, ptr->mode);
+}
+
 struct vtn_variable {
    enum vtn_variable_mode mode;
 
@@ -505,10 +571,10 @@ struct vtn_image_pointer {
    struct vtn_pointer *image;
    nir_ssa_def *coord;
    nir_ssa_def *sample;
+   nir_ssa_def *lod;
 };
 
 struct vtn_sampled_image {
-   struct vtn_type *type;
    struct vtn_pointer *image; /* Image or array of images */
    struct vtn_pointer *sampler; /* Sampler */
 };
@@ -545,7 +611,7 @@ struct vtn_decoration {
    */
    int scope;
 
-   const uint32_t *literals;
+   const uint32_t *operands;
    struct vtn_value *group;
 
    union {
@@ -564,7 +630,7 @@ struct vtn_builder {
    size_t spirv_word_count;
 
    nir_shader *shader;
-   const struct spirv_to_nir_options *options;
+   struct spirv_to_nir_options *options;
    struct vtn_block *block;
 
    /* Current offset, file, line, and column.  Useful for debugging.  Set
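With vtn_case and vtn_function now embedding a vtn_cf_node, VTN_DECL_CF_NODE_CAST stamps out one type-checked downcast per CF node kind, and vtn_foreach_cf_node iterates a body list by its link member. A hedged sketch of a recursive walk (the walk function is illustrative; loop->body and if_stmt->then_body are assumed by symmetry with the cont_body and else_body fields shown above):

    /* Illustrative: traverse a CF list with the checked casts.  Blocks,
     * cases, and switches are elided for brevity. */
    static void
    walk_cf_list(struct list_head *cf_list)
    {
       vtn_foreach_cf_node(node, cf_list) {
          switch (node->type) {
          case vtn_cf_node_type_if: {
             struct vtn_if *if_stmt = vtn_cf_node_as_if(node);
             walk_cf_list(&if_stmt->then_body);   /* assumed field */
             walk_cf_list(&if_stmt->else_body);
             break;
          }
          case vtn_cf_node_type_loop: {
             struct vtn_loop *loop = vtn_cf_node_as_loop(node);
             walk_cf_list(&loop->body);           /* assumed field */
             walk_cf_list(&loop->cont_body);
             break;
          }
          default:
             break;
          }
       }
    }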
@@ -597,15 +663,17 @@
    /* True if we should watch out for GLSLang issue #179 */
    bool wa_glslang_179;
 
+   /* True if we need to fix up CS OpControlBarrier */
+   bool wa_glslang_cs_barrier;
+
    gl_shader_stage entry_point_stage;
    const char *entry_point_name;
    struct vtn_value *entry_point;
-   bool origin_upper_left;
-   bool pixel_center_integer;
+   struct vtn_value *workgroup_size_builtin;
    bool variable_pointers;
 
    struct vtn_function *func;
-   struct exec_list functions;
+   struct list_head functions;
 
    /* Current function parameter index */
    unsigned func_param_idx;
@@ -614,6 +682,9 @@
 
    /* false by default, set to true by the ContractionOff execution mode */
    bool exact;
+
+   /* True when a physical memory model is chosen */
+   bool physical_ptrs;
 };
 
 nir_ssa_def *
@@ -630,6 +701,10 @@ vtn_untyped_value(struct vtn_builder *b, uint32_t value_id)
    return &b->values[value_id];
 }
 
+/* Consider not using this function directly; use vtn_push_ssa or
+ * vtn_push_value_pointer instead so that decorations are applied
+ * appropriately by common code.
+ */
 static inline struct vtn_value *
 vtn_push_value(struct vtn_builder *b, uint32_t value_id,
                enum vtn_value_type value_type)
@@ -641,22 +716,8 @@ vtn_push_value(struct vtn_builder *b, uint32_t value_id,
                value_id);
 
    val->value_type = value_type;
-   return &b->values[value_id];
-}
 
-static inline struct vtn_value *
-vtn_push_ssa(struct vtn_builder *b, uint32_t value_id,
-             struct vtn_type *type, struct vtn_ssa_value *ssa)
-{
-   struct vtn_value *val;
-   if (type->base_type == vtn_base_type_pointer) {
-      val = vtn_push_value(b, value_id, vtn_value_type_pointer);
-      val->pointer = vtn_pointer_from_ssa(b, ssa->def, type);
-   } else {
-      val = vtn_push_value(b, value_id, vtn_value_type_ssa);
-      val->ssa = ssa;
-   }
-   return val;
+   return &b->values[value_id];
 }
 
 static inline struct vtn_value *
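The inline vtn_push_ssa above is removed in favour of out-of-line helpers (vtn_push_ssa and vtn_push_value_pointer, declared later in this header) so that access decorations are applied in one place. An illustrative calling pattern that mirrors the removed inline logic (the wrapper itself is hypothetical):

    /* Hypothetical: push a handler result through the common helpers so the
     * appropriate access decorations get applied. */
    static void
    push_result(struct vtn_builder *b, uint32_t value_id,
                struct vtn_type *type, struct vtn_ssa_value *ssa)
    {
       if (type->base_type == vtn_base_type_pointer) {
          struct vtn_pointer *ptr = vtn_pointer_from_ssa(b, ssa->def, type);
          vtn_push_value_pointer(b, value_id, ptr);
       } else {
          vtn_push_ssa(b, value_id, type, ssa);
       }
    }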
@@ -683,37 +744,77 @@ vtn_constant_uint(struct vtn_builder *b, uint32_t value_id)
                "Expected id %u to be an integer constant", value_id);
 
    switch (glsl_get_bit_size(val->type->type)) {
-   case 8: return val->constant->values[0].u8[0];
-   case 16: return val->constant->values[0].u16[0];
-   case 32: return val->constant->values[0].u32[0];
-   case 64: return val->constant->values[0].u64[0];
+   case 8: return val->constant->values[0].u8;
+   case 16: return val->constant->values[0].u16;
+   case 32: return val->constant->values[0].u32;
+   case 64: return val->constant->values[0].u64;
+   default: unreachable("Invalid bit size");
+   }
+}
+
+static inline int64_t
+vtn_constant_int(struct vtn_builder *b, uint32_t value_id)
+{
+   struct vtn_value *val = vtn_value(b, value_id, vtn_value_type_constant);
+
+   vtn_fail_if(val->type->base_type != vtn_base_type_scalar ||
+               !glsl_type_is_integer(val->type->type),
+               "Expected id %u to be an integer constant", value_id);
+
+   switch (glsl_get_bit_size(val->type->type)) {
+   case 8: return val->constant->values[0].i8;
+   case 16: return val->constant->values[0].i16;
+   case 32: return val->constant->values[0].i32;
+   case 64: return val->constant->values[0].i64;
    default: unreachable("Invalid bit size");
    }
 }
 
+static inline enum gl_access_qualifier vtn_value_access(struct vtn_value *value)
+{
+   switch (value->value_type) {
+   case vtn_value_type_invalid:
+   case vtn_value_type_undef:
+   case vtn_value_type_string:
+   case vtn_value_type_decoration_group:
+   case vtn_value_type_constant:
+   case vtn_value_type_function:
+   case vtn_value_type_block:
+   case vtn_value_type_extension:
+      return 0;
+   case vtn_value_type_type:
+      return value->type->access;
+   case vtn_value_type_pointer:
+      return value->pointer->access;
+   case vtn_value_type_ssa:
+      return value->ssa->access;
+   case vtn_value_type_image_pointer:
+      return value->image->image->access;
+   case vtn_value_type_sampled_image:
+      return value->sampled_image->image->access |
+             value->sampled_image->sampler->access;
+   }
+
+   unreachable("invalid type");
+}
+
 struct vtn_ssa_value *vtn_ssa_value(struct vtn_builder *b, uint32_t value_id);
 
+struct vtn_value *vtn_push_value_pointer(struct vtn_builder *b,
+                                         uint32_t value_id,
+                                         struct vtn_pointer *ptr);
+
+struct vtn_value *vtn_push_ssa(struct vtn_builder *b, uint32_t value_id,
+                               struct vtn_type *type, struct vtn_ssa_value *ssa);
+
 struct vtn_ssa_value *vtn_create_ssa_value(struct vtn_builder *b,
                                            const struct glsl_type *type);
 
 struct vtn_ssa_value *vtn_ssa_transpose(struct vtn_builder *b,
                                         struct vtn_ssa_value *src);
 
-nir_ssa_def *vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src,
                                unsigned index);
-nir_ssa_def *vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
-                                        nir_ssa_def *index);
-nir_ssa_def *vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src,
-                               nir_ssa_def *insert, unsigned index);
-nir_ssa_def *vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
-                                       nir_ssa_def *insert, nir_ssa_def *index);
-
 nir_deref_instr *vtn_nir_deref(struct vtn_builder *b, uint32_t id);
 
-struct vtn_pointer *vtn_pointer_for_variable(struct vtn_builder *b,
-                                             struct vtn_variable *var,
-                                             struct vtn_type *ptr_type);
-
 nir_deref_instr *vtn_pointer_to_deref(struct vtn_builder *b,
                                       struct vtn_pointer *ptr);
 nir_ssa_def *
@@ -721,10 +822,12 @@ vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
                       nir_ssa_def **index_out);
 
 struct vtn_ssa_value *
-vtn_local_load(struct vtn_builder *b, nir_deref_instr *src);
+vtn_local_load(struct vtn_builder *b, nir_deref_instr *src,
+               enum gl_access_qualifier access);
 
 void vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
-                     nir_deref_instr *dest);
+                     nir_deref_instr *dest,
+                     enum gl_access_qualifier access);
 
 struct vtn_ssa_value *
 vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src);
@@ -760,12 +863,18 @@ nir_op vtn_nir_alu_op_for_spirv_opcode(struct vtn_builder *b,
 void vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count);
 
+void vtn_handle_bitcast(struct vtn_builder *b, const uint32_t *w,
+                        unsigned count);
+
 void vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
                          const uint32_t *w, unsigned count);
 
 bool vtn_handle_glsl450_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                     const uint32_t *words, unsigned count);
 
+bool vtn_handle_opencl_instruction(struct vtn_builder *b, SpvOp ext_opcode,
+                                   const uint32_t *words, unsigned count);
+
 struct vtn_builder* vtn_create_builder(const uint32_t *words, size_t word_count,
                                        gl_shader_stage stage, const char *entry_point_name,
                                        const struct spirv_to_nir_options *options);
@@ -776,6 +885,14 @@ void vtn_handle_entry_point(struct vtn_builder *b, const uint32_t *w,
 void vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count);
 
+enum vtn_variable_mode vtn_storage_class_to_mode(struct vtn_builder *b,
+                                                 SpvStorageClass class,
+                                                 struct vtn_type *interface_type,
+                                                 nir_variable_mode *nir_mode_out);
+
+nir_address_format vtn_mode_to_address_format(struct vtn_builder *b,
+                                              enum vtn_variable_mode);
+
 static inline uint32_t
 vtn_align_u32(uint32_t v, uint32_t a)
 {
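vtn_constant_uint above now reads the scalar nir_const_value members directly (values[0].u32 instead of the older values[0].u32[0] array form), and vtn_constant_int is its new signed counterpart. An illustrative decode of literal operands, assuming a hypothetical word layout and a uint64_t return type for vtn_constant_uint:

    /* Hypothetical: the w[] indices here do not correspond to any real
     * SPIR-V opcode layout. */
    static void
    read_example_operands(struct vtn_builder *b, const uint32_t *w)
    {
       uint64_t count  = vtn_constant_uint(b, w[1]); /* unsigned literal */
       int64_t  offset = vtn_constant_int(b, w[2]);  /* signed literal */
       (void)count;
       (void)offset;
    }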
@@ -792,6 +909,20 @@ vtn_u64_literal(const uint32_t *w)
 bool vtn_handle_amd_gcn_shader_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                            const uint32_t *words, unsigned count);
 
+bool vtn_handle_amd_shader_ballot_instruction(struct vtn_builder *b, SpvOp ext_opcode,
+                                              const uint32_t *w, unsigned count);
+
 bool vtn_handle_amd_shader_trinary_minmax_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                                       const uint32_t *words, unsigned count);
+
+bool vtn_handle_amd_shader_explicit_vertex_parameter_instruction(struct vtn_builder *b,
+                                                                 SpvOp ext_opcode,
+                                                                 const uint32_t *words,
+                                                                 unsigned count);
+
+SpvMemorySemanticsMask vtn_storage_class_to_memory_semantics(SpvStorageClass sc);
+
+void vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
+                             SpvMemorySemanticsMask semantics);
+
 #endif /* _VTN_PRIVATE_H_ */
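The two barrier helpers at the end compose naturally: vtn_storage_class_to_memory_semantics derives the semantics mask a storage class implies, and vtn_emit_memory_barrier emits the barrier for a scope/semantics pair. A hedged sketch (the wrapper function is illustrative):

    /* Illustrative: emit a barrier whose semantics include those implied by
     * the storage class being synchronized. */
    static void
    emit_barrier_for_storage_class(struct vtn_builder *b, SpvScope scope,
                                   SpvMemorySemanticsMask semantics,
                                   SpvStorageClass sc)
    {
       semantics |= vtn_storage_class_to_memory_semantics(sc);
       vtn_emit_memory_barrier(b, scope, semantics);
    }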