vtn_fail(__VA_ARGS__); \
} while (0)
+#define _vtn_fail_with(t, msg, v) \
+ vtn_fail("%s: %s (%u)\n", msg, spirv_ ## t ## _to_string(v), v)
+
+#define vtn_fail_with_decoration(msg, v) _vtn_fail_with(decoration, msg, v)
+#define vtn_fail_with_opcode(msg, v) _vtn_fail_with(op, msg, v)
+
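+/* Example (illustrative): fail with a readable message for an unhandled
+ * opcode:
+ *
+ *    default:
+ *       vtn_fail_with_opcode("Unhandled opcode", opcode);
+ */
+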
/** Assert that a condition is true and, if it isn't, vtn_fail
*
* This macro is transitional only and should not be used in new code. Use
const uint32_t *end);
void vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
vtn_instruction_handler instruction_handler);
+void vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count);
const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
struct vtn_ssa_value *transposed;
const struct glsl_type *type;
+
+ /* Access qualifiers */
+ enum gl_access_qualifier access;
};
enum vtn_base_type {
/* for arrays, matrices and pointers, the array stride */
unsigned stride;
+ /* Access qualifiers */
+ enum gl_access_qualifier access;
+
union {
/* Members for scalar, vector, and array-like types */
struct {
* (i.e. a block that contains only builtins).
*/
bool builtin_block:1;
+
+         /* Whether the members of this struct or union are laid out with
+          * no implicit padding between them (i.e. packed).
+          *
+          * Set by the CPacked decoration in kernels.
+          */
+         bool packed:1;
};
/* Members for pointer types */
};
};
+bool vtn_type_contains_block(struct vtn_builder *b, struct vtn_type *type);
+
bool vtn_types_compatible(struct vtn_builder *b,
struct vtn_type *t1, struct vtn_type *t2);
+struct vtn_type *vtn_type_without_array(struct vtn_type *type);
+
struct vtn_variable;
enum vtn_access_mode {
struct vtn_access_link {
enum vtn_access_mode mode;
- uint32_t id;
+ int64_t id;
};
struct vtn_access_chain {
*/
bool ptr_as_array;
+ /* Access qualifiers */
+ enum gl_access_qualifier access;
+
/** Struct elements and array offsets.
*
* This is an array of 1 so that it can conveniently be created on the
};
enum vtn_variable_mode {
- vtn_variable_mode_local,
- vtn_variable_mode_global,
- vtn_variable_mode_param,
+ vtn_variable_mode_function,
+ vtn_variable_mode_private,
vtn_variable_mode_uniform,
vtn_variable_mode_ubo,
vtn_variable_mode_ssbo,
+ vtn_variable_mode_phys_ssbo,
vtn_variable_mode_push_constant,
vtn_variable_mode_workgroup,
+ vtn_variable_mode_cross_workgroup,
vtn_variable_mode_input,
vtn_variable_mode_output,
+ vtn_variable_mode_image,
};
struct vtn_pointer {
*/
struct vtn_variable *var;
- /** The deref at the base of the chain
- *
- * This field may be NULL if the pointer uses a (block_index, offset) pair
- * instead of an access chain or if the access chain starts at a variable.
- */
+ /** The NIR deref corresponding to this pointer */
nir_deref_instr *deref;
- /** An access chain describing how to get from var to the referenced data
- *
- * This field may be NULL if the pointer references the entire variable or
- * if a (block_index, offset) pair is used instead of an access chain.
- */
- struct vtn_access_chain *chain;
-
/** A (block_index, offset) pair representing a UBO or SSBO position. */
struct nir_ssa_def *block_index;
struct nir_ssa_def *offset;
+
+ /* Access qualifiers */
+ enum gl_access_qualifier access;
};
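+
+/* Returns true if pointers of the given mode are lowered to an SSA
+ * (block_index, offset) pair instead of a NIR deref chain; this is
+ * typically the case for UBO and SSBO pointers.
+ */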
+bool vtn_mode_uses_ssa_offset(struct vtn_builder *b,
+ enum vtn_variable_mode mode);
+
+static inline bool vtn_pointer_uses_ssa_offset(struct vtn_builder *b,
+ struct vtn_pointer *ptr)
+{
+ return vtn_mode_uses_ssa_offset(b, ptr->mode);
+}
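+
+/* A sketch of how callers typically branch on this (illustrative, not the
+ * exact implementation):
+ *
+ *    if (vtn_pointer_uses_ssa_offset(b, ptr)) {
+ *       // load/store using ptr->block_index and ptr->offset
+ *    } else {
+ *       // load/store through vtn_pointer_to_deref(b, ptr)
+ *    }
+ */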
+
struct vtn_variable {
enum vtn_variable_mode mode;
unsigned descriptor_set;
unsigned binding;
bool explicit_binding;
+ unsigned offset;
unsigned input_attachment_index;
bool patch;
nir_variable *var;
+   /* If the variable is a struct with a location set on it, the location is
+    * stored here. It is used to calculate locations for members that don't
+    * have their own explicit location.
+    */
+ int base_location;
+
int shared_location;
/**
* hack at some point in the future.
*/
struct vtn_pointer *copy_prop_sampler;
+
+ /* Access qualifiers. */
+ enum gl_access_qualifier access;
};
struct vtn_image_pointer {
*/
int scope;
- const uint32_t *literals;
+ const uint32_t *operands;
struct vtn_value *group;
union {
size_t spirv_word_count;
nir_shader *shader;
- const struct spirv_to_nir_options *options;
+ struct spirv_to_nir_options *options;
struct vtn_block *block;
/* Current offset, file, line, and column. Useful for debugging. Set
unsigned value_id_bound;
struct vtn_value *values;
+ /* True if we should watch out for GLSLang issue #179 */
+ bool wa_glslang_179;
+
gl_shader_stage entry_point_stage;
const char *entry_point_name;
struct vtn_value *entry_point;
- bool origin_upper_left;
- bool pixel_center_integer;
+ struct vtn_value *workgroup_size_builtin;
+ bool variable_pointers;
struct vtn_function *func;
struct exec_list functions;
unsigned func_param_idx;
bool has_loop_continue;
+
+ /* false by default, set to true by the ContractionOff execution mode */
+ bool exact;
+
+   /* True when a physical memory model is chosen. */
+ bool physical_ptrs;
};
nir_ssa_def *
return &b->values[value_id];
}
+/* Consider not using this function directly and instead use
+ * vtn_push_ssa/vtn_push_value_pointer so that decorations are applied by
+ * the common code.
+ */
static inline struct vtn_value *
vtn_push_value(struct vtn_builder *b, uint32_t value_id,
enum vtn_value_type value_type)
value_id);
val->value_type = value_type;
- return &b->values[value_id];
-}
-static inline struct vtn_value *
-vtn_push_ssa(struct vtn_builder *b, uint32_t value_id,
- struct vtn_type *type, struct vtn_ssa_value *ssa)
-{
- struct vtn_value *val;
- if (type->base_type == vtn_base_type_pointer) {
- val = vtn_push_value(b, value_id, vtn_value_type_pointer);
- val->pointer = vtn_pointer_from_ssa(b, ssa->def, type);
- } else {
- val = vtn_push_value(b, value_id, vtn_value_type_ssa);
- val->ssa = ssa;
- }
- return val;
+ return &b->values[value_id];
}
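+
+/* For example (illustrative), a handler producing an SSA result would
+ * usually write:
+ *
+ *    vtn_push_ssa(b, w[2], type, ssa);
+ *
+ * rather than calling vtn_push_value() and filling in val->ssa by hand.
+ */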
static inline struct vtn_value *
vtn_set_instruction_result_type(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count);
-static inline nir_constant *
-vtn_constant_value(struct vtn_builder *b, uint32_t value_id)
+static inline uint64_t
+vtn_constant_uint(struct vtn_builder *b, uint32_t value_id)
{
- return vtn_value(b, value_id, vtn_value_type_constant)->constant;
+ struct vtn_value *val = vtn_value(b, value_id, vtn_value_type_constant);
+
+ vtn_fail_if(val->type->base_type != vtn_base_type_scalar ||
+ !glsl_type_is_integer(val->type->type),
+ "Expected id %u to be an integer constant", value_id);
+
+ switch (glsl_get_bit_size(val->type->type)) {
+ case 8: return val->constant->values[0].u8;
+ case 16: return val->constant->values[0].u16;
+ case 32: return val->constant->values[0].u32;
+ case 64: return val->constant->values[0].u64;
+ default: unreachable("Invalid bit size");
+ }
+}
+
+static inline int64_t
+vtn_constant_int(struct vtn_builder *b, uint32_t value_id)
+{
+ struct vtn_value *val = vtn_value(b, value_id, vtn_value_type_constant);
+
+ vtn_fail_if(val->type->base_type != vtn_base_type_scalar ||
+ !glsl_type_is_integer(val->type->type),
+ "Expected id %u to be an integer constant", value_id);
+
+ switch (glsl_get_bit_size(val->type->type)) {
+ case 8: return val->constant->values[0].i8;
+ case 16: return val->constant->values[0].i16;
+ case 32: return val->constant->values[0].i32;
+ case 64: return val->constant->values[0].i64;
+ default: unreachable("Invalid bit size");
+ }
+}
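+
+/* Example (illustrative): OpTypeArray requires its length operand to be an
+ * integer constant, so a type handler can read it as:
+ *
+ *    unsigned len = vtn_constant_uint(b, w[3]);
+ */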
+
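+/* Returns the access qualifiers associated with a value, if any.  For a
+ * sampled image this is the union of the image and sampler qualifiers.
+ */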
+static inline enum gl_access_qualifier vtn_value_access(struct vtn_value *value)
+{
+ switch (value->value_type) {
+ case vtn_value_type_invalid:
+ case vtn_value_type_undef:
+ case vtn_value_type_string:
+ case vtn_value_type_decoration_group:
+ case vtn_value_type_constant:
+ case vtn_value_type_function:
+ case vtn_value_type_block:
+ case vtn_value_type_extension:
+ return 0;
+ case vtn_value_type_type:
+ return value->type->access;
+ case vtn_value_type_pointer:
+ return value->pointer->access;
+ case vtn_value_type_ssa:
+ return value->ssa->access;
+ case vtn_value_type_image_pointer:
+ return value->image->image->access;
+ case vtn_value_type_sampled_image:
+ return value->sampled_image->image->access |
+ value->sampled_image->sampler->access;
+ }
+
+ unreachable("invalid type");
}
struct vtn_ssa_value *vtn_ssa_value(struct vtn_builder *b, uint32_t value_id);
+struct vtn_value *vtn_push_value_pointer(struct vtn_builder *b,
+ uint32_t value_id,
+ struct vtn_pointer *ptr);
+
+struct vtn_value *vtn_push_ssa(struct vtn_builder *b, uint32_t value_id,
+ struct vtn_type *type, struct vtn_ssa_value *ssa);
+
struct vtn_ssa_value *vtn_create_ssa_value(struct vtn_builder *b,
const struct glsl_type *type);
struct vtn_variable *var,
struct vtn_type *ptr_type);
-nir_deref_var *vtn_pointer_to_deref_var(struct vtn_builder *b,
- struct vtn_pointer *ptr);
nir_deref_instr *vtn_pointer_to_deref(struct vtn_builder *b,
struct vtn_pointer *ptr);
nir_ssa_def *
nir_ssa_def **index_out);
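+
+/* The access qualifiers passed to vtn_local_load/vtn_local_store are
+ * forwarded to the NIR load/store_deref intrinsics they emit.
+ */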
struct vtn_ssa_value *
-vtn_local_load(struct vtn_builder *b, nir_deref_instr *src);
+vtn_local_load(struct vtn_builder *b, nir_deref_instr *src,
+ enum gl_access_qualifier access);
void vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
- nir_deref_instr *dest);
+ nir_deref_instr *dest,
+ enum gl_access_qualifier access);
struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src);
void vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count);
+void vtn_handle_bitcast(struct vtn_builder *b, const uint32_t *w,
+ unsigned count);
+
void vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count);
bool vtn_handle_glsl450_instruction(struct vtn_builder *b, SpvOp ext_opcode,
const uint32_t *words, unsigned count);
+bool vtn_handle_opencl_instruction(struct vtn_builder *b, uint32_t ext_opcode,
+ const uint32_t *words, unsigned count);
+
struct vtn_builder* vtn_create_builder(const uint32_t *words, size_t word_count,
gl_shader_stage stage, const char *entry_point_name,
const struct spirv_to_nir_options *options);
void vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count);
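+
+/* Maps a SPIR-V storage class to the corresponding vtn_variable_mode and
+ * reports the matching NIR variable mode through nir_mode_out.
+ * interface_type is needed for storage classes whose mode depends on the
+ * pointee type, e.g. Uniform, which can map to either ubo or ssbo.
+ */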
+enum vtn_variable_mode vtn_storage_class_to_mode(struct vtn_builder *b,
+ SpvStorageClass class,
+ struct vtn_type *interface_type,
+ nir_variable_mode *nir_mode_out);
+
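+/* Returns the nir_address_format used to represent pointers of the given
+ * variable mode as SSA values.
+ */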
+nir_address_format vtn_mode_to_address_format(struct vtn_builder *b,
+ enum vtn_variable_mode);
+
static inline uint32_t
vtn_align_u32(uint32_t v, uint32_t a)
{
bool vtn_handle_amd_gcn_shader_instruction(struct vtn_builder *b, SpvOp ext_opcode,
const uint32_t *words, unsigned count);
+bool vtn_handle_amd_shader_ballot_instruction(struct vtn_builder *b, SpvOp ext_opcode,
+ const uint32_t *w, unsigned count);
+
bool vtn_handle_amd_shader_trinary_minmax_instruction(struct vtn_builder *b, SpvOp ext_opcode,
const uint32_t *words, unsigned count);
#endif /* _VTN_PRIVATE_H_ */