#include "nir/nir_deref.h"
#include "spirv_info.h"
+#include "util/format/u_format.h"
#include "util/u_math.h"
#include <stdio.h>
longjmp(b->fail_jump, 1);
}
-struct spec_constant_value {
- bool is_double;
- union {
- uint32_t data32;
- uint64_t data64;
- };
-};
-
static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
{
break;
}
- case GLSL_TYPE_STRUCT: {
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_INTERFACE: {
unsigned elems = glsl_get_length(val->type);
val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
for (unsigned i = 0; i < elems; i++) {
} else if ((strcmp(ext, "SPV_AMD_shader_trinary_minmax") == 0)
&& (b->options && b->options->caps.amd_trinary_minmax)) {
val->ext_handler = vtn_handle_amd_shader_trinary_minmax_instruction;
+ } else if ((strcmp(ext, "SPV_AMD_shader_explicit_vertex_parameter") == 0)
+ && (b->options && b->options->caps.amd_shader_explicit_vertex_parameter)) {
+ val->ext_handler = vtn_handle_amd_shader_explicit_vertex_parameter_instruction;
} else if (strcmp(ext, "OpenCL.std") == 0) {
val->ext_handler = vtn_handle_opencl_instruction;
} else if (strstr(ext, "NonSemantic.") == ext) {
static void
struct_member_decoration_cb(struct vtn_builder *b,
- struct vtn_value *val, int member,
+ UNUSED struct vtn_value *val, int member,
const struct vtn_decoration *dec, void *void_ctx)
{
struct member_decoration_ctx *ctx = void_ctx;
case SpvDecorationFlat:
ctx->fields[member].interpolation = INTERP_MODE_FLAT;
break;
+ case SpvDecorationExplicitInterpAMD:
+ ctx->fields[member].interpolation = INTERP_MODE_EXPLICIT;
+ break;
case SpvDecorationCentroid:
ctx->fields[member].centroid = true;
break;
ctx->fields[member].sample = true;
break;
case SpvDecorationStream:
- /* Vulkan only allows one GS stream */
- vtn_assert(dec->operands[0] == 0);
+ /* This is handled later by var_decoration_cb in vtn_variables.c */
break;
case SpvDecorationLocation:
ctx->fields[member].location = dec->operands[0];
case SpvDecorationXfbBuffer:
case SpvDecorationXfbStride:
- vtn_warn("Vulkan does not have transform feedback");
+ /* This is handled later by var_decoration_cb in vtn_variables.c */
break;
case SpvDecorationCPacked:
break;
case SpvDecorationUserSemantic:
+ case SpvDecorationUserTypeGOOGLE:
/* User semantic decorations can safely be ignored by the driver. */
break;
*/
static void
struct_member_matrix_stride_cb(struct vtn_builder *b,
- struct vtn_value *val, int member,
+ UNUSED struct vtn_value *val, int member,
const struct vtn_decoration *dec,
void *void_ctx)
{
static void
type_decoration_cb(struct vtn_builder *b,
struct vtn_value *val, int member,
- const struct vtn_decoration *dec, void *ctx)
+ const struct vtn_decoration *dec, UNUSED void *ctx)
{
struct vtn_type *type = val->type;
case SpvDecorationPatch:
case SpvDecorationCentroid:
case SpvDecorationSample:
+ case SpvDecorationExplicitInterpAMD:
case SpvDecorationVolatile:
case SpvDecorationCoherent:
case SpvDecorationNonWritable:
spirv_decoration_to_string(dec->decoration));
break;
+ case SpvDecorationUserTypeGOOGLE:
+      /* User type decorations can safely be ignored by the driver. */
+ break;
+
default:
vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
}
translate_image_format(struct vtn_builder *b, SpvImageFormat format)
{
switch (format) {
- case SpvImageFormatUnknown: return 0; /* GL_NONE */
- case SpvImageFormatRgba32f: return 0x8814; /* GL_RGBA32F */
- case SpvImageFormatRgba16f: return 0x881A; /* GL_RGBA16F */
- case SpvImageFormatR32f: return 0x822E; /* GL_R32F */
- case SpvImageFormatRgba8: return 0x8058; /* GL_RGBA8 */
- case SpvImageFormatRgba8Snorm: return 0x8F97; /* GL_RGBA8_SNORM */
- case SpvImageFormatRg32f: return 0x8230; /* GL_RG32F */
- case SpvImageFormatRg16f: return 0x822F; /* GL_RG16F */
- case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
- case SpvImageFormatR16f: return 0x822D; /* GL_R16F */
- case SpvImageFormatRgba16: return 0x805B; /* GL_RGBA16 */
- case SpvImageFormatRgb10A2: return 0x8059; /* GL_RGB10_A2 */
- case SpvImageFormatRg16: return 0x822C; /* GL_RG16 */
- case SpvImageFormatRg8: return 0x822B; /* GL_RG8 */
- case SpvImageFormatR16: return 0x822A; /* GL_R16 */
- case SpvImageFormatR8: return 0x8229; /* GL_R8 */
- case SpvImageFormatRgba16Snorm: return 0x8F9B; /* GL_RGBA16_SNORM */
- case SpvImageFormatRg16Snorm: return 0x8F99; /* GL_RG16_SNORM */
- case SpvImageFormatRg8Snorm: return 0x8F95; /* GL_RG8_SNORM */
- case SpvImageFormatR16Snorm: return 0x8F98; /* GL_R16_SNORM */
- case SpvImageFormatR8Snorm: return 0x8F94; /* GL_R8_SNORM */
- case SpvImageFormatRgba32i: return 0x8D82; /* GL_RGBA32I */
- case SpvImageFormatRgba16i: return 0x8D88; /* GL_RGBA16I */
- case SpvImageFormatRgba8i: return 0x8D8E; /* GL_RGBA8I */
- case SpvImageFormatR32i: return 0x8235; /* GL_R32I */
- case SpvImageFormatRg32i: return 0x823B; /* GL_RG32I */
- case SpvImageFormatRg16i: return 0x8239; /* GL_RG16I */
- case SpvImageFormatRg8i: return 0x8237; /* GL_RG8I */
- case SpvImageFormatR16i: return 0x8233; /* GL_R16I */
- case SpvImageFormatR8i: return 0x8231; /* GL_R8I */
- case SpvImageFormatRgba32ui: return 0x8D70; /* GL_RGBA32UI */
- case SpvImageFormatRgba16ui: return 0x8D76; /* GL_RGBA16UI */
- case SpvImageFormatRgba8ui: return 0x8D7C; /* GL_RGBA8UI */
- case SpvImageFormatR32ui: return 0x8236; /* GL_R32UI */
- case SpvImageFormatRgb10a2ui: return 0x906F; /* GL_RGB10_A2UI */
- case SpvImageFormatRg32ui: return 0x823C; /* GL_RG32UI */
- case SpvImageFormatRg16ui: return 0x823A; /* GL_RG16UI */
- case SpvImageFormatRg8ui: return 0x8238; /* GL_RG8UI */
- case SpvImageFormatR16ui: return 0x8234; /* GL_R16UI */
- case SpvImageFormatR8ui: return 0x8232; /* GL_R8UI */
+ case SpvImageFormatUnknown: return PIPE_FORMAT_NONE;
+ case SpvImageFormatRgba32f: return PIPE_FORMAT_R32G32B32A32_FLOAT;
+ case SpvImageFormatRgba16f: return PIPE_FORMAT_R16G16B16A16_FLOAT;
+ case SpvImageFormatR32f: return PIPE_FORMAT_R32_FLOAT;
+ case SpvImageFormatRgba8: return PIPE_FORMAT_R8G8B8A8_UNORM;
+ case SpvImageFormatRgba8Snorm: return PIPE_FORMAT_R8G8B8A8_SNORM;
+ case SpvImageFormatRg32f: return PIPE_FORMAT_R32G32_FLOAT;
+ case SpvImageFormatRg16f: return PIPE_FORMAT_R16G16_FLOAT;
+ case SpvImageFormatR11fG11fB10f: return PIPE_FORMAT_R11G11B10_FLOAT;
+ case SpvImageFormatR16f: return PIPE_FORMAT_R16_FLOAT;
+ case SpvImageFormatRgba16: return PIPE_FORMAT_R16G16B16A16_UNORM;
+ case SpvImageFormatRgb10A2: return PIPE_FORMAT_R10G10B10A2_UNORM;
+ case SpvImageFormatRg16: return PIPE_FORMAT_R16G16_UNORM;
+ case SpvImageFormatRg8: return PIPE_FORMAT_R8G8_UNORM;
+ case SpvImageFormatR16: return PIPE_FORMAT_R16_UNORM;
+ case SpvImageFormatR8: return PIPE_FORMAT_R8_UNORM;
+ case SpvImageFormatRgba16Snorm: return PIPE_FORMAT_R16G16B16A16_SNORM;
+ case SpvImageFormatRg16Snorm: return PIPE_FORMAT_R16G16_SNORM;
+ case SpvImageFormatRg8Snorm: return PIPE_FORMAT_R8G8_SNORM;
+ case SpvImageFormatR16Snorm: return PIPE_FORMAT_R16_SNORM;
+ case SpvImageFormatR8Snorm: return PIPE_FORMAT_R8_SNORM;
+ case SpvImageFormatRgba32i: return PIPE_FORMAT_R32G32B32A32_SINT;
+ case SpvImageFormatRgba16i: return PIPE_FORMAT_R16G16B16A16_SINT;
+ case SpvImageFormatRgba8i: return PIPE_FORMAT_R8G8B8A8_SINT;
+ case SpvImageFormatR32i: return PIPE_FORMAT_R32_SINT;
+ case SpvImageFormatRg32i: return PIPE_FORMAT_R32G32_SINT;
+ case SpvImageFormatRg16i: return PIPE_FORMAT_R16G16_SINT;
+ case SpvImageFormatRg8i: return PIPE_FORMAT_R8G8_SINT;
+ case SpvImageFormatR16i: return PIPE_FORMAT_R16_SINT;
+ case SpvImageFormatR8i: return PIPE_FORMAT_R8_SINT;
+ case SpvImageFormatRgba32ui: return PIPE_FORMAT_R32G32B32A32_UINT;
+ case SpvImageFormatRgba16ui: return PIPE_FORMAT_R16G16B16A16_UINT;
+ case SpvImageFormatRgba8ui: return PIPE_FORMAT_R8G8B8A8_UINT;
+ case SpvImageFormatR32ui: return PIPE_FORMAT_R32_UINT;
+ case SpvImageFormatRgb10a2ui: return PIPE_FORMAT_R10G10B10A2_UINT;
+ case SpvImageFormatRg32ui: return PIPE_FORMAT_R32G32_UINT;
+ case SpvImageFormatRg16ui: return PIPE_FORMAT_R16G16_UINT;
+ case SpvImageFormatRg8ui: return PIPE_FORMAT_R8G8_UINT;
+ case SpvImageFormatR16ui: return PIPE_FORMAT_R16_UINT;
+ case SpvImageFormatR8ui: return PIPE_FORMAT_R8_UINT;
default:
vtn_fail("Invalid image format: %s (%u)",
spirv_imageformat_to_string(format), format);
case SpvStorageClassUniform:
case SpvStorageClassPushConstant:
case SpvStorageClassStorageBuffer:
- case SpvStorageClassPhysicalStorageBufferEXT:
+ case SpvStorageClassPhysicalStorageBuffer:
vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
break;
default:
}
static void
-spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
- int member, const struct vtn_decoration *dec,
- void *data)
+spec_constant_decoration_cb(struct vtn_builder *b, UNUSED struct vtn_value *val,
+ ASSERTED int member,
+ const struct vtn_decoration *dec, void *data)
{
vtn_assert(member == -1);
if (dec->decoration != SpvDecorationSpecId)
return;
- struct spec_constant_value *const_value = data;
-
+ nir_const_value *value = data;
for (unsigned i = 0; i < b->num_specializations; i++) {
if (b->specializations[i].id == dec->operands[0]) {
- if (const_value->is_double)
- const_value->data64 = b->specializations[i].data64;
- else
- const_value->data32 = b->specializations[i].data32;
+ *value = b->specializations[i].value;
return;
}
}
}
-static uint32_t
-get_specialization(struct vtn_builder *b, struct vtn_value *val,
- uint32_t const_value)
-{
- struct spec_constant_value data;
- data.is_double = false;
- data.data32 = const_value;
- vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
- return data.data32;
-}
-
-static uint64_t
-get_specialization64(struct vtn_builder *b, struct vtn_value *val,
- uint64_t const_value)
-{
- struct spec_constant_value data;
- data.is_double = true;
- data.data64 = const_value;
- vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);
- return data.data64;
-}
-
static void
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
struct vtn_value *val,
- int member,
+ ASSERTED int member,
const struct vtn_decoration *dec,
- void *data)
+ UNUSED void *data)
{
vtn_assert(member == -1);
if (dec->decoration != SpvDecorationBuiltIn ||
"Result type of %s must be OpTypeBool",
spirv_op_to_string(opcode));
- uint32_t int_val = (opcode == SpvOpConstantTrue ||
- opcode == SpvOpSpecConstantTrue);
+ bool bval = (opcode == SpvOpConstantTrue ||
+ opcode == SpvOpSpecConstantTrue);
+
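+      /* Booleans are handled as 32-bit values here; after any specialization
+       * is applied below, a nonzero value means true.
+       */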
+ nir_const_value u32val = nir_const_value_for_uint(bval, 32);
if (opcode == SpvOpSpecConstantTrue ||
opcode == SpvOpSpecConstantFalse)
- int_val = get_specialization(b, val, int_val);
+ vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &u32val);
- val->constant->values[0].b = int_val != 0;
+ val->constant->values[0].b = u32val.u32 != 0;
break;
}
- case SpvOpConstant: {
+ case SpvOpConstant:
+ case SpvOpSpecConstant: {
vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
"Result type of %s must be a scalar",
spirv_op_to_string(opcode));
default:
vtn_fail("Unsupported SpvOpConstant bit size: %u", bit_size);
}
- break;
- }
- case SpvOpSpecConstant: {
- vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
- "Result type of %s must be a scalar",
- spirv_op_to_string(opcode));
- int bit_size = glsl_get_bit_size(val->type->type);
- switch (bit_size) {
- case 64:
- val->constant->values[0].u64 =
- get_specialization64(b, val, vtn_u64_literal(&w[3]));
- break;
- case 32:
- val->constant->values[0].u32 = get_specialization(b, val, w[3]);
- break;
- case 16:
- val->constant->values[0].u16 = get_specialization(b, val, w[3]);
- break;
- case 8:
- val->constant->values[0].u8 = get_specialization(b, val, w[3]);
- break;
- default:
- vtn_fail("Unsupported SpvOpSpecConstant bit size");
- }
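+      /* For spec constants, a SpecId decoration (if present) overrides the
+       * literal value parsed above with the client-provided specialization.
+       */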
+ if (opcode == SpvOpSpecConstant)
+ vtn_foreach_decoration(b, val, spec_constant_decoration_cb,
+ &val->constant->values[0]);
break;
}
}
case SpvOpSpecConstantOp: {
- SpvOp opcode = get_specialization(b, val, w[3]);
+ nir_const_value u32op = nir_const_value_for_uint(w[3], 32);
+ vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &u32op);
+ SpvOp opcode = u32op.u32;
switch (opcode) {
case SpvOpVectorShuffle: {
struct vtn_value *v0 = &b->values[w[4]];
{
switch (sc) {
case SpvStorageClassStorageBuffer:
- case SpvStorageClassPhysicalStorageBufferEXT:
+ case SpvStorageClassPhysicalStorageBuffer:
return SpvMemorySemanticsUniformMemoryMask;
case SpvStorageClassWorkgroup:
return SpvMemorySemanticsWorkgroupMemoryMask;
*after |= SpvMemorySemanticsMakeAvailableMask | storage_semantics;
}
-static void
-vtn_emit_scoped_memory_barrier(struct vtn_builder *b, SpvScope scope,
- SpvMemorySemanticsMask semantics)
+static nir_memory_semantics
+vtn_mem_semantics_to_nir_mem_semantics(struct vtn_builder *b,
+ SpvMemorySemanticsMask semantics)
{
nir_memory_semantics nir_semantics = 0;
nir_semantics |= NIR_MEMORY_MAKE_VISIBLE;
}
+ return nir_semantics;
+}
+
+static nir_variable_mode
+vtn_mem_sematics_to_nir_var_modes(struct vtn_builder *b,
+ SpvMemorySemanticsMask semantics)
+{
/* Vulkan Environment for SPIR-V says "SubgroupMemory, CrossWorkgroupMemory,
* and AtomicCounterMemory are ignored".
*/
nir_variable_mode modes = 0;
if (semantics & (SpvMemorySemanticsUniformMemoryMask |
- SpvMemorySemanticsImageMemoryMask))
- modes |= nir_var_mem_ubo | nir_var_mem_ssbo | nir_var_uniform;
+ SpvMemorySemanticsImageMemoryMask)) {
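+      /* SSBO access may also be lowered to global memory (e.g. with
+       * PhysicalStorageBuffer), so include nir_var_mem_global as well.
+       */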
+ modes |= nir_var_uniform |
+ nir_var_mem_ubo |
+ nir_var_mem_ssbo |
+ nir_var_mem_global;
+ }
if (semantics & SpvMemorySemanticsWorkgroupMemoryMask)
modes |= nir_var_mem_shared;
if (semantics & SpvMemorySemanticsOutputMemoryMask) {
modes |= nir_var_shader_out;
}
- /* No barrier to add. */
- if (nir_semantics == 0 || modes == 0)
- return;
+ return modes;
+}
+static nir_scope
+vtn_scope_to_nir_scope(struct vtn_builder *b, SpvScope scope)
+{
nir_scope nir_scope;
switch (scope) {
case SpvScopeDevice:
vtn_fail("Invalid memory scope");
}
- nir_intrinsic_instr *intrin =
- nir_intrinsic_instr_create(b->shader, nir_intrinsic_scoped_memory_barrier);
- nir_intrinsic_set_memory_semantics(intrin, nir_semantics);
+ return nir_scope;
+}
- nir_intrinsic_set_memory_modes(intrin, modes);
- nir_intrinsic_set_memory_scope(intrin, nir_scope);
- nir_builder_instr_insert(&b->nb, &intrin->instr);
+static void
+vtn_emit_scoped_control_barrier(struct vtn_builder *b, SpvScope exec_scope,
+ SpvScope mem_scope,
+ SpvMemorySemanticsMask semantics)
+{
+ nir_memory_semantics nir_semantics =
+ vtn_mem_semantics_to_nir_mem_semantics(b, semantics);
+ nir_variable_mode modes = vtn_mem_sematics_to_nir_var_modes(b, semantics);
+ nir_scope nir_exec_scope = vtn_scope_to_nir_scope(b, exec_scope);
+
+ /* Memory semantics is optional for OpControlBarrier. */
+ nir_scope nir_mem_scope;
+ if (nir_semantics == 0 || modes == 0)
+ nir_mem_scope = NIR_SCOPE_NONE;
+ else
+ nir_mem_scope = vtn_scope_to_nir_scope(b, mem_scope);
+
+ nir_scoped_barrier(&b->nb, nir_exec_scope, nir_mem_scope, nir_semantics, modes);
+}
+
+static void
+vtn_emit_scoped_memory_barrier(struct vtn_builder *b, SpvScope scope,
+ SpvMemorySemanticsMask semantics)
+{
+ nir_variable_mode modes = vtn_mem_sematics_to_nir_var_modes(b, semantics);
+ nir_memory_semantics nir_semantics =
+ vtn_mem_semantics_to_nir_mem_semantics(b, semantics);
+
+ /* No barrier to add. */
+ if (nir_semantics == 0 || modes == 0)
+ return;
+
+ nir_scope nir_mem_scope = vtn_scope_to_nir_scope(b, scope);
+ nir_scoped_barrier(&b->nb, NIR_SCOPE_NONE, nir_mem_scope, nir_semantics, modes);
}
struct vtn_ssa_value *
struct vtn_value *val =
vtn_push_value(b, w[2], vtn_value_type_sampled_image);
val->sampled_image = ralloc(b, struct vtn_sampled_image);
- val->sampled_image->image =
- vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
- val->sampled_image->sampler =
- vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
+
+   /* It seems valid to use OpSampledImage with an OpUndef operand in place
+    * of an actual image or sampler value.
+ */
+ if (vtn_untyped_value(b, w[3])->value_type == vtn_value_type_undef) {
+ val->sampled_image->image = NULL;
+ } else {
+ val->sampled_image->image =
+ vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
+ }
+
+ if (vtn_untyped_value(b, w[4])->value_type == vtn_value_type_undef) {
+ val->sampled_image->sampler = NULL;
+ } else {
+ val->sampled_image->sampler =
+ vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
+ }
return;
} else if (opcode == SpvOpImage) {
struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
image = sampled_val->pointer;
}
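+   /* The image may be NULL here if it came from an undefined (OpUndef)
+    * sampled image; in that case just push an undef result and bail.
+    */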
+ if (!image) {
+ vtn_push_value(b, w[2], vtn_value_type_undef);
+ return;
+ }
+
nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image);
nir_deref_instr *sampler_deref =
sampler ? vtn_pointer_to_deref(b, sampler) : NULL;
dest_type = nir_type_int;
break;
+ case SpvOpFragmentFetchAMD:
+ texop = nir_texop_fragment_fetch;
+ break;
+
+ case SpvOpFragmentMaskFetchAMD:
+ texop = nir_texop_fragment_mask_fetch;
+ break;
+
default:
vtn_fail_with_opcode("Unhandled opcode", opcode);
}
case nir_texop_query_levels:
case nir_texop_texture_samples:
case nir_texop_samples_identical:
+ case nir_texop_fragment_fetch:
+ case nir_texop_fragment_mask_fetch:
/* These don't */
break;
case nir_texop_txf_ms_fb:
case SpvOpImageFetch:
case SpvOpImageGather:
case SpvOpImageDrefGather:
- case SpvOpImageQueryLod: {
+ case SpvOpImageQueryLod:
+ case SpvOpFragmentFetchAMD:
+ case SpvOpFragmentMaskFetchAMD: {
/* All these types have the coordinate as their first real argument */
- switch (sampler_dim) {
- case GLSL_SAMPLER_DIM_1D:
- case GLSL_SAMPLER_DIM_BUF:
- coord_components = 1;
- break;
- case GLSL_SAMPLER_DIM_2D:
- case GLSL_SAMPLER_DIM_RECT:
- case GLSL_SAMPLER_DIM_MS:
- coord_components = 2;
- break;
- case GLSL_SAMPLER_DIM_3D:
- case GLSL_SAMPLER_DIM_CUBE:
- coord_components = 3;
- break;
- default:
- vtn_fail("Invalid sampler type");
- }
+ coord_components = glsl_get_sampler_dim_coordinate_components(sampler_dim);
if (is_array && texop != nir_texop_lod)
coord_components++;
if (opcode == SpvOpImageQuerySizeLod)
(*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
+ /* For OpFragmentFetchAMD, we always have a multisample index */
+ if (opcode == SpvOpFragmentFetchAMD)
+ (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
+
/* Now we need to handle some number of optional arguments */
struct vtn_value *gather_offsets = NULL;
if (idx < count) {
uint32_t operands = w[idx];
if (operands & SpvImageOperandsBiasMask) {
- vtn_assert(texop == nir_texop_tex);
- texop = nir_texop_txb;
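+         /* With the ImageGatherBiasLodAMD capability, a Bias operand is also
+          * allowed on gathers (tg4), so only rewrite the texop for plain tex.
+          */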
+ vtn_assert(texop == nir_texop_tex ||
+ texop == nir_texop_tg4);
+ if (texop == nir_texop_tex)
+ texop = nir_texop_txb;
uint32_t arg = image_operand_arg(b, w, count, idx,
SpvImageOperandsBiasMask);
(*p++) = vtn_tex_src(b, w[arg], nir_tex_src_bias);
if (operands & SpvImageOperandsLodMask) {
vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf ||
- texop == nir_texop_txs);
+ texop == nir_texop_txs || texop == nir_texop_tg4);
uint32_t arg = image_operand_arg(b, w, count, idx,
SpvImageOperandsLodMask);
(*p++) = vtn_tex_src(b, w[arg], nir_tex_src_lod);
struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
unsigned dest_components = glsl_get_vector_elements(type->type);
- intrin->num_components = nir_intrinsic_infos[op].dest_components;
- if (intrin->num_components == 0)
+ if (nir_intrinsic_infos[op].dest_components == 0)
intrin->num_components = dest_components;
nir_ssa_dest_init(&intrin->instr, &intrin->dest,
- intrin->num_components, 32, NULL);
+ nir_intrinsic_dest_components(intrin), 32, NULL);
nir_builder_instr_insert(&b->nb, &intrin->instr);
nir_ssa_def *result = &intrin->dest.ssa;
- if (intrin->num_components != dest_components)
+ if (nir_intrinsic_dest_components(intrin) != dest_components)
result = nir_channels(&b->nb, result, (1 << dest_components) - 1);
struct vtn_value *val =
* only need to support GLSL Atomic Counters that are uints and don't
* allow direct storage.
*/
- unreachable("Invalid uniform atomic");
+ vtn_fail("Invalid uniform atomic");
}
}
*/
static void
vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
- const uint32_t *w, unsigned count)
+ const uint32_t *w, UNUSED unsigned count)
{
struct vtn_pointer *ptr;
nir_intrinsic_instr *atomic;
/* uniform as "atomic counter uniform" */
if (ptr->mode == vtn_variable_mode_uniform) {
nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
- const struct glsl_type *deref_type = deref->type;
nir_intrinsic_op op = get_uniform_nir_atomic_op(b, opcode);
atomic = nir_intrinsic_instr_create(b->nb.shader, op);
atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
switch (opcode) {
case SpvOpAtomicLoad:
- atomic->num_components = glsl_get_vector_elements(deref_type);
- break;
-
- case SpvOpAtomicStore:
- atomic->num_components = glsl_get_vector_elements(deref_type);
- nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
- break;
-
case SpvOpAtomicExchange:
case SpvOpAtomicCompareExchange:
case SpvOpAtomicCompareExchangeWeak:
return dest;
}
-nir_ssa_def *
-vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
-{
- return nir_channel(&b->nb, src, index);
-}
-
-nir_ssa_def *
-vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
- unsigned index)
-{
- nir_alu_instr *vec = create_vec(b, src->num_components,
- src->bit_size);
-
- for (unsigned i = 0; i < src->num_components; i++) {
- if (i == index) {
- vec->src[i].src = nir_src_for_ssa(insert);
- } else {
- vec->src[i].src = nir_src_for_ssa(src);
- vec->src[i].swizzle[0] = i;
- }
- }
-
- nir_builder_instr_insert(&b->nb, &vec->instr);
-
- return &vec->dest.dest.ssa;
-}
-
-static nir_ssa_def *
-nir_ieq_imm(nir_builder *b, nir_ssa_def *x, uint64_t i)
-{
- return nir_ieq(b, x, nir_imm_intN_t(b, i, x->bit_size));
-}
-
-nir_ssa_def *
-vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
- nir_ssa_def *index)
-{
- return nir_vector_extract(&b->nb, src, nir_i2i(&b->nb, index, 32));
-}
-
-nir_ssa_def *
-vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
- nir_ssa_def *insert, nir_ssa_def *index)
-{
- nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
- for (unsigned i = 1; i < src->num_components; i++)
- dest = nir_bcsel(&b->nb, nir_ieq_imm(&b->nb, index, i),
- vtn_vector_insert(b, src, insert, i), dest);
-
- return dest;
-}
-
static nir_ssa_def *
vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
nir_ssa_def *src0, nir_ssa_def *src1,
struct vtn_ssa_value *cur = dest;
unsigned i;
for (i = 0; i < num_indices - 1; i++) {
+ /* If we got a vector here, that means the next index will be trying to
+ * dereference a scalar.
+ */
+ vtn_fail_if(glsl_type_is_vector_or_scalar(cur->type),
+ "OpCompositeInsert has too many indices.");
+ vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
+ "All indices in an OpCompositeInsert must be in-bounds");
cur = cur->elems[indices[i]];
}
if (glsl_type_is_vector_or_scalar(cur->type)) {
+ vtn_fail_if(indices[i] >= glsl_get_vector_elements(cur->type),
+ "All indices in an OpCompositeInsert must be in-bounds");
+
/* According to the SPIR-V spec, OpCompositeInsert may work down to
* the component granularity. In that case, the last index will be
* the index to insert the scalar into the vector.
*/
- cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
+ cur->def = nir_vector_insert_imm(&b->nb, cur->def, insert->def, indices[i]);
} else {
+ vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
+ "All indices in an OpCompositeInsert must be in-bounds");
cur->elems[indices[i]] = insert;
}
for (unsigned i = 0; i < num_indices; i++) {
if (glsl_type_is_vector_or_scalar(cur->type)) {
vtn_assert(i == num_indices - 1);
+ vtn_fail_if(indices[i] >= glsl_get_vector_elements(cur->type),
+ "All indices in an OpCompositeExtract must be in-bounds");
+
/* According to the SPIR-V spec, OpCompositeExtract may work down to
* the component granularity. The last index will be the index of the
* vector to extract.
struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
- ret->def = vtn_vector_extract(b, cur->def, indices[i]);
+ ret->def = nir_channel(&b->nb, cur->def, indices[i]);
return ret;
} else {
+ vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
+ "All indices in an OpCompositeExtract must be in-bounds");
cur = cur->elems[indices[i]];
}
}
switch (opcode) {
case SpvOpVectorExtractDynamic:
- ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
- vtn_ssa_value(b, w[4])->def);
+ ssa->def = nir_vector_extract(&b->nb, vtn_ssa_value(b, w[3])->def,
+ vtn_ssa_value(b, w[4])->def);
break;
case SpvOpVectorInsertDynamic:
- ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
- vtn_ssa_value(b, w[4])->def,
- vtn_ssa_value(b, w[5])->def);
+ ssa->def = nir_vector_insert(&b->nb, vtn_ssa_value(b, w[3])->def,
+ vtn_ssa_value(b, w[4])->def,
+ vtn_ssa_value(b, w[5])->def);
break;
case SpvOpVectorShuffle:
break;
case SpvOpCopyLogical:
- case SpvOpCopyObject:
ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
break;
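+   /* OpCopyObject may copy values of any type (pointers, images, etc.), not
+    * just numerical/boolean composites, so copy the whole vtn_value rather
+    * than only the SSA value.
+    */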
+ case SpvOpCopyObject:
+ vtn_copy_value(b, w[3], w[2]);
+ return;
default:
vtn_fail_with_opcode("unknown composite operation", opcode);
vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
SpvMemorySemanticsMask semantics)
{
- if (b->options->use_scoped_memory_barrier) {
+ if (b->shader->options->use_scoped_barrier) {
vtn_emit_scoped_memory_barrier(b, scope, semantics);
return;
}
SpvMemorySemanticsUniformMemoryMask |
SpvMemorySemanticsWorkgroupMemoryMask |
SpvMemorySemanticsAtomicCounterMemoryMask |
- SpvMemorySemanticsImageMemoryMask;
+ SpvMemorySemanticsImageMemoryMask |
+ SpvMemorySemanticsOutputMemoryMask;
/* If we're not actually doing a memory barrier, bail */
if (!(semantics & all_memory_semantics))
   /* There are only two scopes left */
vtn_assert(scope == SpvScopeInvocation || scope == SpvScopeDevice);
- if ((semantics & all_memory_semantics) == all_memory_semantics) {
+ /* Map the GLSL memoryBarrier() construct and any barriers with more than one
+ * semantic to the corresponding NIR one.
+ */
+ if (util_bitcount(semantics & all_memory_semantics) > 1) {
vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
+ if (semantics & SpvMemorySemanticsOutputMemoryMask) {
+ /* GLSL memoryBarrier() (and the corresponding NIR one) doesn't include
+       * TCS outputs, so we have to emit a separate intrinsic for them. We
+ * then need to emit another memory_barrier to prevent moving
+ * non-output operations to before the tcs_patch barrier.
+ */
+ vtn_emit_barrier(b, nir_intrinsic_memory_barrier_tcs_patch);
+ vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
+ }
return;
}
- /* Issue a bunch of more specific barriers */
- uint32_t bits = semantics;
- while (bits) {
- SpvMemorySemanticsMask semantic = 1 << u_bit_scan(&bits);
- switch (semantic) {
- case SpvMemorySemanticsUniformMemoryMask:
- vtn_emit_barrier(b, nir_intrinsic_memory_barrier_buffer);
- break;
- case SpvMemorySemanticsWorkgroupMemoryMask:
- vtn_emit_barrier(b, nir_intrinsic_memory_barrier_shared);
- break;
- case SpvMemorySemanticsAtomicCounterMemoryMask:
- vtn_emit_barrier(b, nir_intrinsic_memory_barrier_atomic_counter);
- break;
- case SpvMemorySemanticsImageMemoryMask:
- vtn_emit_barrier(b, nir_intrinsic_memory_barrier_image);
- break;
- case SpvMemorySemanticsOutputMemoryMask:
- if (b->nb.shader->info.stage == MESA_SHADER_TESS_CTRL)
- vtn_emit_barrier(b, nir_intrinsic_memory_barrier_tcs_patch);
- break;
- default:
- break;;
- }
+ /* Issue a more specific barrier */
+ switch (semantics & all_memory_semantics) {
+ case SpvMemorySemanticsUniformMemoryMask:
+ vtn_emit_barrier(b, nir_intrinsic_memory_barrier_buffer);
+ break;
+ case SpvMemorySemanticsWorkgroupMemoryMask:
+ vtn_emit_barrier(b, nir_intrinsic_memory_barrier_shared);
+ break;
+ case SpvMemorySemanticsAtomicCounterMemoryMask:
+ vtn_emit_barrier(b, nir_intrinsic_memory_barrier_atomic_counter);
+ break;
+ case SpvMemorySemanticsImageMemoryMask:
+ vtn_emit_barrier(b, nir_intrinsic_memory_barrier_image);
+ break;
+ case SpvMemorySemanticsOutputMemoryMask:
+ if (b->nb.shader->info.stage == MESA_SHADER_TESS_CTRL)
+ vtn_emit_barrier(b, nir_intrinsic_memory_barrier_tcs_patch);
+ break;
+ default:
+ break;
}
}
static void
vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
- const uint32_t *w, unsigned count)
+ const uint32_t *w, UNUSED unsigned count)
{
switch (opcode) {
case SpvOpEmitVertex:
/* GLSLang, prior to commit 8297936dd6eb3, emitted OpControlBarrier with
* memory semantics of None for GLSL barrier().
+    * Before that (prior to c3f1cdfa), it also emitted OpControlBarrier with
+    * Device instead of Workgroup as the execution scope.
*/
if (b->wa_glslang_cs_barrier &&
b->nb.shader->info.stage == MESA_SHADER_COMPUTE &&
- execution_scope == SpvScopeWorkgroup &&
+ (execution_scope == SpvScopeWorkgroup ||
+ execution_scope == SpvScopeDevice) &&
memory_semantics == SpvMemorySemanticsMaskNone) {
+ execution_scope = SpvScopeWorkgroup;
memory_scope = SpvScopeWorkgroup;
memory_semantics = SpvMemorySemanticsAcquireReleaseMask |
SpvMemorySemanticsWorkgroupMemoryMask;
SpvMemorySemanticsOutputMemoryMask;
}
- vtn_emit_memory_barrier(b, memory_scope, memory_semantics);
+ if (b->shader->options->use_scoped_barrier) {
+ vtn_emit_scoped_control_barrier(b, execution_scope, memory_scope,
+ memory_semantics);
+ } else {
+ vtn_emit_memory_barrier(b, memory_scope, memory_semantics);
- if (execution_scope == SpvScopeWorkgroup)
- vtn_emit_barrier(b, nir_intrinsic_barrier);
+ if (execution_scope == SpvScopeWorkgroup)
+ vtn_emit_barrier(b, nir_intrinsic_control_barrier);
+ }
break;
}
spv_check_supported(float_controls, cap);
break;
- case SpvCapabilityPhysicalStorageBufferAddressesEXT:
+ case SpvCapabilityPhysicalStorageBufferAddresses:
spv_check_supported(physical_storage_buffer_address, cap);
break;
spv_check_supported(amd_image_read_write_lod, cap);
break;
+ case SpvCapabilityIntegerFunctions2INTEL:
+ spv_check_supported(integer_functions2, cap);
+ break;
+
+ case SpvCapabilityFragmentMaskAMD:
+ spv_check_supported(amd_fragment_mask, cap);
+ break;
+
+ case SpvCapabilityImageGatherBiasLodAMD:
+ spv_check_supported(amd_image_gather_bias_lod, cap);
+ break;
+
default:
vtn_fail("Unhandled capability: %s (%u)",
spirv_capability_to_string(cap), cap);
b->options->temp_addr_format = nir_address_format_64bit_global;
break;
case SpvAddressingModelLogical:
- vtn_fail_if(b->shader->info.stage >= MESA_SHADER_STAGES,
+ vtn_fail_if(b->shader->info.stage == MESA_SHADER_KERNEL,
"AddressingModelLogical only supported for shaders");
b->physical_ptrs = false;
break;
- case SpvAddressingModelPhysicalStorageBuffer64EXT:
+ case SpvAddressingModelPhysicalStorageBuffer64:
vtn_fail_if(!b->options ||
!b->options->caps.physical_storage_buffer_address,
- "AddressingModelPhysicalStorageBuffer64EXT not supported");
+ "AddressingModelPhysicalStorageBuffer64 not supported");
break;
default:
vtn_fail("Unknown addressing model: %s (%u)",
break;
}
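+   /* Remember which memory model the module declared; later handling of
+    * memory semantics depends on it.
+    */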
+ b->mem_model = w[2];
switch (w[2]) {
case SpvMemoryModelSimple:
case SpvMemoryModelGLSL450:
static void
vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
- const struct vtn_decoration *mode, void *data)
+ const struct vtn_decoration *mode, UNUSED void *data)
{
vtn_assert(b->entry_point == entry_point);
break;
}
+ case SpvOpFragmentMaskFetchAMD:
+ case SpvOpFragmentFetchAMD:
+ vtn_handle_texture(b, opcode, w, count);
+ break;
+
case SpvOpAtomicLoad:
case SpvOpAtomicExchange:
case SpvOpAtomicCompareExchange:
case SpvOpVectorTimesMatrix:
case SpvOpMatrixTimesVector:
case SpvOpMatrixTimesMatrix:
+ case SpvOpUCountLeadingZerosINTEL:
+ case SpvOpUCountTrailingZerosINTEL:
+ case SpvOpAbsISubINTEL:
+ case SpvOpAbsUSubINTEL:
+ case SpvOpIAddSatINTEL:
+ case SpvOpUAddSatINTEL:
+ case SpvOpIAverageINTEL:
+ case SpvOpUAverageINTEL:
+ case SpvOpIAverageRoundedINTEL:
+ case SpvOpUAverageRoundedINTEL:
+ case SpvOpISubSatINTEL:
+ case SpvOpUSubSatINTEL:
+ case SpvOpIMul32x16INTEL:
+ case SpvOpUMul32x16INTEL:
vtn_handle_alu(b, opcode, w, count);
break;
}
case SpvOpReadClockKHR: {
- assert(vtn_constant_uint(b, w[3]) == SpvScopeSubgroup);
+ SpvScope scope = vtn_constant_uint(b, w[3]);
+ nir_scope nir_scope;
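+      /* OpReadClockKHR only allows Subgroup or Device for its scope operand. */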
+
+ switch (scope) {
+ case SpvScopeDevice:
+ nir_scope = NIR_SCOPE_DEVICE;
+ break;
+ case SpvScopeSubgroup:
+ nir_scope = NIR_SCOPE_SUBGROUP;
+ break;
+ default:
+ vtn_fail("invalid read clock scope");
+ }
/* Operation supports two result types: uvec2 and uint64_t. The NIR
* intrinsic gives uvec2, so pack the result for the other case.
nir_intrinsic_instr *intrin =
nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_shader_clock);
nir_ssa_dest_init(&intrin->instr, &intrin->dest, 2, 32, NULL);
+ nir_intrinsic_set_memory_scope(intrin, nir_scope);
nir_builder_instr_insert(&b->nb, &intrin->instr);
struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
b->file = NULL;
b->line = -1;
b->col = -1;
- exec_list_make_empty(&b->functions);
+ list_inithead(&b->functions);
b->entry_point_stage = stage;
b->entry_point_name = entry_point_name;
b->options = dup_options;
bool progress;
do {
progress = false;
- foreach_list_typed(struct vtn_function, func, node, &b->functions) {
+ vtn_foreach_cf_node(node, &b->functions) {
+ struct vtn_function *func = vtn_cf_node_as_function(node);
if (func->referenced && !func->emitted) {
b->const_table = _mesa_pointer_hash_table_create(b);
* right away. In order to do so, we must lower any constant initializers
* on outputs so nir_remove_dead_variables sees that they're written to.
*/
- nir_lower_constant_initializers(b->shader, nir_var_shader_out);
+ nir_lower_variable_initializers(b->shader, nir_var_shader_out);
nir_remove_dead_variables(b->shader,
- nir_var_shader_in | nir_var_shader_out);
+ nir_var_shader_in | nir_var_shader_out, NULL);
/* We sometimes generate bogus derefs that, while never used, give the
* validator a bit of heartburn. Run dead code to get rid of them.