with_gallium_virgl = gallium_drivers.contains('virgl')
with_gallium_swr = gallium_drivers.contains('swr')
with_gallium_lima = gallium_drivers.contains('lima')
+with_gallium_zink = gallium_drivers.contains('zink')
if cc.get_id().startswith('intel') and meson.version().version_compare('< 0.49.1')
error('Meson does not have sufficient support of ICC before 0.49.1 to compile mesa')
vdpau_drivers_path = join_paths(get_option('libdir'), 'vdpau')
endif
+if with_gallium_zink
+ dep_vulkan = dependency('vulkan')
+endif
+
_xvmc = get_option('gallium-xvmc')
if not system_has_kms_drm
if _xvmc == 'true'
choices : [
'', 'auto', 'kmsro', 'radeonsi', 'r300', 'r600', 'nouveau', 'freedreno',
'swrast', 'v3d', 'vc4', 'etnaviv', 'tegra', 'i915', 'svga', 'virgl',
- 'swr', 'panfrost', 'iris', 'lima'
+ 'swr', 'panfrost', 'iris', 'lima', 'zink'
],
description : 'List of gallium drivers to build. If this is set to auto all drivers applicable to the target OS/architecture will be built'
)
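With 'zink' accepted as a value for the gallium-drivers option above, the new driver is enabled at configure time like any other gallium driver, for example `meson builddir/ -Dgallium-drivers=swrast,zink` (any combination of the listed choices is valid). Note that the Vulkan dependency added near the top of this patch is only looked up when zink is actually part of that list.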
screen = swr_create_screen(winsys);
#endif
+#if defined(GALLIUM_ZINK)
+ if (screen == NULL && strcmp(driver, "zink") == 0)
+ screen = zink_create_screen(winsys);
+#endif
+
return screen;
}
default_driver = "softpipe";
#elif defined(GALLIUM_SWR)
default_driver = "swr";
+#elif defined(GALLIUM_ZINK)
+ default_driver = "zink";
#else
default_driver = "";
#endif
* llvmpipe, softpipe, swr.
*/
+#ifdef GALLIUM_ZINK
+#include "zink/zink_public.h"
+#endif
+
#ifdef GALLIUM_SOFTPIPE
#include "softpipe/sp_public.h"
#endif
screen = swr_create_screen(winsys);
#endif
+#if defined(GALLIUM_ZINK)
+ if (screen == NULL && strcmp(driver, "zink") == 0)
+ screen = zink_create_screen(winsys);
+#endif
+
return screen;
}
default_driver = "softpipe";
#elif defined(GALLIUM_SWR)
default_driver = "swr";
+#elif defined(GALLIUM_ZINK)
+ default_driver = "zink";
#else
default_driver = "";
#endif
--- /dev/null
+# Copyright © 2018 Collabora Ltd
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+files_libzink = files(
+ 'nir_to_spirv/nir_to_spirv.c',
+ 'nir_to_spirv/spirv_builder.c',
+ 'zink_cmdbuf.c',
+ 'zink_compiler.c',
+ 'zink_context.c',
+ 'zink_fence.c',
+ 'zink_framebuffer.c',
+ 'zink_pipeline.c',
+ 'zink_program.c',
+ 'zink_render_pass.c',
+ 'zink_resource.c',
+ 'zink_screen.c',
+ 'zink_state.c',
+ 'zink_surface.c',
+)
+
+libzink = static_library(
+ 'zink',
+ files_libzink,
+ c_args : c_vis_args,
+ include_directories : inc_common,
+ dependencies: [dep_vulkan, idep_nir_headers],
+)
+
+driver_zink = declare_dependency(
+ compile_args : '-DGALLIUM_ZINK',
+ link_with : [libzink],
+)
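For context: driver_zink bundles the static libzink together with the -DGALLIUM_ZINK compile flag, so any target that lists it among its dependencies compiles the `#if defined(GALLIUM_ZINK)` paths added to the software-screen helpers above and links in the zink objects. The per-target wiring that actually consumes driver_zink presumably lives in the individual gallium target build files and is not part of these hunks.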
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "nir_to_spirv.h"
+#include "spirv_builder.h"
+
+#include "nir.h"
+#include "pipe/p_state.h"
+#include "util/u_memory.h"
+
+struct ntv_context {
+ struct spirv_builder builder;
+
+ SpvId GLSL_std_450;
+
+ gl_shader_stage stage;
+ SpvId inputs[PIPE_MAX_SHADER_INPUTS][4];
+ SpvId outputs[PIPE_MAX_SHADER_OUTPUTS][4];
+
+ SpvId ubos[128];
+ size_t num_ubos;
+ SpvId samplers[PIPE_MAX_SAMPLERS];
+ size_t num_samplers;
+ SpvId entry_ifaces[PIPE_MAX_SHADER_INPUTS * 4 + PIPE_MAX_SHADER_OUTPUTS * 4];
+ size_t num_entry_ifaces;
+
+ SpvId *defs;
+ size_t num_defs;
+};
+
+static SpvId
+get_bvec_type(struct ntv_context *ctx, int num_components)
+{
+ SpvId bool_type = spirv_builder_type_bool(&ctx->builder);
+ if (num_components > 1)
+ return spirv_builder_type_vector(&ctx->builder, bool_type,
+ num_components);
+
+ assert(num_components == 1);
+ return bool_type;
+}
+
+static SpvId
+get_fvec_type(struct ntv_context *ctx, unsigned bit_size, unsigned num_components)
+{
+ assert(bit_size == 32); // only 32-bit floats supported so far
+
+ SpvId float_type = spirv_builder_type_float(&ctx->builder, bit_size);
+ if (num_components > 1)
+ return spirv_builder_type_vector(&ctx->builder, float_type,
+ num_components);
+
+ assert(num_components == 1);
+ return float_type;
+}
+
+static SpvId
+get_dest_type(struct ntv_context *ctx, nir_dest *dest)
+{
+ return get_fvec_type(ctx, nir_dest_bit_size(*dest),
+ nir_dest_num_components(*dest));
+}
+
+static SpvId
+get_glsl_basetype(struct ntv_context *ctx, enum glsl_base_type type)
+{
+ switch (type) {
+ case GLSL_TYPE_FLOAT:
+ return spirv_builder_type_float(&ctx->builder, 32);
+
+ case GLSL_TYPE_INT:
+ return spirv_builder_type_int(&ctx->builder, 32);
+
+ case GLSL_TYPE_UINT:
+ return spirv_builder_type_uint(&ctx->builder, 32);
+ /* TODO: handle more types */
+
+ default:
+ unreachable("unknown GLSL type");
+ }
+}
+
+static SpvId
+get_glsl_type(struct ntv_context *ctx, const struct glsl_type *type)
+{
+ assert(type);
+ if (glsl_type_is_scalar(type))
+ return get_glsl_basetype(ctx, glsl_get_base_type(type));
+
+ if (glsl_type_is_vector(type))
+ return spirv_builder_type_vector(&ctx->builder,
+ get_glsl_basetype(ctx, glsl_get_base_type(type)),
+ glsl_get_vector_elements(type));
+
+ unreachable("we shouldn't get here, I think...");
+}
+
+static void
+emit_input(struct ntv_context *ctx, struct nir_variable *var)
+{
+ SpvId vec_type = get_glsl_type(ctx, var->type);
+ SpvId pointer_type = spirv_builder_type_pointer(&ctx->builder,
+ SpvStorageClassInput,
+ vec_type);
+ SpvId var_id = spirv_builder_emit_var(&ctx->builder, pointer_type,
+ SpvStorageClassInput);
+
+ if (var->name)
+ spirv_builder_emit_name(&ctx->builder, var_id, var->name);
+
+ if (ctx->stage == MESA_SHADER_FRAGMENT) {
+ switch (var->data.location) {
+ case VARYING_SLOT_POS:
+ spirv_builder_emit_builtin(&ctx->builder, var_id, SpvBuiltInFragCoord);
+ break;
+
+ default:
+ spirv_builder_emit_location(&ctx->builder, var_id,
+ var->data.driver_location);
+ break;
+ }
+ } else {
+ spirv_builder_emit_location(&ctx->builder, var_id,
+ var->data.driver_location);
+ }
+
+ if (var->data.location_frac)
+ spirv_builder_emit_component(&ctx->builder, var_id,
+ var->data.location_frac);
+
+ if (var->data.interpolation == INTERP_MODE_FLAT)
+ spirv_builder_emit_decoration(&ctx->builder, var_id, SpvDecorationFlat);
+
+ assert(var->data.driver_location < PIPE_MAX_SHADER_INPUTS);
+ assert(var->data.location_frac < 4);
+ assert(ctx->inputs[var->data.driver_location][var->data.location_frac] == 0);
+ ctx->inputs[var->data.driver_location][var->data.location_frac] = var_id;
+
+ assert(ctx->num_entry_ifaces < ARRAY_SIZE(ctx->entry_ifaces));
+ ctx->entry_ifaces[ctx->num_entry_ifaces++] = var_id;
+}
+
+static void
+emit_output(struct ntv_context *ctx, struct nir_variable *var)
+{
+ SpvId vec_type = get_glsl_type(ctx, var->type);
+ SpvId pointer_type = spirv_builder_type_pointer(&ctx->builder,
+ SpvStorageClassOutput,
+ vec_type);
+ SpvId var_id = spirv_builder_emit_var(&ctx->builder, pointer_type,
+ SpvStorageClassOutput);
+ if (var->name)
+ spirv_builder_emit_name(&ctx->builder, var_id, var->name);
+
+
+ if (ctx->stage == MESA_SHADER_VERTEX) {
+ switch (var->data.location) {
+ case VARYING_SLOT_POS:
+ spirv_builder_emit_builtin(&ctx->builder, var_id, SpvBuiltInPosition);
+ break;
+
+ case VARYING_SLOT_PSIZ:
+ spirv_builder_emit_builtin(&ctx->builder, var_id, SpvBuiltInPointSize);
+ break;
+
+ default:
+ spirv_builder_emit_location(&ctx->builder, var_id,
+ var->data.driver_location - 1);
+ }
+ } else if (ctx->stage == MESA_SHADER_FRAGMENT) {
+ switch (var->data.location) {
+ case FRAG_RESULT_DEPTH:
+ spirv_builder_emit_builtin(&ctx->builder, var_id, SpvBuiltInFragDepth);
+ break;
+
+ default:
+ spirv_builder_emit_location(&ctx->builder, var_id,
+ var->data.driver_location);
+ }
+ }
+
+ if (var->data.location_frac)
+ spirv_builder_emit_component(&ctx->builder, var_id,
+ var->data.location_frac);
+
+ assert(var->data.driver_location < PIPE_MAX_SHADER_INPUTS);
+ assert(var->data.location_frac < 4);
+ assert(ctx->outputs[var->data.driver_location][var->data.location_frac] == 0);
+ ctx->outputs[var->data.driver_location][var->data.location_frac] = var_id;
+
+ assert(ctx->num_entry_ifaces < ARRAY_SIZE(ctx->entry_ifaces));
+ ctx->entry_ifaces[ctx->num_entry_ifaces++] = var_id;
+}
+
+static SpvDim
+type_to_dim(enum glsl_sampler_dim gdim, bool *is_ms)
+{
+ *is_ms = false;
+ switch (gdim) {
+ case GLSL_SAMPLER_DIM_1D:
+ return SpvDim1D;
+ case GLSL_SAMPLER_DIM_2D:
+ return SpvDim2D;
+ case GLSL_SAMPLER_DIM_RECT:
+ return SpvDimRect;
+ case GLSL_SAMPLER_DIM_CUBE:
+ return SpvDimCube;
+ case GLSL_SAMPLER_DIM_3D:
+ return SpvDim3D;
+ case GLSL_SAMPLER_DIM_MS:
+ *is_ms = true;
+ return SpvDim2D;
+ default:
+ fprintf(stderr, "unknown sampler type %d\n", gdim);
+ break;
+ }
+ return SpvDim2D;
+}
+
+static void
+emit_sampler(struct ntv_context *ctx, struct nir_variable *var)
+{
+ bool is_ms;
+ SpvDim dimension = type_to_dim(glsl_get_sampler_dim(var->type), &is_ms);
+ SpvId float_type = spirv_builder_type_float(&ctx->builder, 32);
+ SpvId image_type = spirv_builder_type_image(&ctx->builder, float_type,
+ dimension, false, glsl_sampler_type_is_array(var->type), is_ms, 1,
+ SpvImageFormatUnknown);
+
+ SpvId sampled_type = spirv_builder_type_sampled_image(&ctx->builder,
+ image_type);
+ SpvId pointer_type = spirv_builder_type_pointer(&ctx->builder,
+ SpvStorageClassUniformConstant,
+ sampled_type);
+ SpvId var_id = spirv_builder_emit_var(&ctx->builder, pointer_type,
+ SpvStorageClassUniformConstant);
+
+ if (var->name)
+ spirv_builder_emit_name(&ctx->builder, var_id, var->name);
+
+ assert(ctx->num_samplers < ARRAY_SIZE(ctx->samplers));
+ ctx->samplers[ctx->num_samplers++] = var_id;
+
+ spirv_builder_emit_descriptor_set(&ctx->builder, var_id,
+ var->data.descriptor_set);
+ spirv_builder_emit_binding(&ctx->builder, var_id, var->data.binding);
+}
+
+static void
+emit_ubo(struct ntv_context *ctx, struct nir_variable *var)
+{
+ uint32_t size = glsl_count_attribute_slots(var->type, false);
+ SpvId vec4_type = get_fvec_type(ctx, 32, 4);
+ SpvId array_length = spirv_builder_const_uint(&ctx->builder, 32, size);
+ SpvId array_type = spirv_builder_type_array(&ctx->builder, vec4_type,
+ array_length);
+ spirv_builder_emit_array_stride(&ctx->builder, array_type, 16);
+
+ // wrap UBO-array in a struct
+ SpvId struct_type = spirv_builder_type_struct(&ctx->builder, &array_type, 1);
+ if (var->name) {
+ char struct_name[100];
+ snprintf(struct_name, sizeof(struct_name), "struct_%s", var->name);
+ spirv_builder_emit_name(&ctx->builder, struct_type, struct_name);
+ }
+
+ spirv_builder_emit_decoration(&ctx->builder, struct_type,
+ SpvDecorationBlock);
+ spirv_builder_emit_member_offset(&ctx->builder, struct_type, 0, 0);
+
+
+ SpvId pointer_type = spirv_builder_type_pointer(&ctx->builder,
+ SpvStorageClassUniform,
+ struct_type);
+
+ SpvId var_id = spirv_builder_emit_var(&ctx->builder, pointer_type,
+ SpvStorageClassUniform);
+ if (var->name)
+ spirv_builder_emit_name(&ctx->builder, var_id, var->name);
+
+ assert(ctx->num_ubos < ARRAY_SIZE(ctx->ubos));
+ ctx->ubos[ctx->num_ubos++] = var_id;
+
+ spirv_builder_emit_descriptor_set(&ctx->builder, var_id,
+ var->data.descriptor_set);
+ spirv_builder_emit_binding(&ctx->builder, var_id, var->data.binding);
+}
+
+static void
+emit_uniform(struct ntv_context *ctx, struct nir_variable *var)
+{
+ if (glsl_type_is_sampler(var->type))
+ emit_sampler(ctx, var);
+ else if (var->interface_type)
+ emit_ubo(ctx, var);
+}
+
+static SpvId
+get_src(struct ntv_context *ctx, nir_src *src)
+{
+ assert(src->is_ssa);
+ assert(src->ssa->index < ctx->num_defs);
+ assert(ctx->defs[src->ssa->index] != 0);
+ return ctx->defs[src->ssa->index];
+}
+
+static SpvId
+get_alu_src(struct ntv_context *ctx, nir_alu_instr *alu, unsigned src)
+{
+ assert(!alu->src[src].negate);
+ assert(!alu->src[src].abs);
+
+ SpvId def = get_src(ctx, &alu->src[src].src);
+
+ unsigned used_channels = 0;
+ bool need_swizzle = false;
+ for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
+ if (!nir_alu_instr_channel_used(alu, src, i))
+ continue;
+
+ used_channels++;
+
+ if (alu->src[src].swizzle[i] != i)
+ need_swizzle = true;
+ }
+ assert(used_channels != 0);
+
+ unsigned live_channels = nir_src_num_components(alu->src[src].src);
+ if (used_channels != live_channels)
+ need_swizzle = true;
+
+ if (!need_swizzle)
+ return def;
+
+ int bit_size = nir_src_bit_size(alu->src[src].src);
+
+ if (used_channels == 1) {
+ SpvId result_type = spirv_builder_type_float(&ctx->builder, bit_size);
+ uint32_t indices[] = { alu->src[src].swizzle[0] };
+ return spirv_builder_emit_composite_extract(&ctx->builder, result_type,
+ def, indices,
+ ARRAY_SIZE(indices));
+ } else if (live_channels == 1) {
+ SpvId type = get_fvec_type(ctx, bit_size, used_channels);
+
+ SpvId constituents[NIR_MAX_VEC_COMPONENTS];
+ for (unsigned i = 0; i < used_channels; ++i)
+ constituents[i] = def;
+
+ return spirv_builder_emit_composite_construct(&ctx->builder, type,
+ constituents,
+ used_channels);
+ } else {
+ uint32_t components[NIR_MAX_VEC_COMPONENTS];
+ size_t num_components = 0;
+ for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
+ if (!nir_alu_instr_channel_used(alu, src, i))
+ continue;
+
+ components[num_components++] = alu->src[src].swizzle[i];
+ }
+
+ SpvId vecType = get_fvec_type(ctx, bit_size, used_channels);
+ return spirv_builder_emit_vector_shuffle(&ctx->builder, vecType,
+ def, def, components, num_components);
+ }
+}
+
+static void
+store_ssa_def(struct ntv_context *ctx, nir_ssa_def *ssa, SpvId result)
+{
+ assert(result != 0);
+ assert(ssa->index < ctx->num_defs);
+ ctx->defs[ssa->index] = result;
+}
+
+static void
+store_dest(struct ntv_context *ctx, nir_dest *dest, SpvId result)
+{
+ assert(dest->is_ssa);
+ store_ssa_def(ctx, &dest->ssa, result);
+}
+
+static void
+store_alu_result(struct ntv_context *ctx, nir_alu_dest *dest, SpvId result)
+{
+ assert(!dest->saturate);
+ return store_dest(ctx, &dest->dest, result);
+}
+
+static SpvId
+emit_unop(struct ntv_context *ctx, SpvOp op, SpvId type, SpvId src)
+{
+ return spirv_builder_emit_unop(&ctx->builder, op, type, src);
+}
+
+static SpvId
+emit_binop(struct ntv_context *ctx, SpvOp op, SpvId type,
+ SpvId src0, SpvId src1)
+{
+ return spirv_builder_emit_binop(&ctx->builder, op, type, src0, src1);
+}
+
+static SpvId
+emit_triop(struct ntv_context *ctx, SpvOp op, SpvId type,
+ SpvId src0, SpvId src1, SpvId src2)
+{
+ return spirv_builder_emit_triop(&ctx->builder, op, type, src0, src1, src2);
+}
+
+static SpvId
+emit_builtin_unop(struct ntv_context *ctx, enum GLSLstd450 op, SpvId type,
+ SpvId src)
+{
+ SpvId args[] = { src };
+ return spirv_builder_emit_ext_inst(&ctx->builder, type, ctx->GLSL_std_450,
+ op, args, ARRAY_SIZE(args));
+}
+
+static SpvId
+emit_builtin_binop(struct ntv_context *ctx, enum GLSLstd450 op, SpvId type,
+ SpvId src0, SpvId src1)
+{
+ SpvId args[] = { src0, src1 };
+ return spirv_builder_emit_ext_inst(&ctx->builder, type, ctx->GLSL_std_450,
+ op, args, ARRAY_SIZE(args));
+}
+
+static SpvId
+get_fvec_constant(struct ntv_context *ctx, int bit_size, int num_components,
+ float values[])
+{
+ assert(bit_size == 32);
+
+ if (num_components > 1) {
+ SpvId components[num_components];
+ for (int i = 0; i < num_components; i++)
+ components[i] = spirv_builder_const_float(&ctx->builder, bit_size,
+ values[i]);
+
+ SpvId type = get_fvec_type(ctx, bit_size, num_components);
+ return spirv_builder_const_composite(&ctx->builder, type, components,
+ num_components);
+ }
+
+ assert(num_components == 1);
+ return spirv_builder_const_float(&ctx->builder, bit_size, values[0]);
+}
+
+static void
+emit_alu(struct ntv_context *ctx, nir_alu_instr *alu)
+{
+ SpvId src[nir_op_infos[alu->op].num_inputs];
+ for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++)
+ src[i] = get_alu_src(ctx, alu, i);
+
+ SpvId dest_type = get_dest_type(ctx, &alu->dest.dest);
+
+ SpvId result = 0;
+ switch (alu->op) {
+ case nir_op_mov:
+ assert(nir_op_infos[alu->op].num_inputs == 1);
+ result = src[0];
+ break;
+
+#define UNOP(nir_op, spirv_op) \
+ case nir_op: \
+ assert(nir_op_infos[alu->op].num_inputs == 1); \
+ result = emit_unop(ctx, spirv_op, dest_type, src[0]); \
+ break;
+
+#define BUILTIN_UNOP(nir_op, spirv_op) \
+ case nir_op: \
+ assert(nir_op_infos[alu->op].num_inputs == 1); \
+ result = emit_builtin_unop(ctx, spirv_op, dest_type, src[0]); \
+ break;
+
+ UNOP(nir_op_fneg, SpvOpFNegate)
+ UNOP(nir_op_fddx, SpvOpDPdx)
+ UNOP(nir_op_fddy, SpvOpDPdy)
+
+ BUILTIN_UNOP(nir_op_fabs, GLSLstd450FAbs)
+ BUILTIN_UNOP(nir_op_fsqrt, GLSLstd450Sqrt)
+ BUILTIN_UNOP(nir_op_frsq, GLSLstd450InverseSqrt)
+ BUILTIN_UNOP(nir_op_flog2, GLSLstd450Log2)
+ BUILTIN_UNOP(nir_op_fexp2, GLSLstd450Exp2)
+ BUILTIN_UNOP(nir_op_ffract, GLSLstd450Fract)
+ BUILTIN_UNOP(nir_op_ffloor, GLSLstd450Floor)
+ BUILTIN_UNOP(nir_op_fceil, GLSLstd450Ceil)
+ BUILTIN_UNOP(nir_op_ftrunc, GLSLstd450Trunc)
+ BUILTIN_UNOP(nir_op_fround_even, GLSLstd450RoundEven)
+ BUILTIN_UNOP(nir_op_fsign, GLSLstd450FSign)
+ BUILTIN_UNOP(nir_op_fsin, GLSLstd450Sin)
+ BUILTIN_UNOP(nir_op_fcos, GLSLstd450Cos)
+
+ case nir_op_frcp: {
+ assert(nir_op_infos[alu->op].num_inputs == 1);
+ float one[4] = { 1, 1, 1, 1 };
+ src[1] = src[0];
+ src[0] = get_fvec_constant(ctx, nir_dest_bit_size(alu->dest.dest),
+ nir_dest_num_components(alu->dest.dest),
+ one);
+ result = emit_binop(ctx, SpvOpFDiv, dest_type, src[0], src[1]);
+ }
+ break;
+
+#undef UNOP
+#undef BUILTIN_UNOP
+
+#define BINOP(nir_op, spirv_op) \
+ case nir_op: \
+ assert(nir_op_infos[alu->op].num_inputs == 2); \
+ result = emit_binop(ctx, spirv_op, dest_type, src[0], src[1]); \
+ break;
+
+#define BUILTIN_BINOP(nir_op, spirv_op) \
+ case nir_op: \
+ assert(nir_op_infos[alu->op].num_inputs == 2); \
+ result = emit_builtin_binop(ctx, spirv_op, dest_type, src[0], src[1]); \
+ break;
+
+ BINOP(nir_op_fadd, SpvOpFAdd)
+ BINOP(nir_op_fsub, SpvOpFSub)
+ BINOP(nir_op_fmul, SpvOpFMul)
+ BINOP(nir_op_flt, SpvOpFUnordLessThan)
+ BINOP(nir_op_fge, SpvOpFUnordGreaterThanEqual)
+
+ BUILTIN_BINOP(nir_op_fmin, GLSLstd450FMin)
+ BUILTIN_BINOP(nir_op_fmax, GLSLstd450FMax)
+
+#undef BINOP
+#undef BUILTIN_BINOP
+
+ case nir_op_fdot2:
+ case nir_op_fdot3:
+ case nir_op_fdot4:
+ assert(nir_op_infos[alu->op].num_inputs == 2);
+ result = emit_binop(ctx, SpvOpDot, dest_type, src[0], src[1]);
+ break;
+
+ case nir_op_seq:
+ case nir_op_sne:
+ case nir_op_slt:
+ case nir_op_sge: {
+ assert(nir_op_infos[alu->op].num_inputs == 2);
+ int num_components = nir_dest_num_components(alu->dest.dest);
+ SpvId bool_type = get_bvec_type(ctx, num_components);
+
+ SpvId zero = spirv_builder_const_float(&ctx->builder, 32, 0.0f);
+ SpvId one = spirv_builder_const_float(&ctx->builder, 32, 1.0f);
+ if (num_components > 1) {
+ SpvId zero_comps[num_components], one_comps[num_components];
+ for (int i = 0; i < num_components; i++) {
+ zero_comps[i] = zero;
+ one_comps[i] = one;
+ }
+
+ zero = spirv_builder_const_composite(&ctx->builder, dest_type,
+ zero_comps, num_components);
+ one = spirv_builder_const_composite(&ctx->builder, dest_type,
+ one_comps, num_components);
+ }
+
+ SpvOp op;
+ switch (alu->op) {
+ case nir_op_seq: op = SpvOpFOrdEqual; break;
+ case nir_op_sne: op = SpvOpFOrdNotEqual; break;
+ case nir_op_slt: op = SpvOpFOrdLessThan; break;
+ case nir_op_sge: op = SpvOpFOrdGreaterThanEqual; break;
+ default: unreachable("unexpected op");
+ }
+
+ result = emit_binop(ctx, op, bool_type, src[0], src[1]);
+ result = emit_triop(ctx, SpvOpSelect, dest_type, result, one, zero);
+ }
+ break;
+
+ case nir_op_fcsel: {
+ assert(nir_op_infos[alu->op].num_inputs == 3);
+ int num_components = nir_dest_num_components(alu->dest.dest);
+ SpvId bool_type = get_bvec_type(ctx, num_components);
+
+ float zero[4] = { 0, 0, 0, 0 };
+ SpvId cmp = get_fvec_constant(ctx, nir_src_bit_size(alu->src[0].src),
+ num_components, zero);
+
+ result = emit_binop(ctx, SpvOpFOrdGreaterThan, bool_type, src[0], cmp);
+ result = emit_triop(ctx, SpvOpSelect, dest_type, result, src[1], src[2]);
+ }
+ break;
+
+ case nir_op_vec2:
+ case nir_op_vec3:
+ case nir_op_vec4: {
+ int num_inputs = nir_op_infos[alu->op].num_inputs;
+ assert(2 <= num_inputs && num_inputs <= 4);
+ result = spirv_builder_emit_composite_construct(&ctx->builder, dest_type,
+ src, num_inputs);
+ }
+ break;
+
+ default:
+ fprintf(stderr, "emit_alu: not implemented (%s)\n",
+ nir_op_infos[alu->op].name);
+
+ unreachable("unsupported opcode");
+ return;
+ }
+
+ store_alu_result(ctx, &alu->dest, result);
+}
+
+static void
+emit_load_const(struct ntv_context *ctx, nir_load_const_instr *load_const)
+{
+ float values[NIR_MAX_VEC_COMPONENTS];
+ for (int i = 0; i < load_const->def.num_components; ++i)
+ values[i] = load_const->value[i].f32;
+
+ SpvId constant = get_fvec_constant(ctx, load_const->def.bit_size,
+ load_const->def.num_components,
+ values);
+ store_ssa_def(ctx, &load_const->def, constant);
+}
+
+static void
+emit_load_input(struct ntv_context *ctx, nir_intrinsic_instr *intr)
+{
+ nir_const_value *const_offset = nir_src_as_const_value(intr->src[0]);
+ if (const_offset) {
+ SpvId type = get_dest_type(ctx, &intr->dest);
+
+ int driver_location = (int)nir_intrinsic_base(intr) + const_offset->u32;
+ assert(driver_location < PIPE_MAX_SHADER_INPUTS);
+ int location_frac = nir_intrinsic_component(intr);
+ assert(location_frac < 4);
+
+ SpvId ptr = ctx->inputs[driver_location][location_frac];
+ assert(ptr > 0);
+
+ store_dest(ctx, &intr->dest, spirv_builder_emit_load(&ctx->builder, type, ptr));
+ } else
+ unreachable("input-addressing not yet supported");
+}
+
+static void
+emit_load_ubo(struct ntv_context *ctx, nir_intrinsic_instr *intr)
+{
+ nir_const_value *const_block_index = nir_src_as_const_value(intr->src[0]);
+ assert(const_block_index); // no dynamic indexing for now
+ assert(const_block_index->u32 == 0); // we only support the default UBO for now
+
+ nir_const_value *const_offset = nir_src_as_const_value(intr->src[1]);
+ if (const_offset) {
+ SpvId vec4_type = get_fvec_type(ctx, 32, 4);
+ SpvId pointer_type = spirv_builder_type_pointer(&ctx->builder,
+ SpvStorageClassUniform,
+ vec4_type);
+
+ unsigned idx = const_offset->u32;
+ SpvId member = spirv_builder_const_uint(&ctx->builder, 32, 0);
+ SpvId offset = spirv_builder_const_uint(&ctx->builder, 32, idx);
+ SpvId offsets[] = { member, offset };
+ SpvId ptr = spirv_builder_emit_access_chain(&ctx->builder, pointer_type,
+ ctx->ubos[0], offsets,
+ ARRAY_SIZE(offsets));
+ SpvId result = spirv_builder_emit_load(&ctx->builder, vec4_type, ptr);
+
+ SpvId type = get_dest_type(ctx, &intr->dest);
+ unsigned num_components = nir_dest_num_components(intr->dest);
+ if (num_components == 1) {
+ uint32_t components[] = { 0 };
+ result = spirv_builder_emit_composite_extract(&ctx->builder,
+ type,
+ result, components,
+ 1);
+ } else if (num_components < 4) {
+ SpvId constituents[num_components];
+ SpvId float_type = spirv_builder_type_float(&ctx->builder, 32);
+ for (uint32_t i = 0; i < num_components; ++i)
+ constituents[i] = spirv_builder_emit_composite_extract(&ctx->builder,
+ float_type,
+ result, &i,
+ 1);
+
+ result = spirv_builder_emit_composite_construct(&ctx->builder,
+ type,
+ constituents,
+ num_components);
+ }
+
+ store_dest(ctx, &intr->dest, result);
+ } else
+ unreachable("uniform-addressing not yet supported");
+}
+
+static void
+emit_store_output(struct ntv_context *ctx, nir_intrinsic_instr *intr)
+{
+ nir_const_value *const_offset = nir_src_as_const_value(intr->src[1]);
+ if (const_offset) {
+ int driver_location = (int)nir_intrinsic_base(intr) + const_offset->u32;
+ assert(driver_location < PIPE_MAX_SHADER_OUTPUTS);
+ int location_frac = nir_intrinsic_component(intr);
+ assert(location_frac < 4);
+
+ SpvId ptr = ctx->outputs[driver_location][location_frac];
+ assert(ptr > 0);
+
+ SpvId src = get_src(ctx, &intr->src[0]);
+ spirv_builder_emit_store(&ctx->builder, ptr, src);
+ } else
+ unreachable("output-addressing not yet supported");
+}
+
+static void
+emit_intrinsic(struct ntv_context *ctx, nir_intrinsic_instr *intr)
+{
+ switch (intr->intrinsic) {
+ case nir_intrinsic_load_input:
+ emit_load_input(ctx, intr);
+ break;
+
+ case nir_intrinsic_load_ubo:
+ emit_load_ubo(ctx, intr);
+ break;
+
+ case nir_intrinsic_store_output:
+ emit_store_output(ctx, intr);
+ break;
+
+ default:
+ fprintf(stderr, "emit_intrinsic: not implemented (%s)\n",
+ nir_intrinsic_infos[intr->intrinsic].name);
+ unreachable("unsupported intrinsic");
+ }
+}
+
+static void
+emit_undef(struct ntv_context *ctx, nir_ssa_undef_instr *undef)
+{
+ SpvId type = get_fvec_type(ctx, undef->def.bit_size,
+ undef->def.num_components);
+
+ store_ssa_def(ctx, &undef->def,
+ spirv_builder_emit_undef(&ctx->builder, type));
+}
+
+static void
+emit_tex(struct ntv_context *ctx, nir_tex_instr *tex)
+{
+ assert(tex->op == nir_texop_tex);
+ assert(nir_alu_type_get_base_type(tex->dest_type) == nir_type_float);
+ assert(tex->texture_index == tex->sampler_index);
+
+ bool has_proj = false;
+ SpvId coord = 0, proj;
+ unsigned coord_size;
+ for (unsigned i = 0; i < tex->num_srcs; i++) {
+ switch (tex->src[i].src_type) {
+ case nir_tex_src_coord:
+ coord = get_src(ctx, &tex->src[i].src);
+ coord_size = nir_src_num_components(tex->src[i].src);
+ break;
+
+ case nir_tex_src_projector:
+ has_proj = true;
+ proj = get_src(ctx, &tex->src[i].src);
+ break;
+
+ default:
+ fprintf(stderr, "texture source: %d\n", tex->src[i].src_type);
+ unreachable("unknown texture source");
+ }
+ }
+
+ bool is_ms;
+ SpvDim dimension = type_to_dim(tex->sampler_dim, &is_ms);
+ SpvId float_type = spirv_builder_type_float(&ctx->builder, 32);
+ SpvId image_type = spirv_builder_type_image(&ctx->builder, float_type,
+ dimension, false, tex->is_array, is_ms, 1,
+ SpvImageFormatUnknown);
+ SpvId sampled_type = spirv_builder_type_sampled_image(&ctx->builder,
+ image_type);
+
+ assert(tex->texture_index < ctx->num_samplers);
+ SpvId load = spirv_builder_emit_load(&ctx->builder, sampled_type,
+ ctx->samplers[tex->texture_index]);
+
+ SpvId dest_type = get_dest_type(ctx, &tex->dest);
+
+ SpvId result;
+ if (has_proj) {
+ SpvId constituents[coord_size + 1];
+ SpvId float_type = spirv_builder_type_float(&ctx->builder, 32);
+ for (uint32_t i = 0; i < coord_size; ++i)
+ constituents[i] = spirv_builder_emit_composite_extract(&ctx->builder,
+ float_type,
+ coord,
+ &i, 1);
+
+ constituents[coord_size++] = proj;
+
+ SpvId vec_type = get_fvec_type(ctx, 32, coord_size);
+ SpvId merged = spirv_builder_emit_composite_construct(&ctx->builder,
+ vec_type,
+ constituents,
+ coord_size);
+
+ result = spirv_builder_emit_image_sample_proj_implicit_lod(&ctx->builder,
+ dest_type,
+ load,
+ merged);
+ } else
+ result = spirv_builder_emit_image_sample_implicit_lod(&ctx->builder,
+ dest_type, load,
+ coord);
+ spirv_builder_emit_decoration(&ctx->builder, result,
+ SpvDecorationRelaxedPrecision);
+
+ store_dest(ctx, &tex->dest, result);
+}
+
+static void
+emit_block(struct ntv_context *ctx, struct nir_block *block)
+{
+ nir_foreach_instr(instr, block) {
+ switch (instr->type) {
+ case nir_instr_type_alu:
+ emit_alu(ctx, nir_instr_as_alu(instr));
+ break;
+ case nir_instr_type_intrinsic:
+ emit_intrinsic(ctx, nir_instr_as_intrinsic(instr));
+ break;
+ case nir_instr_type_load_const:
+ emit_load_const(ctx, nir_instr_as_load_const(instr));
+ break;
+ case nir_instr_type_ssa_undef:
+ emit_undef(ctx, nir_instr_as_ssa_undef(instr));
+ break;
+ case nir_instr_type_tex:
+ emit_tex(ctx, nir_instr_as_tex(instr));
+ break;
+ case nir_instr_type_phi:
+ unreachable("nir_instr_type_phi not supported");
+ break;
+ case nir_instr_type_jump:
+ unreachable("nir_instr_type_jump not supported");
+ break;
+ case nir_instr_type_call:
+ unreachable("nir_instr_type_call not supported");
+ break;
+ case nir_instr_type_parallel_copy:
+ unreachable("nir_instr_type_parallel_copy not supported");
+ break;
+ case nir_instr_type_deref:
+ unreachable("nir_instr_type_deref not supported");
+ break;
+ }
+ }
+}
+
+static void
+emit_cf_list(struct ntv_context *ctx, struct exec_list *list)
+{
+ foreach_list_typed(nir_cf_node, node, node, list) {
+ switch (node->type) {
+ case nir_cf_node_block:
+ emit_block(ctx, nir_cf_node_as_block(node));
+ break;
+
+ case nir_cf_node_if:
+ unreachable("nir_cf_node_if not supported");
+ break;
+
+ case nir_cf_node_loop:
+ unreachable("nir_cf_node_loop not supported");
+ break;
+
+ case nir_cf_node_function:
+ unreachable("nir_cf_node_function not supported");
+ break;
+ }
+ }
+}
+
+struct spirv_shader *
+nir_to_spirv(struct nir_shader *s)
+{
+ struct spirv_shader *ret = NULL;
+
+ struct ntv_context ctx = {};
+
+ switch (s->info.stage) {
+ case MESA_SHADER_VERTEX:
+ case MESA_SHADER_FRAGMENT:
+ case MESA_SHADER_COMPUTE:
+ spirv_builder_emit_cap(&ctx.builder, SpvCapabilityShader);
+ break;
+
+ case MESA_SHADER_TESS_CTRL:
+ case MESA_SHADER_TESS_EVAL:
+ spirv_builder_emit_cap(&ctx.builder, SpvCapabilityTessellation);
+ break;
+
+ case MESA_SHADER_GEOMETRY:
+ spirv_builder_emit_cap(&ctx.builder, SpvCapabilityGeometry);
+ break;
+
+ default:
+ unreachable("invalid stage");
+ }
+
+ ctx.stage = s->info.stage;
+ ctx.GLSL_std_450 = spirv_builder_import(&ctx.builder, "GLSL.std.450");
+ spirv_builder_emit_source(&ctx.builder, SpvSourceLanguageGLSL, 450);
+
+ spirv_builder_emit_mem_model(&ctx.builder, SpvAddressingModelLogical,
+ SpvMemoryModelGLSL450);
+
+ SpvExecutionModel exec_model;
+ switch (s->info.stage) {
+ case MESA_SHADER_VERTEX:
+ exec_model = SpvExecutionModelVertex;
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ exec_model = SpvExecutionModelTessellationControl;
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ exec_model = SpvExecutionModelTessellationEvaluation;
+ break;
+ case MESA_SHADER_GEOMETRY:
+ exec_model = SpvExecutionModelGeometry;
+ break;
+ case MESA_SHADER_FRAGMENT:
+ exec_model = SpvExecutionModelFragment;
+ break;
+ case MESA_SHADER_COMPUTE:
+ exec_model = SpvExecutionModelGLCompute;
+ break;
+ default:
+ unreachable("invalid stage");
+ }
+
+ SpvId type_void = spirv_builder_type_void(&ctx.builder);
+ SpvId type_main = spirv_builder_type_function(&ctx.builder, type_void,
+ NULL, 0);
+ SpvId entry_point = spirv_builder_new_id(&ctx.builder);
+ SpvId label = spirv_builder_new_id(&ctx.builder);
+ spirv_builder_emit_name(&ctx.builder, entry_point, "main");
+
+ nir_foreach_variable(var, &s->inputs)
+ emit_input(&ctx, var);
+
+ nir_foreach_variable(var, &s->outputs)
+ emit_output(&ctx, var);
+
+ nir_foreach_variable(var, &s->uniforms)
+ emit_uniform(&ctx, var);
+
+ spirv_builder_emit_entry_point(&ctx.builder, exec_model, entry_point,
+ "main", ctx.entry_ifaces,
+ ctx.num_entry_ifaces);
+ if (s->info.stage == MESA_SHADER_FRAGMENT)
+ spirv_builder_emit_exec_mode(&ctx.builder, entry_point,
+ SpvExecutionModeOriginUpperLeft);
+
+
+ spirv_builder_function(&ctx.builder, entry_point, type_void,
+ SpvFunctionControlMaskNone,
+ type_main);
+ spirv_builder_label(&ctx.builder, label);
+
+ nir_function_impl *entry = nir_shader_get_entrypoint(s);
+
+ ctx.defs = (SpvId *)malloc(sizeof(SpvId) * entry->ssa_alloc);
+ if (!ctx.defs)
+ goto fail;
+ ctx.num_defs = entry->ssa_alloc;
+
+ emit_cf_list(&ctx, &entry->body);
+ free(ctx.defs);
+
+ spirv_builder_return(&ctx.builder); // doesn't belong here, but whatevz
+ spirv_builder_function_end(&ctx.builder);
+
+ size_t num_words = spirv_builder_get_num_words(&ctx.builder);
+
+ ret = CALLOC_STRUCT(spirv_shader);
+ if (!ret)
+ goto fail;
+
+ ret->words = MALLOC(sizeof(uint32_t) * num_words);
+ if (!ret->words)
+ goto fail;
+
+ ret->num_words = spirv_builder_get_words(&ctx.builder, ret->words, num_words);
+ assert(ret->num_words == num_words);
+
+ return ret;
+
+fail:
+
+ if (ret)
+ spirv_shader_delete(ret);
+
+ return NULL;
+}
+
+void
+spirv_shader_delete(struct spirv_shader *s)
+{
+ FREE(s->words);
+ FREE(s);
+}
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <vulkan/vulkan.h>
+
+struct spirv_shader {
+ uint32_t *words;
+ size_t num_words;
+};
+
+struct nir_shader;
+
+struct spirv_shader *
+nir_to_spirv(struct nir_shader *s);
+
+void
+spirv_shader_delete(struct spirv_shader *s);
+
+#ifdef __cplusplus
+}
+#endif
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "spirv_builder.h"
+
+#include "util/macros.h"
+#include "util/u_bitcast.h"
+#include "util/u_memory.h"
+#include "util/hash_table.h"
+
+#include <stdbool.h>
+#include <inttypes.h>
+#include <string.h>
+
+static bool
+spirv_buffer_grow(struct spirv_buffer *b, size_t needed)
+{
+ size_t new_room = MAX3(64, (b->room * 3) / 2, needed);
+
+ uint32_t *new_words = realloc(b->words, new_room * sizeof(uint32_t));
+ if (!new_words)
+ return false;
+
+ b->words = new_words;
+ b->room = new_room;
+ return true;
+}
+
+static inline bool
+spirv_buffer_prepare(struct spirv_buffer *b, size_t needed)
+{
+ needed += b->num_words;
+ if (b->room >= b->num_words + needed)
+ return true;
+
+ return spirv_buffer_grow(b, needed);
+}
+
+static inline void
+spirv_buffer_emit_word(struct spirv_buffer *b, uint32_t word)
+{
+ assert(b->num_words < b->room);
+ b->words[b->num_words++] = word;
+}
+
+static int
+spirv_buffer_emit_string(struct spirv_buffer *b, const char *str)
+{
+ int pos = 0;
+ uint32_t word = 0;
+ while (str[pos] != '\0') {
+ word |= str[pos] << (8 * (pos % 4));
+ if (++pos % 4 == 0) {
+ spirv_buffer_prepare(b, 1);
+ spirv_buffer_emit_word(b, word);
+ word = 0;
+ }
+ }
+
+ spirv_buffer_prepare(b, 1);
+ spirv_buffer_emit_word(b, word);
+
+ return 1 + pos / 4;
+}
+
+void
+spirv_builder_emit_cap(struct spirv_builder *b, SpvCapability cap)
+{
+ spirv_buffer_prepare(&b->capabilities, 2);
+ spirv_buffer_emit_word(&b->capabilities, SpvOpCapability | (2 << 16));
+ spirv_buffer_emit_word(&b->capabilities, cap);
+}
+
+void
+spirv_builder_emit_source(struct spirv_builder *b, SpvSourceLanguage lang,
+ uint32_t version)
+{
+ spirv_buffer_prepare(&b->debug_names, 3);
+ spirv_buffer_emit_word(&b->debug_names, SpvOpSource | (3 << 16));
+ spirv_buffer_emit_word(&b->debug_names, lang);
+ spirv_buffer_emit_word(&b->debug_names, version);
+}
+
+void
+spirv_builder_emit_mem_model(struct spirv_builder *b,
+ SpvAddressingModel addr_model,
+ SpvMemoryModel mem_model)
+{
+ spirv_buffer_prepare(&b->memory_model, 3);
+ spirv_buffer_emit_word(&b->memory_model, SpvOpMemoryModel | (3 << 16));
+ spirv_buffer_emit_word(&b->memory_model, addr_model);
+ spirv_buffer_emit_word(&b->memory_model, mem_model);
+}
+
+void
+spirv_builder_emit_entry_point(struct spirv_builder *b,
+ SpvExecutionModel exec_model, SpvId entry_point,
+ const char *name, const SpvId interfaces[],
+ size_t num_interfaces)
+{
+ size_t pos = b->entry_points.num_words;
+ spirv_buffer_prepare(&b->entry_points, 3);
+ spirv_buffer_emit_word(&b->entry_points, SpvOpEntryPoint);
+ spirv_buffer_emit_word(&b->entry_points, exec_model);
+ spirv_buffer_emit_word(&b->entry_points, entry_point);
+ int len = spirv_buffer_emit_string(&b->entry_points, name);
+ b->entry_points.words[pos] |= (3 + len + num_interfaces) << 16;
+ spirv_buffer_prepare(&b->entry_points, num_interfaces);
+ for (int i = 0; i < num_interfaces; ++i)
+ spirv_buffer_emit_word(&b->entry_points, interfaces[i]);
+}
+
+void
+spirv_builder_emit_exec_mode(struct spirv_builder *b, SpvId entry_point,
+ SpvExecutionMode exec_mode)
+{
+ spirv_buffer_prepare(&b->exec_modes, 3);
+ spirv_buffer_emit_word(&b->exec_modes, SpvOpExecutionMode | (3 << 16));
+ spirv_buffer_emit_word(&b->exec_modes, entry_point);
+ spirv_buffer_emit_word(&b->exec_modes, exec_mode);
+}
+
+void
+spirv_builder_emit_name(struct spirv_builder *b, SpvId target,
+ const char *name)
+{
+ size_t pos = b->debug_names.num_words;
+ spirv_buffer_prepare(&b->debug_names, 2);
+ spirv_buffer_emit_word(&b->debug_names, SpvOpName);
+ spirv_buffer_emit_word(&b->debug_names, target);
+ int len = spirv_buffer_emit_string(&b->debug_names, name);
+ b->debug_names.words[pos] |= (2 + len) << 16;
+}
+
+static void
+emit_decoration(struct spirv_builder *b, SpvId target,
+ SpvDecoration decoration, const uint32_t extra_operands[],
+ size_t num_extra_operands)
+{
+ int words = 3 + num_extra_operands;
+ spirv_buffer_prepare(&b->decorations, words);
+ spirv_buffer_emit_word(&b->decorations, SpvOpDecorate | (words << 16));
+ spirv_buffer_emit_word(&b->decorations, target);
+ spirv_buffer_emit_word(&b->decorations, decoration);
+ for (int i = 0; i < num_extra_operands; ++i)
+ spirv_buffer_emit_word(&b->decorations, extra_operands[i]);
+}
+
+void
+spirv_builder_emit_decoration(struct spirv_builder *b, SpvId target,
+ SpvDecoration decoration)
+{
+ emit_decoration(b, target, decoration, NULL, 0);
+}
+
+void
+spirv_builder_emit_location(struct spirv_builder *b, SpvId target,
+ uint32_t location)
+{
+ uint32_t args[] = { location };
+ emit_decoration(b, target, SpvDecorationLocation, args, ARRAY_SIZE(args));
+}
+
+void
+spirv_builder_emit_component(struct spirv_builder *b, SpvId target,
+ uint32_t component)
+{
+ uint32_t args[] = { component };
+ emit_decoration(b, target, SpvDecorationComponent, args, ARRAY_SIZE(args));
+}
+
+void
+spirv_builder_emit_builtin(struct spirv_builder *b, SpvId target,
+ SpvBuiltIn builtin)
+{
+ uint32_t args[] = { builtin };
+ emit_decoration(b, target, SpvDecorationBuiltIn, args, ARRAY_SIZE(args));
+}
+
+void
+spirv_builder_emit_descriptor_set(struct spirv_builder *b, SpvId target,
+ uint32_t descriptor_set)
+{
+ uint32_t args[] = { descriptor_set };
+ emit_decoration(b, target, SpvDecorationDescriptorSet, args,
+ ARRAY_SIZE(args));
+}
+
+void
+spirv_builder_emit_binding(struct spirv_builder *b, SpvId target,
+ uint32_t binding)
+{
+ uint32_t args[] = { binding };
+ emit_decoration(b, target, SpvDecorationBinding, args, ARRAY_SIZE(args));
+}
+
+void
+spirv_builder_emit_array_stride(struct spirv_builder *b, SpvId target,
+ uint32_t stride)
+{
+ uint32_t args[] = { stride };
+ emit_decoration(b, target, SpvDecorationArrayStride, args, ARRAY_SIZE(args));
+}
+
+static void
+emit_member_decoration(struct spirv_builder *b, SpvId target, uint32_t member,
+ SpvDecoration decoration, const uint32_t extra_operands[],
+ size_t num_extra_operands)
+{
+ int words = 4 + num_extra_operands;
+ spirv_buffer_prepare(&b->decorations, words);
+ spirv_buffer_emit_word(&b->decorations,
+ SpvOpMemberDecorate | (words << 16));
+ spirv_buffer_emit_word(&b->decorations, target);
+ spirv_buffer_emit_word(&b->decorations, member);
+ spirv_buffer_emit_word(&b->decorations, decoration);
+ for (int i = 0; i < num_extra_operands; ++i)
+ spirv_buffer_emit_word(&b->decorations, extra_operands[i]);
+}
+
+void
+spirv_builder_emit_member_offset(struct spirv_builder *b, SpvId target,
+ uint32_t member, uint32_t offset)
+{
+ uint32_t args[] = { offset };
+ emit_member_decoration(b, target, member, SpvDecorationOffset,
+ args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_emit_undef(struct spirv_builder *b, SpvId result_type)
+{
+ SpvId result = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->instructions, 3);
+ spirv_buffer_emit_word(&b->instructions, SpvOpUndef | (3 << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ return result;
+}
+
+void
+spirv_builder_function(struct spirv_builder *b, SpvId result,
+ SpvId return_type,
+ SpvFunctionControlMask function_control,
+ SpvId function_type)
+{
+ spirv_buffer_prepare(&b->instructions, 5);
+ spirv_buffer_emit_word(&b->instructions, SpvOpFunction | (5 << 16));
+ spirv_buffer_emit_word(&b->instructions, return_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, function_control);
+ spirv_buffer_emit_word(&b->instructions, function_type);
+}
+
+void
+spirv_builder_function_end(struct spirv_builder *b)
+{
+ spirv_buffer_prepare(&b->instructions, 1);
+ spirv_buffer_emit_word(&b->instructions, SpvOpFunctionEnd | (1 << 16));
+}
+
+void
+spirv_builder_label(struct spirv_builder *b, SpvId label)
+{
+ spirv_buffer_prepare(&b->instructions, 2);
+ spirv_buffer_emit_word(&b->instructions, SpvOpLabel | (2 << 16));
+ spirv_buffer_emit_word(&b->instructions, label);
+}
+
+void
+spirv_builder_return(struct spirv_builder *b)
+{
+ spirv_buffer_prepare(&b->instructions, 1);
+ spirv_buffer_emit_word(&b->instructions, SpvOpReturn | (1 << 16));
+}
+
+SpvId
+spirv_builder_emit_load(struct spirv_builder *b, SpvId result_type,
+ SpvId pointer)
+{
+ return spirv_builder_emit_unop(b, SpvOpLoad, result_type, pointer);
+}
+
+void
+spirv_builder_emit_store(struct spirv_builder *b, SpvId pointer, SpvId object)
+{
+ spirv_buffer_prepare(&b->instructions, 3);
+ spirv_buffer_emit_word(&b->instructions, SpvOpStore | (3 << 16));
+ spirv_buffer_emit_word(&b->instructions, pointer);
+ spirv_buffer_emit_word(&b->instructions, object);
+}
+
+SpvId
+spirv_builder_emit_access_chain(struct spirv_builder *b, SpvId result_type,
+ SpvId base, const SpvId indexes[],
+ size_t num_indexes)
+{
+ SpvId result = spirv_builder_new_id(b);
+
+ int words = 4 + num_indexes;
+ spirv_buffer_prepare(&b->instructions, words);
+ spirv_buffer_emit_word(&b->instructions, SpvOpAccessChain | (words << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, base);
+ for (int i = 0; i < num_indexes; ++i)
+ spirv_buffer_emit_word(&b->instructions, indexes[i]);
+ return result;
+}
+
+
+SpvId
+spirv_builder_emit_unop(struct spirv_builder *b, SpvOp op, SpvId result_type,
+ SpvId operand)
+{
+ SpvId result = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->instructions, 4);
+ spirv_buffer_emit_word(&b->instructions, op | (4 << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, operand);
+ return result;
+}
+
+SpvId
+spirv_builder_emit_binop(struct spirv_builder *b, SpvOp op, SpvId result_type,
+ SpvId operand0, SpvId operand1)
+{
+ SpvId result = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->instructions, 5);
+ spirv_buffer_emit_word(&b->instructions, op | (5 << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, operand0);
+ spirv_buffer_emit_word(&b->instructions, operand1);
+ return result;
+}
+
+SpvId
+spirv_builder_emit_triop(struct spirv_builder *b, SpvOp op, SpvId result_type,
+ SpvId operand0, SpvId operand1, SpvId operand2)
+{
+ SpvId result = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->instructions, 6);
+ spirv_buffer_emit_word(&b->instructions, op | (6 << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, operand0);
+ spirv_buffer_emit_word(&b->instructions, operand1);
+ spirv_buffer_emit_word(&b->instructions, operand2);
+ return result;
+}
+
+SpvId
+spirv_builder_emit_composite_extract(struct spirv_builder *b, SpvId result_type,
+ SpvId composite, const uint32_t indexes[],
+ size_t num_indexes)
+{
+ SpvId result = spirv_builder_new_id(b);
+
+ assert(num_indexes > 0);
+ int words = 4 + num_indexes;
+ spirv_buffer_prepare(&b->instructions, words);
+ spirv_buffer_emit_word(&b->instructions,
+ SpvOpCompositeExtract | (words << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, composite);
+ for (int i = 0; i < num_indexes; ++i)
+ spirv_buffer_emit_word(&b->instructions, indexes[i]);
+ return result;
+}
+
+SpvId
+spirv_builder_emit_composite_construct(struct spirv_builder *b,
+ SpvId result_type,
+ const SpvId constituents[],
+ size_t num_constituents)
+{
+ SpvId result = spirv_builder_new_id(b);
+
+ assert(num_constituents > 0);
+ int words = 3 + num_constituents;
+ spirv_buffer_prepare(&b->instructions, words);
+ spirv_buffer_emit_word(&b->instructions,
+ SpvOpCompositeConstruct | (words << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ for (int i = 0; i < num_constituents; ++i)
+ spirv_buffer_emit_word(&b->instructions, constituents[i]);
+ return result;
+}
+
+SpvId
+spirv_builder_emit_vector_shuffle(struct spirv_builder *b, SpvId result_type,
+ SpvId vector_1, SpvId vector_2,
+ const uint32_t components[],
+ size_t num_components)
+{
+ SpvId result = spirv_builder_new_id(b);
+
+ assert(num_components > 0);
+ int words = 5 + num_components;
+ spirv_buffer_prepare(&b->instructions, words);
+ spirv_buffer_emit_word(&b->instructions, SpvOpVectorShuffle | (words << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, vector_1);
+ spirv_buffer_emit_word(&b->instructions, vector_2);
+ for (int i = 0; i < num_components; ++i)
+ spirv_buffer_emit_word(&b->instructions, components[i]);
+ return result;
+}
+
+SpvId
+spirv_builder_emit_image_sample_implicit_lod(struct spirv_builder *b,
+ SpvId result_type,
+ SpvId sampled_image,
+ SpvId coordinate)
+{
+ SpvId result = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->instructions, 5);
+ spirv_buffer_emit_word(&b->instructions, SpvOpImageSampleImplicitLod | (5 << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, sampled_image);
+ spirv_buffer_emit_word(&b->instructions, coordinate);
+ return result;
+}
+
+SpvId
+spirv_builder_emit_image_sample_proj_implicit_lod(struct spirv_builder *b,
+ SpvId result_type,
+ SpvId sampled_image,
+ SpvId coordinate)
+{
+ SpvId result = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->instructions, 5);
+ spirv_buffer_emit_word(&b->instructions, SpvOpImageSampleProjImplicitLod | (5 << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, sampled_image);
+ spirv_buffer_emit_word(&b->instructions, coordinate);
+ return result;
+}
+
+SpvId
+spirv_builder_emit_ext_inst(struct spirv_builder *b, SpvId result_type,
+ SpvId set, uint32_t instruction,
+ const SpvId *args, size_t num_args)
+{
+ SpvId result = spirv_builder_new_id(b);
+
+ int words = 5 + num_args;
+ spirv_buffer_prepare(&b->instructions, words);
+ spirv_buffer_emit_word(&b->instructions, SpvOpExtInst | (words << 16));
+ spirv_buffer_emit_word(&b->instructions, result_type);
+ spirv_buffer_emit_word(&b->instructions, result);
+ spirv_buffer_emit_word(&b->instructions, set);
+ spirv_buffer_emit_word(&b->instructions, instruction);
+ for (int i = 0; i < num_args; ++i)
+ spirv_buffer_emit_word(&b->instructions, args[i]);
+ return result;
+}
+
+struct spirv_type {
+ SpvOp op;
+ uint32_t args[8];
+ size_t num_args;
+
+ SpvId type;
+};
+
+static uint32_t
+non_aggregate_type_hash(const void *arg)
+{
+ const struct spirv_type *type = arg;
+
+ uint32_t hash = _mesa_fnv32_1a_offset_bias;
+ hash = _mesa_fnv32_1a_accumulate(hash, type->op);
+ hash = _mesa_fnv32_1a_accumulate_block(hash, type->args, sizeof(uint32_t) *
+ type->num_args);
+ return hash;
+}
+
+static bool
+non_aggregate_type_equals(const void *a, const void *b)
+{
+ const struct spirv_type *ta = a, *tb = b;
+
+ if (ta->op != tb->op)
+ return false;
+
+ assert(ta->num_args == tb->num_args);
+ return memcmp(ta->args, tb->args, sizeof(uint32_t) * ta->num_args) == 0;
+}
+
+static SpvId
+get_type_def(struct spirv_builder *b, SpvOp op, const uint32_t args[],
+ size_t num_args)
+{
+ /* According to the SPIR-V specification:
+ *
+ * "Two different type <id>s form, by definition, two different types. It
+ * is valid to declare multiple aggregate type <id>s having the same
+ * opcode and operands. This is to allow multiple instances of aggregate
+ * types with the same structure to be decorated differently. (Different
+ * decorations are not required; two different aggregate type <id>s are
+ * allowed to have identical declarations and decorations, and will still
+ * be two different types.) Non-aggregate types are different: It is
+ * invalid to declare multiple type <id>s for the same scalar, vector, or
+ * matrix type. That is, non-aggregate type declarations must all have
+ * different opcodes or operands. (Note that non-aggregate types cannot
+ * be decorated in ways that affect their type.)"
+ *
+ * ..so, we need to prevent the same non-aggregate type to be re-defined
+ * with a new <id>. We do this by putting the definitions in a hash-map, so
+ * we can easily look up and reuse them.
+ */
+
+ struct spirv_type key;
+ assert(num_args < ARRAY_SIZE(key.args));
+ key.op = op;
+ memcpy(&key.args, args, sizeof(uint32_t) * num_args);
+ key.num_args = num_args;
+
+ struct hash_entry *entry;
+ if (b->types) {
+ entry = _mesa_hash_table_search(b->types, &key);
+ if (entry)
+ return ((struct spirv_type *)entry->data)->type;
+ } else {
+ b->types = _mesa_hash_table_create(NULL, non_aggregate_type_hash,
+ non_aggregate_type_equals);
+ assert(b->types);
+ }
+
+ struct spirv_type *type = CALLOC_STRUCT(spirv_type);
+ if (!type)
+ return 0;
+
+ type->op = op;
+ memcpy(&type->args, args, sizeof(uint32_t) * num_args);
+ type->num_args = num_args;
+
+ type->type = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->types_const_defs, 2 + num_args);
+ spirv_buffer_emit_word(&b->types_const_defs, op | ((2 + num_args) << 16));
+ spirv_buffer_emit_word(&b->types_const_defs, type->type);
+ for (int i = 0; i < num_args; ++i)
+ spirv_buffer_emit_word(&b->types_const_defs, args[i]);
+
+ entry = _mesa_hash_table_insert(b->types, type, type);
+ assert(entry);
+
+ return ((struct spirv_type *)entry->data)->type;
+}
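To make the deduplication rule quoted in the comment above concrete, here is a small hypothetical caller (illustration only, not part of this patch; check_type_dedup is an invented name): non-aggregate types requested twice come back with the same <id>, while aggregate types such as structs intentionally get a fresh <id> on every declaration.

/* Hypothetical caller, for illustration only (not part of this patch). */
#include <assert.h>
#include "spirv_builder.h"

static void
check_type_dedup(struct spirv_builder *b)
{
   /* Non-aggregate types go through get_type_def() and its hash table,
    * so asking for "32-bit float" twice yields one and the same <id>... */
   SpvId f32_a = spirv_builder_type_float(b, 32);
   SpvId f32_b = spirv_builder_type_float(b, 32);
   assert(f32_a == f32_b);

   /* ...and so does a vector built from it. */
   SpvId vec4_a = spirv_builder_type_vector(b, f32_a, 4);
   SpvId vec4_b = spirv_builder_type_vector(b, f32_b, 4);
   assert(vec4_a == vec4_b);

   /* Aggregate types (structs, arrays) bypass get_type_def() and are
    * deliberately not deduplicated, matching the SPIR-V rule quoted above:
    * every declaration is a distinct <id>. */
   SpvId members[] = { vec4_a };
   SpvId s0 = spirv_builder_type_struct(b, members, 1);
   SpvId s1 = spirv_builder_type_struct(b, members, 1);
   assert(s0 != s1);
}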
+
+SpvId
+spirv_builder_type_void(struct spirv_builder *b)
+{
+ return get_type_def(b, SpvOpTypeVoid, NULL, 0);
+}
+
+SpvId
+spirv_builder_type_bool(struct spirv_builder *b)
+{
+ return get_type_def(b, SpvOpTypeBool, NULL, 0);
+}
+
+SpvId
+spirv_builder_type_int(struct spirv_builder *b, unsigned width)
+{
+ uint32_t args[] = { width, 1 };
+ return get_type_def(b, SpvOpTypeInt, args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_type_uint(struct spirv_builder *b, unsigned width)
+{
+ uint32_t args[] = { width, 0 };
+ return get_type_def(b, SpvOpTypeInt, args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_type_float(struct spirv_builder *b, unsigned width)
+{
+ uint32_t args[] = { width };
+ return get_type_def(b, SpvOpTypeFloat, args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_type_image(struct spirv_builder *b, SpvId sampled_type,
+ SpvDim dim, bool depth, bool arrayed, bool ms,
+ unsigned sampled, SpvImageFormat image_format)
+{
+ assert(sampled < 3);
+ uint32_t args[] = {
+ sampled_type, dim, depth ? 1 : 0, arrayed ? 1 : 0, ms ? 1 : 0, sampled,
+ image_format
+ };
+ return get_type_def(b, SpvOpTypeImage, args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_type_sampled_image(struct spirv_builder *b, SpvId image_type)
+{
+ uint32_t args[] = { image_type };
+ return get_type_def(b, SpvOpTypeSampledImage, args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_type_pointer(struct spirv_builder *b,
+ SpvStorageClass storage_class, SpvId type)
+{
+ uint32_t args[] = { storage_class, type };
+ return get_type_def(b, SpvOpTypePointer, args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_type_vector(struct spirv_builder *b, SpvId component_type,
+ unsigned component_count)
+{
+ assert(component_count > 1);
+ uint32_t args[] = { component_type, component_count };
+ return get_type_def(b, SpvOpTypeVector, args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_type_array(struct spirv_builder *b, SpvId component_type,
+ SpvId length)
+{
+ SpvId type = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->types_const_defs, 4);
+ spirv_buffer_emit_word(&b->types_const_defs, SpvOpTypeArray | (4 << 16));
+ spirv_buffer_emit_word(&b->types_const_defs, type);
+ spirv_buffer_emit_word(&b->types_const_defs, component_type);
+ spirv_buffer_emit_word(&b->types_const_defs, length);
+ return type;
+}
+
+SpvId
+spirv_builder_type_struct(struct spirv_builder *b, const SpvId member_types[],
+ size_t num_member_types)
+{
+ int words = 2 + num_member_types;
+ SpvId type = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->types_const_defs, words);
+ spirv_buffer_emit_word(&b->types_const_defs, SpvOpTypeStruct | (words << 16));
+ spirv_buffer_emit_word(&b->types_const_defs, type);
+ for (int i = 0; i < num_member_types; ++i)
+ spirv_buffer_emit_word(&b->types_const_defs, member_types[i]);
+ return type;
+}
+
+SpvId
+spirv_builder_type_function(struct spirv_builder *b, SpvId return_type,
+ const SpvId parameter_types[],
+ size_t num_parameter_types)
+{
+ int words = 3 + num_parameter_types;
+ SpvId type = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->types_const_defs, words);
+ spirv_buffer_emit_word(&b->types_const_defs, SpvOpTypeFunction | (words << 16));
+ spirv_buffer_emit_word(&b->types_const_defs, type);
+ spirv_buffer_emit_word(&b->types_const_defs, return_type);
+ for (int i = 0; i < num_parameter_types; ++i)
+ spirv_buffer_emit_word(&b->types_const_defs, parameter_types[i]);
+ return type;
+}
+
+static SpvId
+get_const_def(struct spirv_builder *b, SpvOp op, SpvId type,
+ const uint32_t args[], size_t num_args)
+{
+ /* TODO: reuse constants */
+ SpvId result = spirv_builder_new_id(b);
+ spirv_buffer_prepare(&b->types_const_defs, 3 + num_args);
+ spirv_buffer_emit_word(&b->types_const_defs, op | ((3 + num_args) << 16));
+ spirv_buffer_emit_word(&b->types_const_defs, type);
+ spirv_buffer_emit_word(&b->types_const_defs, result);
+ for (int i = 0; i < num_args; ++i)
+ spirv_buffer_emit_word(&b->types_const_defs, args[i]);
+ return result;
+}
+
+SpvId
+spirv_builder_const_bool(struct spirv_builder *b, bool val)
+{
+ return get_const_def(b, val ? SpvOpConstantTrue : SpvOpConstantFalse,
+ spirv_builder_type_bool(b), NULL, 0);
+}
+
+SpvId
+spirv_builder_const_int(struct spirv_builder *b, int width, int32_t val)
+{
+ assert(width <= 32);
+ uint32_t args[] = { val };
+ return get_const_def(b, SpvOpConstant, spirv_builder_type_int(b, width),
+ args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_const_uint(struct spirv_builder *b, int width, uint32_t val)
+{
+ assert(width <= 32);
+ uint32_t args[] = { val };
+ return get_const_def(b, SpvOpConstant, spirv_builder_type_uint(b, width),
+ args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_const_float(struct spirv_builder *b, int width, float val)
+{
+ assert(width <= 32);
+ uint32_t args[] = { u_bitcast_f2u(val) };
+ return get_const_def(b, SpvOpConstant, spirv_builder_type_float(b, width),
+ args, ARRAY_SIZE(args));
+}
+
+SpvId
+spirv_builder_const_composite(struct spirv_builder *b, SpvId result_type,
+ const SpvId constituents[],
+ size_t num_constituents)
+{
+ return get_const_def(b, SpvOpConstantComposite, result_type,
+ (const uint32_t *)constituents,
+ num_constituents);
+}
+
+SpvId
+spirv_builder_emit_var(struct spirv_builder *b, SpvId type,
+ SpvStorageClass storage_class)
+{
+ assert(storage_class != SpvStorageClassGeneric);
+ struct spirv_buffer *buf = storage_class != SpvStorageClassFunction ?
+ &b->types_const_defs : &b->instructions;
+
+ SpvId ret = spirv_builder_new_id(b);
+ spirv_buffer_prepare(buf, 4);
+ spirv_buffer_emit_word(buf, SpvOpVariable | (4 << 16));
+ spirv_buffer_emit_word(buf, type);
+ spirv_buffer_emit_word(buf, ret);
+ spirv_buffer_emit_word(buf, storage_class);
+ return ret;
+}
+
+SpvId
+spirv_builder_import(struct spirv_builder *b, const char *name)
+{
+ SpvId result = spirv_builder_new_id(b);
+ size_t pos = b->imports.num_words;
+ spirv_buffer_prepare(&b->imports, 2);
+ spirv_buffer_emit_word(&b->imports, SpvOpExtInstImport);
+ spirv_buffer_emit_word(&b->imports, result);
+ int len = spirv_buffer_emit_string(&b->imports, name);
+ b->imports.words[pos] |= (2 + len) << 16;
+ return result;
+}
+
+size_t
+spirv_builder_get_num_words(struct spirv_builder *b)
+{
+ const size_t header_size = 5;
+ return header_size +
+ b->capabilities.num_words +
+ b->imports.num_words +
+ b->memory_model.num_words +
+ b->entry_points.num_words +
+ b->exec_modes.num_words +
+ b->debug_names.num_words +
+ b->decorations.num_words +
+ b->types_const_defs.num_words +
+ b->instructions.num_words;
+}
+
+size_t
+spirv_builder_get_words(struct spirv_builder *b, uint32_t *words,
+ size_t num_words)
+{
+ assert(num_words >= spirv_builder_get_num_words(b));
+
+ size_t written = 0;
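+ /* SPIR-V module header: magic number, version 1.0, generator (zero),
+  * ID bound (highest ID plus one) and the reserved schema word */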
+ words[written++] = SpvMagicNumber;
+ words[written++] = 0x00010000;
+ words[written++] = 0;
+ words[written++] = b->prev_id + 1;
+ words[written++] = 0;
+
+ const struct spirv_buffer *buffers[] = {
+ &b->capabilities,
+ &b->imports,
+ &b->memory_model,
+ &b->entry_points,
+ &b->exec_modes,
+ &b->debug_names,
+ &b->decorations,
+ &b->types_const_defs,
+ &b->instructions
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(buffers); ++i) {
+ const struct spirv_buffer *buffer = buffers[i];
+ for (int j = 0; j < buffer->num_words; ++j)
+ words[written++] = buffer->words[j];
+ }
+
+ assert(written == spirv_builder_get_num_words(b));
+ return written;
+}
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SPIRV_BUILDER_H
+#define SPIRV_BUILDER_H
+
+#include "compiler/spirv/spirv.h"
+#include "compiler/spirv/GLSL.std.450.h"
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+struct spirv_buffer {
+ uint32_t *words;
+ size_t num_words, room;
+};
+
+struct spirv_builder {
+ struct spirv_buffer capabilities;
+ struct spirv_buffer imports;
+ struct spirv_buffer memory_model;
+ struct spirv_buffer entry_points;
+ struct spirv_buffer exec_modes;
+ struct spirv_buffer debug_names;
+ struct spirv_buffer decorations;
+
+ struct spirv_buffer types_const_defs;
+ struct hash_table *types;
+
+ struct spirv_buffer instructions;
+ SpvId prev_id;
+};
+
+static inline SpvId
+spirv_builder_new_id(struct spirv_builder *b)
+{
+ return ++b->prev_id;
+}
+
+void
+spirv_builder_emit_cap(struct spirv_builder *b, SpvCapability cap);
+
+void
+spirv_builder_emit_source(struct spirv_builder *b, SpvSourceLanguage lang,
+ uint32_t version);
+
+void
+spirv_builder_emit_mem_model(struct spirv_builder *b,
+ SpvAddressingModel addr_model,
+ SpvMemoryModel mem_model);
+
+void
+spirv_builder_emit_name(struct spirv_builder *b, SpvId target,
+ const char *name);
+
+void
+spirv_builder_emit_decoration(struct spirv_builder *b, SpvId target,
+ SpvDecoration decoration);
+
+void
+spirv_builder_emit_location(struct spirv_builder *b, SpvId target,
+ uint32_t location);
+
+void
+spirv_builder_emit_component(struct spirv_builder *b, SpvId target,
+ uint32_t component);
+
+void
+spirv_builder_emit_builtin(struct spirv_builder *b, SpvId target,
+ SpvBuiltIn builtin);
+
+void
+spirv_builder_emit_descriptor_set(struct spirv_builder *b, SpvId target,
+ uint32_t descriptor_set);
+
+void
+spirv_builder_emit_binding(struct spirv_builder *b, SpvId target,
+ uint32_t binding);
+
+void
+spirv_builder_emit_array_stride(struct spirv_builder *b, SpvId target,
+ uint32_t stride);
+
+void
+spirv_builder_emit_member_offset(struct spirv_builder *b, SpvId target,
+ uint32_t member, uint32_t offset);
+
+void
+spirv_builder_emit_entry_point(struct spirv_builder *b,
+ SpvExecutionModel exec_model, SpvId entry_point,
+ const char *name, const SpvId interfaces[],
+ size_t num_interfaces);
+
+void
+spirv_builder_emit_exec_mode(struct spirv_builder *b, SpvId entry_point,
+ SpvExecutionMode exec_mode);
+
+void
+spirv_builder_function(struct spirv_builder *b, SpvId result,
+ SpvId return_type,
+ SpvFunctionControlMask function_control,
+ SpvId function_type);
+
+void
+spirv_builder_function_end(struct spirv_builder *b);
+
+void
+spirv_builder_label(struct spirv_builder *b, SpvId label);
+
+void
+spirv_builder_return(struct spirv_builder *b);
+
+SpvId
+spirv_builder_emit_undef(struct spirv_builder *b, SpvId result_type);
+
+SpvId
+spirv_builder_emit_load(struct spirv_builder *b, SpvId result_type,
+ SpvId pointer);
+
+void
+spirv_builder_emit_store(struct spirv_builder *b, SpvId pointer, SpvId object);
+
+SpvId
+spirv_builder_emit_access_chain(struct spirv_builder *b, SpvId result_type,
+ SpvId base, const SpvId indexes[],
+ size_t num_indexes);
+
+SpvId
+spirv_builder_emit_unop(struct spirv_builder *b, SpvOp op, SpvId result_type,
+ SpvId operand);
+
+SpvId
+spirv_builder_emit_binop(struct spirv_builder *b, SpvOp op, SpvId result_type,
+ SpvId operand0, SpvId operand1);
+
+SpvId
+spirv_builder_emit_triop(struct spirv_builder *b, SpvOp op, SpvId result_type,
+ SpvId operand0, SpvId operand1, SpvId operand2);
+
+SpvId
+spirv_builder_emit_composite_extract(struct spirv_builder *b, SpvId result_type,
+ SpvId composite, const uint32_t indexes[],
+ size_t num_indexes);
+
+SpvId
+spirv_builder_emit_composite_construct(struct spirv_builder *b,
+ SpvId result_type,
+ const SpvId constituents[],
+ size_t num_constituents);
+
+SpvId
+spirv_builder_emit_vector_shuffle(struct spirv_builder *b, SpvId result_type,
+ SpvId vector_1, SpvId vector_2,
+ const uint32_t components[],
+ size_t num_components);
+
+SpvId
+spirv_builder_emit_image_sample_implicit_lod(struct spirv_builder *b,
+ SpvId result_type,
+ SpvId sampled_image,
+ SpvId coordinate);
+
+SpvId
+spirv_builder_emit_image_sample_proj_implicit_lod(struct spirv_builder *b,
+ SpvId result_type,
+ SpvId sampled_image,
+ SpvId coordinate);
+
+SpvId
+spirv_builder_emit_ext_inst(struct spirv_builder *b, SpvId result_type,
+ SpvId set, uint32_t instruction,
+ const SpvId args[], size_t num_args);
+
+SpvId
+spirv_builder_type_void(struct spirv_builder *b);
+
+SpvId
+spirv_builder_type_bool(struct spirv_builder *b);
+
+SpvId
+spirv_builder_type_int(struct spirv_builder *b, unsigned width);
+
+SpvId
+spirv_builder_type_uint(struct spirv_builder *b, unsigned width);
+
+SpvId
+spirv_builder_type_float(struct spirv_builder *b, unsigned width);
+
+SpvId
+spirv_builder_type_image(struct spirv_builder *b, SpvId sampled_type,
+ SpvDim dim, bool depth, bool arrayed, bool ms,
+ unsigned sampled, SpvImageFormat image_format);
+
+SpvId
+spirv_builder_type_sampled_image(struct spirv_builder *b, SpvId image_type);
+
+SpvId
+spirv_builder_type_pointer(struct spirv_builder *b,
+ SpvStorageClass storage_class, SpvId type);
+
+SpvId
+spirv_builder_type_vector(struct spirv_builder *b, SpvId component_type,
+ unsigned component_count);
+
+SpvId
+spirv_builder_type_array(struct spirv_builder *b, SpvId component_type,
+ SpvId length);
+
+SpvId
+spirv_builder_type_struct(struct spirv_builder *b, const SpvId member_types[],
+ size_t num_member_types);
+
+SpvId
+spirv_builder_type_function(struct spirv_builder *b, SpvId return_type,
+ const SpvId parameter_types[],
+ size_t num_parameter_types);
+
+SpvId
+spirv_builder_const_bool(struct spirv_builder *b, bool val);
+
+SpvId
+spirv_builder_const_int(struct spirv_builder *b, int width, int32_t val);
+
+SpvId
+spirv_builder_const_uint(struct spirv_builder *b, int width, uint32_t val);
+
+SpvId
+spirv_builder_const_float(struct spirv_builder *b, int width, float val);
+
+SpvId
+spirv_builder_const_composite(struct spirv_builder *b, SpvId result_type,
+ const SpvId constituents[],
+ size_t num_constituents);
+
+SpvId
+spirv_builder_emit_var(struct spirv_builder *b, SpvId type,
+ SpvStorageClass storage_class);
+
+SpvId
+spirv_builder_import(struct spirv_builder *b, const char *name);
+
+size_t
+spirv_builder_get_num_words(struct spirv_builder *b);
+
+size_t
+spirv_builder_get_words(struct spirv_builder *b, uint32_t *words,
+ size_t num_words);
+
+#endif
--- /dev/null
+#include "zink_cmdbuf.h"
+
+#include "zink_context.h"
+#include "zink_fence.h"
+#include "zink_screen.h"
+
+#include "util/u_debug.h"
+
+struct zink_cmdbuf *
+zink_start_cmdbuf(struct zink_context *ctx)
+{
+ struct zink_cmdbuf *cmdbuf = &ctx->cmdbuf;
+
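+ /* wait for the previous submission to complete before re-recording
+  * into the same command buffer */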
+ if (cmdbuf->fence) {
+ struct zink_screen *screen = zink_screen(ctx->base.screen);
+ zink_fence_finish(screen, cmdbuf->fence, PIPE_TIMEOUT_INFINITE);
+ zink_fence_reference(screen, &cmdbuf->fence, NULL);
+ }
+
+ VkCommandBufferBeginInfo cbbi = {};
+ cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
+ cbbi.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+ if (vkBeginCommandBuffer(cmdbuf->cmdbuf, &cbbi) != VK_SUCCESS) {
+ debug_printf("vkBeginCommandBuffer failed\n");
+ return NULL;
+ }
+
+ return cmdbuf;
+}
+
+static bool
+submit_cmdbuf(struct zink_context *ctx, VkCommandBuffer cmdbuf, VkFence fence)
+{
+ VkPipelineStageFlags wait = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+
+ VkSubmitInfo si = {};
+ si.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
+ si.waitSemaphoreCount = 0;
+ si.pWaitSemaphores = NULL;
+ si.signalSemaphoreCount = 0;
+ si.pSignalSemaphores = NULL;
+ si.pWaitDstStageMask = &wait;
+ si.commandBufferCount = 1;
+ si.pCommandBuffers = &cmdbuf;
+
+ if (vkQueueSubmit(ctx->queue, 1, &si, fence) != VK_SUCCESS) {
+ debug_printf("vkQueueSubmit failed\n");
+ return false;
+ }
+
+ return true;
+}
+
+void
+zink_end_cmdbuf(struct zink_context *ctx, struct zink_cmdbuf *cmdbuf)
+{
+ if (vkEndCommandBuffer(cmdbuf->cmdbuf) != VK_SUCCESS) {
+ debug_printf("vkEndCommandBuffer failed\n");
+ return;
+ }
+
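+ /* submission is fully synchronous for now: a fresh fence per submit,
+  * followed by draining the whole queue */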
+ assert(cmdbuf->fence == NULL);
+ cmdbuf->fence = zink_create_fence(ctx->base.screen);
+ if (!cmdbuf->fence ||
+ !submit_cmdbuf(ctx, cmdbuf->cmdbuf, cmdbuf->fence->fence))
+ return;
+
+ if (vkQueueWaitIdle(ctx->queue) != VK_SUCCESS)
+ debug_printf("vkQueueWaitIdle failed\n");
+}
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_CMDBUF_H
+#define ZINK_CMDBUF_H
+
+#include <vulkan/vulkan.h>
+
+struct zink_context;
+struct zink_fence;
+
+struct zink_cmdbuf {
+ VkCommandBuffer cmdbuf;
+ struct zink_fence *fence;
+};
+
+struct zink_cmdbuf *
+zink_start_cmdbuf(struct zink_context *ctx);
+
+void
+zink_end_cmdbuf(struct zink_context *ctx, struct zink_cmdbuf *cmdbuf);
+
+#endif
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_compiler.h"
+#include "zink_screen.h"
+#include "nir_to_spirv/nir_to_spirv.h"
+
+#include "pipe/p_state.h"
+
+#include "nir.h"
+#include "compiler/nir/nir_builder.h"
+
+#include "nir/tgsi_to_nir.h"
+#include "tgsi/tgsi_dump.h"
+#include "tgsi/tgsi_from_mesa.h"
+
+#include "util/u_memory.h"
+
+static bool
+lower_instr(nir_intrinsic_instr *instr, nir_builder *b)
+{
+ b->cursor = nir_before_instr(&instr->instr);
+
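+ /* shift every existing UBO index up by one; index 0 is reserved for
+  * the UBO that receives the lowered uniforms below */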
+ if (instr->intrinsic == nir_intrinsic_load_ubo) {
+ nir_ssa_def *old_idx = nir_ssa_for_src(b, instr->src[0], 1);
+ nir_ssa_def *new_idx = nir_iadd(b, old_idx, nir_imm_int(b, 1));
+ nir_instr_rewrite_src(&instr->instr, &instr->src[0],
+ nir_src_for_ssa(new_idx));
+ return true;
+ }
+
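+ /* rewrite load_uniform as a load_ubo from the reserved UBO at index 0,
+  * folding the intrinsic's base into the offset */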
+ if (instr->intrinsic == nir_intrinsic_load_uniform) {
+ nir_ssa_def *ubo_idx = nir_imm_int(b, 0);
+ nir_ssa_def *ubo_offset =
+ nir_iadd(b, nir_imm_int(b, nir_intrinsic_base(instr)),
+ nir_ssa_for_src(b, instr->src[0], 1));
+
+ nir_intrinsic_instr *load =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo);
+ load->num_components = instr->num_components;
+ load->src[0] = nir_src_for_ssa(ubo_idx);
+ load->src[1] = nir_src_for_ssa(ubo_offset);
+ nir_ssa_dest_init(&load->instr, &load->dest,
+ load->num_components, instr->dest.ssa.bit_size,
+ instr->dest.ssa.name);
+ nir_builder_instr_insert(b, &load->instr);
+ nir_ssa_def_rewrite_uses(&instr->dest.ssa, nir_src_for_ssa(&load->dest.ssa));
+
+ nir_instr_remove(&instr->instr);
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+lower_uniforms_to_ubo(nir_shader *shader)
+{
+ bool progress = false;
+
+ nir_foreach_function(function, shader) {
+ if (function->impl) {
+ nir_builder builder;
+ nir_builder_init(&builder, function->impl);
+ nir_foreach_block(block, function->impl) {
+ nir_foreach_instr_safe(instr, block) {
+ if (instr->type == nir_instr_type_intrinsic)
+ progress |= lower_instr(nir_instr_as_intrinsic(instr),
+ &builder);
+ }
+ }
+
+ nir_metadata_preserve(function->impl, nir_metadata_block_index |
+ nir_metadata_dominance);
+ }
+ }
+
+ if (progress) {
+ assert(shader->num_uniforms > 0);
+ const struct glsl_type *type = glsl_array_type(glsl_vec4_type(),
+ shader->num_uniforms, 0);
+ nir_variable *ubo = nir_variable_create(shader, nir_var_mem_ubo, type,
+ "uniform_0");
+ ubo->data.binding = 0;
+
+ struct glsl_struct_field field = {
+ .type = type,
+ .name = "data",
+ .location = -1,
+ };
+ ubo->interface_type =
+ glsl_interface_type(&field, 1, GLSL_INTERFACE_PACKING_STD430,
+ false, "__ubo0_interface");
+ }
+
+ return progress;
+}
+
+static const struct nir_shader_compiler_options nir_options = {
+ .lower_all_io_to_temps = true,
+ .lower_ffma = true,
+ .lower_flrp32 = true,
+ .lower_fpow = true,
+ .lower_fsat = true,
+};
+
+const void *
+zink_get_compiler_options(struct pipe_screen *screen,
+ enum pipe_shader_ir ir,
+ enum pipe_shader_type shader)
+{
+ assert(ir == PIPE_SHADER_IR_NIR);
+ return &nir_options;
+}
+
+struct nir_shader *
+zink_tgsi_to_nir(struct pipe_screen *screen, const struct tgsi_token *tokens)
+{
+ if (zink_debug & ZINK_DEBUG_TGSI) {
+ fprintf(stderr, "TGSI shader:\n---8<---\n");
+ tgsi_dump_to_file(tokens, 0, stderr);
+ fprintf(stderr, "---8<---\n\n");
+ }
+
+ return tgsi_to_nir(tokens, screen);
+}
+
+static void
+optimize_nir(struct nir_shader *s)
+{
+ bool progress;
+ do {
+ progress = false;
+ NIR_PASS_V(s, nir_lower_vars_to_ssa);
+ NIR_PASS(progress, s, nir_copy_prop);
+ NIR_PASS(progress, s, nir_opt_remove_phis);
+ NIR_PASS(progress, s, nir_opt_dce);
+ NIR_PASS(progress, s, nir_opt_dead_cf);
+ NIR_PASS(progress, s, nir_opt_cse);
+ NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
+ NIR_PASS(progress, s, nir_opt_algebraic);
+ NIR_PASS(progress, s, nir_opt_constant_folding);
+ NIR_PASS(progress, s, nir_opt_undef);
+ } while (progress);
+}
+
+static int
+glsl_type_size(const struct glsl_type *type, bool bindless)
+{
+ return glsl_count_attribute_slots(type, false);
+}
+
+static uint32_t
+zink_binding(enum pipe_shader_type stage, VkDescriptorType type, int index)
+{
+ if (stage == PIPE_SHADER_COMPUTE) {
+ unreachable("not supported");
+ } else {
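+ /* flat per-stage binding layout: UBO bindings come first, followed by
+  * combined image/sampler bindings */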
+ uint32_t stage_offset = (uint32_t)stage * (PIPE_MAX_CONSTANT_BUFFERS +
+ PIPE_MAX_SHADER_SAMPLER_VIEWS);
+
+ switch (type) {
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ assert(index < PIPE_MAX_CONSTANT_BUFFERS);
+ return stage_offset + index;
+
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ assert(index < PIPE_MAX_SHADER_SAMPLER_VIEWS);
+ return stage_offset + PIPE_MAX_CONSTANT_BUFFERS + index;
+
+ default:
+ unreachable("unexpected type");
+ }
+ }
+}
+
+struct zink_shader *
+zink_compile_nir(struct zink_screen *screen, struct nir_shader *nir)
+{
+ struct zink_shader *ret = CALLOC_STRUCT(zink_shader);
+
+ NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, (nir_lower_io_options)0);
+ NIR_PASS_V(nir, lower_uniforms_to_ubo);
+ NIR_PASS_V(nir, nir_lower_regs_to_ssa);
+ NIR_PASS_V(nir, nir_lower_bool_to_float);
+ optimize_nir(nir);
+ NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp);
+
+ if (zink_debug & ZINK_DEBUG_NIR) {
+ fprintf(stderr, "NIR shader:\n---8<---\n");
+ nir_print_shader(nir, stderr);
+ fprintf(stderr, "---8<---\n");
+ }
+
+ enum pipe_shader_type stage = pipe_shader_type_from_mesa(nir->info.stage);
+
+ ret->num_bindings = 0;
+ nir_foreach_variable(var, &nir->uniforms) {
+ if (glsl_type_is_sampler(var->type)) {
+ ret->bindings[ret->num_bindings].index = var->data.driver_location;
+ var->data.binding = zink_binding(stage, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, var->data.driver_location);
+ ret->bindings[ret->num_bindings].binding = var->data.binding;
+ ret->bindings[ret->num_bindings].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+ ret->num_bindings++;
+ } else if (var->interface_type) {
+ ret->bindings[ret->num_bindings].index = var->data.binding;
+ var->data.binding = zink_binding(stage, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, var->data.binding);
+ ret->bindings[ret->num_bindings].binding = var->data.binding;
+ ret->bindings[ret->num_bindings].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+ ret->num_bindings++;
+ }
+ }
+
+ struct spirv_shader *spirv = nir_to_spirv(nir);
+ assert(spirv);
+
+ if (zink_debug & ZINK_DEBUG_SPIRV) {
+ char buf[256];
+ static int i;
+ snprintf(buf, sizeof(buf), "dump%02d.spv", i++);
+ FILE *fp = fopen(buf, "wb");
+ fwrite(spirv->words, sizeof(uint32_t), spirv->num_words, fp);
+ fclose(fp);
+ fprintf(stderr, "wrote '%s'...\n", buf);
+ }
+
+ VkShaderModuleCreateInfo smci = {};
+ smci.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
+ smci.codeSize = spirv->num_words * sizeof(uint32_t);
+ smci.pCode = spirv->words;
+
+ if (vkCreateShaderModule(screen->dev, &smci, NULL, &ret->shader_module) != VK_SUCCESS)
+ return NULL;
+
+ return ret;
+}
+
+void
+zink_shader_free(struct zink_screen *screen, struct zink_shader *shader)
+{
+ vkDestroyShaderModule(screen->dev, shader->shader_module, NULL);
+ FREE(shader);
+}
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_COMPILER_H
+#define ZINK_COMPILER_H
+
+#include "pipe/p_defines.h"
+#include "pipe/p_state.h"
+
+#include <vulkan/vulkan.h>
+
+struct pipe_screen;
+struct zink_screen;
+
+struct nir_shader_compiler_options;
+struct nir_shader;
+
+struct tgsi_token;
+
+const void *
+zink_get_compiler_options(struct pipe_screen *screen,
+ enum pipe_shader_ir ir,
+ enum pipe_shader_type shader);
+
+struct nir_shader *
+zink_tgsi_to_nir(struct pipe_screen *screen, const struct tgsi_token *tokens);
+
+struct zink_shader {
+ VkShaderModule shader_module;
+
+ struct {
+ int index;
+ int binding;
+ VkDescriptorType type;
+ } bindings[PIPE_MAX_CONSTANT_BUFFERS + PIPE_MAX_SHADER_SAMPLER_VIEWS];
+ size_t num_bindings;
+};
+
+struct zink_shader *
+zink_compile_nir(struct zink_screen *screen, struct nir_shader *nir);
+
+void
+zink_shader_free(struct zink_screen *screen, struct zink_shader *shader);
+
+#endif
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_context.h"
+
+#include "zink_cmdbuf.h"
+#include "zink_compiler.h"
+#include "zink_framebuffer.h"
+#include "zink_pipeline.h"
+#include "zink_program.h"
+#include "zink_render_pass.h"
+#include "zink_resource.h"
+#include "zink_screen.h"
+#include "zink_state.h"
+#include "zink_surface.h"
+
+#include "indices/u_primconvert.h"
+#include "util/u_blitter.h"
+#include "util/u_debug.h"
+#include "util/u_format.h"
+#include "util/u_framebuffer.h"
+#include "util/u_helpers.h"
+#include "util/u_inlines.h"
+
+#include "nir.h"
+
+#include "util/u_memory.h"
+#include "util/u_prim.h"
+#include "util/u_upload_mgr.h"
+
+static void
+zink_context_destroy(struct pipe_context *pctx)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ struct zink_screen *screen = zink_screen(pctx->screen);
+ vkFreeCommandBuffers(screen->dev, ctx->cmdpool, 1, &ctx->cmdbuf.cmdbuf);
+ vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);
+
+ util_primconvert_destroy(ctx->primconvert);
+ u_upload_destroy(pctx->stream_uploader);
+ slab_destroy_child(&ctx->transfer_pool);
+ util_blitter_destroy(ctx->blitter);
+ FREE(ctx);
+}
+
+static VkFilter
+filter(enum pipe_tex_filter filter)
+{
+ switch (filter) {
+ case PIPE_TEX_FILTER_NEAREST: return VK_FILTER_NEAREST;
+ case PIPE_TEX_FILTER_LINEAR: return VK_FILTER_LINEAR;
+ }
+ unreachable("unexpected filter");
+}
+
+static VkSamplerMipmapMode
+sampler_mipmap_mode(enum pipe_tex_mipfilter filter)
+{
+ switch (filter) {
+ case PIPE_TEX_MIPFILTER_NEAREST: return VK_SAMPLER_MIPMAP_MODE_NEAREST;
+ case PIPE_TEX_MIPFILTER_LINEAR: return VK_SAMPLER_MIPMAP_MODE_LINEAR;
+ case PIPE_TEX_MIPFILTER_NONE:
+ unreachable("PIPE_TEX_MIPFILTER_NONE should be dealt with earlier");
+ }
+ unreachable("unexpected filter");
+}
+
+static VkSamplerAddressMode
+sampler_address_mode(enum pipe_tex_wrap filter)
+{
+ switch (filter) {
+ case PIPE_TEX_WRAP_REPEAT: return VK_SAMPLER_ADDRESS_MODE_REPEAT;
+ case PIPE_TEX_WRAP_CLAMP: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
+ case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
+ case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
+ case PIPE_TEX_WRAP_MIRROR_REPEAT: return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
+ case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
+ }
+ unreachable("unexpected wrap");
+}
+
+static void *
+zink_create_sampler_state(struct pipe_context *pctx,
+ const struct pipe_sampler_state *state)
+{
+ struct zink_screen *screen = zink_screen(pctx->screen);
+
+ VkSamplerCreateInfo sci = {};
+ sci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+ sci.magFilter = filter(state->mag_img_filter);
+ sci.minFilter = filter(state->min_img_filter);
+
+ if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) {
+ sci.mipmapMode = sampler_mipmap_mode(state->min_mip_filter);
+ sci.minLod = state->min_lod;
+ sci.maxLod = state->max_lod;
+ } else {
+ sci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
+ sci.minLod = 0;
+ sci.maxLod = 0;
+ }
+
+ sci.addressModeU = sampler_address_mode(state->wrap_s);
+ sci.addressModeV = sampler_address_mode(state->wrap_t);
+ sci.addressModeW = sampler_address_mode(state->wrap_r);
+ sci.mipLodBias = state->lod_bias;
+ sci.compareOp = VK_COMPARE_OP_NEVER; // TODO
+ sci.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; // TODO
+
+ if (state->max_anisotropy > 1) {
+ sci.maxAnisotropy = state->max_anisotropy;
+ sci.anisotropyEnable = VK_TRUE;
+ }
+
+ VkSampler sampler;
+ VkResult err = vkCreateSampler(screen->dev, &sci, NULL, &sampler);
+ if (err != VK_SUCCESS)
+ return NULL;
+
+ return sampler;
+}
+
+static void
+zink_bind_sampler_states(struct pipe_context *pctx,
+ enum pipe_shader_type shader,
+ unsigned start_slot,
+ unsigned num_samplers,
+ void **samplers)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ for (unsigned i = 0; i < num_samplers; ++i)
+ ctx->samplers[shader][start_slot + i] = (VkSampler)samplers[i];
+}
+
+static void
+zink_delete_sampler_state(struct pipe_context *pctx,
+ void *sampler_state)
+{
+ struct zink_screen *screen = zink_screen(pctx->screen);
+ vkDestroySampler(screen->dev, sampler_state, NULL);
+}
+
+
+static VkImageViewType
+image_view_type(enum pipe_texture_target target)
+{
+ switch (target) {
+ case PIPE_TEXTURE_1D: return VK_IMAGE_VIEW_TYPE_1D;
+ case PIPE_TEXTURE_1D_ARRAY: return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
+ case PIPE_TEXTURE_2D: return VK_IMAGE_VIEW_TYPE_2D;
+ case PIPE_TEXTURE_2D_ARRAY: return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
+ case PIPE_TEXTURE_CUBE: return VK_IMAGE_VIEW_TYPE_CUBE;
+ case PIPE_TEXTURE_CUBE_ARRAY: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
+ case PIPE_TEXTURE_3D: return VK_IMAGE_VIEW_TYPE_3D;
+ case PIPE_TEXTURE_RECT: return VK_IMAGE_VIEW_TYPE_2D; /* not sure */
+ default:
+ unreachable("unexpected target");
+ }
+}
+
+static VkComponentSwizzle
+component_mapping(enum pipe_swizzle swizzle)
+{
+ switch (swizzle) {
+ case PIPE_SWIZZLE_X: return VK_COMPONENT_SWIZZLE_R;
+ case PIPE_SWIZZLE_Y: return VK_COMPONENT_SWIZZLE_G;
+ case PIPE_SWIZZLE_Z: return VK_COMPONENT_SWIZZLE_B;
+ case PIPE_SWIZZLE_W: return VK_COMPONENT_SWIZZLE_A;
+ case PIPE_SWIZZLE_0: return VK_COMPONENT_SWIZZLE_ZERO;
+ case PIPE_SWIZZLE_1: return VK_COMPONENT_SWIZZLE_ONE;
+ case PIPE_SWIZZLE_NONE: return VK_COMPONENT_SWIZZLE_IDENTITY; // ???
+ default:
+ unreachable("unexpected swizzle");
+ }
+}
+
+static struct pipe_sampler_view *
+zink_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *pres,
+ const struct pipe_sampler_view *state)
+{
+ struct zink_screen *screen = zink_screen(pctx->screen);
+ struct zink_resource *res = zink_resource(pres);
+ struct zink_sampler_view *sampler_view = CALLOC_STRUCT(zink_sampler_view);
+
+ sampler_view->base = *state;
+ sampler_view->base.texture = NULL;
+ pipe_resource_reference(&sampler_view->base.texture, pres);
+ sampler_view->base.reference.count = 1;
+ sampler_view->base.context = pctx;
+
+ VkImageViewCreateInfo ivci = {};
+ ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ ivci.image = res->image;
+ ivci.viewType = image_view_type(state->target);
+ ivci.format = zink_get_format(state->format);
+ ivci.components.r = component_mapping(state->swizzle_r);
+ ivci.components.g = component_mapping(state->swizzle_g);
+ ivci.components.b = component_mapping(state->swizzle_b);
+ ivci.components.a = component_mapping(state->swizzle_a);
+ ivci.subresourceRange.aspectMask = zink_aspect_from_format(state->format);
+ ivci.subresourceRange.baseMipLevel = state->u.tex.first_level;
+ ivci.subresourceRange.baseArrayLayer = state->u.tex.first_layer;
+ ivci.subresourceRange.levelCount = state->u.tex.last_level - state->u.tex.first_level + 1;
+ ivci.subresourceRange.layerCount = state->u.tex.last_layer - state->u.tex.first_layer + 1;
+
+ VkResult err = vkCreateImageView(screen->dev, &ivci, NULL, &sampler_view->image_view);
+ if (err != VK_SUCCESS) {
+ FREE(sampler_view);
+ return NULL;
+ }
+
+ return &sampler_view->base;
+}
+
+static void
+zink_destroy_sampler_view(struct pipe_context *pctx,
+ struct pipe_sampler_view *view)
+{
+ FREE(view);
+}
+
+static void *
+zink_create_vs_state(struct pipe_context *pctx,
+ const struct pipe_shader_state *shader)
+{
+ struct nir_shader *nir;
+ if (shader->type != PIPE_SHADER_IR_NIR)
+ nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
+ else
+ nir = (struct nir_shader *)shader->ir.nir;
+
+ return zink_compile_nir(zink_screen(pctx->screen), nir);
+}
+
+static void
+zink_bind_vs_state(struct pipe_context *pctx,
+ void *cso)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ ctx->gfx_stages[PIPE_SHADER_VERTEX] = cso;
+}
+
+static void
+zink_delete_vs_state(struct pipe_context *pctx,
+ void *cso)
+{
+ zink_shader_free(zink_screen(pctx->screen), cso);
+}
+
+static void *
+zink_create_fs_state(struct pipe_context *pctx,
+ const struct pipe_shader_state *shader)
+{
+ struct nir_shader *nir;
+ if (shader->type != PIPE_SHADER_IR_NIR)
+ nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
+ else
+ nir = (struct nir_shader *)shader->ir.nir;
+
+ return zink_compile_nir(zink_screen(pctx->screen), nir);
+}
+
+static void
+zink_bind_fs_state(struct pipe_context *pctx,
+ void *cso)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ ctx->gfx_stages[PIPE_SHADER_FRAGMENT] = cso;
+}
+
+static void
+zink_delete_fs_state(struct pipe_context *pctx,
+ void *cso)
+{
+ zink_shader_free(zink_screen(pctx->screen), cso);
+}
+
+static void
+zink_set_polygon_stipple(struct pipe_context *pctx,
+ const struct pipe_poly_stipple *ps)
+{
+}
+
+static void
+zink_set_vertex_buffers(struct pipe_context *pctx,
+ unsigned start_slot,
+ unsigned num_buffers,
+ const struct pipe_vertex_buffer *buffers)
+{
+ struct zink_context *ctx = zink_context(pctx);
+
+ if (buffers) {
+ for (int i = 0; i < num_buffers; ++i) {
+ const struct pipe_vertex_buffer *vb = buffers + i;
+ ctx->gfx_pipeline_state.bindings[start_slot + i].stride = vb->stride;
+ }
+ }
+
+ util_set_vertex_buffers_mask(ctx->buffers, &ctx->buffers_enabled_mask,
+ buffers, start_slot, num_buffers);
+}
+
+static void
+zink_set_viewport_states(struct pipe_context *pctx,
+ unsigned start_slot,
+ unsigned num_viewports,
+ const struct pipe_viewport_state *state)
+{
+ struct zink_context *ctx = zink_context(pctx);
+
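+ /* convert gallium's scale/translate viewports into Vulkan's
+  * origin/size/depth-range form */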
+ for (unsigned i = 0; i < num_viewports; ++i) {
+ VkViewport viewport = {
+ state[i].translate[0] - state[i].scale[0],
+ state[i].translate[1] - state[i].scale[1],
+ state[i].scale[0] * 2,
+ state[i].scale[1] * 2,
+ state[i].translate[2] - state[i].scale[2],
+ state[i].translate[2] + state[i].scale[2]
+ };
+ ctx->viewports[start_slot + i] = viewport;
+ }
+ ctx->num_viewports = start_slot + num_viewports;
+}
+
+static void
+zink_set_scissor_states(struct pipe_context *pctx,
+ unsigned start_slot, unsigned num_scissors,
+ const struct pipe_scissor_state *states)
+{
+ struct zink_context *ctx = zink_context(pctx);
+
+ for (unsigned i = 0; i < num_scissors; i++) {
+ VkRect2D scissor;
+
+ scissor.offset.x = states[i].minx;
+ scissor.offset.y = states[i].miny;
+ scissor.extent.width = states[i].maxx - states[i].minx;
+ scissor.extent.height = states[i].maxy - states[i].miny;
+ ctx->scissors[start_slot + i] = scissor;
+ }
+ ctx->num_scissors = start_slot + num_scissors;
+}
+
+static void
+zink_set_constant_buffer(struct pipe_context *pctx,
+ enum pipe_shader_type shader, uint index,
+ const struct pipe_constant_buffer *cb)
+{
+ struct zink_context *ctx = zink_context(pctx);
+
+ if (cb) {
+ struct pipe_resource *buffer = cb->buffer;
+ unsigned offset = cb->buffer_offset;
+ if (cb->user_buffer)
+ u_upload_data(ctx->base.const_uploader, 0, cb->buffer_size, 64,
+ cb->user_buffer, &offset, &buffer);
+
+ pipe_resource_reference(&ctx->ubos[shader][index].buffer, buffer);
+ ctx->ubos[shader][index].buffer_offset = offset;
+ ctx->ubos[shader][index].buffer_size = cb->buffer_size;
+ ctx->ubos[shader][index].user_buffer = NULL;
+
+ if (cb->user_buffer)
+ pipe_resource_reference(&buffer, NULL);
+ } else {
+ pipe_resource_reference(&ctx->ubos[shader][index].buffer, NULL);
+ ctx->ubos[shader][index].buffer_offset = 0;
+ ctx->ubos[shader][index].buffer_size = 0;
+ ctx->ubos[shader][index].user_buffer = NULL;
+ }
+}
+
+static void
+zink_set_sampler_views(struct pipe_context *pctx,
+ enum pipe_shader_type shader_type,
+ unsigned start_slot,
+ unsigned num_views,
+ struct pipe_sampler_view **views)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ assert(views);
+ for (unsigned i = 0; i < num_views; ++i) {
+ pipe_sampler_view_reference(
+ &ctx->image_views[shader_type][start_slot + i],
+ views[i]);
+ }
+}
+
+static void
+zink_set_stencil_ref(struct pipe_context *pctx,
+ const struct pipe_stencil_ref *ref)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ ctx->stencil_ref[0] = ref->ref_value[0];
+ ctx->stencil_ref[1] = ref->ref_value[1];
+}
+
+static void
+zink_set_clip_state(struct pipe_context *pctx,
+ const struct pipe_clip_state *pcs)
+{
+}
+
+static struct zink_render_pass *
+get_render_pass(struct zink_screen *screen,
+ const struct pipe_framebuffer_state *fb)
+{
+ struct zink_render_pass_state state;
+
+ for (int i = 0; i < fb->nr_cbufs; i++) {
+ struct zink_resource *cbuf = zink_resource(fb->cbufs[i]->texture);
+ state.rts[i].format = cbuf->format;
+ }
+ state.num_cbufs = fb->nr_cbufs;
+
+ if (fb->zsbuf) {
+ struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
+ state.rts[fb->nr_cbufs].format = zsbuf->format;
+ }
+ state.have_zsbuf = fb->zsbuf != NULL;
+
+ return zink_create_render_pass(screen, &state);
+}
+
+static void
+zink_set_framebuffer_state(struct pipe_context *pctx,
+ const struct pipe_framebuffer_state *state)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ struct zink_screen *screen = zink_screen(pctx->screen);
+
+ struct zink_render_pass *rp = get_render_pass(screen, state);
+ zink_render_pass_reference(screen, &ctx->render_pass, rp);
+
+ struct zink_framebuffer *fb = zink_create_framebuffer(screen, state, rp);
+ zink_framebuffer_reference(screen, &ctx->framebuffer, fb);
+ zink_framebuffer_reference(screen, &fb, NULL);
+ zink_render_pass_reference(screen, &rp, NULL);
+
+ ctx->gfx_pipeline_state.num_attachments = state->nr_cbufs;
+
+ util_copy_framebuffer_state(&ctx->fb_state, state);
+
+ struct zink_cmdbuf *cmdbuf = zink_start_cmdbuf(ctx);
+ if (!cmdbuf)
+ return;
+
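+ /* transition all attachments into a layout the render pass can use */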
+ for (int i = 0; i < state->nr_cbufs; i++) {
+ struct zink_resource *res = zink_resource(state->cbufs[i]->texture);
+ if (res->layout != VK_IMAGE_LAYOUT_GENERAL &&
+ res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
+ zink_resource_barrier(cmdbuf->cmdbuf, res, res->aspect,
+ VK_IMAGE_LAYOUT_GENERAL);
+ }
+
+ if (state->zsbuf) {
+ struct zink_resource *res = zink_resource(state->zsbuf->texture);
+ if (res->layout != VK_IMAGE_LAYOUT_GENERAL &&
+ res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
+ zink_resource_barrier(cmdbuf->cmdbuf, res, res->aspect,
+ VK_IMAGE_LAYOUT_GENERAL);
+ }
+
+ zink_end_cmdbuf(ctx, cmdbuf);
+}
+
+static void
+zink_set_active_query_state(struct pipe_context *pctx, bool enable)
+{
+}
+
+static void
+zink_set_blend_color(struct pipe_context *pctx,
+ const struct pipe_blend_color *color)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ memcpy(ctx->blend_constants, color->color, sizeof(float) * 4);
+}
+
+static VkAccessFlags
+access_flags(VkImageLayout layout)
+{
+ switch (layout) {
+ case VK_IMAGE_LAYOUT_UNDEFINED:
+ case VK_IMAGE_LAYOUT_GENERAL:
+ return 0;
+
+ case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+ return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+ return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+
+ case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
+ return VK_ACCESS_SHADER_READ_BIT;
+
+ case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
+ return VK_ACCESS_TRANSFER_READ_BIT;
+
+ case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
+ return VK_ACCESS_TRANSFER_WRITE_BIT;
+
+ case VK_IMAGE_LAYOUT_PREINITIALIZED:
+ return VK_ACCESS_HOST_WRITE_BIT;
+
+ default:
+ unreachable("unexpected layout");
+ }
+}
+
+void
+zink_resource_barrier(VkCommandBuffer cmdbuf, struct zink_resource *res,
+ VkImageAspectFlags aspect, VkImageLayout new_layout)
+{
+ VkImageSubresourceRange isr = {
+ aspect,
+ 0, VK_REMAINING_MIP_LEVELS,
+ 0, VK_REMAINING_ARRAY_LAYERS
+ };
+
+ VkImageMemoryBarrier imb = {
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+ NULL,
+ access_flags(res->layout),
+ access_flags(new_layout),
+ res->layout,
+ new_layout,
+ VK_QUEUE_FAMILY_IGNORED,
+ VK_QUEUE_FAMILY_IGNORED,
+ res->image,
+ isr
+ };
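+ /* transition every mip level and array layer to the new layout */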
+ vkCmdPipelineBarrier(
+ cmdbuf,
+ VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+ VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
+ 0,
+ 0, NULL,
+ 0, NULL,
+ 1, &imb
+ );
+
+ res->layout = new_layout;
+}
+
+static void
+zink_clear(struct pipe_context *pctx,
+ unsigned buffers,
+ const union pipe_color_union *pcolor,
+ double depth, unsigned stencil)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ struct pipe_framebuffer_state *fb = &ctx->fb_state;
+
+ struct zink_cmdbuf *cmdbuf = zink_start_cmdbuf(ctx);
+ if (!cmdbuf)
+ return;
+
+ // first transition all images to a compatible layout
+ if (buffers & PIPE_CLEAR_COLOR) {
+ for (unsigned i = 0; i < fb->nr_cbufs; i++) {
+ if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
+ continue;
+
+ struct zink_resource *cbuf = zink_resource(fb->cbufs[i]->texture);
+
+ if (cbuf->layout != VK_IMAGE_LAYOUT_GENERAL &&
+ cbuf->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
+ zink_resource_barrier(cmdbuf->cmdbuf, cbuf, cbuf->aspect,
+ VK_IMAGE_LAYOUT_GENERAL);
+ }
+ }
+
+ VkImageAspectFlags depthStencilAspect = 0;
+ if (buffers & PIPE_CLEAR_DEPTHSTENCIL && fb->zsbuf) {
+ struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
+ if (buffers & PIPE_CLEAR_DEPTH)
+ depthStencilAspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
+ if (buffers & PIPE_CLEAR_STENCIL)
+ depthStencilAspect |= VK_IMAGE_ASPECT_STENCIL_BIT;
+
+ if (zsbuf->layout != VK_IMAGE_LAYOUT_GENERAL &&
+ zsbuf->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
+ zink_resource_barrier(cmdbuf->cmdbuf, zsbuf, depthStencilAspect,
+ VK_IMAGE_LAYOUT_GENERAL);
+ }
+
+ VkClearColorValue color;
+ color.float32[0] = pcolor->f[0];
+ color.float32[1] = pcolor->f[1];
+ color.float32[2] = pcolor->f[2];
+ color.float32[3] = pcolor->f[3];
+
+ if (buffers & PIPE_CLEAR_COLOR) {
+ for (unsigned i = 0; i < fb->nr_cbufs; i++) {
+ if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
+ continue;
+
+ struct zink_resource *cbuf = zink_resource(fb->cbufs[i]->texture);
+
+ VkImageSubresourceRange range;
+ range.aspectMask = cbuf->aspect;
+ range.baseMipLevel = 0;
+ range.levelCount = VK_REMAINING_MIP_LEVELS;
+ range.baseArrayLayer = 0;
+ range.layerCount = VK_REMAINING_ARRAY_LAYERS;
+ vkCmdClearColorImage(cmdbuf->cmdbuf,
+ cbuf->image, VK_IMAGE_LAYOUT_GENERAL,
+ &color,
+ 1, &range);
+ }
+ }
+
+ if (depthStencilAspect) {
+ struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
+
+ VkClearDepthStencilValue zsvalue = { depth, stencil };
+
+ VkImageSubresourceRange range;
+ range.aspectMask = depthStencilAspect;
+ range.baseMipLevel = 0;
+ range.levelCount = VK_REMAINING_MIP_LEVELS;
+ range.baseArrayLayer = 0;
+ range.layerCount = VK_REMAINING_ARRAY_LAYERS;
+
+ vkCmdClearDepthStencilImage(cmdbuf->cmdbuf,
+ zsbuf->image, VK_IMAGE_LAYOUT_GENERAL,
+ &zsvalue,
+ 1, &range);
+ }
+
+ zink_end_cmdbuf(ctx, cmdbuf);
+}
+
+VkShaderStageFlagBits
+zink_shader_stage(enum pipe_shader_type type)
+{
+ VkShaderStageFlagBits stages[] = {
+ [PIPE_SHADER_VERTEX] = VK_SHADER_STAGE_VERTEX_BIT,
+ [PIPE_SHADER_FRAGMENT] = VK_SHADER_STAGE_FRAGMENT_BIT,
+ [PIPE_SHADER_GEOMETRY] = VK_SHADER_STAGE_GEOMETRY_BIT,
+ [PIPE_SHADER_TESS_CTRL] = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
+ [PIPE_SHADER_TESS_EVAL] = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
+ [PIPE_SHADER_COMPUTE] = VK_SHADER_STAGE_COMPUTE_BIT,
+ };
+ return stages[type];
+}
+
+static VkDescriptorSet
+allocate_descriptor_set(struct zink_context *ctx, VkDescriptorSetLayout dsl)
+{
+ struct zink_screen *screen = zink_screen(ctx->base.screen);
+ VkDescriptorSetAllocateInfo dsai;
+ memset((void *)&dsai, 0, sizeof(dsai));
+ dsai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+ dsai.pNext = NULL;
+ dsai.descriptorPool = ctx->descpool;
+ dsai.descriptorSetCount = 1;
+ dsai.pSetLayouts = &dsl;
+
+ VkDescriptorSet desc_set;
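+ /* if the pool has run dry, reset it and retry the allocation once */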
+ if (vkAllocateDescriptorSets(screen->dev, &dsai, &desc_set) != VK_SUCCESS) {
+ if (vkResetDescriptorPool(screen->dev, ctx->descpool, 0) != VK_SUCCESS) {
+ fprintf(stderr, "vkResetDescriptorPool failed\n");
+ return VK_NULL_HANDLE;
+ }
+ if (vkAllocateDescriptorSets(screen->dev, &dsai, &desc_set) != VK_SUCCESS) {
+ fprintf(stderr, "vkAllocateDescriptorSets failed\n");
+ return VK_NULL_HANDLE;
+ }
+ }
+
+ return desc_set;
+}
+
+static VkPrimitiveTopology
+zink_primitive_topology(enum pipe_prim_type mode)
+{
+ switch (mode) {
+ case PIPE_PRIM_POINTS:
+ return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
+
+ case PIPE_PRIM_LINES:
+ return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
+
+ case PIPE_PRIM_LINE_STRIP:
+ return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
+
+ case PIPE_PRIM_TRIANGLES:
+ return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
+
+ case PIPE_PRIM_TRIANGLE_STRIP:
+ return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
+
+ case PIPE_PRIM_TRIANGLE_FAN:
+ return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN;
+
+ default:
+ unreachable("unexpected enum pipe_prim_type");
+ }
+}
+
+static void
+zink_bind_vertex_buffers(VkCommandBuffer cmdbuf, struct zink_context *ctx)
+{
+ VkBuffer buffers[PIPE_MAX_ATTRIBS];
+ VkDeviceSize buffer_offsets[PIPE_MAX_ATTRIBS];
+ struct zink_vertex_elements_state *elems = ctx->gfx_pipeline_state.element_state;
+ for (unsigned i = 0; i < elems->num_bindings; i++) {
+ struct pipe_vertex_buffer *vb = ctx->buffers + elems->binding_map[i];
+ assert(vb && vb->buffer.resource);
+ struct zink_resource *res = zink_resource(vb->buffer.resource);
+ buffers[i] = res->buffer;
+ buffer_offsets[i] = vb->buffer_offset;
+ }
+
+ if (elems->num_bindings > 0)
+ vkCmdBindVertexBuffers(cmdbuf, 0, elems->num_bindings, buffers, buffer_offsets);
+}
+
+static void
+zink_draw_vbo(struct pipe_context *pctx,
+ const struct pipe_draw_info *dinfo)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ struct zink_screen *screen = zink_screen(pctx->screen);
+ struct zink_rasterizer_state *rast_state = ctx->gfx_pipeline_state.rast_state;
+
+ if (dinfo->mode >= PIPE_PRIM_QUADS ||
+ dinfo->mode == PIPE_PRIM_LINE_LOOP) {
+ if (!u_trim_pipe_prim(dinfo->mode, (unsigned *)&dinfo->count))
+ return;
+
+ util_primconvert_save_rasterizer_state(ctx->primconvert, &rast_state->base);
+ util_primconvert_draw_vbo(ctx->primconvert, dinfo);
+ return;
+ }
+
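+ /* note: a program and pipeline are built for every draw (and the
+  * pipeline destroyed again afterwards); there is no caching yet */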
+ struct zink_gfx_program *gfx_program = zink_create_gfx_program(screen->dev,
+ ctx->gfx_stages);
+ if (!gfx_program)
+ return;
+
+ ctx->gfx_pipeline_state.primitive_topology = zink_primitive_topology(dinfo->mode);
+
+ VkPipeline pipeline = zink_create_gfx_pipeline(screen->dev,
+ gfx_program,
+ &ctx->gfx_pipeline_state,
+ ctx->render_pass->render_pass);
+
+ bool depth_bias = false;
+ switch (u_reduced_prim(dinfo->mode)) {
+ case PIPE_PRIM_POINTS:
+ depth_bias = rast_state->offset_point;
+ break;
+
+ case PIPE_PRIM_LINES:
+ depth_bias = rast_state->offset_line;
+ break;
+
+ case PIPE_PRIM_TRIANGLES:
+ depth_bias = rast_state->offset_tri;
+ break;
+
+ default:
+ unreachable("unexpected reduced prim");
+ }
+
+ unsigned index_offset = 0;
+ struct pipe_resource *index_buffer = NULL;
+ if (dinfo->index_size > 0) {
+ if (dinfo->has_user_indices) {
+ if (!util_upload_index_buffer(pctx, dinfo, &index_buffer, &index_offset)) {
+ debug_printf("util_upload_index_buffer() failed\n");
+ return;
+ }
+ } else
+ index_buffer = dinfo->index.resource;
+ }
+
+ struct zink_cmdbuf *cmdbuf = zink_start_cmdbuf(ctx);
+ if (!cmdbuf)
+ return;
+
+ VkRenderPassBeginInfo rpbi = {};
+ rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ rpbi.renderPass = ctx->render_pass->render_pass;
+ rpbi.renderArea.offset.x = 0;
+ rpbi.renderArea.offset.y = 0;
+ rpbi.renderArea.extent.width = ctx->fb_state.width;
+ rpbi.renderArea.extent.height = ctx->fb_state.height;
+ rpbi.clearValueCount = 0;
+ rpbi.pClearValues = NULL;
+ rpbi.framebuffer = ctx->framebuffer->fb;
+
+ vkCmdBeginRenderPass(cmdbuf->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
+
+ vkCmdSetViewport(cmdbuf->cmdbuf, 0, ctx->num_viewports, ctx->viewports);
+
+ if (ctx->num_scissors)
+ vkCmdSetScissor(cmdbuf->cmdbuf, 0, ctx->num_scissors, ctx->scissors);
+ else if (ctx->fb_state.width && ctx->fb_state.height) {
+ VkRect2D fb_scissor = {};
+ fb_scissor.extent.width = ctx->fb_state.width;
+ fb_scissor.extent.height = ctx->fb_state.height;
+ vkCmdSetScissor(cmdbuf->cmdbuf, 0, 1, &fb_scissor);
+ }
+
+ vkCmdSetStencilReference(cmdbuf->cmdbuf, VK_STENCIL_FACE_FRONT_BIT, ctx->stencil_ref[0]);
+ vkCmdSetStencilReference(cmdbuf->cmdbuf, VK_STENCIL_FACE_BACK_BIT, ctx->stencil_ref[1]);
+
+ if (depth_bias)
+ vkCmdSetDepthBias(cmdbuf->cmdbuf, rast_state->offset_units, rast_state->offset_clamp, rast_state->offset_scale);
+ else
+ vkCmdSetDepthBias(cmdbuf->cmdbuf, 0.0f, 0.0f, 0.0f);
+
+ if (ctx->gfx_pipeline_state.blend_state->need_blend_constants)
+ vkCmdSetBlendConstants(cmdbuf->cmdbuf, ctx->blend_constants);
+
+ VkDescriptorSet desc_set = allocate_descriptor_set(ctx, gfx_program->dsl);
+
+ VkWriteDescriptorSet wds[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS + PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
+ VkDescriptorBufferInfo buffer_infos[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS];
+ VkDescriptorImageInfo image_infos[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
+ int num_wds = 0, num_buffer_info = 0, num_image_info = 0;
+
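+ /* gather UBO and sampler/view bindings from all bound stages into a
+  * single descriptor-set update */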
+ for (int i = 0; i < ARRAY_SIZE(ctx->gfx_stages); i++) {
+ struct zink_shader *shader = ctx->gfx_stages[i];
+ if (!shader)
+ continue;
+
+ for (int j = 0; j < shader->num_bindings; j++) {
+ int index = shader->bindings[j].index;
+ if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
+ assert(ctx->ubos[i][index].buffer_size > 0);
+ assert(ctx->ubos[i][index].buffer);
+ buffer_infos[num_buffer_info].buffer = zink_resource(ctx->ubos[i][index].buffer)->buffer;
+ buffer_infos[num_buffer_info].offset = ctx->ubos[i][index].buffer_offset;
+ buffer_infos[num_buffer_info].range = VK_WHOLE_SIZE;
+ wds[num_wds].pBufferInfo = buffer_infos + num_buffer_info;
+ ++num_buffer_info;
+ } else {
+ struct pipe_sampler_view *psampler_view = ctx->image_views[i][index];
+ assert(psampler_view);
+ struct zink_sampler_view *sampler_view = (struct zink_sampler_view *)psampler_view;
+ struct zink_resource *resource = zink_resource(psampler_view->texture);
+ image_infos[num_image_info].imageLayout = resource->layout;
+ image_infos[num_image_info].imageView = sampler_view->image_view;
+ image_infos[num_image_info].sampler = ctx->samplers[i][index];
+ wds[num_wds].pImageInfo = image_infos + num_image_info;
+ ++num_image_info;
+ }
+
+ wds[num_wds].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+ wds[num_wds].pNext = NULL;
+ wds[num_wds].dstSet = desc_set;
+ wds[num_wds].dstBinding = shader->bindings[j].binding;
+ wds[num_wds].dstArrayElement = 0;
+ wds[num_wds].descriptorCount = 1;
+ wds[num_wds].descriptorType = shader->bindings[j].type;
+ ++num_wds;
+ }
+ }
+
+ vkUpdateDescriptorSets(screen->dev, num_wds, wds, 0, NULL);
+
+ vkCmdBindPipeline(cmdbuf->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
+ vkCmdBindDescriptorSets(cmdbuf->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS,
+ gfx_program->layout, 0, 1, &desc_set, 0, NULL);
+ zink_bind_vertex_buffers(cmdbuf->cmdbuf, ctx);
+
+ if (dinfo->index_size > 0) {
+ assert(dinfo->index_size != 1);
+ VkIndexType index_type = dinfo->index_size == 2 ? VK_INDEX_TYPE_UINT16 : VK_INDEX_TYPE_UINT32;
+ vkCmdBindIndexBuffer(cmdbuf->cmdbuf, zink_resource(index_buffer)->buffer, index_offset, index_type);
+ vkCmdDrawIndexed(cmdbuf->cmdbuf,
+ dinfo->count, dinfo->instance_count,
+ dinfo->start, dinfo->index_bias, dinfo->start_instance);
+ } else
+ vkCmdDraw(cmdbuf->cmdbuf, dinfo->count, dinfo->instance_count, dinfo->start, dinfo->start_instance);
+
+ vkCmdEndRenderPass(cmdbuf->cmdbuf);
+
+ zink_end_cmdbuf(ctx, cmdbuf);
+
+ vkDestroyPipeline(screen->dev, pipeline, NULL);
+
+ if (dinfo->index_size > 0 && dinfo->has_user_indices)
+ pipe_resource_reference(&index_buffer, NULL);
+}
+
+static void
+zink_flush(struct pipe_context *pctx,
+ struct pipe_fence_handle **pfence,
+ enum pipe_flush_flags flags)
+{
+}
+
+static void
+zink_blit(struct pipe_context *pctx,
+ const struct pipe_blit_info *info)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ bool is_resolve = false;
+ if (info->mask != PIPE_MASK_RGBA ||
+ info->scissor_enable ||
+ info->alpha_blend) {
+ if (!util_blitter_is_blit_supported(ctx->blitter, info)) {
+ debug_printf("blit unsupported %s -> %s\n",
+ util_format_short_name(info->src.resource->format),
+ util_format_short_name(info->dst.resource->format));
+ return;
+ }
+
+ util_blitter_save_fragment_constant_buffer_slot(ctx->blitter, ctx->ubos[PIPE_SHADER_FRAGMENT]);
+ util_blitter_save_vertex_buffer_slot(ctx->blitter, ctx->buffers);
+ util_blitter_save_vertex_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_VERTEX]);
+ util_blitter_save_fragment_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_FRAGMENT]);
+ util_blitter_save_rasterizer(ctx->blitter, ctx->gfx_pipeline_state.rast_state);
+
+ util_blitter_blit(ctx->blitter, info);
+ return;
+ }
+
+ struct zink_resource *src = zink_resource(info->src.resource);
+ struct zink_resource *dst = zink_resource(info->dst.resource);
+
+ if (src->base.nr_samples > 1 && dst->base.nr_samples <= 1)
+ is_resolve = true;
+
+ struct zink_cmdbuf *cmdbuf = zink_start_cmdbuf(ctx);
+ if (!cmdbuf)
+ return;
+
+ if (is_resolve) {
+ VkImageResolve region = {};
+
+ region.srcSubresource.aspectMask = src->aspect;
+ region.srcSubresource.mipLevel = info->src.level;
+ region.srcSubresource.baseArrayLayer = 0; // no clue
+ region.srcSubresource.layerCount = 1; // no clue
+ region.srcOffset.x = info->src.box.x;
+ region.srcOffset.y = info->src.box.y;
+ region.srcOffset.z = info->src.box.z;
+
+ region.dstSubresource.aspectMask = dst->aspect;
+ region.dstSubresource.mipLevel = info->dst.level;
+ region.dstSubresource.baseArrayLayer = 0; // no clue
+ region.dstSubresource.layerCount = 1; // no clue
+ region.dstOffset.x = info->dst.box.x;
+ region.dstOffset.y = info->dst.box.y;
+ region.dstOffset.z = info->dst.box.z;
+
+ region.extent.width = info->dst.box.width;
+ region.extent.height = info->dst.box.height;
+ region.extent.depth = info->dst.box.depth;
+ vkCmdResolveImage(cmdbuf->cmdbuf, src->image, src->layout,
+ dst->image, dst->layout,
+ 1, &region);
+
+ } else {
+ if (dst->layout != VK_IMAGE_LAYOUT_GENERAL &&
+ dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
+ zink_resource_barrier(cmdbuf->cmdbuf, dst, dst->aspect,
+ VK_IMAGE_LAYOUT_GENERAL);
+
+ VkImageBlit region = {};
+ region.srcSubresource.aspectMask = src->aspect;
+ region.srcSubresource.mipLevel = info->src.level;
+ region.srcOffsets[0].x = info->src.box.x;
+ region.srcOffsets[0].y = info->src.box.y;
+ region.srcOffsets[1].x = info->src.box.x + info->src.box.width;
+ region.srcOffsets[1].y = info->src.box.y + info->src.box.height;
+
+ if (src->base.array_size > 1) {
+ region.srcOffsets[0].z = 0;
+ region.srcOffsets[1].z = 1;
+ region.srcSubresource.baseArrayLayer = info->src.box.z;
+ region.srcSubresource.layerCount = info->src.box.depth;
+ } else {
+ region.srcOffsets[0].z = info->src.box.z;
+ region.srcOffsets[1].z = info->src.box.z + info->src.box.depth;
+ region.srcSubresource.baseArrayLayer = 0;
+ region.srcSubresource.layerCount = 1;
+ }
+
+ region.dstSubresource.aspectMask = dst->aspect;
+ region.dstSubresource.mipLevel = info->dst.level;
+ region.dstOffsets[0].x = info->dst.box.x;
+ region.dstOffsets[0].y = info->dst.box.y;
+ region.dstOffsets[1].x = info->dst.box.x + info->dst.box.width;
+ region.dstOffsets[1].y = info->dst.box.y + info->dst.box.height;
+
+ if (dst->base.array_size > 1) {
+ region.dstOffsets[0].z = 0;
+ region.dstOffsets[1].z = 1;
+ region.dstSubresource.baseArrayLayer = info->dst.box.z;
+ region.dstSubresource.layerCount = info->dst.box.depth;
+ } else {
+ region.dstOffsets[0].z = info->dst.box.z;
+ region.dstOffsets[1].z = info->dst.box.z + info->dst.box.depth;
+ region.dstSubresource.baseArrayLayer = 0;
+ region.dstSubresource.layerCount = 1;
+ }
+
+ vkCmdBlitImage(cmdbuf->cmdbuf, src->image, src->layout,
+ dst->image, dst->layout,
+ 1, &region,
+ filter(info->filter));
+ }
+ zink_end_cmdbuf(ctx, cmdbuf);
+}
+
+static void
+zink_resource_copy_region(struct pipe_context *pctx,
+ struct pipe_resource *pdst,
+ unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz,
+ struct pipe_resource *psrc,
+ unsigned src_level, const struct pipe_box *src_box)
+{
+ struct zink_resource *dst = zink_resource(pdst);
+ struct zink_resource *src = zink_resource(psrc);
+ struct zink_context *ctx = zink_context(pctx);
+ if (dst->base.target != PIPE_BUFFER && src->base.target != PIPE_BUFFER) {
+ VkImageCopy region = {};
+
+ region.srcSubresource.aspectMask = src->aspect;
+ region.srcSubresource.mipLevel = src_level;
+ region.srcSubresource.layerCount = 1;
+ if (src->base.array_size > 1) {
+ region.srcSubresource.baseArrayLayer = src_box->z;
+ region.srcSubresource.layerCount = src_box->depth;
+ region.extent.depth = 1;
+ } else {
+ region.srcOffset.z = src_box->z;
+ region.srcSubresource.layerCount = 1;
+ region.extent.depth = src_box->depth;
+ }
+
+ region.srcOffset.x = src_box->x;
+ region.srcOffset.y = src_box->y;
+
+ region.dstSubresource.aspectMask = dst->aspect;
+ region.dstSubresource.mipLevel = dst_level;
+ if (dst->base.array_size > 1) {
+ region.dstSubresource.baseArrayLayer = dstz;
+ region.dstSubresource.layerCount = src_box->depth;
+ } else {
+ region.dstOffset.z = dstz;
+ region.dstSubresource.layerCount = 1;
+ }
+
+ region.dstOffset.x = dstx;
+ region.dstOffset.y = dsty;
+ region.extent.width = src_box->width;
+ region.extent.height = src_box->height;
+
+ struct zink_cmdbuf *cmdbuf = zink_start_cmdbuf(ctx);
+ if (!cmdbuf)
+ return;
+
+ vkCmdCopyImage(cmdbuf->cmdbuf, src->image, src->layout,
+ dst->image, dst->layout,
+ 1, &region);
+ zink_end_cmdbuf(ctx, cmdbuf);
+ } else
+ debug_printf("zink: TODO resource copy\n");
+}
+
+struct pipe_context *
+zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+ struct zink_context *ctx = CALLOC_STRUCT(zink_context);
+ if (!ctx)
+ return NULL;
+
+ ctx->base.screen = pscreen;
+ ctx->base.priv = priv;
+
+ ctx->base.destroy = zink_context_destroy;
+
+ zink_context_state_init(&ctx->base);
+
+ ctx->base.create_sampler_state = zink_create_sampler_state;
+ ctx->base.bind_sampler_states = zink_bind_sampler_states;
+ ctx->base.delete_sampler_state = zink_delete_sampler_state;
+
+ ctx->base.create_sampler_view = zink_create_sampler_view;
+ ctx->base.set_sampler_views = zink_set_sampler_views;
+ ctx->base.sampler_view_destroy = zink_destroy_sampler_view;
+
+ ctx->base.create_vs_state = zink_create_vs_state;
+ ctx->base.bind_vs_state = zink_bind_vs_state;
+ ctx->base.delete_vs_state = zink_delete_vs_state;
+
+ ctx->base.create_fs_state = zink_create_fs_state;
+ ctx->base.bind_fs_state = zink_bind_fs_state;
+ ctx->base.delete_fs_state = zink_delete_fs_state;
+
+ ctx->base.set_polygon_stipple = zink_set_polygon_stipple;
+ ctx->base.set_vertex_buffers = zink_set_vertex_buffers;
+ ctx->base.set_viewport_states = zink_set_viewport_states;
+ ctx->base.set_scissor_states = zink_set_scissor_states;
+ ctx->base.set_constant_buffer = zink_set_constant_buffer;
+ ctx->base.set_framebuffer_state = zink_set_framebuffer_state;
+ ctx->base.set_stencil_ref = zink_set_stencil_ref;
+ ctx->base.set_clip_state = zink_set_clip_state;
+ ctx->base.set_active_query_state = zink_set_active_query_state;
+ ctx->base.set_blend_color = zink_set_blend_color;
+
+ ctx->base.clear = zink_clear;
+ ctx->base.draw_vbo = zink_draw_vbo;
+ ctx->base.flush = zink_flush;
+
+ ctx->base.resource_copy_region = zink_resource_copy_region;
+ ctx->base.blit = zink_blit;
+
+ zink_context_surface_init(&ctx->base);
+ zink_context_resource_init(&ctx->base);
+
+ slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);
+
+ ctx->base.stream_uploader = u_upload_create_default(&ctx->base);
+ ctx->base.const_uploader = ctx->base.stream_uploader;
+
+ int prim_hwsupport = 1 << PIPE_PRIM_POINTS |
+ 1 << PIPE_PRIM_LINES |
+ 1 << PIPE_PRIM_LINE_STRIP |
+ 1 << PIPE_PRIM_TRIANGLES |
+ 1 << PIPE_PRIM_TRIANGLE_STRIP |
+ 1 << PIPE_PRIM_TRIANGLE_FAN;
+
+ ctx->primconvert = util_primconvert_create(&ctx->base, prim_hwsupport);
+ if (!ctx->primconvert)
+ goto fail;
+
+ ctx->blitter = util_blitter_create(&ctx->base);
+ if (!ctx->blitter)
+ goto fail;
+
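+ /* One command pool and one primary command buffer per context;
+ * RESET_COMMAND_BUFFER_BIT lets the buffer be re-recorded between submissions. */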
+ VkCommandPoolCreateInfo cpci = {};
+ cpci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
+ cpci.queueFamilyIndex = screen->gfx_queue;
+ cpci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
+ if (vkCreateCommandPool(screen->dev, &cpci, NULL, &ctx->cmdpool) != VK_SUCCESS)
+ goto fail;
+
+ VkCommandBufferAllocateInfo cbai = {};
+ cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
+ cbai.commandPool = ctx->cmdpool;
+ cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+ cbai.commandBufferCount = 1;
+ if (vkAllocateCommandBuffers(screen->dev, &cbai, &ctx->cmdbuf.cmdbuf) != VK_SUCCESS)
+ goto fail;
+
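+ /* Descriptor sets for the UBO bindings are allocated out of this pool;
+ * FREE_DESCRIPTOR_SET_BIT allows individual sets to be returned to it. */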
+ VkDescriptorPoolSize sizes[] = {
+ {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1000}
+ };
+ VkDescriptorPoolCreateInfo dpci = {};
+ dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
+ dpci.pPoolSizes = sizes;
+ dpci.poolSizeCount = ARRAY_SIZE(sizes);
+ dpci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
+ dpci.maxSets = 1000;
+
+ if (vkCreateDescriptorPool(screen->dev, &dpci, 0, &ctx->descpool) != VK_SUCCESS)
+ goto fail;
+
+ vkGetDeviceQueue(screen->dev, screen->gfx_queue, 0, &ctx->queue);
+
+ return &ctx->base;
+
+fail:
+ if (ctx) {
+ vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);
+ FREE(ctx);
+ }
+ return NULL;
+}
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_CONTEXT_H
+#define ZINK_CONTEXT_H
+
+#include "zink_pipeline.h"
+#include "zink_cmdbuf.h"
+
+#include "pipe/p_context.h"
+#include "pipe/p_state.h"
+
+#include "util/slab.h"
+
+#include <vulkan/vulkan.h>
+
+struct blitter_context;
+struct primconvert_context;
+struct zink_resource;
+
+struct zink_vertex_elements_state;
+struct zink_rasterizer_state;
+struct zink_blend_state;
+struct zink_depth_stencil_alpha_state;
+
+struct zink_sampler_view {
+ struct pipe_sampler_view base;
+ VkImageView image_view;
+};
+
+struct zink_context {
+ struct pipe_context base;
+ struct slab_child_pool transfer_pool;
+ struct blitter_context *blitter;
+
+ VkCommandPool cmdpool;
+ struct zink_cmdbuf cmdbuf;
+
+ VkQueue queue;
+
+ VkDescriptorPool descpool;
+
+ struct pipe_constant_buffer ubos[PIPE_SHADER_TYPES][PIPE_MAX_CONSTANT_BUFFERS];
+ struct pipe_framebuffer_state fb_state;
+
+ struct zink_shader *gfx_stages[PIPE_SHADER_TYPES - 1];
+ struct zink_gfx_pipeline_state gfx_pipeline_state;
+
+ struct primconvert_context *primconvert;
+
+ struct zink_render_pass *render_pass;
+ struct zink_framebuffer *framebuffer;
+
+ VkViewport viewports[PIPE_MAX_VIEWPORTS];
+ unsigned num_viewports;
+
+ VkRect2D scissors[PIPE_MAX_VIEWPORTS];
+ unsigned num_scissors;
+
+ struct pipe_vertex_buffer buffers[PIPE_MAX_ATTRIBS];
+ uint32_t buffers_enabled_mask;
+
+ VkSampler samplers[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
+ struct pipe_sampler_view *image_views[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_SAMPLER_VIEWS];
+
+ float blend_constants[4];
+
+ uint32_t stencil_ref[2];
+};
+
+static inline struct zink_context *
+zink_context(struct pipe_context *context)
+{
+ return (struct zink_context *)context;
+}
+
+void
+zink_resource_barrier(VkCommandBuffer cmdbuf, struct zink_resource *res,
+ VkImageAspectFlags aspect, VkImageLayout new_layout);
+
+VkShaderStageFlagBits
+zink_shader_stage(enum pipe_shader_type type);
+
+struct pipe_context *
+zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags);
+
+#endif
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_fence.h"
+
+#include "zink_screen.h"
+
+#include "util/u_memory.h"
+
+struct zink_fence *
+zink_create_fence(struct pipe_screen *pscreen)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+
+ VkFenceCreateInfo fci = {};
+ fci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
+
+ struct zink_fence *ret = CALLOC_STRUCT(zink_fence);
+ if (!ret) {
+ debug_printf("CALLOC_STRUCT failed\n");
+ goto fail;
+ }
+
+ if (vkCreateFence(screen->dev, &fci, NULL, &ret->fence) != VK_SUCCESS) {
+ debug_printf("vkCreateFence failed\n");
+ goto fail;
+ }
+
+ pipe_reference_init(&ret->reference, 1);
+ return ret;
+
+fail:
+ FREE(ret);
+ return NULL;
+}
+
+void
+zink_fence_reference(struct zink_screen *screen,
+ struct zink_fence **ptr,
+ struct zink_fence *fence)
+{
+ if (pipe_reference(&(*ptr)->reference, &fence->reference)) {
+ vkDestroyFence(screen->dev, (*ptr)->fence, NULL);
+ FREE(*ptr);
+ }
+
+ *ptr = fence;
+}
+
+static void
+fence_reference(struct pipe_screen *pscreen,
+ struct pipe_fence_handle **pptr,
+ struct pipe_fence_handle *pfence)
+{
+ zink_fence_reference(zink_screen(pscreen), (struct zink_fence **)pptr,
+ zink_fence(pfence));
+}
+
+bool
+zink_fence_finish(struct zink_screen *screen, struct zink_fence *fence,
+ uint64_t timeout_ns)
+{
+ return vkWaitForFences(screen->dev, 1, &fence->fence, VK_TRUE,
+ timeout_ns) == VK_SUCCESS;
+}
+
+static bool
+fence_finish(struct pipe_screen *pscreen, struct pipe_context *pctx,
+ struct pipe_fence_handle *pfence, uint64_t timeout_ns)
+{
+ return zink_fence_finish(zink_screen(pscreen), zink_fence(pfence),
+ timeout_ns);
+}
+
+void
+zink_screen_fence_init(struct pipe_screen *pscreen)
+{
+ pscreen->fence_reference = fence_reference;
+ pscreen->fence_finish = fence_finish;
+}
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_FENCE_H
+#define ZINK_FENCE_H
+
+#include "util/u_inlines.h"
+
+#include <vulkan/vulkan.h>
+
+struct pipe_screen;
+struct zink_screen;
+
+struct zink_fence {
+ struct pipe_reference reference;
+ VkFence fence;
+};
+
+static inline struct zink_fence *
+zink_fence(struct pipe_fence_handle *pfence)
+{
+ return (struct zink_fence *)pfence;
+}
+
+struct zink_fence *
+zink_create_fence(struct pipe_screen *pscreen);
+
+void
+zink_fence_reference(struct zink_screen *screen,
+ struct zink_fence **ptr,
+ struct zink_fence *fence);
+
+bool
+zink_fence_finish(struct zink_screen *screen, struct zink_fence *fence,
+ uint64_t timeout_ns);
+
+void
+zink_screen_fence_init(struct pipe_screen *pscreen);
+
+#endif
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_framebuffer.h"
+
+#include "zink_render_pass.h"
+#include "zink_screen.h"
+#include "zink_surface.h"
+
+#include "util/u_memory.h"
+#include "util/u_string.h"
+
+void
+zink_destroy_framebuffer(struct zink_screen *screen,
+ struct zink_framebuffer *fbuf)
+{
+ vkDestroyFramebuffer(screen->dev, fbuf->fb, NULL);
+ for (int i = 0; i < ARRAY_SIZE(fbuf->surfaces); ++i)
+ pipe_surface_reference(fbuf->surfaces + i, NULL);
+
+ zink_render_pass_reference(screen, &fbuf->rp, NULL);
+
+ FREE(fbuf);
+}
+
+struct zink_framebuffer *
+zink_create_framebuffer(struct zink_screen *screen,
+ const struct pipe_framebuffer_state *fb,
+ struct zink_render_pass *rp)
+{
+ struct zink_framebuffer *fbuf = CALLOC_STRUCT(zink_framebuffer);
+ if (!fbuf)
+ return NULL;
+
+ pipe_reference_init(&fbuf->reference, 1);
+
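+ /* Gather image views for all color buffers plus the optional depth/stencil
+ * buffer, taking a reference on each surface for the framebuffer's lifetime. */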
+ VkImageView attachments[PIPE_MAX_COLOR_BUFS + 1];
+ for (int i = 0; i < fb->nr_cbufs; i++) {
+ struct pipe_surface *psurf = fb->cbufs[i];
+ pipe_surface_reference(fbuf->surfaces + i, psurf);
+ attachments[i] = zink_surface(psurf)->image_view;
+ }
+
+ int num_attachments = fb->nr_cbufs;
+ if (fb->zsbuf) {
+ struct pipe_surface *psurf = fb->zsbuf;
+ pipe_surface_reference(fbuf->surfaces + num_attachments, psurf);
+ attachments[num_attachments++] = zink_surface(psurf)->image_view;
+ }
+
+ assert(rp);
+ zink_render_pass_reference(screen, &fbuf->rp, rp);
+
+ VkFramebufferCreateInfo fci = {};
+ fci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
+ fci.renderPass = rp->render_pass;
+ fci.attachmentCount = num_attachments;
+ fci.pAttachments = attachments;
+ fci.width = (uint32_t)fb->width;
+ fci.height = (uint32_t)fb->height;
+ fci.layers = (uint32_t)MAX2(fb->layers, 1);
+
+ if (vkCreateFramebuffer(screen->dev, &fci, NULL, &fbuf->fb) != VK_SUCCESS) {
+ zink_destroy_framebuffer(screen, fbuf);
+ return NULL;
+ }
+
+ return fbuf;
+}
+
+void
+debug_describe_zink_framebuffer(char* buf, const struct zink_framebuffer *ptr)
+{
+ sprintf(buf, "zink_framebuffer");
+}
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_FRAMEBUFFER_H
+#define ZINK_FRAMEBUFFER_H
+
+#include "pipe/p_state.h"
+#include <vulkan/vulkan.h>
+
+#include "util/u_inlines.h"
+
+struct zink_screen;
+struct zink_render_pass;
+
+struct zink_framebuffer {
+ struct pipe_reference reference;
+ VkFramebuffer fb;
+
+ struct pipe_surface *surfaces[PIPE_MAX_COLOR_BUFS + 1];
+ struct zink_render_pass *rp;
+};
+
+struct zink_framebuffer *
+zink_create_framebuffer(struct zink_screen *screen,
+ const struct pipe_framebuffer_state *fb,
+ struct zink_render_pass *rp);
+
+void
+zink_destroy_framebuffer(struct zink_screen *screen,
+ struct zink_framebuffer *fbuf);
+
+void
+debug_describe_zink_framebuffer(char* buf, const struct zink_framebuffer *ptr);
+
+static inline void
+zink_framebuffer_reference(struct zink_screen *screen,
+ struct zink_framebuffer **dst,
+ struct zink_framebuffer *src)
+{
+ struct zink_framebuffer *old_dst = *dst;
+
+ if (pipe_reference_described(&old_dst->reference, &src->reference,
+ (debug_reference_descriptor) debug_describe_zink_framebuffer))
+ zink_destroy_framebuffer(screen, old_dst);
+ *dst = src;
+}
+
+#endif
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_pipeline.h"
+
+#include "zink_compiler.h"
+#include "zink_context.h"
+#include "zink_program.h"
+#include "zink_screen.h"
+#include "zink_state.h"
+
+#include "util/u_debug.h"
+#include "util/u_prim.h"
+
+VkPipeline
+zink_create_gfx_pipeline(VkDevice dev, struct zink_gfx_program *prog,
+ struct zink_gfx_pipeline_state *state,
+ VkRenderPass render_pass)
+{
+ VkPipelineVertexInputStateCreateInfo vertex_input_state = {};
+ vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
+ vertex_input_state.pVertexBindingDescriptions = state->bindings;
+ vertex_input_state.vertexBindingDescriptionCount = state->element_state->num_bindings;
+ vertex_input_state.pVertexAttributeDescriptions = state->element_state->attribs;
+ vertex_input_state.vertexAttributeDescriptionCount = state->element_state->num_attribs;
+
+ VkPipelineInputAssemblyStateCreateInfo primitive_state = {};
+ primitive_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
+ primitive_state.topology = state->primitive_topology;
+ primitive_state.primitiveRestartEnable = VK_FALSE;
+
+ VkPipelineColorBlendStateCreateInfo blend_state = {};
+ blend_state.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
+ blend_state.pAttachments = state->blend_state->attachments;
+ blend_state.attachmentCount = state->num_attachments;
+ blend_state.logicOpEnable = state->blend_state->logicop_enable;
+ blend_state.logicOp = state->blend_state->logicop_func;
+
+ VkPipelineMultisampleStateCreateInfo ms_state = {};
+ ms_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
+ ms_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
+ ms_state.alphaToCoverageEnable = state->blend_state->alpha_to_coverage;
+ ms_state.alphaToOneEnable = state->blend_state->alpha_to_one;
+
+ VkPipelineViewportStateCreateInfo viewport_state = {};
+ viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
+ viewport_state.viewportCount = 1;
+ viewport_state.pViewports = NULL;
+ viewport_state.scissorCount = 1;
+ viewport_state.pScissors = NULL;
+
+ VkPipelineRasterizationStateCreateInfo rast_state = {};
+ rast_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
+
+ rast_state.depthClampEnable = state->rast_state->depth_clamp;
+ rast_state.rasterizerDiscardEnable = state->rast_state->rasterizer_discard;
+ rast_state.polygonMode = state->rast_state->polygon_mode;
+ rast_state.cullMode = state->rast_state->cull_mode;
+ rast_state.frontFace = state->rast_state->front_face;
+
+ rast_state.depthBiasEnable = VK_TRUE;
+ rast_state.depthBiasConstantFactor = 0.0;
+ rast_state.depthBiasClamp = 0.0;
+ rast_state.depthBiasSlopeFactor = 0.0;
+ rast_state.lineWidth = state->line_width;
+
+ VkPipelineDepthStencilStateCreateInfo depth_stencil_state = {};
+ depth_stencil_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
+ depth_stencil_state.depthTestEnable = state->depth_stencil_alpha_state->depth_test;
+ depth_stencil_state.depthCompareOp = state->depth_stencil_alpha_state->depth_compare_op;
+ depth_stencil_state.depthBoundsTestEnable = state->depth_stencil_alpha_state->depth_bounds_test;
+ depth_stencil_state.minDepthBounds = state->depth_stencil_alpha_state->min_depth_bounds;
+ depth_stencil_state.maxDepthBounds = state->depth_stencil_alpha_state->max_depth_bounds;
+ depth_stencil_state.stencilTestEnable = state->depth_stencil_alpha_state->stencil_test;
+ depth_stencil_state.front = state->depth_stencil_alpha_state->stencil_front;
+ depth_stencil_state.back = state->depth_stencil_alpha_state->stencil_back;
+ depth_stencil_state.depthWriteEnable = state->depth_stencil_alpha_state->depth_write;
+
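+ /* Viewport, scissor, depth bias, blend constants and stencil reference are
+ * kept dynamic, so changing them does not require building a new pipeline. */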
+ VkDynamicState dynamicStateEnables[] = {
+ VK_DYNAMIC_STATE_DEPTH_BIAS,
+ VK_DYNAMIC_STATE_SCISSOR,
+ VK_DYNAMIC_STATE_STENCIL_REFERENCE,
+ VK_DYNAMIC_STATE_VIEWPORT,
+ VK_DYNAMIC_STATE_BLEND_CONSTANTS,
+ };
+
+ VkPipelineDynamicStateCreateInfo pipelineDynamicStateCreateInfo = {};
+ pipelineDynamicStateCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
+ pipelineDynamicStateCreateInfo.pDynamicStates = dynamicStateEnables;
+ pipelineDynamicStateCreateInfo.dynamicStateCount = ARRAY_SIZE(dynamicStateEnables);
+
+ VkGraphicsPipelineCreateInfo pci = {};
+ pci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
+ pci.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT;
+ pci.layout = prog->layout;
+ pci.renderPass = render_pass;
+ pci.pVertexInputState = &vertex_input_state;
+ pci.pInputAssemblyState = &primitive_state;
+ pci.pRasterizationState = &rast_state;
+ pci.pColorBlendState = &blend_state;
+ pci.pMultisampleState = &ms_state;
+ pci.pViewportState = &viewport_state;
+ pci.pDepthStencilState = &depth_stencil_state;
+ pci.pDynamicState = &pipelineDynamicStateCreateInfo;
+
+ VkPipelineShaderStageCreateInfo shader_stages[PIPE_SHADER_TYPES - 1];
+ uint32_t num_stages = 0;
+ for (int i = 0; i < PIPE_SHADER_TYPES - 1; ++i) {
+ if (!prog->stages[i])
+ continue;
+
+ VkPipelineShaderStageCreateInfo stage = {};
+ stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
+ stage.stage = zink_shader_stage(i);
+ stage.module = prog->stages[i]->shader_module;
+ stage.pName = "main";
+ shader_stages[num_stages++] = stage;
+ }
+ assert(num_stages > 0);
+
+ pci.pStages = shader_stages;
+ pci.stageCount = num_stages;
+
+ VkPipeline pipeline;
+ if (vkCreateGraphicsPipelines(dev, VK_NULL_HANDLE, 1, &pci, NULL, &pipeline) != VK_SUCCESS) {
+ debug_printf("vkCreateGraphicsPipelines failed\n");
+ return VK_NULL_HANDLE;
+ }
+
+ return pipeline;
+}
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_PIPELINE_H
+#define ZINK_PIPELINE_H
+
+#include <vulkan/vulkan.h>
+
+#include "pipe/p_state.h"
+
+struct zink_blend_state;
+struct zink_depth_stencil_alpha_state;
+struct zink_gfx_program;
+struct zink_rasterizer_state;
+struct zink_shader;
+struct zink_vertex_elements_state;
+
+struct zink_gfx_pipeline_state {
+ VkPrimitiveTopology primitive_topology;
+
+ struct zink_vertex_elements_state *element_state;
+ VkVertexInputBindingDescription bindings[PIPE_MAX_ATTRIBS]; // combination of element_state and stride
+
+ uint32_t num_attachments;
+ struct zink_blend_state *blend_state;
+
+ struct zink_rasterizer_state *rast_state;
+
+ struct zink_depth_stencil_alpha_state *depth_stencil_alpha_state;
+
+ float line_width;
+};
+
+VkPipeline
+zink_create_gfx_pipeline(VkDevice dev, struct zink_gfx_program *prog,
+ struct zink_gfx_pipeline_state *state,
+ VkRenderPass render_pass);
+
+#endif
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_program.h"
+
+#include "zink_compiler.h"
+#include "zink_context.h"
+
+#include "util/u_debug.h"
+#include "util/u_memory.h"
+
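+ /* Merge the descriptor bindings declared by every active shader stage into a
+ * single descriptor-set layout, tagged with the corresponding stage flags. */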
+static VkDescriptorSetLayout
+create_desc_set_layout(VkDevice dev,
+ struct zink_shader *stages[PIPE_SHADER_TYPES - 1])
+{
+ VkDescriptorSetLayoutBinding bindings[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS];
+ int num_bindings = 0;
+
+ for (int i = 0; i < PIPE_SHADER_TYPES - 1; i++) {
+ struct zink_shader *shader = stages[i];
+ if (!shader)
+ continue;
+
+ VkShaderStageFlagBits stage_flags = zink_shader_stage(i);
+ for (int j = 0; j < shader->num_bindings; j++) {
+ assert(num_bindings < ARRAY_SIZE(bindings));
+ bindings[num_bindings].binding = shader->bindings[j].binding;
+ bindings[num_bindings].descriptorType = shader->bindings[j].type;
+ bindings[num_bindings].descriptorCount = 1;
+ bindings[num_bindings].stageFlags = stage_flags;
+ bindings[num_bindings].pImmutableSamplers = NULL;
+ ++num_bindings;
+ }
+ }
+
+ VkDescriptorSetLayoutCreateInfo dcslci = {};
+ dcslci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+ dcslci.pNext = NULL;
+ dcslci.flags = 0;
+ dcslci.bindingCount = num_bindings;
+ dcslci.pBindings = bindings;
+
+ VkDescriptorSetLayout dsl;
+ if (vkCreateDescriptorSetLayout(dev, &dcslci, 0, &dsl) != VK_SUCCESS) {
+ debug_printf("vkCreateDescriptorSetLayout failed\n");
+ return VK_NULL_HANDLE;
+ }
+
+ return dsl;
+}
+
+static VkPipelineLayout
+create_pipeline_layout(VkDevice dev, VkDescriptorSetLayout dsl)
+{
+ assert(dsl != VK_NULL_HANDLE);
+
+ VkPipelineLayoutCreateInfo plci = {};
+ plci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
+
+ plci.pSetLayouts = &dsl;
+ plci.setLayoutCount = 1;
+
+ VkPipelineLayout layout;
+ if (vkCreatePipelineLayout(dev, &plci, NULL, &layout) != VK_SUCCESS) {
+ debug_printf("vkCreatePipelineLayout failed!\n");
+ return VK_NULL_HANDLE;
+ }
+
+ return layout;
+}
+
+struct zink_gfx_program *
+zink_create_gfx_program(VkDevice dev,
+ struct zink_shader *stages[PIPE_SHADER_TYPES - 1])
+{
+ struct zink_gfx_program *prog = CALLOC_STRUCT(zink_gfx_program);
+ if (!prog) {
+ debug_printf("failed to allocate gfx-program\n");
+ goto fail;
+ }
+
+ for (int i = 0; i < PIPE_SHADER_TYPES - 1; ++i)
+ prog->stages[i] = stages[i];
+
+ prog->dsl = create_desc_set_layout(dev, stages);
+ if (!prog->dsl)
+ goto fail;
+
+ prog->layout = create_pipeline_layout(dev, prog->dsl);
+ if (!prog->layout)
+ goto fail;
+
+ return prog;
+
+fail:
+ if (prog)
+ zink_destroy_gfx_program(dev, prog);
+ return NULL;
+}
+
+void
+zink_destroy_gfx_program(VkDevice dev, struct zink_gfx_program *prog)
+{
+ if (prog->layout)
+ vkDestroyPipelineLayout(dev, prog->layout, NULL);
+
+ if (prog->dsl)
+ vkDestroyDescriptorSetLayout(dev, prog->dsl, NULL);
+
+ FREE(prog);
+}
+
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_PROGRAM_H
+#define ZINK_PROGRAM_H
+
+#include <vulkan/vulkan.h>
+
+#include "pipe/p_state.h"
+
+struct zink_context;
+struct zink_shader;
+struct zink_gfx_pipeline_state;
+
+struct zink_gfx_program {
+ struct zink_shader *stages[PIPE_SHADER_TYPES - 1]; // compute stage doesn't belong here
+ VkDescriptorSetLayout dsl;
+ VkPipelineLayout layout;
+};
+
+struct zink_gfx_program *
+zink_create_gfx_program(VkDevice dev,
+ struct zink_shader *stages[PIPE_SHADER_TYPES - 1]);
+
+void
+zink_destroy_gfx_program(VkDevice dev, struct zink_gfx_program *);
+
+#endif
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_PUBLIC_H
+#define ZINK_PUBLIC_H
+
+struct pipe_screen;
+struct sw_winsys;
+
+struct pipe_screen *
+zink_create_screen(struct sw_winsys *winsys);
+
+#endif
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_render_pass.h"
+
+#include "zink_screen.h"
+
+#include "util/u_memory.h"
+#include "util/u_string.h"
+
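+ /* Build a single-subpass render pass matching the framebuffer state; all
+ * attachments use LOAD/STORE ops so their contents survive the pass. */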
+static VkRenderPass
+create_render_pass(VkDevice dev, struct zink_render_pass_state *state)
+{
+
+ VkAttachmentReference color_refs[PIPE_MAX_COLOR_BUFS], zs_ref;
+ VkAttachmentDescription attachments[PIPE_MAX_COLOR_BUFS + 1];
+
+ for (int i = 0; i < state->num_cbufs; i++) {
+ struct zink_rt_attrib *rt = state->rts + i;
+ attachments[i].flags = 0;
+ attachments[i].format = rt->format;
+ attachments[i].samples = VK_SAMPLE_COUNT_1_BIT;
+ attachments[i].loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ attachments[i].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachments[i].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
+ attachments[i].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
+ attachments[i].initialLayout = VK_IMAGE_LAYOUT_GENERAL;
+ attachments[i].finalLayout = VK_IMAGE_LAYOUT_GENERAL;
+ color_refs[i].attachment = i;
+ color_refs[i].layout = VK_IMAGE_LAYOUT_GENERAL;
+ }
+
+ int num_attachments = state->num_cbufs;
+ if (state->have_zsbuf) {
+ struct zink_rt_attrib *rt = state->rts + state->num_cbufs;
+ attachments[num_attachments].flags = 0;
+ attachments[num_attachments].format = rt->format;
+ attachments[num_attachments].samples = VK_SAMPLE_COUNT_1_BIT;
+ attachments[num_attachments].loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ attachments[num_attachments].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachments[num_attachments].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
+ attachments[num_attachments].stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
+ attachments[num_attachments].initialLayout = VK_IMAGE_LAYOUT_GENERAL;
+ attachments[num_attachments].finalLayout = VK_IMAGE_LAYOUT_GENERAL;
+
+ zs_ref.attachment = num_attachments++;
+ zs_ref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
+ }
+
+ VkSubpassDescription subpass = {};
+ subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
+ subpass.colorAttachmentCount = state->num_cbufs;
+ subpass.pColorAttachments = color_refs;
+ subpass.pDepthStencilAttachment = state->have_zsbuf ? &zs_ref : NULL;
+
+ VkRenderPassCreateInfo rpci = {};
+ rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
+ rpci.attachmentCount = num_attachments;
+ rpci.pAttachments = attachments;
+ rpci.subpassCount = 1;
+ rpci.pSubpasses = &subpass;
+
+ VkRenderPass render_pass;
+ if (vkCreateRenderPass(dev, &rpci, NULL, &render_pass) != VK_SUCCESS)
+ return VK_NULL_HANDLE;
+
+ return render_pass;
+}
+
+struct zink_render_pass *
+zink_create_render_pass(struct zink_screen *screen,
+ struct zink_render_pass_state *state)
+{
+ struct zink_render_pass *rp = CALLOC_STRUCT(zink_render_pass);
+ if (!rp)
+ goto fail;
+
+ pipe_reference_init(&rp->reference, 1);
+
+ rp->render_pass = create_render_pass(screen->dev, state);
+ if (!rp->render_pass)
+ goto fail;
+
+ return rp;
+
+fail:
+ if (rp)
+ zink_destroy_render_pass(screen, rp);
+ return NULL;
+}
+
+void
+zink_destroy_render_pass(struct zink_screen *screen,
+ struct zink_render_pass *rp)
+{
+ vkDestroyRenderPass(screen->dev, rp->render_pass, NULL);
+ FREE(rp);
+}
+
+void
+debug_describe_zink_render_pass(char* buf, const struct zink_render_pass *ptr)
+{
+ sprintf(buf, "zink_render_pass");
+}
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_RENDERPASS_H
+#define ZINK_RENDERPASS_H
+
+#include <vulkan/vulkan.h>
+
+#include "pipe/p_state.h"
+#include "util/u_inlines.h"
+
+struct zink_screen;
+
+struct zink_rt_attrib {
+ VkFormat format;
+};
+
+struct zink_render_pass_state {
+ uint8_t num_cbufs : 4; /* PIPE_MAX_COLOR_BUFS = 8 */
+ uint8_t have_zsbuf : 1;
+ struct zink_rt_attrib rts[PIPE_MAX_COLOR_BUFS + 1];
+};
+
+struct zink_render_pass {
+ struct pipe_reference reference;
+
+ VkRenderPass render_pass;
+};
+
+struct zink_render_pass *
+zink_create_render_pass(struct zink_screen *screen,
+ struct zink_render_pass_state *state);
+
+void
+zink_destroy_render_pass(struct zink_screen *screen,
+ struct zink_render_pass *rp);
+
+void
+debug_describe_zink_render_pass(char* buf, const struct zink_render_pass *ptr);
+
+static inline void
+zink_render_pass_reference(struct zink_screen *screen,
+ struct zink_render_pass **dst,
+ struct zink_render_pass *src)
+{
+ struct zink_render_pass *old_dst = *dst;
+
+ if (pipe_reference_described(&old_dst->reference, &src->reference,
+ (debug_reference_descriptor) debug_describe_zink_render_pass))
+ zink_destroy_render_pass(screen, old_dst);
+ *dst = src;
+}
+
+#endif
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_resource.h"
+
+#include "zink_cmdbuf.h"
+#include "zink_context.h"
+#include "zink_screen.h"
+
+#include "util/slab.h"
+#include "util/u_debug.h"
+#include "util/u_format.h"
+#include "util/u_inlines.h"
+#include "util/u_memory.h"
+
+#include "state_tracker/sw_winsys.h"
+
+static void
+zink_resource_destroy(struct pipe_screen *pscreen,
+ struct pipe_resource *pres)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+ struct zink_resource *res = zink_resource(pres);
+ if (pres->target == PIPE_BUFFER)
+ vkDestroyBuffer(screen->dev, res->buffer, NULL);
+ else
+ vkDestroyImage(screen->dev, res->image, NULL);
+
+ vkFreeMemory(screen->dev, res->mem, NULL);
+ FREE(res);
+}
+
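+ /* Pick a memory type that both satisfies the resource's requirements and
+ * provides at least the requested property flags. */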
+static uint32_t
+get_memory_type_index(struct zink_screen *screen,
+ const VkMemoryRequirements *reqs,
+ VkMemoryPropertyFlags props)
+{
+ for (uint32_t i = 0u; i < VK_MAX_MEMORY_TYPES; i++) {
+ if (((reqs->memoryTypeBits >> i) & 1) == 1) {
+ if ((screen->mem_props.memoryTypes[i].propertyFlags & props) == props) {
+ return i;
+ }
+ }
+ }
+
+ unreachable("Unsupported memory-type");
+ return 0;
+}
+
+VkImageAspectFlags
+zink_aspect_from_format(enum pipe_format fmt)
+{
+ if (util_format_is_depth_or_stencil(fmt)) {
+ VkImageAspectFlags aspect = 0;
+ const struct util_format_description *desc = util_format_description(fmt);
+ if (util_format_has_depth(desc))
+ aspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
+ if (util_format_has_stencil(desc))
+ aspect |= VK_IMAGE_ASPECT_STENCIL_BIT;
+ return aspect;
+ } else
+ return VK_IMAGE_ASPECT_COLOR_BIT;
+}
+
+static struct pipe_resource *
+zink_resource_create(struct pipe_screen *pscreen,
+ const struct pipe_resource *templ)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+ struct zink_resource *res = CALLOC_STRUCT(zink_resource);
+
+ res->base = *templ;
+
+ pipe_reference_init(&res->base.reference, 1);
+ res->base.screen = pscreen;
+
+ VkMemoryRequirements reqs;
+ VkMemoryPropertyFlags flags = 0;
+ if (templ->target == PIPE_BUFFER) {
+ VkBufferCreateInfo bci = {};
+ bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
+ bci.size = templ->width0;
+
+ bci.usage = 0;
+
+ if (templ->bind & PIPE_BIND_VERTEX_BUFFER)
+ bci.usage |= VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
+
+ if (templ->bind & PIPE_BIND_INDEX_BUFFER)
+ bci.usage |= VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
+
+ if (templ->bind & PIPE_BIND_CONSTANT_BUFFER)
+ bci.usage |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
+
+ if (templ->bind & PIPE_BIND_SHADER_BUFFER)
+ bci.usage |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
+
+ if (templ->bind & PIPE_BIND_COMMAND_ARGS_BUFFER)
+ bci.usage |= VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
+
+ if (templ->usage == PIPE_USAGE_STAGING)
+ bci.usage |= VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+ if (vkCreateBuffer(screen->dev, &bci, NULL, &res->buffer) !=
+ VK_SUCCESS) {
+ FREE(res);
+ return NULL;
+ }
+
+ vkGetBufferMemoryRequirements(screen->dev, res->buffer, &reqs);
+ flags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+ } else {
+ res->format = zink_get_format(templ->format);
+
+ VkImageCreateInfo ici = {};
+ ici.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+
+ switch (templ->target) {
+ case PIPE_TEXTURE_1D:
+ case PIPE_TEXTURE_1D_ARRAY:
+ ici.imageType = VK_IMAGE_TYPE_1D;
+ break;
+
+ case PIPE_TEXTURE_2D:
+ case PIPE_TEXTURE_2D_ARRAY:
+ case PIPE_TEXTURE_CUBE:
+ case PIPE_TEXTURE_CUBE_ARRAY:
+ ici.imageType = VK_IMAGE_TYPE_2D;
+ /* cube and 2D array needs some quirks here */
+ if (templ->target == PIPE_TEXTURE_CUBE)
+ ici.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
+ else if (templ->target == PIPE_TEXTURE_2D_ARRAY)
+ ici.flags = VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR;
+ else if (templ->target == PIPE_TEXTURE_CUBE_ARRAY)
+ ici.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT |
+ VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR;
+ break;
+
+ case PIPE_TEXTURE_3D:
+ ici.imageType = VK_IMAGE_TYPE_3D;
+ break;
+
+ case PIPE_TEXTURE_RECT:
+ unreachable("texture rects not supported");
+
+ case PIPE_BUFFER:
+ unreachable("PIPE_BUFFER should already be handled");
+
+ default:
+ unreachable("Unknown target");
+ }
+
+ ici.format = res->format;
+ ici.extent.width = templ->width0;
+ ici.extent.height = templ->height0;
+ ici.extent.depth = templ->depth0;
+ ici.mipLevels = templ->last_level + 1;
+ ici.arrayLayers = templ->array_size;
+ ici.samples = templ->nr_samples ? templ->nr_samples : VK_SAMPLE_COUNT_1_BIT;
+ ici.tiling = templ->bind & PIPE_BIND_LINEAR ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
+
+ if (templ->target == PIPE_TEXTURE_CUBE ||
+ templ->target == PIPE_TEXTURE_CUBE_ARRAY)
+ ici.arrayLayers *= 6;
+
+ if (templ->bind & (PIPE_BIND_DISPLAY_TARGET |
+ PIPE_BIND_SCANOUT |
+ PIPE_BIND_SHARED)) {
+ // assert(ici.tiling == VK_IMAGE_TILING_LINEAR);
+ ici.tiling = VK_IMAGE_TILING_LINEAR;
+ }
+
+ if (templ->usage == PIPE_USAGE_STAGING)
+ ici.tiling = VK_IMAGE_TILING_LINEAR;
+
+ /* sadly, gallium doesn't let us know if it'll ever need this, so we have to assume */
+ ici.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+
+ if (templ->bind & PIPE_BIND_SAMPLER_VIEW)
+ ici.usage |= VK_IMAGE_USAGE_SAMPLED_BIT;
+
+ if (templ->bind & PIPE_BIND_SHADER_IMAGE)
+ ici.usage |= VK_IMAGE_USAGE_STORAGE_BIT;
+
+ if (templ->bind & PIPE_BIND_RENDER_TARGET)
+ ici.usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+
+ if (templ->bind & PIPE_BIND_DEPTH_STENCIL)
+ ici.usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+
+ if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
+ ici.usage |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
+
+ if (templ->bind & PIPE_BIND_STREAM_OUTPUT)
+ ici.usage |= VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
+
+ ici.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+ ici.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+ res->layout = VK_IMAGE_LAYOUT_UNDEFINED;
+
+ VkResult result = vkCreateImage(screen->dev, &ici, NULL, &res->image);
+ if (result != VK_SUCCESS) {
+ FREE(res);
+ return NULL;
+ }
+
+ res->optimial_tiling = ici.tiling != VK_IMAGE_TILING_LINEAR;
+ res->aspect = zink_aspect_from_format(templ->format);
+
+ vkGetImageMemoryRequirements(screen->dev, res->image, &reqs);
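+ /* Staging, scanout, display-target and shared resources need host-visible
+ * memory for CPU access; everything else prefers device-local memory. */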
+ if (templ->usage == PIPE_USAGE_STAGING || (templ->bind & (PIPE_BIND_SCANOUT|PIPE_BIND_DISPLAY_TARGET|PIPE_BIND_SHARED)))
+ flags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+ else
+ flags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+ }
+
+ VkMemoryAllocateInfo mai = {};
+ mai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
+ mai.allocationSize = reqs.size;
+ mai.memoryTypeIndex = get_memory_type_index(screen, &reqs, flags);
+
+ if (vkAllocateMemory(screen->dev, &mai, NULL, &res->mem) != VK_SUCCESS)
+ goto fail;
+
+ res->offset = 0;
+ res->size = reqs.size;
+
+ if (templ->target == PIPE_BUFFER)
+ vkBindBufferMemory(screen->dev, res->buffer, res->mem, res->offset);
+ else
+ vkBindImageMemory(screen->dev, res->image, res->mem, res->offset);
+
+ if (templ->bind & (PIPE_BIND_DISPLAY_TARGET |
+ PIPE_BIND_SCANOUT |
+ PIPE_BIND_SHARED)) {
+ struct sw_winsys *winsys = screen->winsys;
+ res->dt = winsys->displaytarget_create(screen->winsys,
+ res->base.bind,
+ res->base.format,
+ templ->width0,
+ templ->height0,
+ 64, NULL,
+ &res->dt_stride);
+ }
+
+ return &res->base;
+
+fail:
+ if (templ->target == PIPE_BUFFER)
+ vkDestroyBuffer(screen->dev, res->buffer, NULL);
+ else
+ vkDestroyImage(screen->dev, res->image, NULL);
+
+ FREE(res);
+
+ return NULL;
+}
+
+void
+zink_screen_resource_init(struct pipe_screen *pscreen)
+{
+ pscreen->resource_create = zink_resource_create;
+ pscreen->resource_destroy = zink_resource_destroy;
+}
+
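+ /* Copy between an image and its linear staging buffer (in either direction),
+ * transitioning the image to a transfer-capable layout first if needed. */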
+static bool
+zink_transfer_copy_bufimage(struct zink_context *ctx,
+ struct zink_resource *res,
+ struct zink_resource *staging_res,
+ struct zink_transfer *trans,
+ bool buf2img)
+{
+ struct zink_cmdbuf *cmdbuf = zink_start_cmdbuf(ctx);
+ if (!cmdbuf)
+ return false;
+
+ if (res->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL &&
+ res->layout != VK_IMAGE_LAYOUT_GENERAL) {
+ zink_resource_barrier(cmdbuf->cmdbuf, res, res->aspect,
+ VK_IMAGE_LAYOUT_GENERAL);
+ res->layout = VK_IMAGE_LAYOUT_GENERAL;
+ }
+
+ VkBufferImageCopy copyRegion = {};
+ copyRegion.bufferOffset = staging_res->offset;
+ copyRegion.bufferRowLength = 0;
+ copyRegion.bufferImageHeight = 0;
+ copyRegion.imageSubresource.aspectMask = res->aspect;
+ copyRegion.imageSubresource.mipLevel = trans->base.level;
+ copyRegion.imageSubresource.layerCount = 1;
+ if (res->base.array_size > 1) {
+ copyRegion.imageSubresource.baseArrayLayer = trans->base.box.z;
+ copyRegion.imageSubresource.layerCount = trans->base.box.depth;
+ } else {
+ copyRegion.imageOffset.z = trans->base.box.z;
+ copyRegion.imageExtent.depth = trans->base.box.depth;
+ }
+ copyRegion.imageOffset.x = trans->base.box.x;
+ copyRegion.imageOffset.y = trans->base.box.y;
+
+ copyRegion.imageExtent.width = trans->base.box.width;
+ copyRegion.imageExtent.height = trans->base.box.height;
+
+ if (buf2img)
+ vkCmdCopyBufferToImage(cmdbuf->cmdbuf, staging_res->buffer, res->image, res->layout, 1, &copyRegion);
+ else
+ vkCmdCopyImageToBuffer(cmdbuf->cmdbuf, res->image, res->layout, staging_res->buffer, 1, &copyRegion);
+
+ zink_end_cmdbuf(ctx, cmdbuf);
+ return true;
+}
+
+static void *
+zink_transfer_map(struct pipe_context *pctx,
+ struct pipe_resource *pres,
+ unsigned level,
+ unsigned usage,
+ const struct pipe_box *box,
+ struct pipe_transfer **transfer)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ struct zink_screen *screen = zink_screen(pctx->screen);
+ struct zink_resource *res = zink_resource(pres);
+
+ struct zink_transfer *trans = slab_alloc(&ctx->transfer_pool);
+ if (!trans)
+ return NULL;
+
+ memset(trans, 0, sizeof(*trans));
+ pipe_resource_reference(&trans->base.resource, pres);
+
+ trans->base.resource = pres;
+ trans->base.level = level;
+ trans->base.usage = usage;
+ trans->base.box = *box;
+
+ void *ptr;
+ if (pres->target == PIPE_BUFFER) {
+ VkResult result = vkMapMemory(screen->dev, res->mem, res->offset, res->size, 0, &ptr);
+ if (result != VK_SUCCESS)
+ return NULL;
+
+ trans->base.stride = 0;
+ trans->base.layer_stride = 0;
+ ptr = ((uint8_t *)ptr) + box->x;
+ } else {
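+ /* Optimally-tiled images, and any image that isn't itself a staging resource,
+ * are mapped through a linear staging buffer; only linear staging images get
+ * mapped directly. */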
+ if (res->optimial_tiling || res->base.usage != PIPE_USAGE_STAGING) {
+ trans->base.stride = util_format_get_stride(pres->format, box->width);
+ trans->base.layer_stride = util_format_get_2d_size(pres->format,
+ trans->base.stride,
+ box->height);
+
+ struct pipe_resource templ = *pres;
+ templ.usage = PIPE_USAGE_STAGING;
+ templ.target = PIPE_BUFFER;
+ templ.bind = 0; // HACK: there's no transfer binding, but usage should tell us enough
+ templ.width0 = trans->base.layer_stride * box->depth;
+ templ.height0 = templ.depth0 = 0;
+ templ.last_level = 0;
+ templ.array_size = 1;
+ templ.flags = 0;
+
+ trans->staging_res = zink_resource_create(pctx->screen, &templ);
+ if (!trans->staging_res)
+ return NULL;
+
+ struct zink_resource *staging_res = zink_resource(trans->staging_res);
+
+ if (usage & PIPE_TRANSFER_READ) {
+ struct zink_context *ctx = zink_context(pctx);
+ bool ret = zink_transfer_copy_bufimage(ctx, res,
+ staging_res, trans,
+ false);
+ if (ret == false)
+ return NULL;
+ }
+
+ VkResult result = vkMapMemory(screen->dev, staging_res->mem,
+ staging_res->offset,
+ staging_res->size, 0, &ptr);
+ if (result != VK_SUCCESS)
+ return NULL;
+
+ } else {
+ assert(!res->optimial_tiling);
+ VkResult result = vkMapMemory(screen->dev, res->mem, res->offset, res->size, 0, &ptr);
+ if (result != VK_SUCCESS)
+ return NULL;
+ VkImageSubresource isr = {
+ res->aspect,
+ level,
+ 0
+ };
+ VkSubresourceLayout srl;
+ vkGetImageSubresourceLayout(screen->dev, res->image, &isr, &srl);
+ trans->base.stride = srl.rowPitch;
+ trans->base.layer_stride = srl.arrayPitch;
+ ptr = ((uint8_t *)ptr) + box->z * srl.depthPitch +
+ box->y * srl.rowPitch +
+ box->x;
+ }
+ }
+
+ *transfer = &trans->base;
+ return ptr;
+}
+
+static void
+zink_transfer_unmap(struct pipe_context *pctx,
+ struct pipe_transfer *ptrans)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ struct zink_screen *screen = zink_screen(pctx->screen);
+ struct zink_resource *res = zink_resource(ptrans->resource);
+ struct zink_transfer *trans = (struct zink_transfer *)ptrans;
+ if (trans->staging_res) {
+ struct zink_resource *staging_res = zink_resource(trans->staging_res);
+ vkUnmapMemory(screen->dev, staging_res->mem);
+
+ if (trans->base.usage & PIPE_TRANSFER_WRITE) {
+ struct zink_context *ctx = zink_context(pctx);
+
+ zink_transfer_copy_bufimage(ctx, res, staging_res, trans, true);
+ }
+
+ zink_resource_destroy(pctx->screen, trans->staging_res);
+ trans->staging_res = NULL;
+ } else
+ vkUnmapMemory(screen->dev, res->mem);
+
+ pipe_resource_reference(&trans->base.resource, NULL);
+ slab_free(&ctx->transfer_pool, ptrans);
+}
+
+void
+zink_context_resource_init(struct pipe_context *pctx)
+{
+ pctx->transfer_map = zink_transfer_map;
+ pctx->transfer_unmap = zink_transfer_unmap;
+
+ pctx->transfer_flush_region = u_default_transfer_flush_region;
+ pctx->buffer_subdata = u_default_buffer_subdata;
+ pctx->texture_subdata = u_default_texture_subdata;
+}
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_RESOURCE_H
+#define ZINK_RESOURCE_H
+
+struct pipe_screen;
+struct sw_displaytarget;
+
+#include "util/u_transfer.h"
+
+#include <vulkan/vulkan.h>
+
+struct zink_resource {
+ struct pipe_resource base;
+
+ union {
+ VkBuffer buffer;
+ struct {
+ VkFormat format;
+ VkImage image;
+ VkImageLayout layout;
+ VkImageAspectFlags aspect;
+ bool optimial_tiling;
+ };
+ };
+ VkDeviceMemory mem;
+ VkDeviceSize offset, size;
+
+ struct sw_displaytarget *dt;
+ unsigned dt_stride;
+};
+
+struct zink_transfer {
+ struct pipe_transfer base;
+ struct pipe_resource *staging_res;
+};
+
+static inline struct zink_resource *
+zink_resource(struct pipe_resource *r)
+{
+ return (struct zink_resource *)r;
+}
+
+VkImageAspectFlags
+zink_aspect_from_format(enum pipe_format fmt);
+
+void
+zink_screen_resource_init(struct pipe_screen *pscreen);
+
+void
+zink_context_resource_init(struct pipe_context *pctx);
+
+#endif
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_screen.h"
+
+#include "zink_compiler.h"
+#include "zink_context.h"
+#include "zink_fence.h"
+#include "zink_public.h"
+#include "zink_resource.h"
+
+#include "os/os_process.h"
+#include "util/u_debug.h"
+#include "util/u_math.h"
+#include "util/u_memory.h"
+#include "util/u_screen.h"
+#include "util/u_string.h"
+
+#include "state_tracker/sw_winsys.h"
+
+static const struct debug_named_value
+debug_options[] = {
+ { "nir", ZINK_DEBUG_NIR, "Dump NIR during program compile" },
+ { "spirv", ZINK_DEBUG_SPIRV, "Dump SPIR-V during program compile" },
+ { "tgsi", ZINK_DEBUG_TGSI, "Dump TGSI during program compile" },
+ DEBUG_NAMED_VALUE_END
+};
+
+DEBUG_GET_ONCE_FLAGS_OPTION(zink_debug, "ZINK_DEBUG", debug_options, 0)
+
+uint32_t
+zink_debug;
+
+static const char *
+zink_get_vendor(struct pipe_screen *pscreen)
+{
+ return "Collabora Ltd";
+}
+
+static const char *
+zink_get_device_vendor(struct pipe_screen *pscreen)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+ static char buf[1000];
+ snprintf(buf, sizeof(buf), "Unknown (vendor-id: 0x%04x)", screen->props.vendorID);
+ return buf;
+}
+
+static const char *
+zink_get_name(struct pipe_screen *pscreen)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+ static char buf[1000];
+ snprintf(buf, sizeof(buf), "zink (%s)", screen->props.deviceName);
+ return buf;
+}
+
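+/* Rough estimate of video memory in MiB: sums all memory heaps reported by
+ * the device, which may include host-visible heaps on some implementations.
+ */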
+static int
+get_video_mem(struct zink_screen *screen)
+{
+ VkDeviceSize size = 0;
+ for (uint32_t i = 0; i < screen->mem_props.memoryHeapCount; ++i)
+ size += screen->mem_props.memoryHeaps[i].size;
+ return (int)(size >> 20);
+}
+
+static int
+zink_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+
+ switch (param) {
+ case PIPE_CAP_NPOT_TEXTURES:
+ return 1;
+
+ case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
+ return screen->props.limits.maxFragmentDualSrcAttachments;
+
+ case PIPE_CAP_MAX_RENDER_TARGETS:
+ return screen->props.limits.maxColorAttachments;
+
+ case PIPE_CAP_TEXTURE_SWIZZLE:
+ return 1;
+
+ case PIPE_CAP_MAX_TEXTURE_2D_SIZE:
+ return screen->props.limits.maxImageDimension2D;
+ case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
+ return 1 + util_logbase2(screen->props.limits.maxImageDimension3D);
+ case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
+ return 1 + util_logbase2(screen->props.limits.maxImageDimensionCube);
+
+ case PIPE_CAP_BLEND_EQUATION_SEPARATE:
+ return 1;
+
+ case PIPE_CAP_FRAGMENT_SHADER_TEXTURE_LOD:
+ case PIPE_CAP_FRAGMENT_SHADER_DERIVATIVES:
+ case PIPE_CAP_VERTEX_SHADER_SATURATE:
+ return 1;
+
+ case PIPE_CAP_INDEP_BLEND_ENABLE:
+ case PIPE_CAP_INDEP_BLEND_FUNC:
+ return 1;
+
+ case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
+ return screen->props.limits.maxImageArrayLayers;
+
+#if 0 /* TODO: Enable me */
+ case PIPE_CAP_DEPTH_CLIP_DISABLE:
+ return 0;
+#endif
+
+#if 0 /* TODO: Enable me */
+ case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
+ return 1;
+#endif
+
+ case PIPE_CAP_SEAMLESS_CUBE_MAP:
+ return 1;
+
+ case PIPE_CAP_MIN_TEXEL_OFFSET:
+ return screen->props.limits.minTexelOffset;
+ case PIPE_CAP_MAX_TEXEL_OFFSET:
+ return screen->props.limits.maxTexelOffset;
+
+ case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
+ return 1;
+
+ case PIPE_CAP_GLSL_FEATURE_LEVEL:
+ case PIPE_CAP_GLSL_FEATURE_LEVEL_COMPATIBILITY:
+ return 450; /* unsure (probably wrong) */
+
+#if 0 /* TODO: Enable me */
+ case PIPE_CAP_COMPUTE:
+ return 1;
+#endif
+
+ case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
+ return screen->props.limits.minUniformBufferOffsetAlignment;
+
+ case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
+ return screen->props.limits.minMemoryMapAlignment;
+
+ case PIPE_CAP_CUBE_MAP_ARRAY:
+ return screen->feats.imageCubeArray;
+
+ case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
+ return 0; /* unsure */
+
+ case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
+ return screen->props.limits.maxTexelBufferElements;
+
+ case PIPE_CAP_ENDIANNESS:
+ return PIPE_ENDIAN_NATIVE; /* unsure */
+
+ case PIPE_CAP_MAX_VIEWPORTS:
+ return screen->props.limits.maxViewports;
+
+ case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
+ return 1;
+
+ case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
+ return screen->props.limits.maxGeometryOutputVertices;
+ case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
+ return screen->props.limits.maxGeometryOutputComponents;
+
+#if 0 /* TODO: Enable me. Enables ARB_texture_gather */
+ case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
+ return 4;
+#endif
+
+ case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET:
+ return screen->props.limits.minTexelGatherOffset;
+ case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET:
+ return screen->props.limits.maxTexelGatherOffset;
+
+ case PIPE_CAP_VENDOR_ID:
+ return screen->props.vendorID;
+ case PIPE_CAP_DEVICE_ID:
+ return screen->props.deviceID;
+
+ case PIPE_CAP_ACCELERATED:
+ return 1;
+ case PIPE_CAP_VIDEO_MEMORY:
+ return get_video_mem(screen);
+ case PIPE_CAP_UMA:
+ /* inaccurate */
+ return screen->props.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
+
+ case PIPE_CAP_MAX_VERTEX_ATTRIB_STRIDE:
+ return screen->props.limits.maxVertexInputBindingStride;
+
+#if 0 /* TODO: Enable me */
+ case PIPE_CAP_SAMPLER_VIEW_TARGET:
+ return 1;
+#endif
+
+#if 0 /* TODO: Enable me */
+ case PIPE_CAP_TEXTURE_FLOAT_LINEAR:
+ case PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR:
+ return 1;
+#endif
+
+ case PIPE_CAP_SHAREABLE_SHADERS:
+ return 1;
+
+#if 0 /* TODO: Enable me. Enables GL_ARB_shader_storage_buffer_object */
+ case PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT:
+ return screen->props.limits.minStorageBufferOffsetAlignment;
+#endif
+
+ case PIPE_CAP_PCI_GROUP:
+ case PIPE_CAP_PCI_BUS:
+ case PIPE_CAP_PCI_DEVICE:
+ case PIPE_CAP_PCI_FUNCTION:
+ return 0; /* TODO: figure these out */
+
+#if 0 /* TODO: Enable me */
+ case PIPE_CAP_CULL_DISTANCE:
+ return screen->feats.shaderCullDistance;
+#endif
+
+ case PIPE_CAP_VIEWPORT_SUBPIXEL_BITS:
+ return screen->props.limits.viewportSubPixelBits;
+
+ case PIPE_CAP_GLSL_OPTIMIZE_CONSERVATIVELY:
+ return 0; /* not sure */
+
+ case PIPE_CAP_MAX_GS_INVOCATIONS:
+ return 0; /* not implemented */
+
+ case PIPE_CAP_MAX_COMBINED_SHADER_BUFFERS:
+ return screen->props.limits.maxDescriptorSetStorageBuffers;
+
+ case PIPE_CAP_MAX_SHADER_BUFFER_SIZE:
+ return screen->props.limits.maxStorageBufferRange; /* unsure */
+
+ case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
+ case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
+ return 1;
+
+ case PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT:
+ case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER:
+ return 0;
+
+ case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
+ return 1;
+
+ default:
+ return u_pipe_screen_get_param_defaults(pscreen, param);
+ }
+}
+
+static float
+zink_get_paramf(struct pipe_screen *pscreen, enum pipe_capf param)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+
+ switch (param) {
+ case PIPE_CAPF_MAX_LINE_WIDTH:
+ case PIPE_CAPF_MAX_LINE_WIDTH_AA:
+ return screen->props.limits.lineWidthRange[1];
+
+ case PIPE_CAPF_MAX_POINT_WIDTH:
+ case PIPE_CAPF_MAX_POINT_WIDTH_AA:
+ return screen->props.limits.pointSizeRange[1];
+
+ case PIPE_CAPF_MAX_TEXTURE_ANISOTROPY:
+ return screen->props.limits.maxSamplerAnisotropy;
+
+ case PIPE_CAPF_MAX_TEXTURE_LOD_BIAS:
+ return screen->props.limits.maxSamplerLodBias;
+
+ case PIPE_CAPF_MIN_CONSERVATIVE_RASTER_DILATE:
+ case PIPE_CAPF_MAX_CONSERVATIVE_RASTER_DILATE:
+ case PIPE_CAPF_CONSERVATIVE_RASTER_DILATE_GRANULARITY:
+ return 0.0f; /* not implemented */
+ }
+
+ /* should only get here on unhandled cases */
+ return 0.0;
+}
+
+static int
+zink_get_shader_param(struct pipe_screen *pscreen,
+ enum pipe_shader_type shader,
+ enum pipe_shader_cap param)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+
+ switch (param) {
+ case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
+ case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
+ case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
+ case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
+ case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
+ if (shader == PIPE_SHADER_VERTEX ||
+ shader == PIPE_SHADER_FRAGMENT)
+ return INT_MAX;
+ return 0;
+
+ case PIPE_SHADER_CAP_MAX_INPUTS:
+ switch (shader) {
+ case PIPE_SHADER_VERTEX:
+ return MIN2(screen->props.limits.maxVertexInputAttributes,
+ PIPE_MAX_SHADER_INPUTS);
+ case PIPE_SHADER_FRAGMENT:
+ return MIN2(screen->props.limits.maxFragmentInputComponents / 4,
+ PIPE_MAX_SHADER_INPUTS);
+ default:
+ return 0; /* unsupported stage */
+ }
+
+ case PIPE_SHADER_CAP_MAX_OUTPUTS:
+ switch (shader) {
+ case PIPE_SHADER_VERTEX:
+ return MIN2(screen->props.limits.maxVertexOutputComponents / 4,
+ PIPE_MAX_SHADER_OUTPUTS);
+ case PIPE_SHADER_FRAGMENT:
+ return MIN2(screen->props.limits.maxColorAttachments,
+ PIPE_MAX_SHADER_OUTPUTS);
+ default:
+ return 0; /* unsupported stage */
+ }
+
+ case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
+ /* this might be a bit simplistic... */
+ return MIN2(screen->props.limits.maxPerStageDescriptorSamplers,
+ PIPE_MAX_SAMPLERS);
+
+ case PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE:
+ return screen->props.limits.maxUniformBufferRange;
+
+ case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
+ return screen->props.limits.maxPerStageDescriptorUniformBuffers;
+
+ case PIPE_SHADER_CAP_MAX_TEMPS:
+ return INT_MAX;
+
+ case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
+ case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
+ case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
+ case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
+ case PIPE_SHADER_CAP_SUBROUTINES:
+ case PIPE_SHADER_CAP_INTEGERS:
+ case PIPE_SHADER_CAP_INT64_ATOMICS:
+ case PIPE_SHADER_CAP_FP16:
+ return 0; /* not implemented */
+
+ case PIPE_SHADER_CAP_PREFERRED_IR:
+ return PIPE_SHADER_IR_NIR;
+
+ case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
+ return 0; /* not implemented */
+
+ case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
+ return MIN2(screen->props.limits.maxPerStageDescriptorSampledImages,
+ PIPE_MAX_SHADER_SAMPLER_VIEWS);
+
+ case PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED:
+ case PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED:
+ case PIPE_SHADER_CAP_TGSI_FMA_SUPPORTED:
+ return 0; /* not implemented */
+
+ case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
+ return 0; /* no idea */
+
+ case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT:
+ return 32; /* arbitrary */
+
+ case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
+ /* TODO: this limitation is dumb, and will need some fixes in mesa */
+ return MIN2(screen->props.limits.maxPerStageDescriptorStorageBuffers, 8);
+
+ case PIPE_SHADER_CAP_SUPPORTED_IRS:
+ return (1 << PIPE_SHADER_IR_NIR) | (1 << PIPE_SHADER_IR_TGSI);
+
+ case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
+ return screen->props.limits.maxPerStageDescriptorStorageImages;
+
+ case PIPE_SHADER_CAP_LOWER_IF_THRESHOLD:
+ case PIPE_SHADER_CAP_TGSI_SKIP_MERGE_REGISTERS:
+ return 0; /* unsure */
+
+ case PIPE_SHADER_CAP_TGSI_LDEXP_SUPPORTED:
+ case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
+ case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
+ case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
+ return 0; /* not implemented */
+ }
+
+ /* should only get here on unhandled cases */
+ return 0;
+}
+
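+/* Translation table from gallium formats to Vulkan formats. Entries that are
+ * not listed are zero-initialized, i.e. VK_FORMAT_UNDEFINED, and are reported
+ * as unsupported by zink_is_format_supported().
+ */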
+static const VkFormat formats[PIPE_FORMAT_COUNT] = {
+#define MAP_FORMAT_NORM(FMT) \
+ [PIPE_FORMAT_ ## FMT ## _UNORM] = VK_FORMAT_ ## FMT ## _UNORM, \
+ [PIPE_FORMAT_ ## FMT ## _SNORM] = VK_FORMAT_ ## FMT ## _SNORM,
+
+#define MAP_FORMAT_SCALED(FMT) \
+ [PIPE_FORMAT_ ## FMT ## _USCALED] = VK_FORMAT_ ## FMT ## _USCALED, \
+ [PIPE_FORMAT_ ## FMT ## _SSCALED] = VK_FORMAT_ ## FMT ## _SSCALED,
+
+#define MAP_FORMAT_INT(FMT) \
+ [PIPE_FORMAT_ ## FMT ## _UINT] = VK_FORMAT_ ## FMT ## _UINT, \
+ [PIPE_FORMAT_ ## FMT ## _SINT] = VK_FORMAT_ ## FMT ## _SINT,
+
+#define MAP_FORMAT_SRGB(FMT) \
+ [PIPE_FORMAT_ ## FMT ## _SRGB] = VK_FORMAT_ ## FMT ## _SRGB,
+
+#define MAP_FORMAT_FLOAT(FMT) \
+ [PIPE_FORMAT_ ## FMT ## _FLOAT] = VK_FORMAT_ ## FMT ## _SFLOAT,
+
+ // one component
+
+ // 8-bits
+ MAP_FORMAT_NORM(R8)
+ MAP_FORMAT_SCALED(R8)
+ MAP_FORMAT_INT(R8)
+ // 16-bits
+ MAP_FORMAT_NORM(R16)
+ MAP_FORMAT_SCALED(R16)
+ MAP_FORMAT_INT(R16)
+ MAP_FORMAT_FLOAT(R16)
+ // 32-bits
+ MAP_FORMAT_INT(R32)
+ MAP_FORMAT_FLOAT(R32)
+
+ // two components
+
+ // 8-bits
+ MAP_FORMAT_NORM(R8G8)
+ MAP_FORMAT_SCALED(R8G8)
+ MAP_FORMAT_INT(R8G8)
+ // 16-bits
+ MAP_FORMAT_NORM(R16G16)
+ MAP_FORMAT_SCALED(R16G16)
+ MAP_FORMAT_INT(R16G16)
+ MAP_FORMAT_FLOAT(R16G16)
+ // 32-bits
+ MAP_FORMAT_INT(R32G32)
+ MAP_FORMAT_FLOAT(R32G32)
+
+ // three components
+
+ // 8-bits
+ MAP_FORMAT_NORM(R8G8B8)
+ MAP_FORMAT_SCALED(R8G8B8)
+ MAP_FORMAT_INT(R8G8B8)
+ MAP_FORMAT_SRGB(R8G8B8)
+ // 16-bits
+ MAP_FORMAT_NORM(R16G16B16)
+ MAP_FORMAT_SCALED(R16G16B16)
+ MAP_FORMAT_INT(R16G16B16)
+ MAP_FORMAT_FLOAT(R16G16B16)
+ // 32-bits
+ MAP_FORMAT_INT(R32G32B32)
+ MAP_FORMAT_FLOAT(R32G32B32)
+
+ // four components
+
+ // 8-bits
+ MAP_FORMAT_NORM(R8G8B8A8)
+ MAP_FORMAT_SCALED(R8G8B8A8)
+ MAP_FORMAT_INT(R8G8B8A8)
+ MAP_FORMAT_SRGB(R8G8B8A8)
+ [PIPE_FORMAT_B8G8R8A8_UNORM] = VK_FORMAT_B8G8R8A8_UNORM,
+ MAP_FORMAT_SRGB(B8G8R8A8)
+ [PIPE_FORMAT_A8B8G8R8_SRGB] = VK_FORMAT_A8B8G8R8_SRGB_PACK32,
+ // 16-bits
+ MAP_FORMAT_NORM(R16G16B16A16)
+ MAP_FORMAT_SCALED(R16G16B16A16)
+ MAP_FORMAT_INT(R16G16B16A16)
+ MAP_FORMAT_FLOAT(R16G16B16A16)
+ // 32-bits
+ MAP_FORMAT_INT(R32G32B32A32)
+ MAP_FORMAT_FLOAT(R32G32B32A32)
+
+ // other color formats
+ [PIPE_FORMAT_B5G6R5_UNORM] = VK_FORMAT_R5G6B5_UNORM_PACK16,
+ [PIPE_FORMAT_B5G5R5A1_UNORM] = VK_FORMAT_B5G5R5A1_UNORM_PACK16,
+ [PIPE_FORMAT_R11G11B10_FLOAT] = VK_FORMAT_B10G11R11_UFLOAT_PACK32,
+ [PIPE_FORMAT_R9G9B9E5_FLOAT] = VK_FORMAT_E5B9G9R9_UFLOAT_PACK32,
+ [PIPE_FORMAT_R10G10B10A2_UNORM] = VK_FORMAT_A2B10G10R10_UNORM_PACK32,
+ [PIPE_FORMAT_B10G10R10A2_UNORM] = VK_FORMAT_A2R10G10B10_UNORM_PACK32,
+ [PIPE_FORMAT_R10G10B10A2_UINT] = VK_FORMAT_A2B10G10R10_UINT_PACK32,
+ [PIPE_FORMAT_B10G10R10A2_UINT] = VK_FORMAT_A2R10G10B10_UINT_PACK32,
+
+ // depth/stencil formats
+ [PIPE_FORMAT_Z32_FLOAT] = VK_FORMAT_D32_SFLOAT,
+ [PIPE_FORMAT_Z32_FLOAT_S8X24_UINT] = VK_FORMAT_D32_SFLOAT_S8_UINT,
+ [PIPE_FORMAT_Z16_UNORM] = VK_FORMAT_D16_UNORM,
+ [PIPE_FORMAT_X8Z24_UNORM] = VK_FORMAT_X8_D24_UNORM_PACK32,
+ [PIPE_FORMAT_Z24_UNORM_S8_UINT] = VK_FORMAT_D24_UNORM_S8_UINT,
+};
+
+VkFormat
+zink_get_format(enum pipe_format format)
+{
+ return formats[format];
+}
+
+static bool
+zink_is_format_supported(struct pipe_screen *pscreen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned sample_count,
+ unsigned storage_sample_count,
+ unsigned bind)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+
+ if (sample_count > 1)
+ return FALSE;
+
+ VkFormat vkformat = formats[format];
+ if (vkformat == VK_FORMAT_UNDEFINED)
+ return FALSE;
+
+ VkFormatProperties props;
+ vkGetPhysicalDeviceFormatProperties(screen->pdev, vkformat, &props);
+
+ if (target == PIPE_BUFFER) {
+ if (bind & PIPE_BIND_VERTEX_BUFFER &&
+ !(props.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT))
+ return FALSE;
+ } else {
+ /* all other targets are texture-targets */
+ if (bind & PIPE_BIND_RENDER_TARGET &&
+ !(props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT))
+ return FALSE;
+
+ if (bind & PIPE_BIND_BLENDABLE &&
+ !(props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT))
+ return FALSE;
+
+ if (bind & PIPE_BIND_SAMPLER_VIEW &&
+ !(props.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT))
+ return FALSE;
+
+ if (bind & PIPE_BIND_DEPTH_STENCIL &&
+ !(props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static void
+zink_destroy_screen(struct pipe_screen *pscreen)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+ slab_destroy_parent(&screen->transfer_pool);
+ FREE(screen);
+}
+
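+/* Creates a bare VkInstance with no layers or instance extensions enabled;
+ * the process name is used as the application name when available.
+ */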
+static VkInstance
+create_instance(void)
+{
+ VkApplicationInfo ai = {};
+ ai.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
+
+ char proc_name[128];
+ if (os_get_process_name(proc_name, ARRAY_SIZE(proc_name)))
+ ai.pApplicationName = proc_name;
+ else
+ ai.pApplicationName = "unknown";
+
+ ai.pEngineName = "mesa zink";
+ ai.apiVersion = VK_API_VERSION_1_0;
+
+ VkInstanceCreateInfo ici = {};
+ ici.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
+ ici.pApplicationInfo = &ai;
+
+ VkInstance instance = VK_NULL_HANDLE;
+ VkResult err = vkCreateInstance(&ici, NULL, &instance);
+ if (err != VK_SUCCESS)
+ return VK_NULL_HANDLE;
+
+ return instance;
+}
+
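+/* Picks a physical device: prefers the first discrete GPU, otherwise falls
+ * back to the first device enumerated.
+ */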
+static VkPhysicalDevice
+choose_pdev(const VkInstance instance)
+{
+ uint32_t i, pdev_count;
+ VkPhysicalDevice *pdevs, pdev;
+ vkEnumeratePhysicalDevices(instance, &pdev_count, NULL);
+ assert(pdev_count > 0);
+
+ pdevs = malloc(sizeof(*pdevs) * pdev_count);
+ vkEnumeratePhysicalDevices(instance, &pdev_count, pdevs);
+ assert(pdev_count > 0);
+
+ pdev = pdevs[0];
+ for (i = 0; i < pdev_count; ++i) {
+ VkPhysicalDeviceProperties props;
+ vkGetPhysicalDeviceProperties(pdevs[i], &props);
+ if (props.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU) {
+ pdev = pdevs[i];
+ break;
+ }
+ }
+ free(pdevs);
+ return pdev;
+}
+
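+/* Returns the index of the first queue family that supports graphics, or
+ * UINT32_MAX if none is found.
+ */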
+static uint32_t
+find_gfx_queue(const VkPhysicalDevice pdev)
+{
+ uint32_t num_queues;
+ vkGetPhysicalDeviceQueueFamilyProperties(pdev, &num_queues, NULL);
+ assert(num_queues > 0);
+
+ VkQueueFamilyProperties *props = malloc(sizeof(*props) * num_queues);
+ vkGetPhysicalDeviceQueueFamilyProperties(pdev, &num_queues, props);
+
+ for (uint32_t i = 0; i < num_queues; i++) {
+ if (props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
+ free(props);
+ return i;
+ }
+ }
+
+   free(props);
+   return UINT32_MAX;
+}
+
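+/* Presents a linearly-tiled image by mapping its memory and copying it
+ * row by row into the software winsys display target.
+ */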
+static void
+zink_flush_frontbuffer(struct pipe_screen *pscreen,
+ struct pipe_resource *pres,
+ unsigned level, unsigned layer,
+ void *winsys_drawable_handle,
+ struct pipe_box *sub_box)
+{
+ struct zink_screen *screen = zink_screen(pscreen);
+ struct sw_winsys *winsys = screen->winsys;
+ struct zink_resource *res = zink_resource(pres);
+
+ void *map = winsys->displaytarget_map(winsys, res->dt, 0);
+
+ if (map) {
+ VkImageSubresource isr = {};
+ isr.aspectMask = res->aspect;
+ isr.mipLevel = level;
+ isr.arrayLayer = layer;
+ VkSubresourceLayout layout;
+ vkGetImageSubresourceLayout(screen->dev, res->image, &isr, &layout);
+
+ void *ptr;
+ VkResult result = vkMapMemory(screen->dev, res->mem, res->offset, res->size, 0, &ptr);
+ if (result != VK_SUCCESS) {
+ debug_printf("failed to map memory for display\n");
+ return;
+ }
+ for (int i = 0; i < pres->height0; ++i) {
+ uint8_t *src = (uint8_t *)ptr + i * layout.rowPitch;
+ uint8_t *dst = (uint8_t *)map + i * res->dt_stride;
+ memcpy(dst, src, res->dt_stride);
+ }
+ vkUnmapMemory(screen->dev, res->mem);
+ }
+
+ winsys->displaytarget_unmap(winsys, res->dt);
+
+ assert(res->dt);
+ if (res->dt)
+ winsys->displaytarget_display(winsys, res->dt, winsys_drawable_handle, sub_box);
+}
+
+struct pipe_screen *
+zink_create_screen(struct sw_winsys *winsys)
+{
+ struct zink_screen *screen = CALLOC_STRUCT(zink_screen);
+ if (!screen)
+ return NULL;
+
+ zink_debug = debug_get_option_zink_debug();
+
+ screen->instance = create_instance();
+ screen->pdev = choose_pdev(screen->instance);
+ screen->gfx_queue = find_gfx_queue(screen->pdev);
+
+ vkGetPhysicalDeviceProperties(screen->pdev, &screen->props);
+ vkGetPhysicalDeviceFeatures(screen->pdev, &screen->feats);
+ vkGetPhysicalDeviceMemoryProperties(screen->pdev, &screen->mem_props);
+
+ VkDeviceQueueCreateInfo qci = {};
+ float dummy = 0.0f;
+ qci.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
+ qci.queueFamilyIndex = screen->gfx_queue;
+ qci.queueCount = 1;
+ qci.pQueuePriorities = &dummy;
+
+ VkDeviceCreateInfo dci = {};
+ dci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
+ dci.queueCreateInfoCount = 1;
+ dci.pQueueCreateInfos = &qci;
+ dci.pEnabledFeatures = &screen->feats;
+ const char *extensions[] = {
+ VK_KHR_MAINTENANCE1_EXTENSION_NAME
+ };
+ dci.ppEnabledExtensionNames = extensions;
+ dci.enabledExtensionCount = ARRAY_SIZE(extensions);
+ if (vkCreateDevice(screen->pdev, &dci, NULL, &screen->dev) != VK_SUCCESS)
+ goto fail;
+
+ screen->winsys = winsys;
+
+ screen->base.get_name = zink_get_name;
+ screen->base.get_vendor = zink_get_vendor;
+ screen->base.get_device_vendor = zink_get_device_vendor;
+ screen->base.get_param = zink_get_param;
+ screen->base.get_paramf = zink_get_paramf;
+ screen->base.get_shader_param = zink_get_shader_param;
+ screen->base.get_compiler_options = zink_get_compiler_options;
+ screen->base.is_format_supported = zink_is_format_supported;
+ screen->base.context_create = zink_context_create;
+ screen->base.flush_frontbuffer = zink_flush_frontbuffer;
+ screen->base.destroy = zink_destroy_screen;
+
+ zink_screen_resource_init(&screen->base);
+ zink_screen_fence_init(&screen->base);
+
+ slab_create_parent(&screen->transfer_pool, sizeof(struct zink_transfer), 16);
+
+ return &screen->base;
+
+fail:
+ FREE(screen);
+ return NULL;
+}
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_SCREEN_H
+#define ZINK_SCREEN_H
+
+#include "pipe/p_screen.h"
+#include "util/slab.h"
+
+#include <vulkan/vulkan.h>
+
+extern uint32_t zink_debug;
+
+#define ZINK_DEBUG_NIR 0x1
+#define ZINK_DEBUG_SPIRV 0x2
+#define ZINK_DEBUG_TGSI 0x4
+
+struct zink_screen {
+ struct pipe_screen base;
+
+ struct sw_winsys *winsys;
+
+ struct slab_parent_pool transfer_pool;
+
+ VkInstance instance;
+ VkPhysicalDevice pdev;
+ VkPhysicalDeviceProperties props;
+ VkPhysicalDeviceFeatures feats;
+ VkPhysicalDeviceMemoryProperties mem_props;
+
+ uint32_t gfx_queue;
+ VkDevice dev;
+};
+
+static inline struct zink_screen *
+zink_screen(struct pipe_screen *pipe)
+{
+ return (struct zink_screen *)pipe;
+}
+
+VkFormat
+zink_get_format(enum pipe_format format);
+
+#endif
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_state.h"
+
+#include "zink_context.h"
+#include "zink_screen.h"
+
+#include "util/u_memory.h"
+
+#include <math.h>
+
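+/* Translates gallium vertex elements into Vulkan vertex input state. Vertex
+ * buffer indices are compacted into contiguous binding slots via buffer_map,
+ * and the original index is remembered in binding_map.
+ */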
+static void *
+zink_create_vertex_elements_state(struct pipe_context *pctx,
+ unsigned num_elements,
+ const struct pipe_vertex_element *elements)
+{
+ unsigned int i;
+ struct zink_vertex_elements_state *ves = CALLOC_STRUCT(zink_vertex_elements_state);
+ if (!ves)
+ return NULL;
+
+ int buffer_map[PIPE_MAX_ATTRIBS];
+ for (int i = 0; i < ARRAY_SIZE(buffer_map); ++i)
+ buffer_map[i] = -1;
+
+ int num_bindings = 0;
+ for (i = 0; i < num_elements; ++i) {
+ const struct pipe_vertex_element *elem = elements + i;
+ assert(!elem->instance_divisor);
+
+ int binding = elem->vertex_buffer_index;
+ if (buffer_map[binding] < 0) {
+ ves->binding_map[num_bindings] = binding;
+ buffer_map[binding] = num_bindings++;
+ }
+ binding = buffer_map[binding];
+
+ ves->bindings[binding].binding = binding;
+ ves->bindings[binding].inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
+
+ ves->attribs[i].binding = binding;
+ ves->attribs[i].location = i; // TODO: unsure
+ ves->attribs[i].format = zink_get_format(elem->src_format);
+ assert(ves->attribs[i].format != VK_FORMAT_UNDEFINED);
+ ves->attribs[i].offset = elem->src_offset;
+ }
+
+ ves->num_bindings = num_bindings;
+ ves->num_attribs = num_elements;
+ return ves;
+}
+
+static void
+zink_bind_vertex_elements_state(struct pipe_context *pctx,
+ void *cso)
+{
+ struct zink_gfx_pipeline_state *state = &zink_context(pctx)->gfx_pipeline_state;
+ state->element_state = cso;
+ if (cso) {
+ struct zink_vertex_elements_state *ves = cso;
+ for (int i = 0; i < ves->num_bindings; ++i) {
+ state->bindings[i].binding = ves->bindings[i].binding;
+ state->bindings[i].inputRate = ves->bindings[i].inputRate;
+ }
+ }
+}
+
+static void
+zink_delete_vertex_elements_state(struct pipe_context *pctx,
+ void *ves)
+{
+   FREE(ves);
+}
+
+static VkBlendFactor
+blend_factor(enum pipe_blendfactor factor)
+{
+ switch (factor) {
+ case PIPE_BLENDFACTOR_ONE: return VK_BLEND_FACTOR_ONE;
+ case PIPE_BLENDFACTOR_SRC_COLOR: return VK_BLEND_FACTOR_SRC_COLOR;
+ case PIPE_BLENDFACTOR_SRC_ALPHA: return VK_BLEND_FACTOR_SRC_ALPHA;
+ case PIPE_BLENDFACTOR_DST_ALPHA: return VK_BLEND_FACTOR_DST_ALPHA;
+ case PIPE_BLENDFACTOR_DST_COLOR: return VK_BLEND_FACTOR_DST_COLOR;
+ case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
+ return VK_BLEND_FACTOR_SRC_ALPHA_SATURATE;
+ case PIPE_BLENDFACTOR_CONST_COLOR: return VK_BLEND_FACTOR_CONSTANT_COLOR;
+ case PIPE_BLENDFACTOR_CONST_ALPHA: return VK_BLEND_FACTOR_CONSTANT_ALPHA;
+ case PIPE_BLENDFACTOR_SRC1_COLOR: return VK_BLEND_FACTOR_SRC1_COLOR;
+ case PIPE_BLENDFACTOR_SRC1_ALPHA: return VK_BLEND_FACTOR_SRC1_ALPHA;
+
+ case PIPE_BLENDFACTOR_ZERO: return VK_BLEND_FACTOR_ZERO;
+
+ case PIPE_BLENDFACTOR_INV_SRC_COLOR:
+ return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
+ case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
+ return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
+ case PIPE_BLENDFACTOR_INV_DST_ALPHA:
+ return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
+ case PIPE_BLENDFACTOR_INV_DST_COLOR:
+ return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
+
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+ return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA;
+ case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
+ return VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR;
+ case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
+ return VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA;
+ }
+ unreachable("unexpected blend factor");
+}
+
+
+static bool
+need_blend_constants(enum pipe_blendfactor factor)
+{
+ switch (factor) {
+ case PIPE_BLENDFACTOR_CONST_COLOR:
+ case PIPE_BLENDFACTOR_CONST_ALPHA:
+ case PIPE_BLENDFACTOR_INV_CONST_COLOR:
+ case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static VkBlendOp
+blend_op(enum pipe_blend_func func)
+{
+ switch (func) {
+ case PIPE_BLEND_ADD: return VK_BLEND_OP_ADD;
+ case PIPE_BLEND_SUBTRACT: return VK_BLEND_OP_SUBTRACT;
+ case PIPE_BLEND_REVERSE_SUBTRACT: return VK_BLEND_OP_REVERSE_SUBTRACT;
+ case PIPE_BLEND_MIN: return VK_BLEND_OP_MIN;
+ case PIPE_BLEND_MAX: return VK_BLEND_OP_MAX;
+ }
+ unreachable("unexpected blend function");
+}
+
+static VkLogicOp
+logic_op(enum pipe_logicop func)
+{
+ switch (func) {
+ case PIPE_LOGICOP_CLEAR: return VK_LOGIC_OP_CLEAR;
+ case PIPE_LOGICOP_NOR: return VK_LOGIC_OP_NOR;
+ case PIPE_LOGICOP_AND_INVERTED: return VK_LOGIC_OP_AND_INVERTED;
+ case PIPE_LOGICOP_COPY_INVERTED: return VK_LOGIC_OP_COPY_INVERTED;
+ case PIPE_LOGICOP_AND_REVERSE: return VK_LOGIC_OP_AND_REVERSE;
+ case PIPE_LOGICOP_INVERT: return VK_LOGIC_OP_INVERT;
+ case PIPE_LOGICOP_XOR: return VK_LOGIC_OP_XOR;
+ case PIPE_LOGICOP_NAND: return VK_LOGIC_OP_NAND;
+ case PIPE_LOGICOP_AND: return VK_LOGIC_OP_AND;
+ case PIPE_LOGICOP_EQUIV: return VK_LOGIC_OP_EQUIVALENT;
+ case PIPE_LOGICOP_NOOP: return VK_LOGIC_OP_NO_OP;
+ case PIPE_LOGICOP_OR_INVERTED: return VK_LOGIC_OP_OR_INVERTED;
+ case PIPE_LOGICOP_COPY: return VK_LOGIC_OP_COPY;
+ case PIPE_LOGICOP_OR_REVERSE: return VK_LOGIC_OP_OR_REVERSE;
+ case PIPE_LOGICOP_OR: return VK_LOGIC_OP_OR;
+ case PIPE_LOGICOP_SET: return VK_LOGIC_OP_SET;
+ }
+ unreachable("unexpected logicop function");
+}
+
+static void *
+zink_create_blend_state(struct pipe_context *pctx,
+ const struct pipe_blend_state *blend_state)
+{
+ struct zink_blend_state *cso = CALLOC_STRUCT(zink_blend_state);
+ if (!cso)
+ return NULL;
+
+ if (blend_state->logicop_enable) {
+ cso->logicop_enable = VK_TRUE;
+ cso->logicop_func = logic_op(blend_state->logicop_func);
+ }
+
+ /* TODO: figure out what to do with dither (nothing is probably "OK" for now,
+ * as dithering is undefined in GL
+ */
+
+ /* TODO: these are multisampling-state, and should be set there instead of
+ * here, as that's closer tied to the update-frequency
+ */
+ cso->alpha_to_coverage = blend_state->alpha_to_coverage;
+ cso->alpha_to_one = blend_state->alpha_to_one;
+
+ cso->need_blend_constants = false;
+
+ for (int i = 0; i < PIPE_MAX_COLOR_BUFS; ++i) {
+ const struct pipe_rt_blend_state *rt = blend_state->rt;
+ if (blend_state->independent_blend_enable)
+ rt = blend_state->rt + i;
+
+ VkPipelineColorBlendAttachmentState att = { };
+
+ if (rt->blend_enable) {
+ att.blendEnable = VK_TRUE;
+ att.srcColorBlendFactor = blend_factor(rt->rgb_src_factor);
+ att.dstColorBlendFactor = blend_factor(rt->rgb_dst_factor);
+ att.colorBlendOp = blend_op(rt->rgb_func);
+ att.srcAlphaBlendFactor = blend_factor(rt->alpha_src_factor);
+ att.dstAlphaBlendFactor = blend_factor(rt->alpha_dst_factor);
+ att.alphaBlendOp = blend_op(rt->alpha_func);
+
+ if (need_blend_constants(rt->rgb_src_factor) ||
+ need_blend_constants(rt->rgb_dst_factor) ||
+ need_blend_constants(rt->alpha_src_factor) ||
+ need_blend_constants(rt->alpha_dst_factor))
+ cso->need_blend_constants = true;
+ }
+
+ if (rt->colormask & PIPE_MASK_R)
+ att.colorWriteMask |= VK_COLOR_COMPONENT_R_BIT;
+ if (rt->colormask & PIPE_MASK_G)
+ att.colorWriteMask |= VK_COLOR_COMPONENT_G_BIT;
+ if (rt->colormask & PIPE_MASK_B)
+ att.colorWriteMask |= VK_COLOR_COMPONENT_B_BIT;
+ if (rt->colormask & PIPE_MASK_A)
+ att.colorWriteMask |= VK_COLOR_COMPONENT_A_BIT;
+
+ cso->attachments[i] = att;
+ }
+
+ return cso;
+}
+
+static void
+zink_bind_blend_state(struct pipe_context *pctx, void *cso)
+{
+ zink_context(pctx)->gfx_pipeline_state.blend_state = cso;
+}
+
+static void
+zink_delete_blend_state(struct pipe_context *pctx, void *blend_state)
+{
+ FREE(blend_state);
+}
+
+static VkCompareOp
+compare_op(enum pipe_compare_func func)
+{
+ switch (func) {
+ case PIPE_FUNC_NEVER: return VK_COMPARE_OP_NEVER;
+ case PIPE_FUNC_LESS: return VK_COMPARE_OP_LESS;
+ case PIPE_FUNC_EQUAL: return VK_COMPARE_OP_EQUAL;
+ case PIPE_FUNC_LEQUAL: return VK_COMPARE_OP_LESS_OR_EQUAL;
+ case PIPE_FUNC_GREATER: return VK_COMPARE_OP_GREATER;
+ case PIPE_FUNC_NOTEQUAL: return VK_COMPARE_OP_NOT_EQUAL;
+ case PIPE_FUNC_GEQUAL: return VK_COMPARE_OP_GREATER_OR_EQUAL;
+ case PIPE_FUNC_ALWAYS: return VK_COMPARE_OP_ALWAYS;
+ }
+ unreachable("unexpected func");
+}
+
+static VkStencilOp
+stencil_op(enum pipe_stencil_op op)
+{
+ switch (op) {
+ case PIPE_STENCIL_OP_KEEP: return VK_STENCIL_OP_KEEP;
+ case PIPE_STENCIL_OP_ZERO: return VK_STENCIL_OP_ZERO;
+ case PIPE_STENCIL_OP_REPLACE: return VK_STENCIL_OP_REPLACE;
+ case PIPE_STENCIL_OP_INCR: return VK_STENCIL_OP_INCREMENT_AND_CLAMP;
+ case PIPE_STENCIL_OP_DECR: return VK_STENCIL_OP_DECREMENT_AND_CLAMP;
+   case PIPE_STENCIL_OP_INCR_WRAP: return VK_STENCIL_OP_INCREMENT_AND_WRAP;
+   case PIPE_STENCIL_OP_DECR_WRAP: return VK_STENCIL_OP_DECREMENT_AND_WRAP;
+ case PIPE_STENCIL_OP_INVERT: return VK_STENCIL_OP_INVERT;
+ }
+ unreachable("unexpected op");
+}
+
+static VkStencilOpState
+stencil_op_state(const struct pipe_stencil_state *src)
+{
+ VkStencilOpState ret;
+ ret.failOp = stencil_op(src->fail_op);
+ ret.passOp = stencil_op(src->zpass_op);
+ ret.depthFailOp = stencil_op(src->zfail_op);
+ ret.compareOp = compare_op(src->func);
+ ret.compareMask = src->valuemask;
+ ret.writeMask = src->writemask;
+ ret.reference = 0; // not used: we'll use a dynamic state for this
+ return ret;
+}
+
+static void *
+zink_create_depth_stencil_alpha_state(struct pipe_context *pctx,
+ const struct pipe_depth_stencil_alpha_state *depth_stencil_alpha)
+{
+ struct zink_depth_stencil_alpha_state *cso = CALLOC_STRUCT(zink_depth_stencil_alpha_state);
+ if (!cso)
+ return NULL;
+
+ if (depth_stencil_alpha->depth.enabled) {
+ cso->depth_test = VK_TRUE;
+ cso->depth_compare_op = compare_op(depth_stencil_alpha->depth.func);
+ }
+
+ if (depth_stencil_alpha->depth.bounds_test) {
+ cso->depth_bounds_test = VK_TRUE;
+ cso->min_depth_bounds = depth_stencil_alpha->depth.bounds_min;
+ cso->max_depth_bounds = depth_stencil_alpha->depth.bounds_max;
+ }
+
+ if (depth_stencil_alpha->stencil[0].enabled) {
+ cso->stencil_test = VK_TRUE;
+ cso->stencil_front = stencil_op_state(depth_stencil_alpha->stencil);
+ }
+
+   if (depth_stencil_alpha->stencil[1].enabled)
+ cso->stencil_back = stencil_op_state(depth_stencil_alpha->stencil + 1);
+ else
+ cso->stencil_back = cso->stencil_front;
+
+ cso->depth_write = depth_stencil_alpha->depth.writemask;
+
+ return cso;
+}
+
+static void
+zink_bind_depth_stencil_alpha_state(struct pipe_context *pctx, void *cso)
+{
+ zink_context(pctx)->gfx_pipeline_state.depth_stencil_alpha_state = cso;
+}
+
+static void
+zink_delete_depth_stencil_alpha_state(struct pipe_context *pctx,
+ void *depth_stencil_alpha)
+{
+ FREE(depth_stencil_alpha);
+}
+
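+/* Supported line widths are quantized to
+ * VkPhysicalDeviceLimits::lineWidthGranularity, so snap the requested width
+ * to the nearest supported value.
+ */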
+static float
+round_to_granularity(float value, float granularity)
+{
+ return (float)(round(value / granularity) * granularity);
+}
+
+static void *
+zink_create_rasterizer_state(struct pipe_context *pctx,
+ const struct pipe_rasterizer_state *rs_state)
+{
+ struct zink_screen *screen = zink_screen(pctx->screen);
+
+ struct zink_rasterizer_state *state = CALLOC_STRUCT(zink_rasterizer_state);
+ if (!state)
+ return NULL;
+
+ state->base = *rs_state;
+
+ assert(rs_state->depth_clip_far == rs_state->depth_clip_near);
+ state->depth_clamp = rs_state->depth_clip_near == 0;
+ state->rasterizer_discard = rs_state->rasterizer_discard;
+
+ assert(rs_state->fill_front <= PIPE_POLYGON_MODE_POINT);
+ if (rs_state->fill_back != rs_state->fill_front)
+ debug_printf("BUG: vulkan doesn't support different front and back fill modes\n");
+ state->polygon_mode = (VkPolygonMode)rs_state->fill_front; // same values
+ state->cull_mode = (VkCullModeFlags)rs_state->cull_face; // same bits
+
+ state->front_face = rs_state->front_ccw ? VK_FRONT_FACE_COUNTER_CLOCKWISE
+ : VK_FRONT_FACE_CLOCKWISE;
+
+ state->offset_point = rs_state->offset_point;
+ state->offset_line = rs_state->offset_line;
+ state->offset_tri = rs_state->offset_tri;
+ state->offset_units = rs_state->offset_units;
+ state->offset_clamp = rs_state->offset_clamp;
+ state->offset_scale = rs_state->offset_scale;
+
+ state->line_width = round_to_granularity(rs_state->line_width,
+ screen->props.limits.lineWidthGranularity);
+
+ return state;
+}
+
+static void
+zink_bind_rasterizer_state(struct pipe_context *pctx, void *cso)
+{
+ zink_context(pctx)->gfx_pipeline_state.rast_state = cso;
+}
+
+static void
+zink_delete_rasterizer_state(struct pipe_context *pctx, void *rs_state)
+{
+ FREE(rs_state);
+}
+
+void
+zink_context_state_init(struct pipe_context *pctx)
+{
+ pctx->create_vertex_elements_state = zink_create_vertex_elements_state;
+ pctx->bind_vertex_elements_state = zink_bind_vertex_elements_state;
+ pctx->delete_vertex_elements_state = zink_delete_vertex_elements_state;
+
+ pctx->create_blend_state = zink_create_blend_state;
+ pctx->bind_blend_state = zink_bind_blend_state;
+ pctx->delete_blend_state = zink_delete_blend_state;
+
+ pctx->create_depth_stencil_alpha_state = zink_create_depth_stencil_alpha_state;
+ pctx->bind_depth_stencil_alpha_state = zink_bind_depth_stencil_alpha_state;
+ pctx->delete_depth_stencil_alpha_state = zink_delete_depth_stencil_alpha_state;
+
+ pctx->create_rasterizer_state = zink_create_rasterizer_state;
+ pctx->bind_rasterizer_state = zink_bind_rasterizer_state;
+ pctx->delete_rasterizer_state = zink_delete_rasterizer_state;
+}
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_STATE_H
+#define ZINK_STATE_H
+
+#include <vulkan/vulkan.h>
+
+#include "pipe/p_state.h"
+
+struct zink_vertex_elements_state {
+ struct {
+ uint32_t binding;
+ VkVertexInputRate inputRate;
+ } bindings[PIPE_MAX_ATTRIBS];
+ VkVertexInputAttributeDescription attribs[PIPE_MAX_ATTRIBS];
+ uint32_t num_bindings, num_attribs;
+ uint8_t binding_map[PIPE_MAX_ATTRIBS];
+};
+
+struct zink_rasterizer_state {
+ struct pipe_rasterizer_state base;
+
+ VkBool32 depth_clamp;
+ VkBool32 rasterizer_discard;
+ VkFrontFace front_face;
+ VkPolygonMode polygon_mode;
+ VkCullModeFlags cull_mode;
+
+ bool offset_point, offset_line, offset_tri;
+ float offset_units, offset_clamp, offset_scale;
+ float line_width;
+};
+
+struct zink_blend_state {
+ VkPipelineColorBlendAttachmentState attachments[PIPE_MAX_COLOR_BUFS];
+
+ VkBool32 logicop_enable;
+ VkLogicOp logicop_func;
+
+ VkBool32 alpha_to_coverage;
+ VkBool32 alpha_to_one;
+
+ bool need_blend_constants;
+};
+
+struct zink_depth_stencil_alpha_state {
+ VkBool32 depth_test;
+ VkCompareOp depth_compare_op;
+
+ VkBool32 depth_bounds_test;
+ float min_depth_bounds, max_depth_bounds;
+
+ VkBool32 stencil_test;
+ VkStencilOpState stencil_front;
+ VkStencilOpState stencil_back;
+
+ VkBool32 depth_write;
+};
+
+void
+zink_context_state_init(struct pipe_context *pctx);
+
+#endif
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "zink_context.h"
+#include "zink_resource.h"
+#include "zink_screen.h"
+#include "zink_surface.h"
+
+#include "util/u_format.h"
+#include "util/u_inlines.h"
+#include "util/u_memory.h"
+
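+/* Creates a pipe_surface backed by a VkImageView covering the requested
+ * mip level and layer range of the resource.
+ */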
+static struct pipe_surface *
+zink_create_surface(struct pipe_context *pctx,
+ struct pipe_resource *pres,
+ const struct pipe_surface *templ)
+{
+ struct zink_screen *screen = zink_screen(pctx->screen);
+ unsigned int level = templ->u.tex.level;
+
+ struct zink_surface *surface = CALLOC_STRUCT(zink_surface);
+ if (!surface)
+ return NULL;
+
+ pipe_resource_reference(&surface->base.texture, pres);
+ pipe_reference_init(&surface->base.reference, 1);
+ surface->base.context = pctx;
+ surface->base.format = templ->format;
+ surface->base.width = u_minify(pres->width0, level);
+ surface->base.height = u_minify(pres->height0, level);
+ surface->base.u.tex.level = level;
+ surface->base.u.tex.first_layer = templ->u.tex.first_layer;
+ surface->base.u.tex.last_layer = templ->u.tex.last_layer;
+
+ struct zink_resource *res = zink_resource(pres);
+
+ VkImageViewCreateInfo ivci = {};
+ ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+ ivci.image = res->image;
+
+ switch (pres->target) {
+ case PIPE_TEXTURE_1D:
+ ivci.viewType = VK_IMAGE_VIEW_TYPE_1D;
+ break;
+
+ case PIPE_TEXTURE_1D_ARRAY:
+ ivci.viewType = VK_IMAGE_VIEW_TYPE_1D_ARRAY;
+ break;
+
+ case PIPE_TEXTURE_2D:
+ case PIPE_TEXTURE_RECT:
+ ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
+ break;
+
+ case PIPE_TEXTURE_2D_ARRAY:
+ ivci.viewType = VK_IMAGE_VIEW_TYPE_2D_ARRAY;
+ break;
+
+ case PIPE_TEXTURE_CUBE:
+ ivci.viewType = VK_IMAGE_VIEW_TYPE_CUBE;
+ break;
+
+ case PIPE_TEXTURE_CUBE_ARRAY:
+ ivci.viewType = VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
+ break;
+
+ case PIPE_TEXTURE_3D:
+ ivci.viewType = VK_IMAGE_VIEW_TYPE_3D;
+ break;
+
+ default:
+ unreachable("unsupported target");
+ }
+
+ ivci.format = zink_get_format(templ->format);
+
+ // TODO: format swizzles
+ ivci.components.r = VK_COMPONENT_SWIZZLE_R;
+ ivci.components.g = VK_COMPONENT_SWIZZLE_G;
+ ivci.components.b = VK_COMPONENT_SWIZZLE_B;
+ ivci.components.a = VK_COMPONENT_SWIZZLE_A;
+
+ ivci.subresourceRange.aspectMask = res->aspect;
+ ivci.subresourceRange.baseMipLevel = templ->u.tex.level;
+ ivci.subresourceRange.levelCount = 1;
+ ivci.subresourceRange.baseArrayLayer = templ->u.tex.first_layer;
+ ivci.subresourceRange.layerCount = 1 + templ->u.tex.last_layer - templ->u.tex.first_layer;
+
+ if (pres->target == PIPE_TEXTURE_CUBE ||
+ pres->target == PIPE_TEXTURE_CUBE_ARRAY)
+ ivci.subresourceRange.layerCount *= 6;
+
+ if (vkCreateImageView(screen->dev, &ivci, NULL,
+ &surface->image_view) != VK_SUCCESS) {
+ FREE(surface);
+ return NULL;
+ }
+
+ return &surface->base;
+}
+
+static void
+zink_surface_destroy(struct pipe_context *pctx,
+ struct pipe_surface *psurface)
+{
+ struct zink_screen *screen = zink_screen(pctx->screen);
+ struct zink_surface *surface = zink_surface(psurface);
+ pipe_resource_reference(&psurface->texture, NULL);
+ vkDestroyImageView(screen->dev, surface->image_view, NULL);
+ FREE(surface);
+}
+
+void
+zink_context_surface_init(struct pipe_context *context)
+{
+ context->create_surface = zink_create_surface;
+ context->surface_destroy = zink_surface_destroy;
+}
--- /dev/null
+/*
+ * Copyright 2018 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef ZINK_SURFACE_H
+#define ZINK_SURFACE_H
+
+#include "pipe/p_state.h"
+
+#include <vulkan/vulkan.h>
+
+struct pipe_context;
+
+struct zink_surface {
+ struct pipe_surface base;
+ VkImageView image_view;
+};
+
+static inline struct zink_surface *
+zink_surface(struct pipe_surface *pipe)
+{
+ return (struct zink_surface *)pipe;
+}
+
+void
+zink_context_surface_init(struct pipe_context *context);
+
+#endif
else
driver_lima = declare_dependency()
endif
+
+if with_gallium_zink
+ subdir('drivers/zink')
+else
+ driver_zink = declare_dependency()
+endif
+
if with_gallium_opencl
# TODO: this isn't really clover specific, but ATM clover is the only
# consumer
driver_swrast, driver_r300, driver_r600, driver_radeonsi, driver_nouveau,
driver_kmsro, driver_v3d, driver_vc4, driver_freedreno, driver_etnaviv,
driver_tegra, driver_i915, driver_svga, driver_virgl,
- driver_swr, driver_panfrost, driver_iris, driver_lima
+ driver_swr, driver_panfrost, driver_iris, driver_lima, driver_zink
],
# Will be deleted during installation, see install_megadrivers.py
install : true,
[with_gallium_r600, 'r600_dri.so'],
[with_gallium_svga, 'vmwgfx_dri.so'],
[with_gallium_virgl, 'virtio_gpu_dri.so'],
- [with_gallium_lima, 'lima_dri.so']]
+ [with_gallium_lima, 'lima_dri.so'],
+ [with_gallium_zink, 'zink_dri.so']]
if d[0]
gallium_dri_drivers += d[1]
endif