#include "zink_fence.h"
#include "zink_framebuffer.h"
#include "zink_helpers.h"
+#include "zink_program.h"
#include "zink_pipeline.h"
+#include "zink_query.h"
#include "zink_render_pass.h"
#include "zink_resource.h"
#include "zink_screen.h"
#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
#include "util/u_debug.h"
-#include "util/u_format.h"
+#include "util/format/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"
if (vkQueueWaitIdle(ctx->queue) != VK_SUCCESS)
debug_printf("vkQueueWaitIdle failed\n");
- for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i)
+ for (unsigned i = 0; i < ARRAY_SIZE(ctx->null_buffers); i++)
+ pipe_resource_reference(&ctx->null_buffers[i], NULL);
+
+ for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i) {
+ vkDestroyDescriptorPool(screen->dev, ctx->batches[i].descpool, NULL);
vkFreeCommandBuffers(screen->dev, ctx->cmdpool, 1, &ctx->batches[i].cmdbuf);
+ }
vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);
util_primconvert_destroy(ctx->primconvert);
if (state->compare_mode == PIPE_TEX_COMPARE_NONE)
sci.compareOp = VK_COMPARE_OP_NEVER;
- else
+ else {
sci.compareOp = compare_op(state->compare_func);
+ sci.compareEnable = VK_TRUE;
+ }
sci.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; // TODO
sci.unnormalizedCoordinates = !state->normalized_coords;
struct zink_context *ctx = zink_context(pctx);
for (unsigned i = 0; i < num_samplers; ++i) {
VkSampler *sampler = samplers[i];
+ ctx->sampler_states[shader][start_slot + i] = sampler;
ctx->samplers[shader][start_slot + i] = sampler ? *sampler : VK_NULL_HANDLE;
}
ctx->num_samplers[shader] = start_slot + num_samplers;
case PIPE_TEXTURE_CUBE: return VK_IMAGE_VIEW_TYPE_CUBE;
case PIPE_TEXTURE_CUBE_ARRAY: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
case PIPE_TEXTURE_3D: return VK_IMAGE_VIEW_TYPE_3D;
- case PIPE_TEXTURE_RECT: return VK_IMAGE_VIEW_TYPE_2D; /* not sure */
+ case PIPE_TEXTURE_RECT: return VK_IMAGE_VIEW_TYPE_2D;
default:
unreachable("unexpected target");
}
FREE(view);
}
-static void *
-zink_create_vs_state(struct pipe_context *pctx,
-                     const struct pipe_shader_state *shader)
-{
-   struct nir_shader *nir;
-   if (shader->type != PIPE_SHADER_IR_NIR)
-      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
-   else
-      nir = (struct nir_shader *)shader->ir.nir;
-
-   return zink_compile_nir(zink_screen(pctx->screen), nir);
-}
-
-static void
-bind_stage(struct zink_context *ctx, enum pipe_shader_type stage,
-           struct zink_shader *shader)
-{
-   assert(stage < PIPE_SHADER_COMPUTE);
-   ctx->gfx_stages[stage] = shader;
-   ctx->dirty_program = true;
-}
-
-static void
-zink_bind_vs_state(struct pipe_context *pctx,
-                   void *cso)
-{
-   bind_stage(zink_context(pctx), PIPE_SHADER_VERTEX, cso);
-}
-
-static void
-zink_delete_vs_state(struct pipe_context *pctx,
-                     void *cso)
-{
-   zink_shader_free(zink_screen(pctx->screen), cso);
-}
-
-static void *
-zink_create_fs_state(struct pipe_context *pctx,
-                     const struct pipe_shader_state *shader)
-{
-   struct nir_shader *nir;
-   if (shader->type != PIPE_SHADER_IR_NIR)
-      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
-   else
-      nir = (struct nir_shader *)shader->ir.nir;
-
-   return zink_compile_nir(zink_screen(pctx->screen), nir);
-}
-
-static void
-zink_bind_fs_state(struct pipe_context *pctx,
-                   void *cso)
-{
-   bind_stage(zink_context(pctx), PIPE_SHADER_FRAGMENT, cso);
-}
-
-static void
-zink_delete_fs_state(struct pipe_context *pctx,
-                     void *cso)
-{
-   zink_shader_free(zink_screen(pctx->screen), cso);
-}
-
+/* NOTE(review): the vs/fs shader-state hooks removed above are replaced by
+ * zink_program_init() (installed in zink_context_create) -- presumably they
+ * moved to zink_program.c; confirm against that file.
+ */
static void
zink_set_polygon_stipple(struct pipe_context *pctx,
const struct pipe_poly_stipple *ps)
if (buffers) {
for (int i = 0; i < num_buffers; ++i) {
const struct pipe_vertex_buffer *vb = buffers + i;
+ struct zink_resource *res = zink_resource(vb->buffer.resource);
+
ctx->gfx_pipeline_state.bindings[start_slot + i].stride = vb->stride;
+ if (res && res->needs_xfb_barrier) {
+ /* if we're binding a previously-used xfb buffer, we need cmd buffer synchronization to ensure
+ * that we use the right buffer data
+ */
+ pctx->flush(pctx, NULL, 0);
+ res->needs_xfb_barrier = false;
+ }
}
+ ctx->gfx_pipeline_state.hash = 0;
}
util_set_vertex_buffers_mask(ctx->buffers, &ctx->buffers_enabled_mask,
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
const struct pipe_framebuffer_state *fb = &ctx->fb_state;
- struct zink_render_pass_state state;
+ struct zink_render_pass_state state = { 0 };
for (int i = 0; i < fb->nr_cbufs; i++) {
- struct pipe_resource *res = fb->cbufs[i]->texture;
- state.rts[i].format = zink_get_format(screen, fb->cbufs[i]->format);
- state.rts[i].samples = res->nr_samples > 0 ? res->nr_samples :
- VK_SAMPLE_COUNT_1_BIT;
+ struct pipe_surface *surf = fb->cbufs[i];
+ if (surf) {
+ state.rts[i].format = zink_get_format(screen, surf->format);
+ state.rts[i].samples = surf->nr_samples > 0 ? surf->nr_samples :
+ VK_SAMPLE_COUNT_1_BIT;
+ } else {
+ state.rts[i].format = VK_FORMAT_R8_UINT;
+ state.rts[i].samples = MAX2(fb->samples, 1);
+ }
}
state.num_cbufs = fb->nr_cbufs;
}
static struct zink_framebuffer *
-get_framebuffer(struct zink_context *ctx)
+create_framebuffer(struct zink_context *ctx)
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
struct pipe_surface *psurf = ctx->fb_state.cbufs[i];
state.attachments[i] = zink_surface(psurf);
+ state.has_null_attachments |= !state.attachments[i];
}
state.num_attachments = ctx->fb_state.nr_cbufs;
state.width = ctx->fb_state.width;
state.height = ctx->fb_state.height;
state.layers = MAX2(ctx->fb_state.layers, 1);
+ state.samples = ctx->fb_state.samples;
- struct hash_entry *entry = _mesa_hash_table_search(ctx->framebuffer_cache,
- &state);
- if (!entry) {
- struct zink_framebuffer *fb = zink_create_framebuffer(screen, &state);
- entry = _mesa_hash_table_insert(ctx->framebuffer_cache, &state, fb);
- if (!entry)
- return NULL;
+ return zink_create_framebuffer(ctx, screen, &state);
+}
+
+static void
+framebuffer_state_buffer_barriers_setup(struct zink_context *ctx,
+                                        const struct pipe_framebuffer_state *state, struct zink_batch *batch)
+{
+   /* Transition every bound color attachment to COLOR_ATTACHMENT_OPTIMAL
+    * before rendering; an unbound slot falls back to the framebuffer's
+    * null surface stand-in.
+    */
+   for (int i = 0; i < state->nr_cbufs; i++) {
+      struct pipe_surface *surf = state->cbufs[i];
+      if (!surf)
+         surf = ctx->framebuffer->null_surface;
+      struct zink_resource *res = zink_resource(surf->texture);
+      if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
+         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
+                               VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
   }
-   return entry->data;
+   /* the depth/stencil attachment, if present, likewise needs its layout */
+   if (state->zsbuf) {
+      struct zink_resource *res = zink_resource(state->zsbuf->texture);
+      if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
+         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
+                               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
+   }
}
void
assert(!batch->rp || batch->rp == ctx->gfx_pipeline_state.render_pass);
assert(!batch->fb || batch->fb == ctx->framebuffer);
- for (int i = 0; i < fb_state->nr_cbufs; i++) {
- struct zink_resource *res = zink_resource(fb_state->cbufs[i]->texture);
- if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
- zink_resource_barrier(batch->cmdbuf, res, res->aspect,
- VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
- }
-
- if (fb_state->zsbuf) {
- struct zink_resource *res = zink_resource(fb_state->zsbuf->texture);
- if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
- zink_resource_barrier(batch->cmdbuf, res, res->aspect,
- VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
- }
+ framebuffer_state_buffer_barriers_setup(ctx, fb_state, batch);
zink_render_pass_reference(screen, &batch->rp, ctx->gfx_pipeline_state.render_pass);
zink_framebuffer_reference(screen, &batch->fb, ctx->framebuffer);
struct zink_context *ctx = zink_context(pctx);
struct zink_screen *screen = zink_screen(pctx->screen);
- VkSampleCountFlagBits rast_samples = VK_SAMPLE_COUNT_1_BIT;
- for (int i = 0; i < state->nr_cbufs; i++)
- rast_samples = MAX2(rast_samples, state->cbufs[i]->texture->nr_samples);
- if (state->zsbuf && state->zsbuf->texture->nr_samples)
- rast_samples = MAX2(rast_samples, state->zsbuf->texture->nr_samples);
-
util_copy_framebuffer_state(&ctx->fb_state, state);
- struct zink_framebuffer *fb = get_framebuffer(ctx);
+ struct zink_framebuffer *fb = ctx->framebuffer;
+ /* explicitly unref previous fb to ensure it gets destroyed */
+ if (fb)
+ zink_framebuffer_reference(screen, &fb, NULL);
+ fb = create_framebuffer(ctx);
zink_framebuffer_reference(screen, &ctx->framebuffer, fb);
zink_render_pass_reference(screen, &ctx->gfx_pipeline_state.render_pass, fb->rp);
- ctx->gfx_pipeline_state.rast_samples = rast_samples;
+ ctx->gfx_pipeline_state.rast_samples = MAX2(state->samples, 1);
ctx->gfx_pipeline_state.num_attachments = state->nr_cbufs;
+ ctx->gfx_pipeline_state.hash = 0;
struct zink_batch *batch = zink_batch_no_rp(ctx);
- for (int i = 0; i < state->nr_cbufs; i++) {
- struct zink_resource *res = zink_resource(state->cbufs[i]->texture);
- if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
- zink_resource_barrier(batch->cmdbuf, res, res->aspect,
- VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
- }
-
- if (state->zsbuf) {
- struct zink_resource *res = zink_resource(state->zsbuf->texture);
- if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
- zink_resource_barrier(batch->cmdbuf, res, res->aspect,
- VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
- }
+ framebuffer_state_buffer_barriers_setup(ctx, state, batch);
}
static void
{
struct zink_context *ctx = zink_context(pctx);
ctx->gfx_pipeline_state.sample_mask = sample_mask;
+ ctx->gfx_pipeline_state.hash = 0;
}
static VkAccessFlags
static void
zink_clear(struct pipe_context *pctx,
unsigned buffers,
+ const struct pipe_scissor_state *scissor_state,
const union pipe_color_union *pcolor,
double depth, unsigned stencil)
{
static uint32_t
hash_gfx_program(const void *key)
{
-   return _mesa_hash_data(key, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1));
+   /* key is an array of per-stage zink_shader pointers; ZINK_SHADER_COUNT is
+    * presumably the number of gfx stages tracked -- confirm in zink_program.h
+    */
+   return _mesa_hash_data(key, sizeof(struct zink_shader *) * (ZINK_SHADER_COUNT));
}
static bool
equals_gfx_program(const void *a, const void *b)
{
-   return memcmp(a, b, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1)) == 0;
+   /* must compare exactly the same span that hash_gfx_program hashes */
+   return memcmp(a, b, sizeof(struct zink_shader *) * (ZINK_SHADER_COUNT)) == 0;
}
static uint32_t
return memcmp(a, b, sizeof(struct zink_render_pass_state)) == 0;
}
-static uint32_t
-hash_framebuffer_state(const void *key)
-{
-   struct zink_framebuffer_state *s = (struct zink_framebuffer_state*)key;
-   return _mesa_hash_data(key, sizeof(struct zink_framebuffer_state) + sizeof(s->attachments) * s->num_attachments);
-}
-
-static bool
-equals_framebuffer_state(const void *a, const void *b)
-{
-   struct zink_framebuffer_state *s = (struct zink_framebuffer_state*)a;
-   return memcmp(a, b, sizeof(struct zink_framebuffer_state) + sizeof(s->attachments) * s->num_attachments) == 0;
-}
-
+/* NOTE(review): framebuffer-cache hash/equals helpers removed -- framebuffers
+ * are no longer cached in a hash table but created directly per state change
+ * (see create_framebuffer).
+ */
static void
zink_flush(struct pipe_context *pctx,
struct pipe_fence_handle **pfence,
struct zink_batch *batch = zink_curr_batch(ctx);
flush_batch(ctx);
+ if (zink_screen(pctx->screen)->have_EXT_transform_feedback && ctx->num_so_targets)
+ ctx->dirty_so_targets = true;
+
if (pfence)
zink_fence_reference(zink_screen(pctx->screen),
(struct zink_fence **)pfence,
debug_printf("zink: TODO resource copy\n");
}
+/* pipe_context::create_stream_output_target implementation.
+ *
+ * Wraps the given buffer range in a zink_so_target and allocates a small
+ * counter buffer for transform-feedback counter storage.  Returns NULL on
+ * allocation failure; the caller owns the returned target and releases it
+ * through stream_output_target_destroy.
+ */
+static struct pipe_stream_output_target *
+zink_create_stream_output_target(struct pipe_context *pctx,
+                                 struct pipe_resource *pres,
+                                 unsigned buffer_offset,
+                                 unsigned buffer_size)
+{
+   struct zink_so_target *t;
+   t = CALLOC_STRUCT(zink_so_target);
+   if (!t)
+      return NULL;
+
+   t->base.reference.count = 1;
+   t->base.context = pctx;
+   pipe_resource_reference(&t->base.buffer, pres);
+   t->base.buffer_offset = buffer_offset;
+   t->base.buffer_size = buffer_size;
+
+   /* using PIPE_BIND_CUSTOM here lets us create a custom pipe buffer resource,
+    * which allows us to differentiate and use VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT
+    * as we must for this case
+    */
+   t->counter_buffer = pipe_buffer_create(pctx->screen, PIPE_BIND_STREAM_OUTPUT | PIPE_BIND_CUSTOM, PIPE_USAGE_DEFAULT, 4);
+   if (!t->counter_buffer) {
+      /* drop the reference taken on pres above, or it would leak */
+      pipe_resource_reference(&t->base.buffer, NULL);
+      FREE(t);
+      return NULL;
+   }
+
+   return &t->base;
+}
+
+/* pipe_context::stream_output_target_destroy: release the counter buffer and
+ * the wrapped buffer reference, then free the target itself.
+ */
+static void
+zink_stream_output_target_destroy(struct pipe_context *pctx,
+                                  struct pipe_stream_output_target *psot)
+{
+   struct zink_so_target *t = (struct zink_so_target *)psot;
+   pipe_resource_reference(&t->counter_buffer, NULL);
+   pipe_resource_reference(&t->base.buffer, NULL);
+   FREE(t);
+}
+
+/* pipe_context::set_stream_output_targets implementation.
+ *
+ * Binds num_targets transform-feedback targets starting at slot 0 and
+ * releases any previously-bound targets beyond the new count.
+ */
+static void
+zink_set_stream_output_targets(struct pipe_context *pctx,
+                               unsigned num_targets,
+                               struct pipe_stream_output_target **targets,
+                               const unsigned *offsets)
+{
+   struct zink_context *ctx = zink_context(pctx);
+
+   if (num_targets == 0) {
+      /* unbind everything that was previously bound */
+      for (unsigned i = 0; i < ctx->num_so_targets; i++)
+         pipe_so_target_reference(&ctx->so_targets[i], NULL);
+      ctx->num_so_targets = 0;
+   } else {
+      for (unsigned i = 0; i < num_targets; i++)
+         pipe_so_target_reference(&ctx->so_targets[i], targets[i]);
+      for (unsigned i = num_targets; i < ctx->num_so_targets; i++)
+         pipe_so_target_reference(&ctx->so_targets[i], NULL);
+      ctx->num_so_targets = num_targets;
+
+      /* emit memory barrier on next draw for synchronization */
+      if (offsets[0] == (unsigned)-1)
+         ctx->xfb_barrier = true;
+      /* NOTE(review): only offsets[0] is inspected, but gallium supplies one
+       * offset per target -- confirm per-buffer append/resume is intended.
+       */
+      /* TODO: possibly avoid rebinding on resume if resuming from same buffers? */
+      ctx->dirty_so_targets = true;
+   }
+}
+
struct pipe_context *
zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
struct zink_screen *screen = zink_screen(pscreen);
struct zink_context *ctx = CALLOC_STRUCT(zink_context);
+ if (!ctx)
+ goto fail;
+
+ ctx->gfx_pipeline_state.hash = 0;
ctx->base.screen = pscreen;
ctx->base.priv = priv;
ctx->base.set_sampler_views = zink_set_sampler_views;
ctx->base.sampler_view_destroy = zink_sampler_view_destroy;
- ctx->base.create_vs_state = zink_create_vs_state;
- ctx->base.bind_vs_state = zink_bind_vs_state;
- ctx->base.delete_vs_state = zink_delete_vs_state;
-
- ctx->base.create_fs_state = zink_create_fs_state;
- ctx->base.bind_fs_state = zink_bind_fs_state;
- ctx->base.delete_fs_state = zink_delete_fs_state;
+ zink_program_init(ctx);
ctx->base.set_polygon_stipple = zink_set_polygon_stipple;
ctx->base.set_vertex_buffers = zink_set_vertex_buffers;
ctx->base.resource_copy_region = zink_resource_copy_region;
ctx->base.blit = zink_blit;
+ ctx->base.create_stream_output_target = zink_create_stream_output_target;
+ ctx->base.stream_output_target_destroy = zink_stream_output_target_destroy;
+ ctx->base.set_stream_output_targets = zink_set_stream_output_targets;
ctx->base.flush_resource = zink_flush_resource;
zink_context_surface_init(&ctx->base);
zink_context_resource_init(&ctx->base);
cbai.commandBufferCount = 1;
VkDescriptorPoolSize sizes[] = {
- {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, ZINK_BATCH_DESC_SIZE}
+ {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, ZINK_BATCH_DESC_SIZE},
+ {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, ZINK_BATCH_DESC_SIZE}
};
VkDescriptorPoolCreateInfo dpci = {};
dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
      ctx->batches[i].sampler_views = _mesa_set_create(NULL,
                                                       _mesa_hash_pointer,
                                                       _mesa_key_pointer_equal);
+      ctx->batches[i].programs = _mesa_set_create(NULL,
+                                                  _mesa_hash_pointer,
+                                                  _mesa_key_pointer_equal);
+      /* NOTE(review): the programs set allocation is not covered by the
+       * failure check below -- a failed _mesa_set_create would go unnoticed.
+       */
      if (!ctx->batches[i].resources || !ctx->batches[i].sampler_views)
         goto fail;
ctx->render_pass_cache = _mesa_hash_table_create(NULL,
hash_render_pass_state,
equals_render_pass_state);
- ctx->framebuffer_cache = _mesa_hash_table_create(NULL,
- hash_framebuffer_state,
- equals_framebuffer_state);
-
- if (!ctx->program_cache || !ctx->render_pass_cache ||
- !ctx->framebuffer_cache)
+ if (!ctx->program_cache || !ctx->render_pass_cache)
goto fail;
- ctx->dirty_program = true;
+ const uint8_t data[] = { 0 };
+ ctx->dummy_buffer = pipe_buffer_create_with_data(&ctx->base,
+ PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_IMMUTABLE, sizeof(data), data);
+ if (!ctx->dummy_buffer)
+ goto fail;
/* start the first batch */
zink_start_batch(ctx, zink_curr_batch(ctx));