#include "zink_fence.h"
#include "zink_framebuffer.h"
#include "zink_helpers.h"
+#include "zink_program.h"
#include "zink_pipeline.h"
+#include "zink_query.h"
#include "zink_render_pass.h"
#include "zink_resource.h"
#include "zink_screen.h"
if (vkQueueWaitIdle(ctx->queue) != VK_SUCCESS)
debug_printf("vkQueueWaitIdle failed\n");
- for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i)
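+ /* release the references held on the context's null buffers */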
+ for (unsigned i = 0; i < ARRAY_SIZE(ctx->null_buffers); i++)
+ pipe_resource_reference(&ctx->null_buffers[i], NULL);
+
+ for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i) {
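+ /* destroying the descriptor pool implicitly frees every set allocated from it */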
+ vkDestroyDescriptorPool(screen->dev, ctx->batches[i].descpool, NULL);
vkFreeCommandBuffers(screen->dev, ctx->cmdpool, 1, &ctx->batches[i].cmdbuf);
+ }
vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);
util_primconvert_destroy(ctx->primconvert);
FREE(view);
}
-static void *
-zink_create_vs_state(struct pipe_context *pctx,
- const struct pipe_shader_state *shader)
-{
- struct nir_shader *nir;
- if (shader->type != PIPE_SHADER_IR_NIR)
- nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
- else
- nir = (struct nir_shader *)shader->ir.nir;
-
- return zink_compile_nir(zink_screen(pctx->screen), nir, &shader->stream_output);
-}
-
-static void
-bind_stage(struct zink_context *ctx, enum pipe_shader_type stage,
- struct zink_shader *shader)
-{
- assert(stage < PIPE_SHADER_COMPUTE);
- ctx->gfx_stages[stage] = shader;
- ctx->dirty_program = true;
-}
-
-static void
-zink_bind_vs_state(struct pipe_context *pctx,
- void *cso)
-{
- bind_stage(zink_context(pctx), PIPE_SHADER_VERTEX, cso);
-}
-
-static void
-zink_delete_vs_state(struct pipe_context *pctx,
- void *cso)
-{
- zink_shader_free(zink_screen(pctx->screen), cso);
-}
-
-static void *
-zink_create_fs_state(struct pipe_context *pctx,
- const struct pipe_shader_state *shader)
-{
- struct nir_shader *nir;
- if (shader->type != PIPE_SHADER_IR_NIR)
- nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
- else
- nir = (struct nir_shader *)shader->ir.nir;
-
- return zink_compile_nir(zink_screen(pctx->screen), nir, NULL);
-}
-
-static void
-zink_bind_fs_state(struct pipe_context *pctx,
- void *cso)
-{
- bind_stage(zink_context(pctx), PIPE_SHADER_FRAGMENT, cso);
-}
-
-static void
-zink_delete_fs_state(struct pipe_context *pctx,
- void *cso)
-{
- zink_shader_free(zink_screen(pctx->screen), cso);
-}
-
static void
zink_set_polygon_stipple(struct pipe_context *pctx,
const struct pipe_poly_stipple *ps)
if (buffers) {
for (int i = 0; i < num_buffers; ++i) {
const struct pipe_vertex_buffer *vb = buffers + i;
+ struct zink_resource *res = zink_resource(vb->buffer.resource);
+
ctx->gfx_pipeline_state.bindings[start_slot + i].stride = vb->stride;
+ if (res && res->needs_xfb_barrier) {
+ /* if we're binding a previously-used xfb buffer, we need command-buffer
+ * synchronization to ensure that we read the correct buffer data
+ */
+ pctx->flush(pctx, NULL, 0);
+ res->needs_xfb_barrier = false;
+ }
}
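+ /* vertex-buffer strides are baked into the pipeline state, so invalidate the cached hash */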
+ ctx->gfx_pipeline_state.hash = 0;
}
util_set_vertex_buffers_mask(ctx->buffers, &ctx->buffers_enabled_mask,
for (int i = 0; i < fb->nr_cbufs; i++) {
struct pipe_surface *surf = fb->cbufs[i];
- state.rts[i].format = zink_get_format(screen, surf->format);
- state.rts[i].samples = surf->nr_samples > 0 ? surf->nr_samples :
- VK_SAMPLE_COUNT_1_BIT;
+ if (surf) {
+ state.rts[i].format = zink_get_format(screen, surf->format);
+ state.rts[i].samples = surf->nr_samples > 0 ? surf->nr_samples :
+ VK_SAMPLE_COUNT_1_BIT;
+ } else {
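+ /* this slot has no surface bound; use a harmless placeholder format so the render pass is still valid */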
+ state.rts[i].format = VK_FORMAT_R8_UINT;
+ state.rts[i].samples = MAX2(fb->samples, 1);
+ }
}
state.num_cbufs = fb->nr_cbufs;
for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
struct pipe_surface *psurf = ctx->fb_state.cbufs[i];
state.attachments[i] = zink_surface(psurf);
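+ /* record whether any slot lacks a surface so a null surface can be substituted later */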
+ state.has_null_attachments |= !state.attachments[i];
}
state.num_attachments = ctx->fb_state.nr_cbufs;
state.width = ctx->fb_state.width;
state.height = ctx->fb_state.height;
state.layers = MAX2(ctx->fb_state.layers, 1);
+ state.samples = ctx->fb_state.samples;
- return zink_create_framebuffer(screen, &state);
+ return zink_create_framebuffer(ctx, screen, &state);
+}
+
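+/* transition every framebuffer attachment to the layout the render pass
+ * expects, substituting the null surface for unbound color attachments
+ */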
+static void
+framebuffer_state_buffer_barriers_setup(struct zink_context *ctx,
+ const struct pipe_framebuffer_state *state, struct zink_batch *batch)
+{
+ for (int i = 0; i < state->nr_cbufs; i++) {
+ struct pipe_surface *surf = state->cbufs[i];
+ if (!surf)
+ surf = ctx->framebuffer->null_surface;
+ struct zink_resource *res = zink_resource(surf->texture);
+ if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
+ zink_resource_barrier(batch->cmdbuf, res, res->aspect,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+ }
+
+ if (state->zsbuf) {
+ struct zink_resource *res = zink_resource(state->zsbuf->texture);
+ if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
+ zink_resource_barrier(batch->cmdbuf, res, res->aspect,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
+ }
}
void
assert(!batch->rp || batch->rp == ctx->gfx_pipeline_state.render_pass);
assert(!batch->fb || batch->fb == ctx->framebuffer);
- for (int i = 0; i < fb_state->nr_cbufs; i++) {
- struct zink_resource *res = zink_resource(fb_state->cbufs[i]->texture);
- if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
- zink_resource_barrier(batch->cmdbuf, res, res->aspect,
- VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
- }
-
- if (fb_state->zsbuf) {
- struct zink_resource *res = zink_resource(fb_state->zsbuf->texture);
- if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
- zink_resource_barrier(batch->cmdbuf, res, res->aspect,
- VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
- }
+ framebuffer_state_buffer_barriers_setup(ctx, fb_state, batch);
zink_render_pass_reference(screen, &batch->rp, ctx->gfx_pipeline_state.render_pass);
zink_framebuffer_reference(screen, &batch->fb, ctx->framebuffer);
ctx->gfx_pipeline_state.rast_samples = MAX2(state->samples, 1);
ctx->gfx_pipeline_state.num_attachments = state->nr_cbufs;
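+ /* the attachment count and sample count are part of the pipeline state, so invalidate the cached hash */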
+ ctx->gfx_pipeline_state.hash = 0;
struct zink_batch *batch = zink_batch_no_rp(ctx);
- for (int i = 0; i < state->nr_cbufs; i++) {
- struct zink_resource *res = zink_resource(state->cbufs[i]->texture);
- if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
- zink_resource_barrier(batch->cmdbuf, res, res->aspect,
- VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
- }
-
- if (state->zsbuf) {
- struct zink_resource *res = zink_resource(state->zsbuf->texture);
- if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
- zink_resource_barrier(batch->cmdbuf, res, res->aspect,
- VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
- }
+ framebuffer_state_buffer_barriers_setup(ctx, state, batch);
}
static void
{
struct zink_context *ctx = zink_context(pctx);
ctx->gfx_pipeline_state.sample_mask = sample_mask;
+ ctx->gfx_pipeline_state.hash = 0;
}
static VkAccessFlags
static uint32_t
hash_gfx_program(const void *key)
{
- return _mesa_hash_data(key, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1));
+ return _mesa_hash_data(key, sizeof(struct zink_shader *) * (ZINK_SHADER_COUNT));
}
static bool
equals_gfx_program(const void *a, const void *b)
{
- return memcmp(a, b, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1)) == 0;
+ return memcmp(a, b, sizeof(struct zink_shader *) * (ZINK_SHADER_COUNT)) == 0;
}
static uint32_t
struct zink_batch *batch = zink_curr_batch(ctx);
flush_batch(ctx);
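+ /* xfb buffer bindings are recorded into the command buffer, so they must be re-emitted for the next batch */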
+ if (zink_screen(pctx->screen)->have_EXT_transform_feedback && ctx->num_so_targets)
+ ctx->dirty_so_targets = true;
+
if (pfence)
zink_fence_reference(zink_screen(pctx->screen),
(struct zink_fence **)pfence,
debug_printf("zink: TODO resource copy\n");
}
+static struct pipe_stream_output_target *
+zink_create_stream_output_target(struct pipe_context *pctx,
+ struct pipe_resource *pres,
+ unsigned buffer_offset,
+ unsigned buffer_size)
+{
+ struct zink_so_target *t;
+ t = CALLOC_STRUCT(zink_so_target);
+ if (!t)
+ return NULL;
+
+ t->base.reference.count = 1;
+ t->base.context = pctx;
+ pipe_resource_reference(&t->base.buffer, pres);
+ t->base.buffer_offset = buffer_offset;
+ t->base.buffer_size = buffer_size;
+
+ /* using PIPE_BIND_CUSTOM here lets us create a custom pipe buffer resource,
+ * which we can tell apart from ordinary buffers and create with
+ * VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT, as counter
+ * buffers require
+ */
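+ /* 4 bytes is enough: the counter buffer holds a single 32-bit byte count */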
+ t->counter_buffer = pipe_buffer_create(pctx->screen, PIPE_BIND_STREAM_OUTPUT | PIPE_BIND_CUSTOM, PIPE_USAGE_DEFAULT, 4);
+ if (!t->counter_buffer) {
+ FREE(t);
+ return NULL;
+ }
+
+ return &t->base;
+}
+
+static void
+zink_stream_output_target_destroy(struct pipe_context *pctx,
+ struct pipe_stream_output_target *psot)
+{
+ struct zink_so_target *t = (struct zink_so_target *)psot;
+ pipe_resource_reference(&t->counter_buffer, NULL);
+ pipe_resource_reference(&t->base.buffer, NULL);
+ FREE(t);
+}
+
+static void
+zink_set_stream_output_targets(struct pipe_context *pctx,
+ unsigned num_targets,
+ struct pipe_stream_output_target **targets,
+ const unsigned *offsets)
+{
+ struct zink_context *ctx = zink_context(pctx);
+
+ if (num_targets == 0) {
+ for (unsigned i = 0; i < ctx->num_so_targets; i++)
+ pipe_so_target_reference(&ctx->so_targets[i], NULL);
+ ctx->num_so_targets = 0;
+ } else {
+ for (unsigned i = 0; i < num_targets; i++)
+ pipe_so_target_reference(&ctx->so_targets[i], targets[i]);
+ for (unsigned i = num_targets; i < ctx->num_so_targets; i++)
+ pipe_so_target_reference(&ctx->so_targets[i], NULL);
+ ctx->num_so_targets = num_targets;
+
+ /* emit memory barrier on next draw for synchronization */
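+ /* an offset of -1 means the target is resuming from its previous counter value */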
+ if (offsets[0] == (unsigned)-1)
+ ctx->xfb_barrier = true;
+ /* TODO: possibly avoid rebinding on resume if resuming from the same buffers? */
+ ctx->dirty_so_targets = true;
+ }
+}
+
struct pipe_context *
zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
if (!ctx)
goto fail;
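+ /* a hash of 0 marks the pipeline state as not-yet-hashed */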
+ ctx->gfx_pipeline_state.hash = 0;
+
ctx->base.screen = pscreen;
ctx->base.priv = priv;
ctx->base.set_sampler_views = zink_set_sampler_views;
ctx->base.sampler_view_destroy = zink_sampler_view_destroy;
- ctx->base.create_vs_state = zink_create_vs_state;
- ctx->base.bind_vs_state = zink_bind_vs_state;
- ctx->base.delete_vs_state = zink_delete_vs_state;
-
- ctx->base.create_fs_state = zink_create_fs_state;
- ctx->base.bind_fs_state = zink_bind_fs_state;
- ctx->base.delete_fs_state = zink_delete_fs_state;
+ zink_program_init(ctx);
ctx->base.set_polygon_stipple = zink_set_polygon_stipple;
ctx->base.set_vertex_buffers = zink_set_vertex_buffers;
ctx->base.resource_copy_region = zink_resource_copy_region;
ctx->base.blit = zink_blit;
+ ctx->base.create_stream_output_target = zink_create_stream_output_target;
+ ctx->base.stream_output_target_destroy = zink_stream_output_target_destroy;
+ ctx->base.set_stream_output_targets = zink_set_stream_output_targets;
ctx->base.flush_resource = zink_flush_resource;
zink_context_surface_init(&ctx->base);
zink_context_resource_init(&ctx->base);
ctx->batches[i].sampler_views = _mesa_set_create(NULL,
_mesa_hash_pointer,
_mesa_key_pointer_equal);
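+ /* also track the programs each batch uses so they stay alive until the batch finishes */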
+ ctx->batches[i].programs = _mesa_set_create(NULL,
+ _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
- if (!ctx->batches[i].resources || !ctx->batches[i].sampler_views)
+ if (!ctx->batches[i].resources || !ctx->batches[i].sampler_views ||
+ !ctx->batches[i].programs)
goto fail;
if (!ctx->dummy_buffer)
goto fail;
- ctx->dirty_program = true;
-
/* start the first batch */
zink_start_batch(ctx, zink_curr_batch(ctx));