#include "zink_context.h"
-#include "zink_cmdbuf.h"
+#include "zink_batch.h"
#include "zink_compiler.h"
#include "zink_fence.h"
#include "zink_framebuffer.h"
+#include "zink_helpers.h"
#include "zink_pipeline.h"
-#include "zink_program.h"
+#include "zink_query.h"
#include "zink_render_pass.h"
#include "zink_resource.h"
#include "zink_screen.h"
#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
#include "util/u_debug.h"
-#include "util/u_format.h"
+#include "util/format/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"
#include "nir.h"
#include "util/u_memory.h"
-#include "util/u_prim.h"
#include "util/u_upload_mgr.h"
static void
{
struct zink_context *ctx = zink_context(pctx);
struct zink_screen *screen = zink_screen(pctx->screen);
- for (int i = 0; i < ARRAY_SIZE(ctx->cmdbufs); ++i)
- vkFreeCommandBuffers(screen->dev, ctx->cmdpool, 1, &ctx->cmdbufs[i].cmdbuf);
+
+ if (vkQueueWaitIdle(ctx->queue) != VK_SUCCESS)
+ debug_printf("vkQueueWaitIdle failed\n");
+
+ for (unsigned i = 0; i < ARRAY_SIZE(ctx->null_buffers); i++)
+ pipe_resource_reference(&ctx->null_buffers[i], NULL);
+
+ for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i) {
+ vkDestroyDescriptorPool(screen->dev, ctx->batches[i].descpool, NULL);
+ vkFreeCommandBuffers(screen->dev, ctx->cmdpool, 1, &ctx->batches[i].cmdbuf);
+ }
vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);
util_primconvert_destroy(ctx->primconvert);
FREE(ctx);
}
-static VkFilter
-filter(enum pipe_tex_filter filter)
-{
- switch (filter) {
- case PIPE_TEX_FILTER_NEAREST: return VK_FILTER_NEAREST;
- case PIPE_TEX_FILTER_LINEAR: return VK_FILTER_LINEAR;
- }
- unreachable("unexpected filter");
-}
-
static VkSamplerMipmapMode
sampler_mipmap_mode(enum pipe_tex_mipfilter filter)
{
unreachable("unexpected wrap");
}
+/* Translate a gallium pipe_compare_func into the corresponding Vulkan
+ * VkCompareOp. The two enums describe the same eight comparison
+ * functions, so the mapping is 1:1; any other value is a caller bug.
+ */
+static VkCompareOp
+compare_op(enum pipe_compare_func op)
+{
+   switch (op) {
+   case PIPE_FUNC_NEVER: return VK_COMPARE_OP_NEVER;
+   case PIPE_FUNC_LESS: return VK_COMPARE_OP_LESS;
+   case PIPE_FUNC_EQUAL: return VK_COMPARE_OP_EQUAL;
+   case PIPE_FUNC_LEQUAL: return VK_COMPARE_OP_LESS_OR_EQUAL;
+   case PIPE_FUNC_GREATER: return VK_COMPARE_OP_GREATER;
+   case PIPE_FUNC_NOTEQUAL: return VK_COMPARE_OP_NOT_EQUAL;
+   case PIPE_FUNC_GEQUAL: return VK_COMPARE_OP_GREATER_OR_EQUAL;
+   case PIPE_FUNC_ALWAYS: return VK_COMPARE_OP_ALWAYS;
+   }
+   unreachable("unexpected compare");
+}
+
static void *
zink_create_sampler_state(struct pipe_context *pctx,
const struct pipe_sampler_state *state)
VkSamplerCreateInfo sci = {};
sci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
- sci.magFilter = filter(state->mag_img_filter);
- sci.minFilter = filter(state->min_img_filter);
+ sci.magFilter = zink_filter(state->mag_img_filter);
+ sci.minFilter = zink_filter(state->min_img_filter);
if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) {
sci.mipmapMode = sampler_mipmap_mode(state->min_mip_filter);
sci.addressModeV = sampler_address_mode(state->wrap_t);
sci.addressModeW = sampler_address_mode(state->wrap_r);
sci.mipLodBias = state->lod_bias;
- sci.compareOp = VK_COMPARE_OP_NEVER; // TODO
+
+ if (state->compare_mode == PIPE_TEX_COMPARE_NONE)
+ sci.compareOp = VK_COMPARE_OP_NEVER;
+ else {
+ sci.compareOp = compare_op(state->compare_func);
+ sci.compareEnable = VK_TRUE;
+ }
+
sci.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; // TODO
+ sci.unnormalizedCoordinates = !state->normalized_coords;
if (state->max_anisotropy > 1) {
sci.maxAnisotropy = state->max_anisotropy;
sci.anisotropyEnable = VK_TRUE;
}
- VkSampler sampler;
- VkResult err = vkCreateSampler(screen->dev, &sci, NULL, &sampler);
- if (err != VK_SUCCESS)
+ VkSampler *sampler = CALLOC(1, sizeof(VkSampler));
+ if (!sampler)
return NULL;
+ if (vkCreateSampler(screen->dev, &sci, NULL, sampler) != VK_SUCCESS) {
+ FREE(sampler);
+ return NULL;
+ }
+
return sampler;
}
void **samplers)
{
struct zink_context *ctx = zink_context(pctx);
- for (unsigned i = 0; i < num_samplers; ++i)
- ctx->samplers[shader][start_slot + i] = (VkSampler)samplers[i];
+ for (unsigned i = 0; i < num_samplers; ++i) {
+ VkSampler *sampler = samplers[i];
+ ctx->sampler_states[shader][start_slot + i] = sampler;
+ ctx->samplers[shader][start_slot + i] = sampler ? *sampler : VK_NULL_HANDLE;
+ }
+ ctx->num_samplers[shader] = start_slot + num_samplers;
}
static void
zink_delete_sampler_state(struct pipe_context *pctx,
void *sampler_state)
{
- struct zink_screen *screen = zink_screen(pctx->screen);
- vkDestroySampler(screen->dev, sampler_state, NULL);
+ struct zink_batch *batch = zink_curr_batch(zink_context(pctx));
+ util_dynarray_append(&batch->zombie_samplers, VkSampler,
+ *(VkSampler *)sampler_state);
+ FREE(sampler_state);
}
case PIPE_TEXTURE_CUBE: return VK_IMAGE_VIEW_TYPE_CUBE;
case PIPE_TEXTURE_CUBE_ARRAY: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
case PIPE_TEXTURE_3D: return VK_IMAGE_VIEW_TYPE_3D;
- case PIPE_TEXTURE_RECT: return VK_IMAGE_VIEW_TYPE_2D; /* not sure */
+ case PIPE_TEXTURE_RECT: return VK_IMAGE_VIEW_TYPE_2D;
default:
unreachable("unexpected target");
}
}
}
+/* Choose the image aspect a sampler view of the given format should read.
+ * A sampled depth/stencil image view must select exactly one aspect, so
+ * prefer depth when the format has a depth component, else stencil;
+ * all other formats sample the color aspect.
+ */
+static VkImageAspectFlags
+sampler_aspect_from_format(enum pipe_format fmt)
+{
+   if (util_format_is_depth_or_stencil(fmt)) {
+      const struct util_format_description *desc = util_format_description(fmt);
+      if (util_format_has_depth(desc))
+         return VK_IMAGE_ASPECT_DEPTH_BIT;
+      /* depth/stencil format without depth must have stencil */
+      assert(util_format_has_stencil(desc));
+      return VK_IMAGE_ASPECT_STENCIL_BIT;
+   } else
+      return VK_IMAGE_ASPECT_COLOR_BIT;
+}
+
static struct pipe_sampler_view *
zink_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *pres,
const struct pipe_sampler_view *state)
ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
ivci.image = res->image;
ivci.viewType = image_view_type(state->target);
- ivci.format = zink_get_format(state->format);
+ ivci.format = zink_get_format(screen, state->format);
ivci.components.r = component_mapping(state->swizzle_r);
ivci.components.g = component_mapping(state->swizzle_g);
ivci.components.b = component_mapping(state->swizzle_b);
ivci.components.a = component_mapping(state->swizzle_a);
- ivci.subresourceRange.aspectMask = zink_aspect_from_format(state->format);
+
+ ivci.subresourceRange.aspectMask = sampler_aspect_from_format(state->format);
ivci.subresourceRange.baseMipLevel = state->u.tex.first_level;
ivci.subresourceRange.baseArrayLayer = state->u.tex.first_layer;
ivci.subresourceRange.levelCount = state->u.tex.last_level - state->u.tex.first_level + 1;
}
static void
-zink_destroy_sampler_view(struct pipe_context *pctx,
+zink_sampler_view_destroy(struct pipe_context *pctx,
struct pipe_sampler_view *pview)
{
struct zink_sampler_view *view = zink_sampler_view(pview);
else
nir = (struct nir_shader *)shader->ir.nir;
- return zink_compile_nir(zink_screen(pctx->screen), nir);
+ return zink_compile_nir(zink_screen(pctx->screen), nir, &shader->stream_output);
}
static void
{
assert(stage < PIPE_SHADER_COMPUTE);
ctx->gfx_stages[stage] = shader;
- ctx->dirty |= ZINK_DIRTY_PROGRAM;
+ ctx->dirty_program = true;
}
static void
zink_delete_vs_state(struct pipe_context *pctx,
void *cso)
{
- zink_shader_free(zink_screen(pctx->screen), cso);
+ zink_shader_free(zink_context(pctx), cso);
}
static void *
else
nir = (struct nir_shader *)shader->ir.nir;
- return zink_compile_nir(zink_screen(pctx->screen), nir);
+ return zink_compile_nir(zink_screen(pctx->screen), nir, NULL);
}
static void
zink_delete_fs_state(struct pipe_context *pctx,
void *cso)
{
- zink_shader_free(zink_screen(pctx->screen), cso);
+ zink_shader_free(zink_context(pctx), cso);
}
static void
if (buffers) {
for (int i = 0; i < num_buffers; ++i) {
const struct pipe_vertex_buffer *vb = buffers + i;
+ struct zink_resource *res = zink_resource(vb->buffer.resource);
+
ctx->gfx_pipeline_state.bindings[start_slot + i].stride = vb->stride;
+ if (res && res->needs_xfb_barrier) {
+ /* if we're binding a previously-used xfb buffer, we need cmd buffer synchronization to ensure
+ * that we use the right buffer data
+ */
+ pctx->flush(pctx, NULL, 0);
+ res->needs_xfb_barrier = false;
+ }
}
}
state[i].translate[2] - state[i].scale[2],
state[i].translate[2] + state[i].scale[2]
};
+ ctx->viewport_states[start_slot + i] = state[i];
ctx->viewports[start_slot + i] = viewport;
}
ctx->num_viewports = start_slot + num_viewports;
scissor.offset.y = states[i].miny;
scissor.extent.width = states[i].maxx - states[i].minx;
scissor.extent.height = states[i].maxy - states[i].miny;
+ ctx->scissor_states[start_slot + i] = states[i];
ctx->scissors[start_slot + i] = scissor;
}
- ctx->num_scissors = start_slot + num_scissors;
}
static void
if (cb) {
struct pipe_resource *buffer = cb->buffer;
unsigned offset = cb->buffer_offset;
- if (cb->user_buffer)
- u_upload_data(ctx->base.const_uploader, 0, cb->buffer_size, 64,
+ if (cb->user_buffer) {
+ struct zink_screen *screen = zink_screen(pctx->screen);
+ u_upload_data(ctx->base.const_uploader, 0, cb->buffer_size,
+ screen->props.limits.minUniformBufferOffsetAlignment,
cb->user_buffer, &offset, &buffer);
+ }
pipe_resource_reference(&ctx->ubos[shader][index].buffer, buffer);
ctx->ubos[shader][index].buffer_offset = offset;
&ctx->image_views[shader_type][start_slot + i],
views[i]);
}
+ ctx->num_image_views[shader_type] = start_slot + num_views;
}
static void
const struct pipe_stencil_ref *ref)
{
struct zink_context *ctx = zink_context(pctx);
- ctx->stencil_ref[0] = ref->ref_value[0];
- ctx->stencil_ref[1] = ref->ref_value[1];
+ ctx->stencil_ref = *ref;
}
static void
}
static struct zink_render_pass *
-get_render_pass(struct zink_context *ctx,
- const struct pipe_framebuffer_state *fb)
+get_render_pass(struct zink_context *ctx)
{
- struct zink_render_pass_state state;
+ struct zink_screen *screen = zink_screen(ctx->base.screen);
+ const struct pipe_framebuffer_state *fb = &ctx->fb_state;
+ struct zink_render_pass_state state = { 0 };
for (int i = 0; i < fb->nr_cbufs; i++) {
- struct zink_resource *cbuf = zink_resource(fb->cbufs[i]->texture);
- state.rts[i].format = cbuf->format;
+ struct pipe_surface *surf = fb->cbufs[i];
+ if (surf) {
+ state.rts[i].format = zink_get_format(screen, surf->format);
+ state.rts[i].samples = surf->nr_samples > 0 ? surf->nr_samples :
+ VK_SAMPLE_COUNT_1_BIT;
+ } else {
+ state.rts[i].format = VK_FORMAT_R8_UINT;
+ state.rts[i].samples = MAX2(fb->samples, 1);
+ }
}
state.num_cbufs = fb->nr_cbufs;
if (fb->zsbuf) {
struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
state.rts[fb->nr_cbufs].format = zsbuf->format;
+ state.rts[fb->nr_cbufs].samples = zsbuf->base.nr_samples > 0 ? zsbuf->base.nr_samples : VK_SAMPLE_COUNT_1_BIT;
}
state.have_zsbuf = fb->zsbuf != NULL;
- // TODO: cache instead!
- return zink_create_render_pass(zink_screen(ctx->base.screen), &state);
+ struct hash_entry *entry = _mesa_hash_table_search(ctx->render_pass_cache,
+ &state);
+ if (!entry) {
+ struct zink_render_pass *rp;
+ rp = zink_create_render_pass(screen, &state);
+ entry = _mesa_hash_table_insert(ctx->render_pass_cache, &state, rp);
+ if (!entry)
+ return NULL;
+ }
+
+ return entry->data;
}
static struct zink_framebuffer *
-get_framebuffer(struct zink_context *ctx,
- const struct pipe_framebuffer_state *fb,
- struct zink_render_pass *rp)
+create_framebuffer(struct zink_context *ctx)
{
- // TODO: cache!
- return zink_create_framebuffer(zink_screen(ctx->base.screen), fb, rp);
+ struct zink_screen *screen = zink_screen(ctx->base.screen);
+
+ struct zink_framebuffer_state state = {};
+ state.rp = get_render_pass(ctx);
+ for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
+ struct pipe_surface *psurf = ctx->fb_state.cbufs[i];
+ state.attachments[i] = zink_surface(psurf);
+ state.has_null_attachments |= !state.attachments[i];
+ }
+
+ state.num_attachments = ctx->fb_state.nr_cbufs;
+ if (ctx->fb_state.zsbuf) {
+ struct pipe_surface *psurf = ctx->fb_state.zsbuf;
+ state.attachments[state.num_attachments++] = zink_surface(psurf);
+ }
+
+ state.width = ctx->fb_state.width;
+ state.height = ctx->fb_state.height;
+ state.layers = MAX2(ctx->fb_state.layers, 1);
+ state.samples = ctx->fb_state.samples;
+
+ return zink_create_framebuffer(ctx, screen, &state);
}
static void
-zink_set_framebuffer_state(struct pipe_context *pctx,
- const struct pipe_framebuffer_state *state)
+framebuffer_state_buffer_barriers_setup(struct zink_context *ctx,
+ const struct pipe_framebuffer_state *state, struct zink_batch *batch)
{
- struct zink_context *ctx = zink_context(pctx);
- struct zink_screen *screen = zink_screen(pctx->screen);
+ for (int i = 0; i < state->nr_cbufs; i++) {
+ struct pipe_surface *surf = state->cbufs[i];
+ if (!surf)
+ surf = ctx->framebuffer->null_surface;
+ struct zink_resource *res = zink_resource(surf->texture);
+ if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
+ zink_resource_barrier(batch->cmdbuf, res, res->aspect,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+ }
- struct zink_render_pass *rp = get_render_pass(ctx, state);
- zink_render_pass_reference(screen, &ctx->gfx_pipeline_state.render_pass, rp);
+ if (state->zsbuf) {
+ struct zink_resource *res = zink_resource(state->zsbuf->texture);
+ if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
+ zink_resource_barrier(batch->cmdbuf, res, res->aspect,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
+ }
+}
- struct zink_framebuffer *fb = get_framebuffer(ctx, state, rp);
- zink_framebuffer_reference(screen, &ctx->framebuffer, fb);
- zink_framebuffer_reference(screen, &fb, NULL);
- zink_render_pass_reference(screen, &rp, NULL);
+void
+zink_begin_render_pass(struct zink_context *ctx, struct zink_batch *batch)
+{
+ struct zink_screen *screen = zink_screen(ctx->base.screen);
+ assert(batch == zink_curr_batch(ctx));
+ assert(ctx->gfx_pipeline_state.render_pass);
- ctx->gfx_pipeline_state.num_attachments = state->nr_cbufs;
+ struct pipe_framebuffer_state *fb_state = &ctx->fb_state;
- util_copy_framebuffer_state(&ctx->fb_state, state);
+ VkRenderPassBeginInfo rpbi = {};
+ rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ rpbi.renderPass = ctx->gfx_pipeline_state.render_pass->render_pass;
+ rpbi.renderArea.offset.x = 0;
+ rpbi.renderArea.offset.y = 0;
+ rpbi.renderArea.extent.width = fb_state->width;
+ rpbi.renderArea.extent.height = fb_state->height;
+ rpbi.clearValueCount = 0;
+ rpbi.pClearValues = NULL;
+ rpbi.framebuffer = ctx->framebuffer->fb;
- struct zink_cmdbuf *cmdbuf = zink_start_cmdbuf(ctx);
- if (!cmdbuf)
- return;
+ assert(ctx->gfx_pipeline_state.render_pass && ctx->framebuffer);
+ assert(!batch->rp || batch->rp == ctx->gfx_pipeline_state.render_pass);
+ assert(!batch->fb || batch->fb == ctx->framebuffer);
- for (int i = 0; i < state->nr_cbufs; i++) {
- struct zink_resource *res = zink_resource(state->cbufs[i]->texture);
- if (res->layout != VK_IMAGE_LAYOUT_GENERAL &&
- res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
- zink_resource_barrier(cmdbuf->cmdbuf, res, res->aspect,
- VK_IMAGE_LAYOUT_GENERAL);
- }
+ framebuffer_state_buffer_barriers_setup(ctx, fb_state, batch);
- if (state->zsbuf) {
- struct zink_resource *res = zink_resource(state->zsbuf->texture);
- if (res->layout != VK_IMAGE_LAYOUT_GENERAL &&
- res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
- zink_resource_barrier(cmdbuf->cmdbuf, res, res->aspect,
- VK_IMAGE_LAYOUT_GENERAL);
+ zink_render_pass_reference(screen, &batch->rp, ctx->gfx_pipeline_state.render_pass);
+ zink_framebuffer_reference(screen, &batch->fb, ctx->framebuffer);
+
+ vkCmdBeginRenderPass(batch->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
+}
+
+/* Submit the current batch and make the next batch slot current.
+ * An open render pass (batch->rp) is ended first, since a command buffer
+ * cannot be submitted with a render pass still active; the batch index
+ * then cycles round-robin over ctx->batches and recording restarts on
+ * the new current batch.
+ */
+static void
+flush_batch(struct zink_context *ctx)
+{
+   struct zink_batch *batch = zink_curr_batch(ctx);
+   if (batch->rp)
+      vkCmdEndRenderPass(batch->cmdbuf);
+
+   zink_end_batch(ctx, batch);
+
+   /* advance to the next batch, wrapping around */
+   ctx->curr_batch++;
+   if (ctx->curr_batch == ARRAY_SIZE(ctx->batches))
+      ctx->curr_batch = 0;
+
+   zink_start_batch(ctx, zink_curr_batch(ctx));
+}
+
+struct zink_batch *
+zink_batch_rp(struct zink_context *ctx)
+{
+ struct zink_batch *batch = zink_curr_batch(ctx);
+ if (!batch->rp) {
+ zink_begin_render_pass(ctx, batch);
+ assert(batch->rp);
}
+ return batch;
+}
- zink_end_cmdbuf(ctx, cmdbuf);
+/* Return the current batch, guaranteeing that no render pass is active
+ * on it. If one is, the batch is flushed and the fresh batch (which has
+ * no render pass) becomes current. Used before commands that are illegal
+ * inside a render pass (copies, barriers, etc.).
+ */
+struct zink_batch *
+zink_batch_no_rp(struct zink_context *ctx)
+{
+   struct zink_batch *batch = zink_curr_batch(ctx);
+   if (batch->rp) {
+      /* flush batch and get a new one */
+      flush_batch(ctx);
+      batch = zink_curr_batch(ctx);
+      assert(!batch->rp);
+   }
+   return batch;
}
static void
-zink_set_active_query_state(struct pipe_context *pctx, bool enable)
+zink_set_framebuffer_state(struct pipe_context *pctx,
+ const struct pipe_framebuffer_state *state)
{
+ struct zink_context *ctx = zink_context(pctx);
+ struct zink_screen *screen = zink_screen(pctx->screen);
+
+ util_copy_framebuffer_state(&ctx->fb_state, state);
+
+ struct zink_framebuffer *fb = ctx->framebuffer;
+ /* explicitly unref previous fb to ensure it gets destroyed */
+ if (fb)
+ zink_framebuffer_reference(screen, &fb, NULL);
+ fb = create_framebuffer(ctx);
+ zink_framebuffer_reference(screen, &ctx->framebuffer, fb);
+ zink_render_pass_reference(screen, &ctx->gfx_pipeline_state.render_pass, fb->rp);
+
+ ctx->gfx_pipeline_state.rast_samples = MAX2(state->samples, 1);
+ ctx->gfx_pipeline_state.num_attachments = state->nr_cbufs;
+
+ struct zink_batch *batch = zink_batch_no_rp(ctx);
+
+ framebuffer_state_buffer_barriers_setup(ctx, state, batch);
}
static void
memcpy(ctx->blend_constants, color->color, sizeof(float) * 4);
}
+/* pipe_context::set_sample_mask hook: record the sample mask in the
+ * graphics pipeline state so it is applied when the next pipeline is
+ * created/bound.
+ */
+static void
+zink_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
+{
+   struct zink_context *ctx = zink_context(pctx);
+   ctx->gfx_pipeline_state.sample_mask = sample_mask;
+}
+
static VkAccessFlags
-access_flags(VkImageLayout layout)
+access_src_flags(VkImageLayout layout)
{
switch (layout) {
case VK_IMAGE_LAYOUT_UNDEFINED:
return 0;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
- return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ return VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
- return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+ return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
return VK_ACCESS_SHADER_READ_BIT;
}
}
+/* Destination access mask for an image layout transition: the accesses
+ * performed in the new layout that must wait on the barrier.
+ * UNDEFINED and GENERAL impose no specific destination access here.
+ */
+static VkAccessFlags
+access_dst_flags(VkImageLayout layout)
+{
+   switch (layout) {
+   case VK_IMAGE_LAYOUT_UNDEFINED:
+   case VK_IMAGE_LAYOUT_GENERAL:
+      return 0;
+
+   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+      return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+      return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+
+   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
+      return VK_ACCESS_TRANSFER_READ_BIT;
+
+   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
+      return VK_ACCESS_TRANSFER_WRITE_BIT;
+
+   default:
+      unreachable("unexpected layout");
+   }
+}
+
+/* Pipeline stage that first uses an image in the given (new) layout;
+ * used as the dstStageMask of a layout-transition barrier. Unknown
+ * layouts conservatively wait at the bottom of the pipe.
+ */
+static VkPipelineStageFlags
+pipeline_dst_stage(VkImageLayout layout)
+{
+   switch (layout) {
+   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+      return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+      return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
+
+   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
+      return VK_PIPELINE_STAGE_TRANSFER_BIT;
+   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
+      return VK_PIPELINE_STAGE_TRANSFER_BIT;
+
+   default:
+      return VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
+   }
+}
+
+/* Pipeline stage that last used an image in the given (old) layout;
+ * used as the srcStageMask of a layout-transition barrier. Unknown
+ * layouts conservatively synchronize from the top of the pipe.
+ */
+static VkPipelineStageFlags
+pipeline_src_stage(VkImageLayout layout)
+{
+   switch (layout) {
+   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+      return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
+      return VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
+
+   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
+      return VK_PIPELINE_STAGE_TRANSFER_BIT;
+   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
+      return VK_PIPELINE_STAGE_TRANSFER_BIT;
+
+   default:
+      return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+   }
+}
+
+
void
zink_resource_barrier(VkCommandBuffer cmdbuf, struct zink_resource *res,
VkImageAspectFlags aspect, VkImageLayout new_layout)
VkImageMemoryBarrier imb = {
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
NULL,
- access_flags(res->layout),
- access_flags(new_layout),
+ access_src_flags(res->layout),
+ access_dst_flags(new_layout),
res->layout,
new_layout,
VK_QUEUE_FAMILY_IGNORED,
};
vkCmdPipelineBarrier(
cmdbuf,
- VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
- VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
+ pipeline_src_stage(res->layout),
+ pipeline_dst_stage(new_layout),
0,
0, NULL,
0, NULL,
static void
zink_clear(struct pipe_context *pctx,
unsigned buffers,
+ const struct pipe_scissor_state *scissor_state,
const union pipe_color_union *pcolor,
double depth, unsigned stencil)
{
struct zink_context *ctx = zink_context(pctx);
struct pipe_framebuffer_state *fb = &ctx->fb_state;
- struct zink_cmdbuf *cmdbuf = zink_start_cmdbuf(ctx);
- if (!cmdbuf)
- return;
+ /* FIXME: this is very inefficient; if no renderpass has been started yet,
+ * we should record the clear if it's full-screen, and apply it as we
+ * start the render-pass. Otherwise we can do a partial out-of-renderpass
+ * clear.
+ */
+ struct zink_batch *batch = zink_batch_rp(ctx);
+
+ VkClearAttachment attachments[1 + PIPE_MAX_COLOR_BUFS];
+ int num_attachments = 0;
- // first transition all images to a compatible layout
if (buffers & PIPE_CLEAR_COLOR) {
+ VkClearColorValue color;
+ color.float32[0] = pcolor->f[0];
+ color.float32[1] = pcolor->f[1];
+ color.float32[2] = pcolor->f[2];
+ color.float32[3] = pcolor->f[3];
+
for (unsigned i = 0; i < fb->nr_cbufs; i++) {
if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
continue;
- struct zink_resource *cbuf = zink_resource(fb->cbufs[i]->texture);
-
- if (cbuf->layout != VK_IMAGE_LAYOUT_GENERAL &&
- cbuf->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
- zink_resource_barrier(cmdbuf->cmdbuf, cbuf, cbuf->aspect,
- VK_IMAGE_LAYOUT_GENERAL);
+ attachments[num_attachments].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ attachments[num_attachments].colorAttachment = i;
+ attachments[num_attachments].clearValue.color = color;
+ ++num_attachments;
}
}
- VkImageAspectFlags depthStencilAspect = 0;
if (buffers & PIPE_CLEAR_DEPTHSTENCIL && fb->zsbuf) {
- struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
+ VkImageAspectFlags aspect = 0;
if (buffers & PIPE_CLEAR_DEPTH)
- depthStencilAspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
+ aspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
if (buffers & PIPE_CLEAR_STENCIL)
- depthStencilAspect |= VK_IMAGE_ASPECT_STENCIL_BIT;
+ aspect |= VK_IMAGE_ASPECT_STENCIL_BIT;
- if (zsbuf->layout != VK_IMAGE_LAYOUT_GENERAL &&
- zsbuf->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
- zink_resource_barrier(cmdbuf->cmdbuf, zsbuf, depthStencilAspect,
- VK_IMAGE_LAYOUT_GENERAL);
+ attachments[num_attachments].aspectMask = aspect;
+ attachments[num_attachments].clearValue.depthStencil.depth = depth;
+ attachments[num_attachments].clearValue.depthStencil.stencil = stencil;
+ ++num_attachments;
}
- VkClearColorValue color;
- color.float32[0] = pcolor->f[0];
- color.float32[1] = pcolor->f[1];
- color.float32[2] = pcolor->f[2];
- color.float32[3] = pcolor->f[3];
-
- if (buffers & PIPE_CLEAR_COLOR) {
- for (unsigned i = 0; i < fb->nr_cbufs; i++) {
- if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
- continue;
-
- struct zink_resource *cbuf = zink_resource(fb->cbufs[i]->texture);
-
- VkImageSubresourceRange range;
- range.aspectMask = cbuf->aspect;
- range.baseMipLevel = 0;
- range.levelCount = VK_REMAINING_MIP_LEVELS;
- range.baseArrayLayer = 0;
- range.layerCount = VK_REMAINING_ARRAY_LAYERS;
- vkCmdClearColorImage(cmdbuf->cmdbuf,
- cbuf->image, VK_IMAGE_LAYOUT_GENERAL,
- &color,
- 1, &range);
- }
- }
-
- if (depthStencilAspect) {
- struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
-
- VkClearDepthStencilValue zsvalue = { depth, stencil };
-
- VkImageSubresourceRange range;
- range.aspectMask = depthStencilAspect;
- range.baseMipLevel = 0;
- range.levelCount = VK_REMAINING_MIP_LEVELS;
- range.baseArrayLayer = 0;
- range.layerCount = VK_REMAINING_ARRAY_LAYERS;
-
- vkCmdClearDepthStencilImage(cmdbuf->cmdbuf,
- zsbuf->image, VK_IMAGE_LAYOUT_GENERAL,
- &zsvalue,
- 1, &range);
- }
-
- zink_end_cmdbuf(ctx, cmdbuf);
+ VkClearRect cr;
+ cr.rect.offset.x = 0;
+ cr.rect.offset.y = 0;
+ cr.rect.extent.width = fb->width;
+ cr.rect.extent.height = fb->height;
+ cr.baseArrayLayer = 0;
+ cr.layerCount = util_framebuffer_get_num_layers(fb);
+ vkCmdClearAttachments(batch->cmdbuf, num_attachments, attachments, 1, &cr);
}
VkShaderStageFlagBits
return stages[type];
}
-static VkDescriptorSet
-allocate_descriptor_set(struct zink_context *ctx, VkDescriptorSetLayout dsl)
-{
- struct zink_screen *screen = zink_screen(ctx->base.screen);
- VkDescriptorSetAllocateInfo dsai;
- memset((void *)&dsai, 0, sizeof(dsai));
- dsai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
- dsai.pNext = NULL;
- dsai.descriptorPool = ctx->descpool;
- dsai.descriptorSetCount = 1;
- dsai.pSetLayouts = &dsl;
-
- VkDescriptorSet desc_set;
- if (vkAllocateDescriptorSets(screen->dev, &dsai, &desc_set) != VK_SUCCESS) {
- if (vkResetDescriptorPool(screen->dev, ctx->descpool, 0) != VK_SUCCESS) {
- fprintf(stderr, "vkResetDescriptorPool failed\n");
- return VK_NULL_HANDLE;
- }
- if (vkAllocateDescriptorSets(screen->dev, &dsai, &desc_set) != VK_SUCCESS) {
- fprintf(stderr, "vkAllocateDescriptorSets failed\n");
- return VK_NULL_HANDLE;
- }
- }
-
- return desc_set;
-}
-
-static VkPrimitiveTopology
-zink_primitive_topology(enum pipe_prim_type mode)
-{
- switch (mode) {
- case PIPE_PRIM_POINTS:
- return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
-
- case PIPE_PRIM_LINES:
- return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
-
- case PIPE_PRIM_LINE_STRIP:
- return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
-
- case PIPE_PRIM_TRIANGLES:
- return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
-
- case PIPE_PRIM_TRIANGLE_STRIP:
- return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
-
- case PIPE_PRIM_TRIANGLE_FAN:
- return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN;
-
- default:
- unreachable("unexpected enum pipe_prim_type");
- }
-}
-
-static void
-zink_bind_vertex_buffers(VkCommandBuffer cmdbuf, struct zink_context *ctx)
-{
- VkBuffer buffers[PIPE_MAX_ATTRIBS];
- VkDeviceSize buffer_offsets[PIPE_MAX_ATTRIBS];
- struct zink_vertex_elements_state *elems = ctx->gfx_pipeline_state.element_state;
- for (unsigned i = 0; i < elems->num_bindings; i++) {
- struct pipe_vertex_buffer *vb = ctx->buffers + elems->binding_map[i];
- assert(vb && vb->buffer.resource);
- struct zink_resource *res = zink_resource(vb->buffer.resource);
- buffers[i] = res->buffer;
- buffer_offsets[i] = vb->buffer_offset;
- }
-
- if (elems->num_bindings > 0)
- vkCmdBindVertexBuffers(cmdbuf, 0, elems->num_bindings, buffers, buffer_offsets);
-}
-
-static void
-begin_render_pass(struct zink_screen *screen, struct zink_cmdbuf *cmdbuf,
- struct zink_render_pass *rp, struct zink_framebuffer *fb,
- unsigned width, unsigned height)
-{
- VkRenderPassBeginInfo rpbi = {};
- rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
- rpbi.renderPass = rp->render_pass;
- rpbi.renderArea.offset.x = 0;
- rpbi.renderArea.offset.y = 0;
- rpbi.renderArea.extent.width = width;
- rpbi.renderArea.extent.height = height;
- rpbi.clearValueCount = 0;
- rpbi.pClearValues = NULL;
- rpbi.framebuffer = fb->fb;
-
- assert(rp && fb);
- assert(!cmdbuf->rp && !cmdbuf->fb);
- zink_render_pass_reference(screen, &cmdbuf->rp, rp);
- zink_framebuffer_reference(screen, &cmdbuf->fb, fb);
-
- vkCmdBeginRenderPass(cmdbuf->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
-}
-
static uint32_t
hash_gfx_program(const void *key)
{
return memcmp(a, b, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1)) == 0;
}
-static struct zink_gfx_program *
-get_gfx_program(struct zink_context *ctx)
+static uint32_t
+hash_render_pass_state(const void *key)
{
- if (ctx->dirty & ZINK_DIRTY_PROGRAM) {
- struct hash_entry *entry = _mesa_hash_table_search(ctx->program_cache,
- ctx->gfx_stages);
- if (!entry) {
- struct zink_gfx_program *prog;
- prog = zink_create_gfx_program(zink_screen(ctx->base.screen)->dev,
- ctx->gfx_stages);
- entry = _mesa_hash_table_insert(ctx->program_cache, prog->stages, prog);
- if (!entry)
- return NULL;
- }
- ctx->curr_program = entry->data;
- ctx->dirty &= ~ZINK_DIRTY_PROGRAM;
- }
-
- assert(ctx->curr_program);
- return ctx->curr_program;
+ return _mesa_hash_data(key, sizeof(struct zink_render_pass_state));
}
-static void
-zink_draw_vbo(struct pipe_context *pctx,
- const struct pipe_draw_info *dinfo)
+static bool
+equals_render_pass_state(const void *a, const void *b)
{
- struct zink_context *ctx = zink_context(pctx);
- struct zink_screen *screen = zink_screen(pctx->screen);
- struct zink_rasterizer_state *rast_state = ctx->gfx_pipeline_state.rast_state;
-
- if (dinfo->mode >= PIPE_PRIM_QUADS ||
- dinfo->mode == PIPE_PRIM_LINE_LOOP) {
- if (!u_trim_pipe_prim(dinfo->mode, (unsigned *)&dinfo->count))
- return;
-
- util_primconvert_save_rasterizer_state(ctx->primconvert, &rast_state->base);
- util_primconvert_draw_vbo(ctx->primconvert, dinfo);
- return;
- }
-
- struct zink_gfx_program *gfx_program = get_gfx_program(ctx);
- if (!gfx_program)
- return;
-
- ctx->gfx_pipeline_state.primitive_topology = zink_primitive_topology(dinfo->mode);
-
- VkPipeline pipeline = zink_get_gfx_pipeline(screen->dev, gfx_program,
- &ctx->gfx_pipeline_state);
-
- bool depth_bias = false;
- switch (u_reduced_prim(dinfo->mode)) {
- case PIPE_PRIM_POINTS:
- depth_bias = rast_state->offset_point;
- break;
-
- case PIPE_PRIM_LINES:
- depth_bias = rast_state->offset_line;
- break;
-
- case PIPE_PRIM_TRIANGLES:
- depth_bias = rast_state->offset_tri;
- break;
-
- default:
- unreachable("unexpected reduced prim");
- }
-
- unsigned index_offset = 0;
- struct pipe_resource *index_buffer = NULL;
- if (dinfo->index_size > 0) {
- if (dinfo->has_user_indices) {
- if (!util_upload_index_buffer(pctx, dinfo, &index_buffer, &index_offset)) {
- debug_printf("util_upload_index_buffer() failed\n");
- return;
- }
- } else
- index_buffer = dinfo->index.resource;
- }
-
- struct zink_cmdbuf *cmdbuf = zink_start_cmdbuf(ctx);
- if (!cmdbuf)
- return;
-
- begin_render_pass(screen, cmdbuf, ctx->gfx_pipeline_state.render_pass,
- ctx->framebuffer,
- ctx->fb_state.width, ctx->fb_state.height);
-
- vkCmdSetViewport(cmdbuf->cmdbuf, 0, ctx->num_viewports, ctx->viewports);
-
- if (ctx->num_scissors)
- vkCmdSetScissor(cmdbuf->cmdbuf, 0, ctx->num_scissors, ctx->scissors);
- else if (ctx->fb_state.width && ctx->fb_state.height) {
- VkRect2D fb_scissor = {};
- fb_scissor.extent.width = ctx->fb_state.width;
- fb_scissor.extent.height = ctx->fb_state.height;
- vkCmdSetScissor(cmdbuf->cmdbuf, 0, 1, &fb_scissor);
- }
-
- vkCmdSetStencilReference(cmdbuf->cmdbuf, VK_STENCIL_FACE_FRONT_BIT, ctx->stencil_ref[0]);
- vkCmdSetStencilReference(cmdbuf->cmdbuf, VK_STENCIL_FACE_BACK_BIT, ctx->stencil_ref[1]);
-
- if (depth_bias)
- vkCmdSetDepthBias(cmdbuf->cmdbuf, rast_state->offset_units, rast_state->offset_clamp, rast_state->offset_scale);
- else
- vkCmdSetDepthBias(cmdbuf->cmdbuf, 0.0f, 0.0f, 0.0f);
-
- if (ctx->gfx_pipeline_state.blend_state->need_blend_constants)
- vkCmdSetBlendConstants(cmdbuf->cmdbuf, ctx->blend_constants);
-
- VkDescriptorSet desc_set = allocate_descriptor_set(ctx, gfx_program->dsl);
-
- VkWriteDescriptorSet wds[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS + PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
- VkDescriptorBufferInfo buffer_infos[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS];
- VkDescriptorImageInfo image_infos[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
- int num_wds = 0, num_buffer_info = 0, num_image_info = 0;
-
- for (int i = 0; i < ARRAY_SIZE(ctx->gfx_stages); i++) {
- struct zink_shader *shader = ctx->gfx_stages[i];
- if (!shader)
- continue;
-
- for (int j = 0; j < shader->num_bindings; j++) {
- int index = shader->bindings[j].index;
- if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
- assert(ctx->ubos[i][index].buffer_size > 0);
- assert(ctx->ubos[i][index].buffer);
- buffer_infos[num_buffer_info].buffer = zink_resource(ctx->ubos[i][index].buffer)->buffer;
- buffer_infos[num_buffer_info].offset = ctx->ubos[i][index].buffer_offset;
- buffer_infos[num_buffer_info].range = VK_WHOLE_SIZE;
- wds[num_wds].pBufferInfo = buffer_infos + num_buffer_info;
- ++num_buffer_info;
- } else {
- struct pipe_sampler_view *psampler_view = ctx->image_views[i][index];
- assert(psampler_view);
- struct zink_sampler_view *sampler_view = (struct zink_sampler_view *)psampler_view;
- struct zink_resource *resource = zink_resource(psampler_view->texture);
- image_infos[num_image_info].imageLayout = resource->layout;
- image_infos[num_image_info].imageView = sampler_view->image_view;
- image_infos[num_image_info].sampler = ctx->samplers[i][index];
- wds[num_wds].pImageInfo = image_infos + num_image_info;
- ++num_image_info;
- }
-
- wds[num_wds].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
- wds[num_wds].pNext = NULL;
- wds[num_wds].dstSet = desc_set;
- wds[num_wds].dstBinding = shader->bindings[j].binding;
- wds[num_wds].dstArrayElement = 0;
- wds[num_wds].descriptorCount = 1;
- wds[num_wds].descriptorType = shader->bindings[j].type;
- ++num_wds;
- }
- }
-
- vkUpdateDescriptorSets(screen->dev, num_wds, wds, 0, NULL);
-
- vkCmdBindPipeline(cmdbuf->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
- vkCmdBindDescriptorSets(cmdbuf->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS,
- gfx_program->layout, 0, 1, &desc_set, 0, NULL);
- zink_bind_vertex_buffers(cmdbuf->cmdbuf, ctx);
-
- if (dinfo->index_size > 0) {
- assert(dinfo->index_size != 1);
- VkIndexType index_type = dinfo->index_size == 2 ? VK_INDEX_TYPE_UINT16 : VK_INDEX_TYPE_UINT32;
- vkCmdBindIndexBuffer(cmdbuf->cmdbuf, zink_resource(index_buffer)->buffer, index_offset, index_type);
- vkCmdDrawIndexed(cmdbuf->cmdbuf,
- dinfo->count, dinfo->instance_count,
- dinfo->start, dinfo->index_bias, dinfo->start_instance);
- } else
- vkCmdDraw(cmdbuf->cmdbuf, dinfo->count, dinfo->instance_count, dinfo->start, dinfo->start_instance);
-
- vkCmdEndRenderPass(cmdbuf->cmdbuf);
-
- zink_end_cmdbuf(ctx, cmdbuf);
-
- if (dinfo->index_size > 0 && dinfo->has_user_indices)
- pipe_resource_reference(&index_buffer, NULL);
+ return memcmp(a, b, sizeof(struct zink_render_pass_state)) == 0;
}
static void
{
struct zink_context *ctx = zink_context(pctx);
- if (pfence)
- zink_fence_reference(zink_screen(pctx->screen), (struct zink_fence **)pfence,
- ctx->cmdbufs[0].fence);
-}
-
-static void
-zink_blit(struct pipe_context *pctx,
- const struct pipe_blit_info *info)
-{
- struct zink_context *ctx = zink_context(pctx);
- bool is_resolve = false;
- if (info->mask != PIPE_MASK_RGBA ||
- info->scissor_enable ||
- info->alpha_blend) {
- if (!util_blitter_is_blit_supported(ctx->blitter, info)) {
- debug_printf("blit unsupported %s -> %s\n",
- util_format_short_name(info->src.resource->format),
- util_format_short_name(info->dst.resource->format));
- return;
- }
-
- util_blitter_save_fragment_constant_buffer_slot(ctx->blitter, ctx->ubos[PIPE_SHADER_FRAGMENT]);
- util_blitter_save_vertex_buffer_slot(ctx->blitter, ctx->buffers);
- util_blitter_save_vertex_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_VERTEX]);
- util_blitter_save_fragment_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_FRAGMENT]);
- util_blitter_save_rasterizer(ctx->blitter, ctx->gfx_pipeline_state.rast_state);
-
- util_blitter_blit(ctx->blitter, info);
- }
-
- struct zink_resource *src = zink_resource(info->src.resource);
- struct zink_resource *dst = zink_resource(info->dst.resource);
-
- if (src->base.nr_samples > 1 && dst->base.nr_samples <= 1)
- is_resolve = true;
-
- struct zink_cmdbuf *cmdbuf = zink_start_cmdbuf(ctx);
- if (!cmdbuf)
- return;
-
- if (is_resolve) {
- VkImageResolve region = {};
-
- region.srcSubresource.aspectMask = src->aspect;
- region.srcSubresource.mipLevel = info->src.level;
- region.srcSubresource.baseArrayLayer = 0; // no clue
- region.srcSubresource.layerCount = 1; // no clue
- region.srcOffset.x = info->src.box.x;
- region.srcOffset.y = info->src.box.y;
- region.srcOffset.z = info->src.box.z;
-
- region.dstSubresource.aspectMask = dst->aspect;
- region.dstSubresource.mipLevel = info->dst.level;
- region.dstSubresource.baseArrayLayer = 0; // no clue
- region.dstSubresource.layerCount = 1; // no clue
- region.dstOffset.x = info->dst.box.x;
- region.dstOffset.y = info->dst.box.y;
- region.dstOffset.z = info->dst.box.z;
-
- region.extent.width = info->dst.box.width;
- region.extent.height = info->dst.box.height;
- region.extent.depth = info->dst.box.depth;
- vkCmdResolveImage(cmdbuf->cmdbuf, src->image, src->layout,
- dst->image, dst->layout,
- 1, &region);
-
- } else {
- if (dst->layout != VK_IMAGE_LAYOUT_GENERAL &&
- dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
- zink_resource_barrier(cmdbuf->cmdbuf, dst, dst->aspect,
- VK_IMAGE_LAYOUT_GENERAL);
-
- VkImageBlit region = {};
- region.srcSubresource.aspectMask = src->aspect;
- region.srcSubresource.mipLevel = info->src.level;
- region.srcOffsets[0].x = info->src.box.x;
- region.srcOffsets[0].y = info->src.box.y;
- region.srcOffsets[1].x = info->src.box.x + info->src.box.width;
- region.srcOffsets[1].y = info->src.box.y + info->src.box.height;
-
- if (src->base.array_size > 1) {
- region.srcOffsets[0].z = 0;
- region.srcOffsets[1].z = 1;
- region.srcSubresource.baseArrayLayer = info->src.box.z;
- region.srcSubresource.layerCount = info->src.box.depth;
- } else {
- region.srcOffsets[0].z = info->src.box.z;
- region.srcOffsets[1].z = info->src.box.z + info->src.box.depth;
- region.srcSubresource.baseArrayLayer = 0;
- region.srcSubresource.layerCount = 1;
- }
+ struct zink_batch *batch = zink_curr_batch(ctx);
+ flush_batch(ctx);
- region.dstSubresource.aspectMask = dst->aspect;
- region.dstSubresource.mipLevel = info->dst.level;
- region.dstOffsets[0].x = info->dst.box.x;
- region.dstOffsets[0].y = info->dst.box.y;
- region.dstOffsets[1].x = info->dst.box.x + info->dst.box.width;
- region.dstOffsets[1].y = info->dst.box.y + info->dst.box.height;
-
- if (dst->base.array_size > 1) {
- region.dstOffsets[0].z = 0;
- region.dstOffsets[1].z = 1;
- region.dstSubresource.baseArrayLayer = info->dst.box.z;
- region.dstSubresource.layerCount = info->dst.box.depth;
- } else {
- region.dstOffsets[0].z = info->dst.box.z;
- region.dstOffsets[1].z = info->dst.box.z + info->dst.box.depth;
- region.dstSubresource.baseArrayLayer = 0;
- region.dstSubresource.layerCount = 1;
- }
+ if (zink_screen(pctx->screen)->have_EXT_transform_feedback && ctx->num_so_targets)
+ ctx->dirty_so_targets = true;
- vkCmdBlitImage(cmdbuf->cmdbuf, src->image, src->layout,
- dst->image, dst->layout,
- 1, &region,
- filter(info->filter));
- }
- zink_end_cmdbuf(ctx, cmdbuf);
+ if (pfence)
+ zink_fence_reference(zink_screen(pctx->screen),
+ (struct zink_fence **)pfence,
+ batch->fence);
+
+ /* HACK:
+ * For some strange reason, we need to finish before presenting, or else
+ * we start rendering on top of the back-buffer for the next frame. This
+ * seems like a bug in the DRI-driver to me, because we really should
+ * be properly protected by fences here, and the back-buffer should
+ * either be swapped with the front-buffer, or blitted from. But for
+ * some strange reason, neither of these things happen.
+ */
+ if (flags & PIPE_FLUSH_END_OF_FRAME)
+ pctx->screen->fence_finish(pctx->screen, pctx,
+ (struct pipe_fence_handle *)batch->fence,
+ PIPE_TIMEOUT_INFINITE);
}
static void
region.extent.width = src_box->width;
region.extent.height = src_box->height;
- struct zink_cmdbuf *cmdbuf = zink_start_cmdbuf(ctx);
- if (!cmdbuf)
- return;
+ struct zink_batch *batch = zink_batch_no_rp(ctx);
+ zink_batch_reference_resoure(batch, src);
+ zink_batch_reference_resoure(batch, dst);
+
+ if (src->layout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
+ zink_resource_barrier(batch->cmdbuf, src, src->aspect,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
+ }
+
+ if (dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
+ zink_resource_barrier(batch->cmdbuf, dst, dst->aspect,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+ }
- vkCmdCopyImage(cmdbuf->cmdbuf, src->image, src->layout,
+ vkCmdCopyImage(batch->cmdbuf, src->image, src->layout,
dst->image, dst->layout,
1, &region);
- zink_end_cmdbuf(ctx, cmdbuf);
+ } else if (dst->base.target == PIPE_BUFFER &&
+ src->base.target == PIPE_BUFFER) {
+ VkBufferCopy region;
+ region.srcOffset = src_box->x;
+ region.dstOffset = dstx;
+ region.size = src_box->width;
+
+ struct zink_batch *batch = zink_batch_no_rp(ctx);
+ zink_batch_reference_resoure(batch, src);
+ zink_batch_reference_resoure(batch, dst);
+
+ vkCmdCopyBuffer(batch->cmdbuf, src->buffer, dst->buffer, 1, &region);
} else
debug_printf("zink: TODO resource copy\n");
}
+static struct pipe_stream_output_target *
+zink_create_stream_output_target(struct pipe_context *pctx,
+ struct pipe_resource *pres,
+ unsigned buffer_offset,
+ unsigned buffer_size)
+{
+ struct zink_so_target *t;
+ t = CALLOC_STRUCT(zink_so_target);
+ if (!t)
+ return NULL;
+
+ t->base.reference.count = 1;
+ t->base.context = pctx;
+ pipe_resource_reference(&t->base.buffer, pres);
+ t->base.buffer_offset = buffer_offset;
+ t->base.buffer_size = buffer_size;
+
+ /* using PIPE_BIND_CUSTOM here lets us create a custom pipe buffer resource,
+ * which allows us to differentiate and use VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT
+ * as we must for this case
+ */
+ t->counter_buffer = pipe_buffer_create(pctx->screen, PIPE_BIND_STREAM_OUTPUT | PIPE_BIND_CUSTOM, PIPE_USAGE_DEFAULT, 4);
+ if (!t->counter_buffer) {
+ FREE(t);
+ return NULL;
+ }
+
+ return &t->base;
+}
+
+static void
+zink_stream_output_target_destroy(struct pipe_context *pctx,
+ struct pipe_stream_output_target *psot)
+{
+ struct zink_so_target *t = (struct zink_so_target *)psot;
+ pipe_resource_reference(&t->counter_buffer, NULL);
+ pipe_resource_reference(&t->base.buffer, NULL);
+ FREE(t);
+}
+
+static void
+zink_set_stream_output_targets(struct pipe_context *pctx,
+ unsigned num_targets,
+ struct pipe_stream_output_target **targets,
+ const unsigned *offsets)
+{
+ struct zink_context *ctx = zink_context(pctx);
+
+ if (num_targets == 0) {
+ for (unsigned i = 0; i < ctx->num_so_targets; i++)
+ pipe_so_target_reference(&ctx->so_targets[i], NULL);
+ ctx->num_so_targets = 0;
+ } else {
+ for (unsigned i = 0; i < num_targets; i++)
+ pipe_so_target_reference(&ctx->so_targets[i], targets[i]);
+ for (unsigned i = num_targets; i < ctx->num_so_targets; i++)
+ pipe_so_target_reference(&ctx->so_targets[i], NULL);
+ ctx->num_so_targets = num_targets;
+
+ /* emit memory barrier on next draw for synchronization */
+ if (offsets[0] == (unsigned)-1)
+ ctx->xfb_barrier = true;
+ /* TODO: possibly avoid rebinding on resume if resuming from same buffers? */
+ ctx->dirty_so_targets = true;
+ }
+}
+
struct pipe_context *
zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
struct zink_screen *screen = zink_screen(pscreen);
struct zink_context *ctx = CALLOC_STRUCT(zink_context);
+ if (!ctx)
+ goto fail;
ctx->base.screen = pscreen;
ctx->base.priv = priv;
ctx->base.create_sampler_view = zink_create_sampler_view;
ctx->base.set_sampler_views = zink_set_sampler_views;
- ctx->base.sampler_view_destroy = zink_destroy_sampler_view;
+ ctx->base.sampler_view_destroy = zink_sampler_view_destroy;
ctx->base.create_vs_state = zink_create_vs_state;
ctx->base.bind_vs_state = zink_bind_vs_state;
ctx->base.set_framebuffer_state = zink_set_framebuffer_state;
ctx->base.set_stencil_ref = zink_set_stencil_ref;
ctx->base.set_clip_state = zink_set_clip_state;
- ctx->base.set_active_query_state = zink_set_active_query_state;
ctx->base.set_blend_color = zink_set_blend_color;
+ ctx->base.set_sample_mask = zink_set_sample_mask;
+
ctx->base.clear = zink_clear;
ctx->base.draw_vbo = zink_draw_vbo;
ctx->base.flush = zink_flush;
ctx->base.resource_copy_region = zink_resource_copy_region;
ctx->base.blit = zink_blit;
+ ctx->base.create_stream_output_target = zink_create_stream_output_target;
+ ctx->base.stream_output_target_destroy = zink_stream_output_target_destroy;
+ ctx->base.set_stream_output_targets = zink_set_stream_output_targets;
ctx->base.flush_resource = zink_flush_resource;
zink_context_surface_init(&ctx->base);
zink_context_resource_init(&ctx->base);
cbai.commandPool = ctx->cmdpool;
cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
cbai.commandBufferCount = 1;
- for (int i = 0; i < ARRAY_SIZE(ctx->cmdbufs); ++i)
- if (vkAllocateCommandBuffers(screen->dev, &cbai, &ctx->cmdbufs[i].cmdbuf) != VK_SUCCESS)
- goto fail;
VkDescriptorPoolSize sizes[] = {
- {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1000}
+ {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, ZINK_BATCH_DESC_SIZE},
+ {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, ZINK_BATCH_DESC_SIZE}
};
VkDescriptorPoolCreateInfo dpci = {};
dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
dpci.pPoolSizes = sizes;
dpci.poolSizeCount = ARRAY_SIZE(sizes);
dpci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
- dpci.maxSets = 1000;
+ dpci.maxSets = ZINK_BATCH_DESC_SIZE;
- if(vkCreateDescriptorPool(screen->dev, &dpci, 0, &ctx->descpool) != VK_SUCCESS)
- goto fail;
+ for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i) {
+ if (vkAllocateCommandBuffers(screen->dev, &cbai, &ctx->batches[i].cmdbuf) != VK_SUCCESS)
+ goto fail;
+
+ ctx->batches[i].resources = _mesa_set_create(NULL, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+ ctx->batches[i].sampler_views = _mesa_set_create(NULL,
+ _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+
+ if (!ctx->batches[i].resources || !ctx->batches[i].sampler_views)
+ goto fail;
+
+ util_dynarray_init(&ctx->batches[i].zombie_samplers, NULL);
+
+ if (vkCreateDescriptorPool(screen->dev, &dpci, 0,
+ &ctx->batches[i].descpool) != VK_SUCCESS)
+ goto fail;
+ }
vkGetDeviceQueue(screen->dev, screen->gfx_queue, 0, &ctx->queue);
- ctx->program_cache = _mesa_hash_table_create(NULL, hash_gfx_program, equals_gfx_program);
- if (!ctx->program_cache)
+ ctx->program_cache = _mesa_hash_table_create(NULL,
+ hash_gfx_program,
+ equals_gfx_program);
+ ctx->render_pass_cache = _mesa_hash_table_create(NULL,
+ hash_render_pass_state,
+ equals_render_pass_state);
+ if (!ctx->program_cache || !ctx->render_pass_cache)
goto fail;
- ctx->dirty = ZINK_DIRTY_PROGRAM;
+ const uint8_t data[] = { 0 };
+ ctx->dummy_buffer = pipe_buffer_create_with_data(&ctx->base,
+ PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_IMMUTABLE, sizeof(data), data);
+ if (!ctx->dummy_buffer)
+ goto fail;
+
+ ctx->dirty_program = true;
+
+ /* start the first batch */
+ zink_start_batch(ctx, zink_curr_batch(ctx));
return &ctx->base;