#include "zink_context.h"
-#include "zink_cmdbuf.h"
+#include "zink_batch.h"
#include "zink_compiler.h"
#include "zink_fence.h"
#include "zink_framebuffer.h"
+
+#include "util/ralloc.h"
if (vkQueueWaitIdle(ctx->queue) != VK_SUCCESS)
debug_printf("vkQueueWaitIdle failed\n");
- for (int i = 0; i < ARRAY_SIZE(ctx->cmdbufs); ++i)
- vkFreeCommandBuffers(screen->dev, ctx->cmdpool, 1, &ctx->cmdbufs[i].cmdbuf);
+ for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i)
+ vkFreeCommandBuffers(screen->dev, ctx->cmdpool, 1, &ctx->batches[i].cmdbuf);
vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);
util_primconvert_destroy(ctx->primconvert);
unreachable("unexpected wrap");
}
+/* Translate a gallium pipe_compare_func to the equivalent VkCompareOp.
+ * The two enums describe the same eight comparison functions, so this is
+ * a straight 1:1 mapping; any value outside the enum is a caller bug.
+ */
+static VkCompareOp
+compare_op(enum pipe_compare_func op)
+{
+ switch (op) {
+ case PIPE_FUNC_NEVER: return VK_COMPARE_OP_NEVER;
+ case PIPE_FUNC_LESS: return VK_COMPARE_OP_LESS;
+ case PIPE_FUNC_EQUAL: return VK_COMPARE_OP_EQUAL;
+ case PIPE_FUNC_LEQUAL: return VK_COMPARE_OP_LESS_OR_EQUAL;
+ case PIPE_FUNC_GREATER: return VK_COMPARE_OP_GREATER;
+ case PIPE_FUNC_NOTEQUAL: return VK_COMPARE_OP_NOT_EQUAL;
+ case PIPE_FUNC_GEQUAL: return VK_COMPARE_OP_GREATER_OR_EQUAL;
+ case PIPE_FUNC_ALWAYS: return VK_COMPARE_OP_ALWAYS;
+ }
+ unreachable("unexpected compare");
+}
+
static void *
zink_create_sampler_state(struct pipe_context *pctx,
const struct pipe_sampler_state *state)
sci.addressModeV = sampler_address_mode(state->wrap_t);
sci.addressModeW = sampler_address_mode(state->wrap_r);
sci.mipLodBias = state->lod_bias;
- sci.compareOp = VK_COMPARE_OP_NEVER; // TODO
+
+ if (state->compare_mode == PIPE_TEX_COMPARE_NONE)
+ sci.compareOp = VK_COMPARE_OP_NEVER;
+ else
+ sci.compareOp = compare_op(state->compare_func);
+
sci.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; // TODO
sci.unnormalizedCoordinates = !state->normalized_coords;
struct zink_context *ctx = zink_context(pctx);
for (unsigned i = 0; i < num_samplers; ++i)
ctx->samplers[shader][start_slot + i] = (VkSampler)samplers[i];
+ ctx->num_samplers[shader] = start_slot + num_samplers;
}
+/* Deleting a sampler must be deferred: the in-flight batch may still
+ * reference it on the GPU. Queue the VkSampler on the current batch's
+ * zombie list instead of destroying it here — presumably reaped when the
+ * batch completes (confirm in zink_batch.c).
+ */
static void
zink_delete_sampler_state(struct pipe_context *pctx,
void *sampler_state)
{
- struct zink_cmdbuf *cmdbuf = zink_context_curr_cmdbuf(zink_context(pctx));
- util_dynarray_append(&cmdbuf->zombie_samplers,
+ struct zink_batch *batch = zink_curr_batch(zink_context(pctx));
+ util_dynarray_append(&batch->zombie_samplers,
VkSampler, sampler_state);
}
}
static void
-zink_destroy_sampler_view(struct pipe_context *pctx,
+zink_sampler_view_destroy(struct pipe_context *pctx,
struct pipe_sampler_view *pview)
{
struct zink_sampler_view *view = zink_sampler_view(pview);
state[i].translate[2] - state[i].scale[2],
state[i].translate[2] + state[i].scale[2]
};
+ ctx->viewport_states[start_slot + i] = state[i];
ctx->viewports[start_slot + i] = viewport;
}
ctx->num_viewports = start_slot + num_viewports;
scissor.offset.y = states[i].miny;
scissor.extent.width = states[i].maxx - states[i].minx;
scissor.extent.height = states[i].maxy - states[i].miny;
+ ctx->scissor_states[start_slot + i] = states[i];
ctx->scissors[start_slot + i] = scissor;
}
ctx->num_scissors = start_slot + num_scissors;
&ctx->image_views[shader_type][start_slot + i],
views[i]);
}
+ ctx->num_image_views[shader_type] = start_slot + num_views;
}
static void
const struct pipe_stencil_ref *ref)
{
struct zink_context *ctx = zink_context(pctx);
- ctx->stencil_ref[0] = ref->ref_value[0];
- ctx->stencil_ref[1] = ref->ref_value[1];
+ ctx->stencil_ref = *ref;
}
static void
}
static struct zink_render_pass *
-get_render_pass(struct zink_context *ctx,
-                const struct pipe_framebuffer_state *fb)
+get_render_pass(struct zink_context *ctx)
{
+   struct zink_screen *screen = zink_screen(ctx->base.screen);
+   const struct pipe_framebuffer_state *fb = &ctx->fb_state;
-   struct zink_render_pass_state state;
+   /* Zero-initialize: the cache hashes and memcmp's the raw bytes of this
+    * struct, so padding and unused rts[] slots must be deterministic.
+    */
+   struct zink_render_pass_state state = {};
for (int i = 0; i < fb->nr_cbufs; i++) {
struct zink_resource *cbuf = zink_resource(fb->cbufs[i]->texture);
state.rts[i].format = cbuf->format;
+      state.rts[i].samples = cbuf->base.nr_samples > 0 ? cbuf->base.nr_samples : VK_SAMPLE_COUNT_1_BIT;
}
state.num_cbufs = fb->nr_cbufs;
if (fb->zsbuf) {
struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
state.rts[fb->nr_cbufs].format = zsbuf->format;
+      state.rts[fb->nr_cbufs].samples = zsbuf->base.nr_samples > 0 ? zsbuf->base.nr_samples : VK_SAMPLE_COUNT_1_BIT;
}
state.have_zsbuf = fb->zsbuf != NULL;
-   // TODO: cache instead!
-   return zink_create_render_pass(zink_screen(ctx->base.screen), &state);
+   struct hash_entry *entry = _mesa_hash_table_search(ctx->render_pass_cache,
+                                                      &state);
+   if (!entry) {
+      struct zink_render_pass *rp = zink_create_render_pass(screen, &state);
+      if (!rp)
+         return NULL;
+
+      /* The cache outlives this stack frame; _mesa_hash_table_insert stores
+       * the key pointer without copying, so insert a heap-allocated copy of
+       * the key — never &state itself, which would dangle on return.
+       */
+      struct zink_render_pass_state *key =
+         ralloc(NULL, struct zink_render_pass_state);
+      if (!key)
+         return NULL;
+      *key = state;
+
+      entry = _mesa_hash_table_insert(ctx->render_pass_cache, key, rp);
+      if (!entry)
+         return NULL;
+   }
+
+   return entry->data;
}
static struct zink_framebuffer *
-get_framebuffer(struct zink_context *ctx,
-                const struct pipe_framebuffer_state *fb,
-                struct zink_render_pass *rp)
+/* Look up (or create and cache) a VkFramebuffer matching the context's
+ * current framebuffer state. Returns NULL on allocation failure.
+ */
+get_framebuffer(struct zink_context *ctx)
+{
+   struct zink_screen *screen = zink_screen(ctx->base.screen);
+
+   /* zero-init: the struct is hashed/compared as raw bytes */
+   struct zink_framebuffer_state state = {};
+   state.rp = get_render_pass(ctx);
+   for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
+      struct pipe_surface *psurf = ctx->fb_state.cbufs[i];
+      state.attachments[i] = zink_surface(psurf);
+   }
+
+   state.num_attachments = ctx->fb_state.nr_cbufs;
+   if (ctx->fb_state.zsbuf) {
+      struct pipe_surface *psurf = ctx->fb_state.zsbuf;
+      state.attachments[state.num_attachments++] = zink_surface(psurf);
+   }
+
+   state.width = ctx->fb_state.width;
+   state.height = ctx->fb_state.height;
+   state.layers = MAX2(ctx->fb_state.layers, 1);
+
+   struct hash_entry *entry = _mesa_hash_table_search(ctx->framebuffer_cache,
+                                                      &state);
+   if (!entry) {
+      struct zink_framebuffer *fb = zink_create_framebuffer(screen, &state);
+      if (!fb)
+         return NULL;
+
+      /* Same as get_render_pass: the table keeps the key pointer, so a
+       * stack-allocated key would dangle — insert a heap copy instead.
+       */
+      struct zink_framebuffer_state *key =
+         ralloc(NULL, struct zink_framebuffer_state);
+      if (!key)
+         return NULL;
+      *key = state;
+
+      entry = _mesa_hash_table_insert(ctx->framebuffer_cache, key, fb);
+      if (!entry)
+         return NULL;
+   }
+
+   return entry->data;
+}
+
+/* Begin the context's current render pass on 'batch' and make the batch
+ * hold references to the render pass and framebuffer so they survive
+ * until the batch finishes. No clears are recorded here
+ * (clearValueCount == 0); zink_clear uses vkCmdClearAttachments instead.
+ * Asserts that the batch carries no conflicting rp/fb already.
+ */
+void
+zink_begin_render_pass(struct zink_context *ctx, struct zink_batch *batch)
+{
+ struct zink_screen *screen = zink_screen(ctx->base.screen);
+ assert(batch == zink_curr_batch(ctx));
+ assert(ctx->gfx_pipeline_state.render_pass);
+
+ VkRenderPassBeginInfo rpbi = {};
+ rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
+ rpbi.renderPass = ctx->gfx_pipeline_state.render_pass->render_pass;
+ rpbi.renderArea.offset.x = 0;
+ rpbi.renderArea.offset.y = 0;
+ rpbi.renderArea.extent.width = ctx->fb_state.width;
+ rpbi.renderArea.extent.height = ctx->fb_state.height;
+ rpbi.clearValueCount = 0;
+ rpbi.pClearValues = NULL;
+ rpbi.framebuffer = ctx->framebuffer->fb;
+
+ assert(ctx->gfx_pipeline_state.render_pass && ctx->framebuffer);
+ assert(!batch->rp || batch->rp == ctx->gfx_pipeline_state.render_pass);
+ assert(!batch->fb || batch->fb == ctx->framebuffer);
+
+ zink_render_pass_reference(screen, &batch->rp, ctx->gfx_pipeline_state.render_pass);
+ zink_framebuffer_reference(screen, &batch->fb, ctx->framebuffer);
+
+ vkCmdBeginRenderPass(batch->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
+}
+
+/* Submit the current batch and rotate to the next one. Any open render
+ * pass is ended first; the batch ring-buffer index wraps round-robin and
+ * the next batch is (re)started so it is ready to record into.
+ */
+static void
+flush_batch(struct zink_context *ctx)
{
- // TODO: cache!
- return zink_create_framebuffer(zink_screen(ctx->base.screen), fb, rp);
+ struct zink_batch *batch = zink_curr_batch(ctx);
+ if (batch->rp)
+ vkCmdEndRenderPass(batch->cmdbuf);
+
+ zink_end_batch(ctx, batch);
+
+ ctx->curr_batch++;
+ if (ctx->curr_batch == ARRAY_SIZE(ctx->batches))
+ ctx->curr_batch = 0;
+
+ zink_start_batch(ctx, zink_curr_batch(ctx));
+}
+
+/* Return the current batch with its render pass begun — commands that
+ * must execute inside a render pass (draws, attachment clears) use this.
+ */
+struct zink_batch *
+zink_batch_rp(struct zink_context *ctx)
+{
+ struct zink_batch *batch = zink_curr_batch(ctx);
+ if (!batch->rp) {
+ zink_begin_render_pass(ctx, batch);
+ assert(batch->rp);
+ }
+ return batch;
+}
+
+/* Return a batch that is guaranteed to be outside a render pass — needed
+ * for barriers, copies, blits etc. If the current batch already has one,
+ * it is flushed and a fresh batch is started instead.
+ */
+struct zink_batch *
+zink_batch_no_rp(struct zink_context *ctx)
+{
+ struct zink_batch *batch = zink_curr_batch(ctx);
+ if (batch->rp) {
+ /* flush batch and get a new one */
+ flush_batch(ctx);
+ batch = zink_curr_batch(ctx);
+ assert(!batch->rp);
+ }
+ return batch;
+}
static void
struct zink_context *ctx = zink_context(pctx);
struct zink_screen *screen = zink_screen(pctx->screen);
- struct zink_render_pass *rp = get_render_pass(ctx, state);
- zink_render_pass_reference(screen, &ctx->gfx_pipeline_state.render_pass, rp);
+ VkSampleCountFlagBits rast_samples = VK_SAMPLE_COUNT_1_BIT;
+ for (int i = 0; i < state->nr_cbufs; i++)
+ rast_samples = MAX2(rast_samples, state->cbufs[i]->texture->nr_samples);
+ if (state->zsbuf && state->zsbuf->texture->nr_samples)
+ rast_samples = MAX2(rast_samples, state->zsbuf->texture->nr_samples);
- struct zink_framebuffer *fb = get_framebuffer(ctx, state, rp);
+ util_copy_framebuffer_state(&ctx->fb_state, state);
+
+ struct zink_framebuffer *fb = get_framebuffer(ctx);
zink_framebuffer_reference(screen, &ctx->framebuffer, fb);
- zink_framebuffer_reference(screen, &fb, NULL);
- zink_render_pass_reference(screen, &rp, NULL);
+ zink_render_pass_reference(screen, &ctx->gfx_pipeline_state.render_pass, fb->rp);
+ ctx->gfx_pipeline_state.rast_samples = rast_samples;
ctx->gfx_pipeline_state.num_attachments = state->nr_cbufs;
- util_copy_framebuffer_state(&ctx->fb_state, state);
-
- struct zink_cmdbuf *cmdbuf = zink_start_cmdbuf(ctx);
- if (!cmdbuf)
- return;
+ struct zink_batch *batch = zink_batch_no_rp(ctx);
for (int i = 0; i < state->nr_cbufs; i++) {
struct zink_resource *res = zink_resource(state->cbufs[i]->texture);
if (res->layout != VK_IMAGE_LAYOUT_GENERAL &&
res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
- zink_resource_barrier(cmdbuf->cmdbuf, res, res->aspect,
+ zink_resource_barrier(batch->cmdbuf, res, res->aspect,
VK_IMAGE_LAYOUT_GENERAL);
}
struct zink_resource *res = zink_resource(state->zsbuf->texture);
if (res->layout != VK_IMAGE_LAYOUT_GENERAL &&
res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
- zink_resource_barrier(cmdbuf->cmdbuf, res, res->aspect,
+ zink_resource_barrier(batch->cmdbuf, res, res->aspect,
VK_IMAGE_LAYOUT_GENERAL);
}
-
- zink_end_cmdbuf(ctx, cmdbuf);
}
static void
memcpy(ctx->blend_constants, color->color, sizeof(float) * 4);
}
+/* pipe_context::set_sample_mask — stash the mask in the pipeline state;
+ * it is consumed when the graphics pipeline is (re)built.
+ */
+static void
+zink_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
+{
+ struct zink_context *ctx = zink_context(pctx);
+ ctx->gfx_pipeline_state.sample_mask = sample_mask;
+}
+
static VkAccessFlags
access_flags(VkImageLayout layout)
{
struct zink_context *ctx = zink_context(pctx);
struct pipe_framebuffer_state *fb = &ctx->fb_state;
- struct zink_cmdbuf *cmdbuf = zink_start_cmdbuf(ctx);
- if (!cmdbuf)
- return;
+ /* FIXME: this is very inefficient; if no renderpass has been started yet,
+ * we should record the clear if it's full-screen, and apply it as we
+ * start the render-pass. Otherwise we can do a partial out-of-renderpass
+ * clear.
+ */
+ struct zink_batch *batch = zink_batch_rp(ctx);
+
+ VkClearAttachment attachments[1 + PIPE_MAX_COLOR_BUFS];
+ int num_attachments = 0;
- // first transition all images to a compatible layout
if (buffers & PIPE_CLEAR_COLOR) {
+ VkClearColorValue color;
+ color.float32[0] = pcolor->f[0];
+ color.float32[1] = pcolor->f[1];
+ color.float32[2] = pcolor->f[2];
+ color.float32[3] = pcolor->f[3];
+
for (unsigned i = 0; i < fb->nr_cbufs; i++) {
if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
continue;
- struct zink_resource *cbuf = zink_resource(fb->cbufs[i]->texture);
-
- if (cbuf->layout != VK_IMAGE_LAYOUT_GENERAL &&
- cbuf->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
- zink_resource_barrier(cmdbuf->cmdbuf, cbuf, cbuf->aspect,
- VK_IMAGE_LAYOUT_GENERAL);
+ attachments[num_attachments].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+ attachments[num_attachments].colorAttachment = i;
+ attachments[num_attachments].clearValue.color = color;
+ ++num_attachments;
}
}
- VkImageAspectFlags depthStencilAspect = 0;
if (buffers & PIPE_CLEAR_DEPTHSTENCIL && fb->zsbuf) {
- struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
+ VkImageAspectFlags aspect = 0;
if (buffers & PIPE_CLEAR_DEPTH)
- depthStencilAspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
+ aspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
if (buffers & PIPE_CLEAR_STENCIL)
- depthStencilAspect |= VK_IMAGE_ASPECT_STENCIL_BIT;
+ aspect |= VK_IMAGE_ASPECT_STENCIL_BIT;
- if (zsbuf->layout != VK_IMAGE_LAYOUT_GENERAL &&
- zsbuf->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
- zink_resource_barrier(cmdbuf->cmdbuf, zsbuf, depthStencilAspect,
- VK_IMAGE_LAYOUT_GENERAL);
+ attachments[num_attachments].aspectMask = aspect;
+ attachments[num_attachments].clearValue.depthStencil.depth = depth;
+ attachments[num_attachments].clearValue.depthStencil.stencil = stencil;
+ ++num_attachments;
}
- VkClearColorValue color;
- color.float32[0] = pcolor->f[0];
- color.float32[1] = pcolor->f[1];
- color.float32[2] = pcolor->f[2];
- color.float32[3] = pcolor->f[3];
-
- if (buffers & PIPE_CLEAR_COLOR) {
- for (unsigned i = 0; i < fb->nr_cbufs; i++) {
- if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
- continue;
-
- struct zink_resource *cbuf = zink_resource(fb->cbufs[i]->texture);
-
- VkImageSubresourceRange range;
- range.aspectMask = cbuf->aspect;
- range.baseMipLevel = 0;
- range.levelCount = VK_REMAINING_MIP_LEVELS;
- range.baseArrayLayer = 0;
- range.layerCount = VK_REMAINING_ARRAY_LAYERS;
- vkCmdClearColorImage(cmdbuf->cmdbuf,
- cbuf->image, VK_IMAGE_LAYOUT_GENERAL,
- &color,
- 1, &range);
+ unsigned num_layers = util_framebuffer_get_num_layers(fb);
+ VkClearRect rects[PIPE_MAX_VIEWPORTS];
+ uint32_t num_rects;
+ if (ctx->num_scissors) {
+ for (unsigned i = 0 ; i < ctx->num_scissors; ++i) {
+ rects[i].rect = ctx->scissors[i];
+ rects[i].rect.extent.width = MIN2(rects[i].rect.extent.width,
+ fb->width);
+ rects[i].rect.extent.height = MIN2(rects[i].rect.extent.height,
+ fb->height);
+ rects[i].baseArrayLayer = 0;
+ rects[i].layerCount = num_layers;
}
+ num_rects = ctx->num_scissors;
+ } else {
+ rects[0].rect.offset.x = 0;
+ rects[0].rect.offset.y = 0;
+ rects[0].rect.extent.width = fb->width;
+ rects[0].rect.extent.height = fb->height;
+ rects[0].baseArrayLayer = 0;
+ rects[0].layerCount = num_layers;
+ num_rects = 1;
}
- if (depthStencilAspect) {
- struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
-
- VkClearDepthStencilValue zsvalue = { depth, stencil };
-
- VkImageSubresourceRange range;
- range.aspectMask = depthStencilAspect;
- range.baseMipLevel = 0;
- range.levelCount = VK_REMAINING_MIP_LEVELS;
- range.baseArrayLayer = 0;
- range.layerCount = VK_REMAINING_ARRAY_LAYERS;
-
- vkCmdClearDepthStencilImage(cmdbuf->cmdbuf,
- zsbuf->image, VK_IMAGE_LAYOUT_GENERAL,
- &zsvalue,
- 1, &range);
- }
-
- zink_end_cmdbuf(ctx, cmdbuf);
+ vkCmdClearAttachments(batch->cmdbuf,
+ num_attachments, attachments,
+ num_rects, rects);
}
VkShaderStageFlagBits
}
static VkDescriptorSet
-allocate_descriptor_set(struct zink_context *ctx, VkDescriptorSetLayout dsl)
+allocate_descriptor_set(struct zink_screen *screen,
+ struct zink_batch *batch,
+ struct zink_gfx_program *prog)
{
- struct zink_screen *screen = zink_screen(ctx->base.screen);
+ assert(batch->descs_left >= prog->num_descriptors);
VkDescriptorSetAllocateInfo dsai;
memset((void *)&dsai, 0, sizeof(dsai));
dsai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
dsai.pNext = NULL;
- dsai.descriptorPool = ctx->descpool;
+ dsai.descriptorPool = batch->descpool;
dsai.descriptorSetCount = 1;
- dsai.pSetLayouts = &dsl;
+ dsai.pSetLayouts = &prog->dsl;
VkDescriptorSet desc_set;
if (vkAllocateDescriptorSets(screen->dev, &dsai, &desc_set) != VK_SUCCESS) {
- if (vkResetDescriptorPool(screen->dev, ctx->descpool, 0) != VK_SUCCESS) {
- fprintf(stderr, "vkResetDescriptorPool failed\n");
- return VK_NULL_HANDLE;
- }
- if (vkAllocateDescriptorSets(screen->dev, &dsai, &desc_set) != VK_SUCCESS) {
- fprintf(stderr, "vkAllocateDescriptorSets failed\n");
- return VK_NULL_HANDLE;
- }
+ debug_printf("ZINK: failed to allocate descriptor set :/");
+ return VK_NULL_HANDLE;
}
+ batch->descs_left -= prog->num_descriptors;
return desc_set;
}
-static VkPrimitiveTopology
-zink_primitive_topology(enum pipe_prim_type mode)
-{
- switch (mode) {
- case PIPE_PRIM_POINTS:
- return VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
-
- case PIPE_PRIM_LINES:
- return VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
-
- case PIPE_PRIM_LINE_STRIP:
- return VK_PRIMITIVE_TOPOLOGY_LINE_STRIP;
-
- case PIPE_PRIM_TRIANGLES:
- return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
-
- case PIPE_PRIM_TRIANGLE_STRIP:
- return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
-
- case PIPE_PRIM_TRIANGLE_FAN:
- return VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN;
-
- default:
- unreachable("unexpected enum pipe_prim_type");
- }
-}
-
static void
-zink_bind_vertex_buffers(struct zink_cmdbuf *cmdbuf, struct zink_context *ctx)
+zink_bind_vertex_buffers(struct zink_batch *batch, struct zink_context *ctx)
{
VkBuffer buffers[PIPE_MAX_ATTRIBS];
VkDeviceSize buffer_offsets[PIPE_MAX_ATTRIBS];
- struct zink_vertex_elements_state *elems = ctx->gfx_pipeline_state.element_state;
- for (unsigned i = 0; i < elems->num_bindings; i++) {
- struct pipe_vertex_buffer *vb = ctx->buffers + elems->binding_map[i];
+ const struct zink_vertex_elements_state *elems = ctx->element_state;
+ for (unsigned i = 0; i < elems->hw_state.num_bindings; i++) {
+ struct pipe_vertex_buffer *vb = ctx->buffers + ctx->element_state->binding_map[i];
assert(vb && vb->buffer.resource);
struct zink_resource *res = zink_resource(vb->buffer.resource);
buffers[i] = res->buffer;
buffer_offsets[i] = vb->buffer_offset;
- zink_cmdbuf_reference_resoure(cmdbuf, res);
+ zink_batch_reference_resoure(batch, res);
}
- if (elems->num_bindings > 0)
- vkCmdBindVertexBuffers(cmdbuf->cmdbuf, 0, elems->num_bindings, buffers, buffer_offsets);
+ if (elems->hw_state.num_bindings > 0)
+ vkCmdBindVertexBuffers(batch->cmdbuf, 0,
+ elems->hw_state.num_bindings,
+ buffers, buffer_offsets);
}
-static void
-begin_render_pass(struct zink_screen *screen, struct zink_cmdbuf *cmdbuf,
- struct zink_render_pass *rp, struct zink_framebuffer *fb,
- unsigned width, unsigned height)
+/* Hash for the program cache: the key is the array of per-stage
+ * zink_shader pointers (compute stage excluded, hence TYPES - 1).
+ */
+static uint32_t
+hash_gfx_program(const void *key)
{
- VkRenderPassBeginInfo rpbi = {};
- rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
- rpbi.renderPass = rp->render_pass;
- rpbi.renderArea.offset.x = 0;
- rpbi.renderArea.offset.y = 0;
- rpbi.renderArea.extent.width = width;
- rpbi.renderArea.extent.height = height;
- rpbi.clearValueCount = 0;
- rpbi.pClearValues = NULL;
- rpbi.framebuffer = fb->fb;
+ return _mesa_hash_data(key, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1));
+}
- assert(rp && fb);
- assert(!cmdbuf->rp && !cmdbuf->fb);
- zink_render_pass_reference(screen, &cmdbuf->rp, rp);
- zink_framebuffer_reference(screen, &cmdbuf->fb, fb);
+static bool
+equals_gfx_program(const void *a, const void *b)
+{
+ return memcmp(a, b, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1)) == 0;
+}
+
+/* NOTE(review): hashes the raw bytes of zink_render_pass_state, including
+ * struct padding and unused rts[] slots — keys must be fully zeroed before
+ * being filled in; verify get_render_pass does that.
+ */
+static uint32_t
+hash_render_pass_state(const void *key)
+{
+ return _mesa_hash_data(key, sizeof(struct zink_render_pass_state));
+}
- vkCmdBeginRenderPass(cmdbuf->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
+static bool
+equals_render_pass_state(const void *a, const void *b)
+{
+ return memcmp(a, b, sizeof(struct zink_render_pass_state)) == 0;
}
static uint32_t
-hash_gfx_program(const void *key)
+hash_framebuffer_state(const void *key)
{
-   return _mesa_hash_data(key, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1));
+   /* zink_framebuffer_state embeds a fixed-size attachments array (the key
+    * is stack-allocated and indexed in get_framebuffer), so sizeof(struct)
+    * already covers every attachment slot. The previous formula added
+    * sizeof(s->attachments) * s->num_attachments on top, reading past the
+    * end of the key. Keys are zero-initialized before use, so hashing the
+    * whole struct is deterministic.
+    */
+   return _mesa_hash_data(key, sizeof(struct zink_framebuffer_state));
}

static bool
-equals_gfx_program(const void *a, const void *b)
+equals_framebuffer_state(const void *a, const void *b)
{
-   return memcmp(a, b, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1)) == 0;
+   /* compare exactly sizeof(struct) bytes — see hash_framebuffer_state */
+   return memcmp(a, b, sizeof(struct zink_framebuffer_state)) == 0;
}
static struct zink_gfx_program *
ctx->gfx_stages);
if (!entry) {
struct zink_gfx_program *prog;
- prog = zink_create_gfx_program(zink_screen(ctx->base.screen)->dev,
+ prog = zink_create_gfx_program(zink_screen(ctx->base.screen),
ctx->gfx_stages);
entry = _mesa_hash_table_insert(ctx->program_cache, prog->stages, prog);
if (!entry)
{
struct zink_context *ctx = zink_context(pctx);
struct zink_screen *screen = zink_screen(pctx->screen);
- struct zink_rasterizer_state *rast_state = ctx->gfx_pipeline_state.rast_state;
+ struct zink_rasterizer_state *rast_state = ctx->rast_state;
if (dinfo->mode >= PIPE_PRIM_QUADS ||
dinfo->mode == PIPE_PRIM_LINE_LOOP) {
if (!gfx_program)
return;
- ctx->gfx_pipeline_state.primitive_topology = zink_primitive_topology(dinfo->mode);
-
- VkPipeline pipeline = zink_get_gfx_pipeline(screen->dev, gfx_program,
- &ctx->gfx_pipeline_state);
+ VkPipeline pipeline = zink_get_gfx_pipeline(screen, gfx_program,
+ &ctx->gfx_pipeline_state,
+ dinfo->mode);
bool depth_bias = false;
switch (u_reduced_prim(dinfo->mode)) {
index_buffer = dinfo->index.resource;
}
- struct zink_cmdbuf *cmdbuf = zink_start_cmdbuf(ctx);
- if (!cmdbuf)
- return;
-
- begin_render_pass(screen, cmdbuf, ctx->gfx_pipeline_state.render_pass,
- ctx->framebuffer,
- ctx->fb_state.width, ctx->fb_state.height);
-
- vkCmdSetViewport(cmdbuf->cmdbuf, 0, ctx->num_viewports, ctx->viewports);
-
- if (ctx->num_scissors)
- vkCmdSetScissor(cmdbuf->cmdbuf, 0, ctx->num_scissors, ctx->scissors);
- else if (ctx->fb_state.width && ctx->fb_state.height) {
- VkRect2D fb_scissor = {};
- fb_scissor.extent.width = ctx->fb_state.width;
- fb_scissor.extent.height = ctx->fb_state.height;
- vkCmdSetScissor(cmdbuf->cmdbuf, 0, 1, &fb_scissor);
- }
-
- vkCmdSetStencilReference(cmdbuf->cmdbuf, VK_STENCIL_FACE_FRONT_BIT, ctx->stencil_ref[0]);
- vkCmdSetStencilReference(cmdbuf->cmdbuf, VK_STENCIL_FACE_BACK_BIT, ctx->stencil_ref[1]);
-
- if (depth_bias)
- vkCmdSetDepthBias(cmdbuf->cmdbuf, rast_state->offset_units, rast_state->offset_clamp, rast_state->offset_scale);
- else
- vkCmdSetDepthBias(cmdbuf->cmdbuf, 0.0f, 0.0f, 0.0f);
-
- if (ctx->gfx_pipeline_state.blend_state->need_blend_constants)
- vkCmdSetBlendConstants(cmdbuf->cmdbuf, ctx->blend_constants);
-
- VkDescriptorSet desc_set = allocate_descriptor_set(ctx, gfx_program->dsl);
-
VkWriteDescriptorSet wds[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS + PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
VkDescriptorBufferInfo buffer_infos[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS];
VkDescriptorImageInfo image_infos[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
int num_wds = 0, num_buffer_info = 0, num_image_info = 0;
+ struct zink_resource *transitions[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
+ int num_transitions = 0;
+
for (int i = 0; i < ARRAY_SIZE(ctx->gfx_stages); i++) {
struct zink_shader *shader = ctx->gfx_stages[i];
if (!shader)
buffer_infos[num_buffer_info].range = VK_WHOLE_SIZE;
wds[num_wds].pBufferInfo = buffer_infos + num_buffer_info;
++num_buffer_info;
- zink_cmdbuf_reference_resoure(cmdbuf, res);
} else {
struct pipe_sampler_view *psampler_view = ctx->image_views[i][index];
assert(psampler_view);
- struct zink_sampler_view *sampler_view = (struct zink_sampler_view *)psampler_view;
+ struct zink_sampler_view *sampler_view = zink_sampler_view(psampler_view);
+
struct zink_resource *res = zink_resource(psampler_view->texture);
- image_infos[num_image_info].imageLayout = res->layout;
+ VkImageLayout layout = res->layout;
+ if (layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
+ layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL &&
+ layout != VK_IMAGE_LAYOUT_GENERAL) {
+ transitions[num_transitions++] = res;
+ layout = VK_IMAGE_LAYOUT_GENERAL;
+ }
+ image_infos[num_image_info].imageLayout = layout;
image_infos[num_image_info].imageView = sampler_view->image_view;
image_infos[num_image_info].sampler = ctx->samplers[i][index];
wds[num_wds].pImageInfo = image_infos + num_image_info;
++num_image_info;
- zink_cmdbuf_reference_resoure(cmdbuf, res);
}
wds[num_wds].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
wds[num_wds].pNext = NULL;
- wds[num_wds].dstSet = desc_set;
wds[num_wds].dstBinding = shader->bindings[j].binding;
wds[num_wds].dstArrayElement = 0;
wds[num_wds].descriptorCount = 1;
}
}
+ struct zink_batch *batch;
+ if (num_transitions > 0) {
+ batch = zink_batch_no_rp(ctx);
+
+ for (int i = 0; i < num_transitions; ++i)
+ zink_resource_barrier(batch->cmdbuf, transitions[i],
+ transitions[i]->aspect,
+ VK_IMAGE_LAYOUT_GENERAL);
+ }
+
+ batch = zink_batch_rp(ctx);
+
+ if (batch->descs_left < gfx_program->num_descriptors) {
+ flush_batch(ctx);
+ batch = zink_batch_rp(ctx);
+ assert(batch->descs_left >= gfx_program->num_descriptors);
+ }
+
+ VkDescriptorSet desc_set = allocate_descriptor_set(screen, batch,
+ gfx_program);
+ assert(desc_set != VK_NULL_HANDLE);
+
+ for (int i = 0; i < ARRAY_SIZE(ctx->gfx_stages); i++) {
+ struct zink_shader *shader = ctx->gfx_stages[i];
+ if (!shader)
+ continue;
+
+ for (int j = 0; j < shader->num_bindings; j++) {
+ int index = shader->bindings[j].index;
+ if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
+ struct zink_resource *res = zink_resource(ctx->ubos[i][index].buffer);
+ zink_batch_reference_resoure(batch, res);
+ } else {
+ struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->image_views[i][index]);
+ zink_batch_reference_sampler_view(batch, sampler_view);
+ }
+ }
+ }
+
+ vkCmdSetViewport(batch->cmdbuf, 0, ctx->num_viewports, ctx->viewports);
+
+ if (ctx->num_scissors)
+ vkCmdSetScissor(batch->cmdbuf, 0, ctx->num_scissors, ctx->scissors);
+ else if (ctx->fb_state.width && ctx->fb_state.height) {
+ VkRect2D fb_scissor = {};
+ fb_scissor.extent.width = ctx->fb_state.width;
+ fb_scissor.extent.height = ctx->fb_state.height;
+ vkCmdSetScissor(batch->cmdbuf, 0, 1, &fb_scissor);
+ }
+
+ vkCmdSetStencilReference(batch->cmdbuf, VK_STENCIL_FACE_FRONT_BIT, ctx->stencil_ref.ref_value[0]);
+ vkCmdSetStencilReference(batch->cmdbuf, VK_STENCIL_FACE_BACK_BIT, ctx->stencil_ref.ref_value[1]);
+
+ if (depth_bias)
+ vkCmdSetDepthBias(batch->cmdbuf, rast_state->offset_units, rast_state->offset_clamp, rast_state->offset_scale);
+ else
+ vkCmdSetDepthBias(batch->cmdbuf, 0.0f, 0.0f, 0.0f);
+
+ if (ctx->gfx_pipeline_state.blend_state->need_blend_constants)
+ vkCmdSetBlendConstants(batch->cmdbuf, ctx->blend_constants);
+
+ for (int i = 0; i < num_wds; ++i)
+ wds[i].dstSet = desc_set;
+
vkUpdateDescriptorSets(screen->dev, num_wds, wds, 0, NULL);
- vkCmdBindPipeline(cmdbuf->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
- vkCmdBindDescriptorSets(cmdbuf->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS,
+ vkCmdBindPipeline(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
+ vkCmdBindDescriptorSets(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS,
gfx_program->layout, 0, 1, &desc_set, 0, NULL);
- zink_bind_vertex_buffers(cmdbuf, ctx);
+ zink_bind_vertex_buffers(batch, ctx);
if (dinfo->index_size > 0) {
assert(dinfo->index_size != 1);
VkIndexType index_type = dinfo->index_size == 2 ? VK_INDEX_TYPE_UINT16 : VK_INDEX_TYPE_UINT32;
struct zink_resource *res = zink_resource(index_buffer);
- vkCmdBindIndexBuffer(cmdbuf->cmdbuf, res->buffer, index_offset, index_type);
- zink_cmdbuf_reference_resoure(cmdbuf, res);
- vkCmdDrawIndexed(cmdbuf->cmdbuf,
+ vkCmdBindIndexBuffer(batch->cmdbuf, res->buffer, index_offset, index_type);
+ zink_batch_reference_resoure(batch, res);
+ vkCmdDrawIndexed(batch->cmdbuf,
dinfo->count, dinfo->instance_count,
dinfo->start, dinfo->index_bias, dinfo->start_instance);
} else
- vkCmdDraw(cmdbuf->cmdbuf, dinfo->count, dinfo->instance_count, dinfo->start, dinfo->start_instance);
-
- vkCmdEndRenderPass(cmdbuf->cmdbuf);
-
- zink_end_cmdbuf(ctx, cmdbuf);
+ vkCmdDraw(batch->cmdbuf, dinfo->count, dinfo->instance_count, dinfo->start, dinfo->start_instance);
if (dinfo->index_size > 0 && dinfo->has_user_indices)
pipe_resource_reference(&index_buffer, NULL);
{
struct zink_context *ctx = zink_context(pctx);
+ struct zink_batch *batch = zink_curr_batch(ctx);
+ flush_batch(ctx);
+
if (pfence)
- zink_fence_reference(zink_screen(pctx->screen), (struct zink_fence **)pfence,
- zink_context_curr_cmdbuf(ctx)->fence);
+ zink_fence_reference(zink_screen(pctx->screen),
+ (struct zink_fence **)pfence,
+ batch->fence);
+
+ /* HACK:
+ * For some strange reason, we need to finish before presenting, or else
+ * we start rendering on top of the back-buffer for the next frame. This
+ * seems like a bug in the DRI-driver to me, because we really should
+ * be properly protected by fences here, and the back-buffer should
+ * either be swapped with the front-buffer, or blitted from. But for
+ * some strange reason, neither of these things happen.
+ */
+ if (flags & PIPE_FLUSH_END_OF_FRAME)
+ pctx->screen->fence_finish(pctx->screen, pctx,
+ (struct pipe_fence_handle *)batch->fence,
+ PIPE_TIMEOUT_INFINITE);
}
static void
return;
}
+ util_blitter_save_blend(ctx->blitter, ctx->gfx_pipeline_state.blend_state);
+ util_blitter_save_depth_stencil_alpha(ctx->blitter, ctx->gfx_pipeline_state.depth_stencil_alpha_state);
+ util_blitter_save_vertex_elements(ctx->blitter, ctx->element_state);
+ util_blitter_save_stencil_ref(ctx->blitter, &ctx->stencil_ref);
+ util_blitter_save_rasterizer(ctx->blitter, ctx->rast_state);
+ util_blitter_save_fragment_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_FRAGMENT]);
+ util_blitter_save_vertex_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_VERTEX]);
+ util_blitter_save_framebuffer(ctx->blitter, &ctx->fb_state);
+ util_blitter_save_viewport(ctx->blitter, ctx->viewport_states);
+ util_blitter_save_scissor(ctx->blitter, ctx->scissor_states);
+ util_blitter_save_fragment_sampler_states(ctx->blitter,
+ ctx->num_samplers[PIPE_SHADER_FRAGMENT],
+ (void **)ctx->samplers[PIPE_SHADER_FRAGMENT]);
+ util_blitter_save_fragment_sampler_views(ctx->blitter,
+ ctx->num_image_views[PIPE_SHADER_FRAGMENT],
+ ctx->image_views[PIPE_SHADER_FRAGMENT]);
util_blitter_save_fragment_constant_buffer_slot(ctx->blitter, ctx->ubos[PIPE_SHADER_FRAGMENT]);
util_blitter_save_vertex_buffer_slot(ctx->blitter, ctx->buffers);
- util_blitter_save_vertex_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_VERTEX]);
- util_blitter_save_fragment_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_FRAGMENT]);
- util_blitter_save_rasterizer(ctx->blitter, ctx->gfx_pipeline_state.rast_state);
+ util_blitter_save_sample_mask(ctx->blitter, ctx->gfx_pipeline_state.sample_mask);
util_blitter_blit(ctx->blitter, info);
+ return;
}
struct zink_resource *src = zink_resource(info->src.resource);
if (src->base.nr_samples > 1 && dst->base.nr_samples <= 1)
is_resolve = true;
- struct zink_cmdbuf *cmdbuf = zink_start_cmdbuf(ctx);
- if (!cmdbuf)
- return;
+ struct zink_batch *batch = zink_batch_no_rp(ctx);
- zink_cmdbuf_reference_resoure(cmdbuf, src);
- zink_cmdbuf_reference_resoure(cmdbuf, dst);
+ zink_batch_reference_resoure(batch, src);
+ zink_batch_reference_resoure(batch, dst);
if (is_resolve) {
VkImageResolve region = {};
region.extent.width = info->dst.box.width;
region.extent.height = info->dst.box.height;
region.extent.depth = info->dst.box.depth;
- vkCmdResolveImage(cmdbuf->cmdbuf, src->image, src->layout,
+ vkCmdResolveImage(batch->cmdbuf, src->image, src->layout,
dst->image, dst->layout,
1, ®ion);
} else {
if (dst->layout != VK_IMAGE_LAYOUT_GENERAL &&
dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
- zink_resource_barrier(cmdbuf->cmdbuf, dst, dst->aspect,
+ zink_resource_barrier(batch->cmdbuf, dst, dst->aspect,
VK_IMAGE_LAYOUT_GENERAL);
VkImageBlit region = {};
region.dstSubresource.layerCount = 1;
}
- vkCmdBlitImage(cmdbuf->cmdbuf, src->image, src->layout,
+ vkCmdBlitImage(batch->cmdbuf, src->image, src->layout,
dst->image, dst->layout,
1, ®ion,
filter(info->filter));
}
- zink_end_cmdbuf(ctx, cmdbuf);
+
+ /* HACK: I have no idea why this is needed, but without it ioquake3
+ * randomly keeps fading to black.
+ */
+ flush_batch(ctx);
}
static void
region.extent.width = src_box->width;
region.extent.height = src_box->height;
- struct zink_cmdbuf *cmdbuf = zink_start_cmdbuf(ctx);
- if (!cmdbuf)
- return;
+ struct zink_batch *batch = zink_batch_no_rp(ctx);
+ zink_batch_reference_resoure(batch, src);
+ zink_batch_reference_resoure(batch, dst);
- zink_cmdbuf_reference_resoure(cmdbuf, src);
- zink_cmdbuf_reference_resoure(cmdbuf, dst);
+ if (src->layout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL &&
+ src->layout != VK_IMAGE_LAYOUT_GENERAL) {
+ zink_resource_barrier(batch->cmdbuf, src, src->aspect,
+ VK_IMAGE_LAYOUT_GENERAL);
+ src->layout = VK_IMAGE_LAYOUT_GENERAL;
+ }
- vkCmdCopyImage(cmdbuf->cmdbuf, src->image, src->layout,
+ if (dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL &&
+ dst->layout != VK_IMAGE_LAYOUT_GENERAL) {
+ zink_resource_barrier(batch->cmdbuf, dst, dst->aspect,
+ VK_IMAGE_LAYOUT_GENERAL);
+ dst->layout = VK_IMAGE_LAYOUT_GENERAL;
+ }
+
+ vkCmdCopyImage(batch->cmdbuf, src->image, src->layout,
dst->image, dst->layout,
1, ®ion);
- zink_end_cmdbuf(ctx, cmdbuf);
} else
debug_printf("zink: TODO resource copy\n");
}
ctx->base.create_sampler_view = zink_create_sampler_view;
ctx->base.set_sampler_views = zink_set_sampler_views;
- ctx->base.sampler_view_destroy = zink_destroy_sampler_view;
+ ctx->base.sampler_view_destroy = zink_sampler_view_destroy;
ctx->base.create_vs_state = zink_create_vs_state;
ctx->base.bind_vs_state = zink_bind_vs_state;
ctx->base.set_active_query_state = zink_set_active_query_state;
ctx->base.set_blend_color = zink_set_blend_color;
+ ctx->base.set_sample_mask = zink_set_sample_mask;
+
ctx->base.clear = zink_clear;
ctx->base.draw_vbo = zink_draw_vbo;
ctx->base.flush = zink_flush;
cbai.commandPool = ctx->cmdpool;
cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
cbai.commandBufferCount = 1;
- for (int i = 0; i < ARRAY_SIZE(ctx->cmdbufs); ++i) {
- if (vkAllocateCommandBuffers(screen->dev, &cbai, &ctx->cmdbufs[i].cmdbuf) != VK_SUCCESS)
- goto fail;
-
- ctx->cmdbufs[i].resources = _mesa_set_create(NULL, _mesa_hash_pointer,
- _mesa_key_pointer_equal);
- if (!ctx->cmdbufs[i].resources)
- goto fail;
-
- util_dynarray_init(&ctx->cmdbufs[i].zombie_samplers, NULL);
- }
VkDescriptorPoolSize sizes[] = {
- {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1000}
+ {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, ZINK_BATCH_DESC_SIZE}
};
VkDescriptorPoolCreateInfo dpci = {};
dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
dpci.pPoolSizes = sizes;
dpci.poolSizeCount = ARRAY_SIZE(sizes);
dpci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
- dpci.maxSets = 1000;
+ dpci.maxSets = ZINK_BATCH_DESC_SIZE;
- if(vkCreateDescriptorPool(screen->dev, &dpci, 0, &ctx->descpool) != VK_SUCCESS)
- goto fail;
+ for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i) {
+ if (vkAllocateCommandBuffers(screen->dev, &cbai, &ctx->batches[i].cmdbuf) != VK_SUCCESS)
+ goto fail;
+
+ ctx->batches[i].resources = _mesa_set_create(NULL, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+ ctx->batches[i].sampler_views = _mesa_set_create(NULL,
+ _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+
+ if (!ctx->batches[i].resources || !ctx->batches[i].sampler_views)
+ goto fail;
+
+ util_dynarray_init(&ctx->batches[i].zombie_samplers, NULL);
+
+ if (vkCreateDescriptorPool(screen->dev, &dpci, 0,
+ &ctx->batches[i].descpool) != VK_SUCCESS)
+ goto fail;
+ }
vkGetDeviceQueue(screen->dev, screen->gfx_queue, 0, &ctx->queue);
- ctx->program_cache = _mesa_hash_table_create(NULL, hash_gfx_program, equals_gfx_program);
- if (!ctx->program_cache)
+ ctx->program_cache = _mesa_hash_table_create(NULL,
+ hash_gfx_program,
+ equals_gfx_program);
+ ctx->render_pass_cache = _mesa_hash_table_create(NULL,
+ hash_render_pass_state,
+ equals_render_pass_state);
+ ctx->framebuffer_cache = _mesa_hash_table_create(NULL,
+ hash_framebuffer_state,
+ equals_framebuffer_state);
+
+ if (!ctx->program_cache || !ctx->render_pass_cache ||
+ !ctx->framebuffer_cache)
goto fail;
ctx->dirty = ZINK_DIRTY_PROGRAM;
+ /* start the first batch */
+ zink_start_batch(ctx, zink_curr_batch(ctx));
+
return &ctx->base;
fail: