#include "zink_framebuffer.h"
#include "zink_helpers.h"
#include "zink_pipeline.h"
-#include "zink_program.h"
+#include "zink_query.h"
#include "zink_render_pass.h"
#include "zink_resource.h"
#include "zink_screen.h"
#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
#include "util/u_debug.h"
-#include "util/u_format.h"
+#include "util/format/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"
#include "nir.h"
#include "util/u_memory.h"
-#include "util/u_prim.h"
#include "util/u_upload_mgr.h"
static void
if (state->compare_mode == PIPE_TEX_COMPARE_NONE)
sci.compareOp = VK_COMPARE_OP_NEVER;
- else
+ else {
sci.compareOp = compare_op(state->compare_func);
+ sci.compareEnable = VK_TRUE;
+ }
sci.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; // TODO
sci.unnormalizedCoordinates = !state->normalized_coords;
struct zink_context *ctx = zink_context(pctx);
for (unsigned i = 0; i < num_samplers; ++i) {
VkSampler *sampler = samplers[i];
+ ctx->sampler_states[shader][start_slot + i] = sampler;
ctx->samplers[shader][start_slot + i] = sampler ? *sampler : VK_NULL_HANDLE;
}
ctx->num_samplers[shader] = start_slot + num_samplers;
case PIPE_TEXTURE_CUBE: return VK_IMAGE_VIEW_TYPE_CUBE;
case PIPE_TEXTURE_CUBE_ARRAY: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
case PIPE_TEXTURE_3D: return VK_IMAGE_VIEW_TYPE_3D;
- case PIPE_TEXTURE_RECT: return VK_IMAGE_VIEW_TYPE_2D; /* not sure */
+ case PIPE_TEXTURE_RECT: return VK_IMAGE_VIEW_TYPE_2D;
default:
unreachable("unexpected target");
}
else
nir = (struct nir_shader *)shader->ir.nir;
- return zink_compile_nir(zink_screen(pctx->screen), nir);
+ return zink_compile_nir(zink_screen(pctx->screen), nir, &shader->stream_output);
}
static void
else
nir = (struct nir_shader *)shader->ir.nir;
- return zink_compile_nir(zink_screen(pctx->screen), nir);
+ return zink_compile_nir(zink_screen(pctx->screen), nir, NULL);
}
static void
if (buffers) {
for (int i = 0; i < num_buffers; ++i) {
const struct pipe_vertex_buffer *vb = buffers + i;
+ struct zink_resource *res = zink_resource(vb->buffer.resource);
+
ctx->gfx_pipeline_state.bindings[start_slot + i].stride = vb->stride;
+ if (res && res->needs_xfb_barrier) {
+ /* if we're binding a previously-used xfb buffer, we need cmd buffer synchronization to ensure
+ * that we use the right buffer data
+ */
+ pctx->flush(pctx, NULL, 0);
+ res->needs_xfb_barrier = false;
+ }
}
}
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
const struct pipe_framebuffer_state *fb = &ctx->fb_state;
- struct zink_render_pass_state state;
+ struct zink_render_pass_state state = { 0 };
for (int i = 0; i < fb->nr_cbufs; i++) {
- struct pipe_resource *res = fb->cbufs[i]->texture;
- state.rts[i].format = zink_get_format(screen, fb->cbufs[i]->format);
- state.rts[i].samples = res->nr_samples > 0 ? res->nr_samples :
- VK_SAMPLE_COUNT_1_BIT;
+ struct pipe_surface *surf = fb->cbufs[i];
+ state.rts[i].format = zink_get_format(screen, surf->format);
+ state.rts[i].samples = surf->nr_samples > 0 ? surf->nr_samples :
+ VK_SAMPLE_COUNT_1_BIT;
}
state.num_cbufs = fb->nr_cbufs;
}
static struct zink_framebuffer *
-get_framebuffer(struct zink_context *ctx)
+create_framebuffer(struct zink_context *ctx)
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
state.height = ctx->fb_state.height;
state.layers = MAX2(ctx->fb_state.layers, 1);
- struct hash_entry *entry = _mesa_hash_table_search(ctx->framebuffer_cache,
- &state);
- if (!entry) {
- struct zink_framebuffer *fb = zink_create_framebuffer(screen, &state);
- entry = _mesa_hash_table_insert(ctx->framebuffer_cache, &state, fb);
- if (!entry)
- return NULL;
+ return zink_create_framebuffer(screen, &state);
+}
+
+static void
+framebuffer_state_buffer_barriers_setup(const struct pipe_framebuffer_state *state, struct zink_batch *batch)
+{
+ for (int i = 0; i < state->nr_cbufs; i++) {
+ struct zink_resource *res = zink_resource(state->cbufs[i]->texture);
+ if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
+ zink_resource_barrier(batch->cmdbuf, res, res->aspect,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
}
- return entry->data;
+ if (state->zsbuf) {
+ struct zink_resource *res = zink_resource(state->zsbuf->texture);
+ if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
+ zink_resource_barrier(batch->cmdbuf, res, res->aspect,
+ VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
+ }
}
void
assert(!batch->rp || batch->rp == ctx->gfx_pipeline_state.render_pass);
assert(!batch->fb || batch->fb == ctx->framebuffer);
- for (int i = 0; i < fb_state->nr_cbufs; i++) {
- struct zink_resource *res = zink_resource(fb_state->cbufs[i]->texture);
- if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
- zink_resource_barrier(batch->cmdbuf, res, res->aspect,
- VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
- }
-
- if (fb_state->zsbuf) {
- struct zink_resource *res = zink_resource(fb_state->zsbuf->texture);
- if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
- zink_resource_barrier(batch->cmdbuf, res, res->aspect,
- VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
- }
+ framebuffer_state_buffer_barriers_setup(fb_state, batch);
zink_render_pass_reference(screen, &batch->rp, ctx->gfx_pipeline_state.render_pass);
zink_framebuffer_reference(screen, &batch->fb, ctx->framebuffer);
struct zink_context *ctx = zink_context(pctx);
struct zink_screen *screen = zink_screen(pctx->screen);
- VkSampleCountFlagBits rast_samples = VK_SAMPLE_COUNT_1_BIT;
- for (int i = 0; i < state->nr_cbufs; i++)
- rast_samples = MAX2(rast_samples, state->cbufs[i]->texture->nr_samples);
- if (state->zsbuf && state->zsbuf->texture->nr_samples)
- rast_samples = MAX2(rast_samples, state->zsbuf->texture->nr_samples);
-
util_copy_framebuffer_state(&ctx->fb_state, state);
- struct zink_framebuffer *fb = get_framebuffer(ctx);
+ struct zink_framebuffer *fb = ctx->framebuffer;
+ /* explicitly unref previous fb to ensure it gets destroyed */
+ if (fb)
+ zink_framebuffer_reference(screen, &fb, NULL);
+ fb = create_framebuffer(ctx);
zink_framebuffer_reference(screen, &ctx->framebuffer, fb);
zink_render_pass_reference(screen, &ctx->gfx_pipeline_state.render_pass, fb->rp);
- ctx->gfx_pipeline_state.rast_samples = rast_samples;
+ ctx->gfx_pipeline_state.rast_samples = MAX2(state->samples, 1);
ctx->gfx_pipeline_state.num_attachments = state->nr_cbufs;
struct zink_batch *batch = zink_batch_no_rp(ctx);
- for (int i = 0; i < state->nr_cbufs; i++) {
- struct zink_resource *res = zink_resource(state->cbufs[i]->texture);
- if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
- zink_resource_barrier(batch->cmdbuf, res, res->aspect,
- VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
- }
-
- if (state->zsbuf) {
- struct zink_resource *res = zink_resource(state->zsbuf->texture);
- if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
- zink_resource_barrier(batch->cmdbuf, res, res->aspect,
- VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
- }
+ framebuffer_state_buffer_barriers_setup(state, batch);
}
static void
static void
zink_clear(struct pipe_context *pctx,
unsigned buffers,
+ const struct pipe_scissor_state *scissor_state,
const union pipe_color_union *pcolor,
double depth, unsigned stencil)
{
return stages[type];
}
-static VkDescriptorSet
-allocate_descriptor_set(struct zink_screen *screen,
- struct zink_batch *batch,
- struct zink_gfx_program *prog)
-{
- assert(batch->descs_left >= prog->num_descriptors);
- VkDescriptorSetAllocateInfo dsai;
- memset((void *)&dsai, 0, sizeof(dsai));
- dsai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
- dsai.pNext = NULL;
- dsai.descriptorPool = batch->descpool;
- dsai.descriptorSetCount = 1;
- dsai.pSetLayouts = &prog->dsl;
-
- VkDescriptorSet desc_set;
- if (vkAllocateDescriptorSets(screen->dev, &dsai, &desc_set) != VK_SUCCESS) {
- debug_printf("ZINK: failed to allocate descriptor set :/");
- return VK_NULL_HANDLE;
- }
-
- batch->descs_left -= prog->num_descriptors;
- return desc_set;
-}
-
-static void
-zink_bind_vertex_buffers(struct zink_batch *batch, struct zink_context *ctx)
-{
- VkBuffer buffers[PIPE_MAX_ATTRIBS];
- VkDeviceSize buffer_offsets[PIPE_MAX_ATTRIBS];
- const struct zink_vertex_elements_state *elems = ctx->element_state;
- for (unsigned i = 0; i < elems->hw_state.num_bindings; i++) {
- struct pipe_vertex_buffer *vb = ctx->buffers + ctx->element_state->binding_map[i];
- assert(vb && vb->buffer.resource);
- struct zink_resource *res = zink_resource(vb->buffer.resource);
- buffers[i] = res->buffer;
- buffer_offsets[i] = vb->buffer_offset;
- zink_batch_reference_resoure(batch, res);
- }
-
- if (elems->hw_state.num_bindings > 0)
- vkCmdBindVertexBuffers(batch->cmdbuf, 0,
- elems->hw_state.num_bindings,
- buffers, buffer_offsets);
-}
-
static uint32_t
hash_gfx_program(const void *key)
{
return memcmp(a, b, sizeof(struct zink_render_pass_state)) == 0;
}
-static uint32_t
-hash_framebuffer_state(const void *key)
-{
- struct zink_framebuffer_state *s = (struct zink_framebuffer_state*)key;
- return _mesa_hash_data(key, sizeof(struct zink_framebuffer_state) + sizeof(s->attachments) * s->num_attachments);
-}
-
-static bool
-equals_framebuffer_state(const void *a, const void *b)
-{
- struct zink_framebuffer_state *s = (struct zink_framebuffer_state*)a;
- return memcmp(a, b, sizeof(struct zink_framebuffer_state) + sizeof(s->attachments) * s->num_attachments) == 0;
-}
-
-static struct zink_gfx_program *
-get_gfx_program(struct zink_context *ctx)
-{
- if (ctx->dirty_program) {
- struct hash_entry *entry = _mesa_hash_table_search(ctx->program_cache,
- ctx->gfx_stages);
- if (!entry) {
- struct zink_gfx_program *prog;
- prog = zink_create_gfx_program(zink_screen(ctx->base.screen),
- ctx->gfx_stages);
- entry = _mesa_hash_table_insert(ctx->program_cache, prog->stages, prog);
- if (!entry)
- return NULL;
- }
- ctx->curr_program = entry->data;
- ctx->dirty_program = false;
- }
-
- assert(ctx->curr_program);
- return ctx->curr_program;
-}
-
-static bool
-line_width_needed(enum pipe_prim_type reduced_prim,
- VkPolygonMode polygon_mode)
-{
- switch (reduced_prim) {
- case PIPE_PRIM_POINTS:
- return false;
-
- case PIPE_PRIM_LINES:
- return true;
-
- case PIPE_PRIM_TRIANGLES:
- return polygon_mode == VK_POLYGON_MODE_LINE;
-
- default:
- unreachable("unexpected reduced prim");
- }
-}
-
-static void
-zink_draw_vbo(struct pipe_context *pctx,
- const struct pipe_draw_info *dinfo)
-{
- struct zink_context *ctx = zink_context(pctx);
- struct zink_screen *screen = zink_screen(pctx->screen);
- struct zink_rasterizer_state *rast_state = ctx->rast_state;
-
- if (dinfo->mode >= PIPE_PRIM_QUADS ||
- dinfo->mode == PIPE_PRIM_LINE_LOOP ||
- dinfo->index_size == 1) {
- if (!u_trim_pipe_prim(dinfo->mode, (unsigned *)&dinfo->count))
- return;
-
- util_primconvert_save_rasterizer_state(ctx->primconvert, &rast_state->base);
- util_primconvert_draw_vbo(ctx->primconvert, dinfo);
- return;
- }
-
- struct zink_gfx_program *gfx_program = get_gfx_program(ctx);
- if (!gfx_program)
- return;
-
- VkPipeline pipeline = zink_get_gfx_pipeline(screen, gfx_program,
- &ctx->gfx_pipeline_state,
- dinfo->mode);
-
- enum pipe_prim_type reduced_prim = u_reduced_prim(dinfo->mode);
-
- bool depth_bias = false;
- switch (reduced_prim) {
- case PIPE_PRIM_POINTS:
- depth_bias = rast_state->offset_point;
- break;
-
- case PIPE_PRIM_LINES:
- depth_bias = rast_state->offset_line;
- break;
-
- case PIPE_PRIM_TRIANGLES:
- depth_bias = rast_state->offset_tri;
- break;
-
- default:
- unreachable("unexpected reduced prim");
- }
-
- unsigned index_offset = 0;
- struct pipe_resource *index_buffer = NULL;
- if (dinfo->index_size > 0) {
- if (dinfo->has_user_indices) {
- if (!util_upload_index_buffer(pctx, dinfo, &index_buffer, &index_offset)) {
- debug_printf("util_upload_index_buffer() failed\n");
- return;
- }
- } else
- index_buffer = dinfo->index.resource;
- }
-
- VkWriteDescriptorSet wds[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS + PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
- VkDescriptorBufferInfo buffer_infos[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS];
- VkDescriptorImageInfo image_infos[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
- int num_wds = 0, num_buffer_info = 0, num_image_info = 0;
-
- struct zink_resource *transitions[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
- int num_transitions = 0;
-
- for (int i = 0; i < ARRAY_SIZE(ctx->gfx_stages); i++) {
- struct zink_shader *shader = ctx->gfx_stages[i];
- if (!shader)
- continue;
-
- for (int j = 0; j < shader->num_bindings; j++) {
- int index = shader->bindings[j].index;
- if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
- assert(ctx->ubos[i][index].buffer_size > 0);
- assert(ctx->ubos[i][index].buffer_size <= screen->props.limits.maxUniformBufferRange);
- assert(ctx->ubos[i][index].buffer);
- struct zink_resource *res = zink_resource(ctx->ubos[i][index].buffer);
- buffer_infos[num_buffer_info].buffer = res->buffer;
- buffer_infos[num_buffer_info].offset = ctx->ubos[i][index].buffer_offset;
- buffer_infos[num_buffer_info].range = ctx->ubos[i][index].buffer_size;
- wds[num_wds].pBufferInfo = buffer_infos + num_buffer_info;
- ++num_buffer_info;
- } else {
- struct pipe_sampler_view *psampler_view = ctx->image_views[i][index];
- assert(psampler_view);
- struct zink_sampler_view *sampler_view = zink_sampler_view(psampler_view);
-
- struct zink_resource *res = zink_resource(psampler_view->texture);
- VkImageLayout layout = res->layout;
- if (layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
- layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL &&
- layout != VK_IMAGE_LAYOUT_GENERAL) {
- transitions[num_transitions++] = res;
- layout = VK_IMAGE_LAYOUT_GENERAL;
- }
- image_infos[num_image_info].imageLayout = layout;
- image_infos[num_image_info].imageView = sampler_view->image_view;
- image_infos[num_image_info].sampler = ctx->samplers[i][index];
- wds[num_wds].pImageInfo = image_infos + num_image_info;
- ++num_image_info;
- }
-
- wds[num_wds].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
- wds[num_wds].pNext = NULL;
- wds[num_wds].dstBinding = shader->bindings[j].binding;
- wds[num_wds].dstArrayElement = 0;
- wds[num_wds].descriptorCount = 1;
- wds[num_wds].descriptorType = shader->bindings[j].type;
- ++num_wds;
- }
- }
-
- struct zink_batch *batch;
- if (num_transitions > 0) {
- batch = zink_batch_no_rp(ctx);
-
- for (int i = 0; i < num_transitions; ++i)
- zink_resource_barrier(batch->cmdbuf, transitions[i],
- transitions[i]->aspect,
- VK_IMAGE_LAYOUT_GENERAL);
- }
-
- batch = zink_batch_rp(ctx);
-
- if (batch->descs_left < gfx_program->num_descriptors) {
- flush_batch(ctx);
- batch = zink_batch_rp(ctx);
- assert(batch->descs_left >= gfx_program->num_descriptors);
- }
-
- VkDescriptorSet desc_set = allocate_descriptor_set(screen, batch,
- gfx_program);
- assert(desc_set != VK_NULL_HANDLE);
-
- for (int i = 0; i < ARRAY_SIZE(ctx->gfx_stages); i++) {
- struct zink_shader *shader = ctx->gfx_stages[i];
- if (!shader)
- continue;
-
- for (int j = 0; j < shader->num_bindings; j++) {
- int index = shader->bindings[j].index;
- if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
- struct zink_resource *res = zink_resource(ctx->ubos[i][index].buffer);
- zink_batch_reference_resoure(batch, res);
- } else {
- struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->image_views[i][index]);
- zink_batch_reference_sampler_view(batch, sampler_view);
- }
- }
- }
-
- vkCmdSetViewport(batch->cmdbuf, 0, ctx->num_viewports, ctx->viewports);
- if (ctx->rast_state->base.scissor)
- vkCmdSetScissor(batch->cmdbuf, 0, ctx->num_viewports, ctx->scissors);
- else if (ctx->fb_state.width && ctx->fb_state.height) {
- VkRect2D fb_scissor = {};
- fb_scissor.extent.width = ctx->fb_state.width;
- fb_scissor.extent.height = ctx->fb_state.height;
- vkCmdSetScissor(batch->cmdbuf, 0, 1, &fb_scissor);
- }
-
- if (line_width_needed(reduced_prim, rast_state->hw_state.polygon_mode)) {
- if (screen->feats.wideLines || ctx->line_width == 1.0f)
- vkCmdSetLineWidth(batch->cmdbuf, ctx->line_width);
- else
- debug_printf("BUG: wide lines not supported, needs fallback!");
- }
-
- vkCmdSetStencilReference(batch->cmdbuf, VK_STENCIL_FACE_FRONT_BIT, ctx->stencil_ref.ref_value[0]);
- vkCmdSetStencilReference(batch->cmdbuf, VK_STENCIL_FACE_BACK_BIT, ctx->stencil_ref.ref_value[1]);
-
- if (depth_bias)
- vkCmdSetDepthBias(batch->cmdbuf, rast_state->offset_units, rast_state->offset_clamp, rast_state->offset_scale);
- else
- vkCmdSetDepthBias(batch->cmdbuf, 0.0f, 0.0f, 0.0f);
-
- if (ctx->gfx_pipeline_state.blend_state->need_blend_constants)
- vkCmdSetBlendConstants(batch->cmdbuf, ctx->blend_constants);
-
- if (num_wds > 0) {
- for (int i = 0; i < num_wds; ++i)
- wds[i].dstSet = desc_set;
- vkUpdateDescriptorSets(screen->dev, num_wds, wds, 0, NULL);
- }
-
- vkCmdBindPipeline(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
- vkCmdBindDescriptorSets(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS,
- gfx_program->layout, 0, 1, &desc_set, 0, NULL);
- zink_bind_vertex_buffers(batch, ctx);
-
- if (dinfo->index_size > 0) {
- assert(dinfo->index_size != 1);
- VkIndexType index_type = dinfo->index_size == 2 ? VK_INDEX_TYPE_UINT16 : VK_INDEX_TYPE_UINT32;
- struct zink_resource *res = zink_resource(index_buffer);
- vkCmdBindIndexBuffer(batch->cmdbuf, res->buffer, index_offset, index_type);
- zink_batch_reference_resoure(batch, res);
- vkCmdDrawIndexed(batch->cmdbuf,
- dinfo->count, dinfo->instance_count,
- dinfo->start, dinfo->index_bias, dinfo->start_instance);
- } else
- vkCmdDraw(batch->cmdbuf, dinfo->count, dinfo->instance_count, dinfo->start, dinfo->start_instance);
-
- if (dinfo->index_size > 0 && dinfo->has_user_indices)
- pipe_resource_reference(&index_buffer, NULL);
-}
-
static void
zink_flush(struct pipe_context *pctx,
struct pipe_fence_handle **pfence,
struct zink_batch *batch = zink_curr_batch(ctx);
flush_batch(ctx);
+ if (zink_screen(pctx->screen)->have_EXT_transform_feedback && ctx->num_so_targets)
+ ctx->dirty_so_targets = true;
+
if (pfence)
zink_fence_reference(zink_screen(pctx->screen),
(struct zink_fence **)pfence,
debug_printf("zink: TODO resource copy\n");
}
+/* pipe_context::create_stream_output_target
+ *
+ * Wraps @pres in a zink_so_target and allocates the small (4-byte) counter
+ * buffer that VK_EXT_transform_feedback uses to record the number of bytes
+ * written, so that xfb can later be paused and resumed.
+ *
+ * Returns NULL on allocation failure; on success the caller owns the single
+ * reference on the returned target.
+ */
+static struct pipe_stream_output_target *
+zink_create_stream_output_target(struct pipe_context *pctx,
+                                 struct pipe_resource *pres,
+                                 unsigned buffer_offset,
+                                 unsigned buffer_size)
+{
+   struct zink_so_target *t;
+   t = CALLOC_STRUCT(zink_so_target);
+   if (!t)
+      return NULL;
+
+   t->base.reference.count = 1;
+   t->base.context = pctx;
+   pipe_resource_reference(&t->base.buffer, pres);
+   t->base.buffer_offset = buffer_offset;
+   t->base.buffer_size = buffer_size;
+
+   /* using PIPE_BIND_CUSTOM here lets us create a custom pipe buffer resource,
+    * which allows us to differentiate and use VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT
+    * as we must for this case
+    */
+   t->counter_buffer = pipe_buffer_create(pctx->screen, PIPE_BIND_STREAM_OUTPUT | PIPE_BIND_CUSTOM, PIPE_USAGE_DEFAULT, 4);
+   if (!t->counter_buffer) {
+      /* drop the reference taken on the xfb buffer above — a bare FREE(t)
+       * here would leak a refcount on pres
+       */
+      pipe_resource_reference(&t->base.buffer, NULL);
+      FREE(t);
+      return NULL;
+   }
+
+   return &t->base;
+}
+
+/* pipe_context::stream_output_target_destroy
+ *
+ * Tears down a target created by zink_create_stream_output_target:
+ * releases the reference on the internal xfb counter buffer and on the
+ * user's xfb buffer, then frees the wrapper struct itself.
+ */
+static void
+zink_stream_output_target_destroy(struct pipe_context *pctx,
+                                  struct pipe_stream_output_target *psot)
+{
+   struct zink_so_target *t = (struct zink_so_target *)psot;
+   pipe_resource_reference(&t->counter_buffer, NULL);
+   pipe_resource_reference(&t->base.buffer, NULL);
+   FREE(t);
+}
+
+/* pipe_context::set_stream_output_targets
+ *
+ * Binds num_targets transform-feedback targets (or unbinds everything when
+ * num_targets is 0) and flags the context state that the draw path consumes.
+ * NOTE(review): assumes ctx->so_targets can hold num_targets entries — the
+ * array bound is declared elsewhere; confirm against zink_context.h.
+ */
+static void
+zink_set_stream_output_targets(struct pipe_context *pctx,
+                               unsigned num_targets,
+                               struct pipe_stream_output_target **targets,
+                               const unsigned *offsets)
+{
+   struct zink_context *ctx = zink_context(pctx);
+
+   if (num_targets == 0) {
+      /* full unbind: drop every currently-bound target */
+      for (unsigned i = 0; i < ctx->num_so_targets; i++)
+         pipe_so_target_reference(&ctx->so_targets[i], NULL);
+      ctx->num_so_targets = 0;
+   } else {
+      for (unsigned i = 0; i < num_targets; i++)
+         pipe_so_target_reference(&ctx->so_targets[i], targets[i]);
+      /* release any previously-bound targets beyond the new count */
+      for (unsigned i = num_targets; i < ctx->num_so_targets; i++)
+         pipe_so_target_reference(&ctx->so_targets[i], NULL);
+      ctx->num_so_targets = num_targets;
+
+      /* emit memory barrier on next draw for synchronization */
+      if (offsets[0] == (unsigned)-1)
+         ctx->xfb_barrier = true;
+      /* TODO: possibly avoid rebinding on resume if resuming from same buffers? */
+      ctx->dirty_so_targets = true;
+   }
+}
+
struct pipe_context *
zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
struct zink_screen *screen = zink_screen(pscreen);
struct zink_context *ctx = CALLOC_STRUCT(zink_context);
+ if (!ctx)
+ goto fail;
ctx->base.screen = pscreen;
ctx->base.priv = priv;
ctx->base.resource_copy_region = zink_resource_copy_region;
ctx->base.blit = zink_blit;
+ ctx->base.create_stream_output_target = zink_create_stream_output_target;
+ ctx->base.stream_output_target_destroy = zink_stream_output_target_destroy;
+ ctx->base.set_stream_output_targets = zink_set_stream_output_targets;
ctx->base.flush_resource = zink_flush_resource;
zink_context_surface_init(&ctx->base);
zink_context_resource_init(&ctx->base);
cbai.commandBufferCount = 1;
VkDescriptorPoolSize sizes[] = {
- {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, ZINK_BATCH_DESC_SIZE}
+ {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, ZINK_BATCH_DESC_SIZE},
+ {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, ZINK_BATCH_DESC_SIZE}
};
VkDescriptorPoolCreateInfo dpci = {};
dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ctx->render_pass_cache = _mesa_hash_table_create(NULL,
hash_render_pass_state,
equals_render_pass_state);
- ctx->framebuffer_cache = _mesa_hash_table_create(NULL,
- hash_framebuffer_state,
- equals_framebuffer_state);
+ if (!ctx->program_cache || !ctx->render_pass_cache)
+ goto fail;
- if (!ctx->program_cache || !ctx->render_pass_cache ||
- !ctx->framebuffer_cache)
+ const uint8_t data[] = { 0 };
+ ctx->dummy_buffer = pipe_buffer_create_with_data(&ctx->base,
+ PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_IMMUTABLE, sizeof(data), data);
+ if (!ctx->dummy_buffer)
goto fail;
ctx->dirty_program = true;