Added a few more stubs so that control reaches DestroyDevice().
[mesa.git] / src/gallium/drivers/zink/zink_context.c
/*
 * Copyright 2018 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "zink_context.h"

#include "zink_batch.h"
#include "zink_compiler.h"
#include "zink_fence.h"
#include "zink_framebuffer.h"
#include "zink_helpers.h"
#include "zink_program.h"
#include "zink_pipeline.h"
#include "zink_query.h"
#include "zink_render_pass.h"
#include "zink_resource.h"
#include "zink_screen.h"
#include "zink_state.h"
#include "zink_surface.h"

#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
#include "util/u_debug.h"
#include "util/format/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"

#include "nir.h"

#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

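/* Tear down the context: wait for the hardware queue to go idle so no
 * in-flight batch still references the objects below, then destroy the
 * per-batch descriptor pools and command buffers, the command pool, and the
 * context's helper objects.
 */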
static void
zink_context_destroy(struct pipe_context *pctx)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   if (vkQueueWaitIdle(ctx->queue) != VK_SUCCESS)
      debug_printf("vkQueueWaitIdle failed\n");

   for (unsigned i = 0; i < ARRAY_SIZE(ctx->null_buffers); i++)
      pipe_resource_reference(&ctx->null_buffers[i], NULL);

   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i) {
      vkDestroyDescriptorPool(screen->dev, ctx->batches[i].descpool, NULL);
      vkFreeCommandBuffers(screen->dev, ctx->cmdpool, 1, &ctx->batches[i].cmdbuf);
   }
   vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);

   util_primconvert_destroy(ctx->primconvert);
   u_upload_destroy(pctx->stream_uploader);
   slab_destroy_child(&ctx->transfer_pool);
   util_blitter_destroy(ctx->blitter);
   FREE(ctx);
}

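/* Gallium -> Vulkan enum translation helpers. Wrap modes without an exact
 * Vulkan equivalent fall back to the closest match (noted inline).
 */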
static VkSamplerMipmapMode
sampler_mipmap_mode(enum pipe_tex_mipfilter filter)
{
   switch (filter) {
   case PIPE_TEX_MIPFILTER_NEAREST: return VK_SAMPLER_MIPMAP_MODE_NEAREST;
   case PIPE_TEX_MIPFILTER_LINEAR: return VK_SAMPLER_MIPMAP_MODE_LINEAR;
   case PIPE_TEX_MIPFILTER_NONE:
      unreachable("PIPE_TEX_MIPFILTER_NONE should be dealt with earlier");
   }
   unreachable("unexpected filter");
}

static VkSamplerAddressMode
sampler_address_mode(enum pipe_tex_wrap filter)
{
   switch (filter) {
   case PIPE_TEX_WRAP_REPEAT: return VK_SAMPLER_ADDRESS_MODE_REPEAT;
   case PIPE_TEX_WRAP_CLAMP: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
   case PIPE_TEX_WRAP_MIRROR_REPEAT: return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
   case PIPE_TEX_WRAP_MIRROR_CLAMP: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   }
   unreachable("unexpected wrap");
}

static VkCompareOp
compare_op(enum pipe_compare_func op)
{
   switch (op) {
   case PIPE_FUNC_NEVER: return VK_COMPARE_OP_NEVER;
   case PIPE_FUNC_LESS: return VK_COMPARE_OP_LESS;
   case PIPE_FUNC_EQUAL: return VK_COMPARE_OP_EQUAL;
   case PIPE_FUNC_LEQUAL: return VK_COMPARE_OP_LESS_OR_EQUAL;
   case PIPE_FUNC_GREATER: return VK_COMPARE_OP_GREATER;
   case PIPE_FUNC_NOTEQUAL: return VK_COMPARE_OP_NOT_EQUAL;
   case PIPE_FUNC_GEQUAL: return VK_COMPARE_OP_GREATER_OR_EQUAL;
   case PIPE_FUNC_ALWAYS: return VK_COMPARE_OP_ALWAYS;
   }
   unreachable("unexpected compare");
}

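/* Sampler CSOs: creation returns a heap-allocated VkSampler handle, binding
 * copies the handles into the context, and deletion defers the actual
 * vkDestroySampler to the current batch's zombie list so in-flight work
 * still sees a valid sampler.
 */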
static void *
zink_create_sampler_state(struct pipe_context *pctx,
                          const struct pipe_sampler_state *state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);

   VkSamplerCreateInfo sci = {};
   sci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
   sci.magFilter = zink_filter(state->mag_img_filter);
   sci.minFilter = zink_filter(state->min_img_filter);

   if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) {
      sci.mipmapMode = sampler_mipmap_mode(state->min_mip_filter);
      sci.minLod = state->min_lod;
      sci.maxLod = state->max_lod;
   } else {
      sci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
      sci.minLod = 0;
      sci.maxLod = 0;
   }

   sci.addressModeU = sampler_address_mode(state->wrap_s);
   sci.addressModeV = sampler_address_mode(state->wrap_t);
   sci.addressModeW = sampler_address_mode(state->wrap_r);
   sci.mipLodBias = state->lod_bias;

   if (state->compare_mode == PIPE_TEX_COMPARE_NONE)
      sci.compareOp = VK_COMPARE_OP_NEVER;
   else {
      sci.compareOp = compare_op(state->compare_func);
      sci.compareEnable = VK_TRUE;
   }

   sci.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; // TODO
   sci.unnormalizedCoordinates = !state->normalized_coords;

   if (state->max_anisotropy > 1) {
      sci.maxAnisotropy = state->max_anisotropy;
      sci.anisotropyEnable = VK_TRUE;
   }

   VkSampler *sampler = CALLOC(1, sizeof(VkSampler));
   if (!sampler)
      return NULL;

   if (vkCreateSampler(screen->dev, &sci, NULL, sampler) != VK_SUCCESS) {
      FREE(sampler);
      return NULL;
   }

   return sampler;
}

static void
zink_bind_sampler_states(struct pipe_context *pctx,
                         enum pipe_shader_type shader,
                         unsigned start_slot,
                         unsigned num_samplers,
                         void **samplers)
{
   struct zink_context *ctx = zink_context(pctx);
   for (unsigned i = 0; i < num_samplers; ++i) {
      VkSampler *sampler = samplers[i];
      ctx->sampler_states[shader][start_slot + i] = sampler;
      ctx->samplers[shader][start_slot + i] = sampler ? *sampler : VK_NULL_HANDLE;
   }
   ctx->num_samplers[shader] = start_slot + num_samplers;
}

static void
zink_delete_sampler_state(struct pipe_context *pctx,
                          void *sampler_state)
{
   struct zink_batch *batch = zink_curr_batch(zink_context(pctx));
   util_dynarray_append(&batch->zombie_samplers, VkSampler,
                        *(VkSampler *)sampler_state);
   FREE(sampler_state);
}

static VkImageViewType
image_view_type(enum pipe_texture_target target)
{
   switch (target) {
   case PIPE_TEXTURE_1D: return VK_IMAGE_VIEW_TYPE_1D;
   case PIPE_TEXTURE_1D_ARRAY: return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
   case PIPE_TEXTURE_2D: return VK_IMAGE_VIEW_TYPE_2D;
   case PIPE_TEXTURE_2D_ARRAY: return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
   case PIPE_TEXTURE_CUBE: return VK_IMAGE_VIEW_TYPE_CUBE;
   case PIPE_TEXTURE_CUBE_ARRAY: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
   case PIPE_TEXTURE_3D: return VK_IMAGE_VIEW_TYPE_3D;
   case PIPE_TEXTURE_RECT: return VK_IMAGE_VIEW_TYPE_2D;
   default:
      unreachable("unexpected target");
   }
}

static VkComponentSwizzle
component_mapping(enum pipe_swizzle swizzle)
{
   switch (swizzle) {
   case PIPE_SWIZZLE_X: return VK_COMPONENT_SWIZZLE_R;
   case PIPE_SWIZZLE_Y: return VK_COMPONENT_SWIZZLE_G;
   case PIPE_SWIZZLE_Z: return VK_COMPONENT_SWIZZLE_B;
   case PIPE_SWIZZLE_W: return VK_COMPONENT_SWIZZLE_A;
   case PIPE_SWIZZLE_0: return VK_COMPONENT_SWIZZLE_ZERO;
   case PIPE_SWIZZLE_1: return VK_COMPONENT_SWIZZLE_ONE;
   case PIPE_SWIZZLE_NONE: return VK_COMPONENT_SWIZZLE_IDENTITY; // ???
   default:
      unreachable("unexpected swizzle");
   }
}

static VkImageAspectFlags
sampler_aspect_from_format(enum pipe_format fmt)
{
   if (util_format_is_depth_or_stencil(fmt)) {
      const struct util_format_description *desc = util_format_description(fmt);
      if (util_format_has_depth(desc))
         return VK_IMAGE_ASPECT_DEPTH_BIT;
      assert(util_format_has_stencil(desc));
      return VK_IMAGE_ASPECT_STENCIL_BIT;
   } else
      return VK_IMAGE_ASPECT_COLOR_BIT;
}

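/* Wrap a resource in a VkImageView matching the sampler view's format,
 * swizzle and mip/layer range.
 */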
static struct pipe_sampler_view *
zink_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *pres,
                         const struct pipe_sampler_view *state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_resource *res = zink_resource(pres);
   struct zink_sampler_view *sampler_view = CALLOC_STRUCT(zink_sampler_view);
   if (!sampler_view)
      return NULL;

   sampler_view->base = *state;
   sampler_view->base.texture = NULL;
   pipe_resource_reference(&sampler_view->base.texture, pres);
   sampler_view->base.reference.count = 1;
   sampler_view->base.context = pctx;

   VkImageViewCreateInfo ivci = {};
   ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
   ivci.image = res->image;
   ivci.viewType = image_view_type(state->target);
   ivci.format = zink_get_format(screen, state->format);
   ivci.components.r = component_mapping(state->swizzle_r);
   ivci.components.g = component_mapping(state->swizzle_g);
   ivci.components.b = component_mapping(state->swizzle_b);
   ivci.components.a = component_mapping(state->swizzle_a);

   ivci.subresourceRange.aspectMask = sampler_aspect_from_format(state->format);
   ivci.subresourceRange.baseMipLevel = state->u.tex.first_level;
   ivci.subresourceRange.baseArrayLayer = state->u.tex.first_layer;
   ivci.subresourceRange.levelCount = state->u.tex.last_level - state->u.tex.first_level + 1;
   ivci.subresourceRange.layerCount = state->u.tex.last_layer - state->u.tex.first_layer + 1;

   VkResult err = vkCreateImageView(screen->dev, &ivci, NULL, &sampler_view->image_view);
   if (err != VK_SUCCESS) {
      pipe_resource_reference(&sampler_view->base.texture, NULL);
      FREE(sampler_view);
      return NULL;
   }

   return &sampler_view->base;
}

static void
zink_sampler_view_destroy(struct pipe_context *pctx,
                          struct pipe_sampler_view *pview)
{
   struct zink_sampler_view *view = zink_sampler_view(pview);
   vkDestroyImageView(zink_screen(pctx->screen)->dev, view->image_view, NULL);
   pipe_resource_reference(&pview->texture, NULL);
   FREE(view);
}

static void
zink_set_polygon_stipple(struct pipe_context *pctx,
                         const struct pipe_poly_stipple *ps)
{
}

static void
zink_set_vertex_buffers(struct pipe_context *pctx,
                        unsigned start_slot,
                        unsigned num_buffers,
                        const struct pipe_vertex_buffer *buffers)
{
   struct zink_context *ctx = zink_context(pctx);

   if (buffers) {
      for (unsigned i = 0; i < num_buffers; ++i) {
         const struct pipe_vertex_buffer *vb = buffers + i;
         struct zink_resource *res = zink_resource(vb->buffer.resource);

         ctx->gfx_pipeline_state.bindings[start_slot + i].stride = vb->stride;
         if (res && res->needs_xfb_barrier) {
            /* if we're binding a previously-used xfb buffer, we need cmd buffer
             * synchronization to ensure that we use the right buffer data
             */
            pctx->flush(pctx, NULL, 0);
            res->needs_xfb_barrier = false;
         }
      }
      ctx->gfx_pipeline_state.hash = 0;
   }

   util_set_vertex_buffers_mask(ctx->buffers, &ctx->buffers_enabled_mask,
                                buffers, start_slot, num_buffers);
}

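/* Gallium expresses viewports as a scale/translate transform; convert that
 * into the corner-plus-extent form VkViewport expects, with the depth range
 * taken from the z components.
 */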
static void
zink_set_viewport_states(struct pipe_context *pctx,
                         unsigned start_slot,
                         unsigned num_viewports,
                         const struct pipe_viewport_state *state)
{
   struct zink_context *ctx = zink_context(pctx);

   for (unsigned i = 0; i < num_viewports; ++i) {
      VkViewport viewport = {
         state[i].translate[0] - state[i].scale[0],
         state[i].translate[1] - state[i].scale[1],
         state[i].scale[0] * 2,
         state[i].scale[1] * 2,
         state[i].translate[2] - state[i].scale[2],
         state[i].translate[2] + state[i].scale[2]
      };
      ctx->viewport_states[start_slot + i] = state[i];
      ctx->viewports[start_slot + i] = viewport;
   }
   ctx->num_viewports = start_slot + num_viewports;
}

static void
zink_set_scissor_states(struct pipe_context *pctx,
                        unsigned start_slot, unsigned num_scissors,
                        const struct pipe_scissor_state *states)
{
   struct zink_context *ctx = zink_context(pctx);

   for (unsigned i = 0; i < num_scissors; i++) {
      VkRect2D scissor;

      scissor.offset.x = states[i].minx;
      scissor.offset.y = states[i].miny;
      scissor.extent.width = states[i].maxx - states[i].minx;
      scissor.extent.height = states[i].maxy - states[i].miny;
      ctx->scissor_states[start_slot + i] = states[i];
      ctx->scissors[start_slot + i] = scissor;
   }
}

static void
zink_set_constant_buffer(struct pipe_context *pctx,
                         enum pipe_shader_type shader, uint index,
                         const struct pipe_constant_buffer *cb)
{
   struct zink_context *ctx = zink_context(pctx);

   if (cb) {
      struct pipe_resource *buffer = cb->buffer;
      unsigned offset = cb->buffer_offset;
      if (cb->user_buffer) {
         struct zink_screen *screen = zink_screen(pctx->screen);
         u_upload_data(ctx->base.const_uploader, 0, cb->buffer_size,
                       screen->props.limits.minUniformBufferOffsetAlignment,
                       cb->user_buffer, &offset, &buffer);
      }

      pipe_resource_reference(&ctx->ubos[shader][index].buffer, buffer);
      ctx->ubos[shader][index].buffer_offset = offset;
      ctx->ubos[shader][index].buffer_size = cb->buffer_size;
      ctx->ubos[shader][index].user_buffer = NULL;

      if (cb->user_buffer)
         pipe_resource_reference(&buffer, NULL);
   } else {
      pipe_resource_reference(&ctx->ubos[shader][index].buffer, NULL);
      ctx->ubos[shader][index].buffer_offset = 0;
      ctx->ubos[shader][index].buffer_size = 0;
      ctx->ubos[shader][index].user_buffer = NULL;
   }
}

static void
zink_set_sampler_views(struct pipe_context *pctx,
                       enum pipe_shader_type shader_type,
                       unsigned start_slot,
                       unsigned num_views,
                       struct pipe_sampler_view **views)
{
   struct zink_context *ctx = zink_context(pctx);
   assert(views);
   for (unsigned i = 0; i < num_views; ++i) {
      pipe_sampler_view_reference(
         &ctx->image_views[shader_type][start_slot + i],
         views[i]);
   }
   ctx->num_image_views[shader_type] = start_slot + num_views;
}

static void
zink_set_stencil_ref(struct pipe_context *pctx,
                     const struct pipe_stencil_ref *ref)
{
   struct zink_context *ctx = zink_context(pctx);
   ctx->stencil_ref = *ref;
}

static void
zink_set_clip_state(struct pipe_context *pctx,
                    const struct pipe_clip_state *pcs)
{
}

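/* Look up (or create and cache) a render pass matching the current
 * framebuffer state; color slots without a surface bound get a dummy
 * R8_UINT attachment description.
 */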
static struct zink_render_pass *
get_render_pass(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   const struct pipe_framebuffer_state *fb = &ctx->fb_state;
   struct zink_render_pass_state state = { 0 };

   for (int i = 0; i < fb->nr_cbufs; i++) {
      struct pipe_surface *surf = fb->cbufs[i];
      if (surf) {
         state.rts[i].format = zink_get_format(screen, surf->format);
         state.rts[i].samples = surf->nr_samples > 0 ? surf->nr_samples :
                                                       VK_SAMPLE_COUNT_1_BIT;
      } else {
         state.rts[i].format = VK_FORMAT_R8_UINT;
         state.rts[i].samples = MAX2(fb->samples, 1);
      }
   }
   state.num_cbufs = fb->nr_cbufs;

   if (fb->zsbuf) {
      struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
      state.rts[fb->nr_cbufs].format = zsbuf->format;
      state.rts[fb->nr_cbufs].samples = zsbuf->base.nr_samples > 0 ? zsbuf->base.nr_samples : VK_SAMPLE_COUNT_1_BIT;
   }
   state.have_zsbuf = fb->zsbuf != NULL;

   struct hash_entry *entry = _mesa_hash_table_search(ctx->render_pass_cache,
                                                      &state);
   if (!entry) {
      struct zink_render_pass *rp;
      rp = zink_create_render_pass(screen, &state);
      entry = _mesa_hash_table_insert(ctx->render_pass_cache, &state, rp);
      if (!entry)
         return NULL;
   }

   return entry->data;
}

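/* Build a zink_framebuffer for the current framebuffer state, pairing the
 * render pass from get_render_pass() with the bound color and depth/stencil
 * surfaces.
 */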
static struct zink_framebuffer *
create_framebuffer(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);

   struct zink_framebuffer_state state = {};
   state.rp = get_render_pass(ctx);
   for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
      struct pipe_surface *psurf = ctx->fb_state.cbufs[i];
      state.attachments[i] = zink_surface(psurf);
      state.has_null_attachments |= !state.attachments[i];
   }

   state.num_attachments = ctx->fb_state.nr_cbufs;
   if (ctx->fb_state.zsbuf) {
      struct pipe_surface *psurf = ctx->fb_state.zsbuf;
      state.attachments[state.num_attachments++] = zink_surface(psurf);
   }

   state.width = ctx->fb_state.width;
   state.height = ctx->fb_state.height;
   state.layers = MAX2(ctx->fb_state.layers, 1);
   state.samples = ctx->fb_state.samples;

   return zink_create_framebuffer(ctx, screen, &state);
}

static void
framebuffer_state_buffer_barriers_setup(struct zink_context *ctx,
                                        const struct pipe_framebuffer_state *state,
                                        struct zink_batch *batch)
{
   for (int i = 0; i < state->nr_cbufs; i++) {
      struct pipe_surface *surf = state->cbufs[i];
      if (!surf)
         surf = ctx->framebuffer->null_surface;
      struct zink_resource *res = zink_resource(surf->texture);
      if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
   }

   if (state->zsbuf) {
      struct zink_resource *res = zink_resource(state->zsbuf->texture);
      if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
   }
}

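/* Begin the render pass on the current batch: transition every attachment
 * to its attachment layout, make the batch hold references to the render
 * pass and framebuffer, and record vkCmdBeginRenderPass.
 */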
void
zink_begin_render_pass(struct zink_context *ctx, struct zink_batch *batch)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   assert(batch == zink_curr_batch(ctx));
   assert(ctx->gfx_pipeline_state.render_pass);

   struct pipe_framebuffer_state *fb_state = &ctx->fb_state;

   VkRenderPassBeginInfo rpbi = {};
   rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
   rpbi.renderPass = ctx->gfx_pipeline_state.render_pass->render_pass;
   rpbi.renderArea.offset.x = 0;
   rpbi.renderArea.offset.y = 0;
   rpbi.renderArea.extent.width = fb_state->width;
   rpbi.renderArea.extent.height = fb_state->height;
   rpbi.clearValueCount = 0;
   rpbi.pClearValues = NULL;
   rpbi.framebuffer = ctx->framebuffer->fb;

   assert(ctx->gfx_pipeline_state.render_pass && ctx->framebuffer);
   assert(!batch->rp || batch->rp == ctx->gfx_pipeline_state.render_pass);
   assert(!batch->fb || batch->fb == ctx->framebuffer);

   framebuffer_state_buffer_barriers_setup(ctx, fb_state, batch);

   zink_render_pass_reference(screen, &batch->rp, ctx->gfx_pipeline_state.render_pass);
   zink_framebuffer_reference(screen, &batch->fb, ctx->framebuffer);

   vkCmdBeginRenderPass(batch->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
}

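/* End any active render pass, submit the current batch, and start recording
 * into the next batch in the ring.
 */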
static void
flush_batch(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (batch->rp)
      vkCmdEndRenderPass(batch->cmdbuf);

   zink_end_batch(ctx, batch);

   ctx->curr_batch++;
   if (ctx->curr_batch == ARRAY_SIZE(ctx->batches))
      ctx->curr_batch = 0;

   zink_start_batch(ctx, zink_curr_batch(ctx));
}

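/* zink_batch_rp() returns the current batch with a render pass active;
 * zink_batch_no_rp() returns it with no render pass active (flushing if one
 * was), which is what transfer-style commands need.
 */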
struct zink_batch *
zink_batch_rp(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (!batch->rp) {
      zink_begin_render_pass(ctx, batch);
      assert(batch->rp);
   }
   return batch;
}

struct zink_batch *
zink_batch_no_rp(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (batch->rp) {
      /* flush batch and get a new one */
      flush_batch(ctx);
      batch = zink_curr_batch(ctx);
      assert(!batch->rp);
   }
   return batch;
}

static void
zink_set_framebuffer_state(struct pipe_context *pctx,
                           const struct pipe_framebuffer_state *state)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   util_copy_framebuffer_state(&ctx->fb_state, state);

   struct zink_framebuffer *fb = ctx->framebuffer;
   /* explicitly unref previous fb to ensure it gets destroyed */
   if (fb)
      zink_framebuffer_reference(screen, &fb, NULL);
   fb = create_framebuffer(ctx);
   zink_framebuffer_reference(screen, &ctx->framebuffer, fb);
   zink_render_pass_reference(screen, &ctx->gfx_pipeline_state.render_pass, fb->rp);

   ctx->gfx_pipeline_state.rast_samples = MAX2(state->samples, 1);
   ctx->gfx_pipeline_state.num_attachments = state->nr_cbufs;
   ctx->gfx_pipeline_state.hash = 0;

   struct zink_batch *batch = zink_batch_no_rp(ctx);

   framebuffer_state_buffer_barriers_setup(ctx, state, batch);
}

static void
zink_set_blend_color(struct pipe_context *pctx,
                     const struct pipe_blend_color *color)
{
   struct zink_context *ctx = zink_context(pctx);
   memcpy(ctx->blend_constants, color->color, sizeof(float) * 4);
}

static void
zink_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
   struct zink_context *ctx = zink_context(pctx);
   ctx->gfx_pipeline_state.sample_mask = sample_mask;
   ctx->gfx_pipeline_state.hash = 0;
}

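/* Map an image layout to the access masks and pipeline stages a transition
 * to/from that layout must synchronize against.
 */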
static VkAccessFlags
access_src_flags(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_UNDEFINED:
   case VK_IMAGE_LAYOUT_GENERAL:
      return 0;

   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;

   case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
      return VK_ACCESS_SHADER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_ACCESS_TRANSFER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_ACCESS_TRANSFER_WRITE_BIT;

   case VK_IMAGE_LAYOUT_PREINITIALIZED:
      return VK_ACCESS_HOST_WRITE_BIT;

   default:
      unreachable("unexpected layout");
   }
}

static VkAccessFlags
access_dst_flags(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_UNDEFINED:
   case VK_IMAGE_LAYOUT_GENERAL:
      return 0;

   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_ACCESS_TRANSFER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_ACCESS_TRANSFER_WRITE_BIT;

   default:
      unreachable("unexpected layout");
   }
}

static VkPipelineStageFlags
pipeline_dst_stage(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;
   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;

   default:
      return VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
   }
}

static VkPipelineStageFlags
pipeline_src_stage(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;
   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;

   default:
      return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
   }
}

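/* Record a barrier transitioning all mip levels and array layers of the
 * given aspect to new_layout, and remember the new layout on the resource.
 */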
void
zink_resource_barrier(VkCommandBuffer cmdbuf, struct zink_resource *res,
                      VkImageAspectFlags aspect, VkImageLayout new_layout)
{
   VkImageSubresourceRange isr = {
      aspect,
      0, VK_REMAINING_MIP_LEVELS,
      0, VK_REMAINING_ARRAY_LAYERS
   };

   VkImageMemoryBarrier imb = {
      VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
      NULL,
      access_src_flags(res->layout),
      access_dst_flags(new_layout),
      res->layout,
      new_layout,
      VK_QUEUE_FAMILY_IGNORED,
      VK_QUEUE_FAMILY_IGNORED,
      res->image,
      isr
   };
   vkCmdPipelineBarrier(
      cmdbuf,
      pipeline_src_stage(res->layout),
      pipeline_dst_stage(new_layout),
      0,
      0, NULL,
      0, NULL,
      1, &imb
   );

   res->layout = new_layout;
}

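/* Clear the requested buffers via vkCmdClearAttachments inside the current
 * render pass; see the FIXME below about recording full-screen clears more
 * efficiently.
 */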
static void
zink_clear(struct pipe_context *pctx,
           unsigned buffers,
           const struct pipe_scissor_state *scissor_state,
           const union pipe_color_union *pcolor,
           double depth, unsigned stencil)
{
   struct zink_context *ctx = zink_context(pctx);
   struct pipe_framebuffer_state *fb = &ctx->fb_state;

   /* FIXME: this is very inefficient; if no renderpass has been started yet,
    * we should record the clear if it's full-screen, and apply it as we
    * start the render-pass. Otherwise we can do a partial out-of-renderpass
    * clear.
    */
   struct zink_batch *batch = zink_batch_rp(ctx);

   VkClearAttachment attachments[1 + PIPE_MAX_COLOR_BUFS];
   int num_attachments = 0;

   if (buffers & PIPE_CLEAR_COLOR) {
      VkClearColorValue color;
      color.float32[0] = pcolor->f[0];
      color.float32[1] = pcolor->f[1];
      color.float32[2] = pcolor->f[2];
      color.float32[3] = pcolor->f[3];

      for (unsigned i = 0; i < fb->nr_cbufs; i++) {
         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
            continue;

         attachments[num_attachments].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
         attachments[num_attachments].colorAttachment = i;
         attachments[num_attachments].clearValue.color = color;
         ++num_attachments;
      }
   }

   if (buffers & PIPE_CLEAR_DEPTHSTENCIL && fb->zsbuf) {
      VkImageAspectFlags aspect = 0;
      if (buffers & PIPE_CLEAR_DEPTH)
         aspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
      if (buffers & PIPE_CLEAR_STENCIL)
         aspect |= VK_IMAGE_ASPECT_STENCIL_BIT;

      attachments[num_attachments].aspectMask = aspect;
      attachments[num_attachments].clearValue.depthStencil.depth = depth;
      attachments[num_attachments].clearValue.depthStencil.stencil = stencil;
      ++num_attachments;
   }

   VkClearRect cr;
   cr.rect.offset.x = 0;
   cr.rect.offset.y = 0;
   cr.rect.extent.width = fb->width;
   cr.rect.extent.height = fb->height;
   cr.baseArrayLayer = 0;
   cr.layerCount = util_framebuffer_get_num_layers(fb);
   vkCmdClearAttachments(batch->cmdbuf, num_attachments, attachments, 1, &cr);
}

VkShaderStageFlagBits
zink_shader_stage(enum pipe_shader_type type)
{
   VkShaderStageFlagBits stages[] = {
      [PIPE_SHADER_VERTEX] = VK_SHADER_STAGE_VERTEX_BIT,
      [PIPE_SHADER_FRAGMENT] = VK_SHADER_STAGE_FRAGMENT_BIT,
      [PIPE_SHADER_GEOMETRY] = VK_SHADER_STAGE_GEOMETRY_BIT,
      [PIPE_SHADER_TESS_CTRL] = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
      [PIPE_SHADER_TESS_EVAL] = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
      [PIPE_SHADER_COMPUTE] = VK_SHADER_STAGE_COMPUTE_BIT,
   };
   return stages[type];
}

static uint32_t
hash_gfx_program(const void *key)
{
   return _mesa_hash_data(key, sizeof(struct zink_shader *) * (ZINK_SHADER_COUNT));
}

static bool
equals_gfx_program(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_shader *) * (ZINK_SHADER_COUNT)) == 0;
}

static uint32_t
hash_render_pass_state(const void *key)
{
   return _mesa_hash_data(key, sizeof(struct zink_render_pass_state));
}

static bool
equals_render_pass_state(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_render_pass_state)) == 0;
}

static void
zink_flush(struct pipe_context *pctx,
           struct pipe_fence_handle **pfence,
           enum pipe_flush_flags flags)
{
   struct zink_context *ctx = zink_context(pctx);

   struct zink_batch *batch = zink_curr_batch(ctx);
   flush_batch(ctx);

   if (zink_screen(pctx->screen)->have_EXT_transform_feedback && ctx->num_so_targets)
      ctx->dirty_so_targets = true;

   if (pfence)
      zink_fence_reference(zink_screen(pctx->screen),
                           (struct zink_fence **)pfence,
                           batch->fence);

   /* HACK:
    * For some strange reason, we need to finish before presenting, or else
    * we start rendering on top of the back-buffer for the next frame. This
    * seems like a bug in the DRI-driver to me, because we really should
    * be properly protected by fences here, and the back-buffer should
    * either be swapped with the front-buffer, or blitted from. But for
    * some strange reason, neither of these things happen.
    */
   if (flags & PIPE_FLUSH_END_OF_FRAME)
      pctx->screen->fence_finish(pctx->screen, pctx,
                                 (struct pipe_fence_handle *)batch->fence,
                                 PIPE_TIMEOUT_INFINITE);
}

static void
zink_flush_resource(struct pipe_context *pipe,
                    struct pipe_resource *resource)
{
}

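/* Record a region copy between two resources. Image-to-image and
 * buffer-to-buffer copies are handled (outside a render pass, with images
 * moved to transfer layouts first); buffer<->image copies are still a TODO.
 */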
static void
zink_resource_copy_region(struct pipe_context *pctx,
                          struct pipe_resource *pdst,
                          unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz,
                          struct pipe_resource *psrc,
                          unsigned src_level, const struct pipe_box *src_box)
{
   struct zink_resource *dst = zink_resource(pdst);
   struct zink_resource *src = zink_resource(psrc);
   struct zink_context *ctx = zink_context(pctx);
   if (dst->base.target != PIPE_BUFFER && src->base.target != PIPE_BUFFER) {
      VkImageCopy region = {};

      region.srcSubresource.aspectMask = src->aspect;
      region.srcSubresource.mipLevel = src_level;
      region.srcSubresource.layerCount = 1;
      if (src->base.array_size > 1) {
         region.srcSubresource.baseArrayLayer = src_box->z;
         region.srcSubresource.layerCount = src_box->depth;
         region.extent.depth = 1;
      } else {
         region.srcOffset.z = src_box->z;
         region.srcSubresource.layerCount = 1;
         region.extent.depth = src_box->depth;
      }

      region.srcOffset.x = src_box->x;
      region.srcOffset.y = src_box->y;

      region.dstSubresource.aspectMask = dst->aspect;
      region.dstSubresource.mipLevel = dst_level;
      if (dst->base.array_size > 1) {
         region.dstSubresource.baseArrayLayer = dstz;
         region.dstSubresource.layerCount = src_box->depth;
      } else {
         region.dstOffset.z = dstz;
         region.dstSubresource.layerCount = 1;
      }

      region.dstOffset.x = dstx;
      region.dstOffset.y = dsty;
      region.extent.width = src_box->width;
      region.extent.height = src_box->height;

      struct zink_batch *batch = zink_batch_no_rp(ctx);
      zink_batch_reference_resoure(batch, src);
      zink_batch_reference_resoure(batch, dst);

      if (src->layout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
         zink_resource_barrier(batch->cmdbuf, src, src->aspect,
                               VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
      }

      if (dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
         zink_resource_barrier(batch->cmdbuf, dst, dst->aspect,
                               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
      }

      vkCmdCopyImage(batch->cmdbuf, src->image, src->layout,
                     dst->image, dst->layout,
                     1, &region);
   } else if (dst->base.target == PIPE_BUFFER &&
              src->base.target == PIPE_BUFFER) {
      VkBufferCopy region;
      region.srcOffset = src_box->x;
      region.dstOffset = dstx;
      region.size = src_box->width;

      struct zink_batch *batch = zink_batch_no_rp(ctx);
      zink_batch_reference_resoure(batch, src);
      zink_batch_reference_resoure(batch, dst);

      vkCmdCopyBuffer(batch->cmdbuf, src->buffer, dst->buffer, 1, &region);
   } else
      debug_printf("zink: TODO resource copy\n");
}

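/* Transform-feedback targets: each wraps the destination buffer plus a
 * 4-byte counter buffer, which VK_EXT_transform_feedback uses to resume
 * writing at the right offset.
 */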
static struct pipe_stream_output_target *
zink_create_stream_output_target(struct pipe_context *pctx,
                                 struct pipe_resource *pres,
                                 unsigned buffer_offset,
                                 unsigned buffer_size)
{
   struct zink_so_target *t;
   t = CALLOC_STRUCT(zink_so_target);
   if (!t)
      return NULL;

   t->base.reference.count = 1;
   t->base.context = pctx;
   pipe_resource_reference(&t->base.buffer, pres);
   t->base.buffer_offset = buffer_offset;
   t->base.buffer_size = buffer_size;

   /* using PIPE_BIND_CUSTOM here lets us create a custom pipe buffer resource,
    * which allows us to differentiate and use VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT
    * as we must for this case
    */
   t->counter_buffer = pipe_buffer_create(pctx->screen, PIPE_BIND_STREAM_OUTPUT | PIPE_BIND_CUSTOM, PIPE_USAGE_DEFAULT, 4);
   if (!t->counter_buffer) {
      pipe_resource_reference(&t->base.buffer, NULL);
      FREE(t);
      return NULL;
   }

   return &t->base;
}

static void
zink_stream_output_target_destroy(struct pipe_context *pctx,
                                  struct pipe_stream_output_target *psot)
{
   struct zink_so_target *t = (struct zink_so_target *)psot;
   pipe_resource_reference(&t->counter_buffer, NULL);
   pipe_resource_reference(&t->base.buffer, NULL);
   FREE(t);
}

static void
zink_set_stream_output_targets(struct pipe_context *pctx,
                               unsigned num_targets,
                               struct pipe_stream_output_target **targets,
                               const unsigned *offsets)
{
   struct zink_context *ctx = zink_context(pctx);

   if (num_targets == 0) {
      for (unsigned i = 0; i < ctx->num_so_targets; i++)
         pipe_so_target_reference(&ctx->so_targets[i], NULL);
      ctx->num_so_targets = 0;
   } else {
      for (unsigned i = 0; i < num_targets; i++)
         pipe_so_target_reference(&ctx->so_targets[i], targets[i]);
      for (unsigned i = num_targets; i < ctx->num_so_targets; i++)
         pipe_so_target_reference(&ctx->so_targets[i], NULL);
      ctx->num_so_targets = num_targets;

      /* emit memory barrier on next draw for synchronization */
      if (offsets[0] == (unsigned)-1)
         ctx->xfb_barrier = true;
      /* TODO: possibly avoid rebinding on resume if resuming from same buffers? */
      ctx->dirty_so_targets = true;
   }
}

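/* Create a context: wire up the state and draw entrypoints, create the
 * command pool plus a command buffer and descriptor pool per batch, set up
 * the program and render-pass caches, and start the first batch.
 */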
struct pipe_context *
zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
   struct zink_screen *screen = zink_screen(pscreen);
   struct zink_context *ctx = CALLOC_STRUCT(zink_context);
   if (!ctx)
      goto fail;

   ctx->gfx_pipeline_state.hash = 0;

   ctx->base.screen = pscreen;
   ctx->base.priv = priv;

   ctx->base.destroy = zink_context_destroy;

   zink_context_state_init(&ctx->base);

   ctx->base.create_sampler_state = zink_create_sampler_state;
   ctx->base.bind_sampler_states = zink_bind_sampler_states;
   ctx->base.delete_sampler_state = zink_delete_sampler_state;

   ctx->base.create_sampler_view = zink_create_sampler_view;
   ctx->base.set_sampler_views = zink_set_sampler_views;
   ctx->base.sampler_view_destroy = zink_sampler_view_destroy;

   zink_program_init(ctx);

   ctx->base.set_polygon_stipple = zink_set_polygon_stipple;
   ctx->base.set_vertex_buffers = zink_set_vertex_buffers;
   ctx->base.set_viewport_states = zink_set_viewport_states;
   ctx->base.set_scissor_states = zink_set_scissor_states;
   ctx->base.set_constant_buffer = zink_set_constant_buffer;
   ctx->base.set_framebuffer_state = zink_set_framebuffer_state;
   ctx->base.set_stencil_ref = zink_set_stencil_ref;
   ctx->base.set_clip_state = zink_set_clip_state;
   ctx->base.set_blend_color = zink_set_blend_color;

   ctx->base.set_sample_mask = zink_set_sample_mask;

   ctx->base.clear = zink_clear;
   ctx->base.draw_vbo = zink_draw_vbo;
   ctx->base.flush = zink_flush;

   ctx->base.resource_copy_region = zink_resource_copy_region;
   ctx->base.blit = zink_blit;
   ctx->base.create_stream_output_target = zink_create_stream_output_target;
   ctx->base.stream_output_target_destroy = zink_stream_output_target_destroy;

   ctx->base.set_stream_output_targets = zink_set_stream_output_targets;
   ctx->base.flush_resource = zink_flush_resource;
   zink_context_surface_init(&ctx->base);
   zink_context_resource_init(&ctx->base);
   zink_context_query_init(&ctx->base);

   slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);

   ctx->base.stream_uploader = u_upload_create_default(&ctx->base);
   ctx->base.const_uploader = ctx->base.stream_uploader;

   int prim_hwsupport = 1 << PIPE_PRIM_POINTS |
                        1 << PIPE_PRIM_LINES |
                        1 << PIPE_PRIM_LINE_STRIP |
                        1 << PIPE_PRIM_TRIANGLES |
                        1 << PIPE_PRIM_TRIANGLE_STRIP |
                        1 << PIPE_PRIM_TRIANGLE_FAN;

   ctx->primconvert = util_primconvert_create(&ctx->base, prim_hwsupport);
   if (!ctx->primconvert)
      goto fail;

   ctx->blitter = util_blitter_create(&ctx->base);
   if (!ctx->blitter)
      goto fail;

   VkCommandPoolCreateInfo cpci = {};
   cpci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
   cpci.queueFamilyIndex = screen->gfx_queue;
   cpci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
   if (vkCreateCommandPool(screen->dev, &cpci, NULL, &ctx->cmdpool) != VK_SUCCESS)
      goto fail;

   VkCommandBufferAllocateInfo cbai = {};
   cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
   cbai.commandPool = ctx->cmdpool;
   cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
   cbai.commandBufferCount = 1;

   VkDescriptorPoolSize sizes[] = {
      {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, ZINK_BATCH_DESC_SIZE},
      {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, ZINK_BATCH_DESC_SIZE}
   };
   VkDescriptorPoolCreateInfo dpci = {};
   dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
   dpci.pPoolSizes = sizes;
   dpci.poolSizeCount = ARRAY_SIZE(sizes);
   dpci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
   dpci.maxSets = ZINK_BATCH_DESC_SIZE;

   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i) {
      if (vkAllocateCommandBuffers(screen->dev, &cbai, &ctx->batches[i].cmdbuf) != VK_SUCCESS)
         goto fail;

      ctx->batches[i].resources = _mesa_set_create(NULL, _mesa_hash_pointer,
                                                   _mesa_key_pointer_equal);
      ctx->batches[i].sampler_views = _mesa_set_create(NULL,
                                                       _mesa_hash_pointer,
                                                       _mesa_key_pointer_equal);
      ctx->batches[i].programs = _mesa_set_create(NULL,
                                                  _mesa_hash_pointer,
                                                  _mesa_key_pointer_equal);

      if (!ctx->batches[i].resources || !ctx->batches[i].sampler_views ||
          !ctx->batches[i].programs)
         goto fail;

      util_dynarray_init(&ctx->batches[i].zombie_samplers, NULL);

      if (vkCreateDescriptorPool(screen->dev, &dpci, 0,
                                 &ctx->batches[i].descpool) != VK_SUCCESS)
         goto fail;
   }

   vkGetDeviceQueue(screen->dev, screen->gfx_queue, 0, &ctx->queue);

   ctx->program_cache = _mesa_hash_table_create(NULL,
                                                hash_gfx_program,
                                                equals_gfx_program);
   ctx->render_pass_cache = _mesa_hash_table_create(NULL,
                                                    hash_render_pass_state,
                                                    equals_render_pass_state);
   if (!ctx->program_cache || !ctx->render_pass_cache)
      goto fail;

   const uint8_t data[] = { 0 };
   ctx->dummy_buffer = pipe_buffer_create_with_data(&ctx->base,
      PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_IMMUTABLE, sizeof(data), data);
   if (!ctx->dummy_buffer)
      goto fail;

   /* start the first batch */
   zink_start_batch(ctx, zink_curr_batch(ctx));

   return &ctx->base;

fail:
   if (ctx) {
      vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);
      FREE(ctx);
   }
   return NULL;
}