zink: use u_blitter when format-reinterpreting
src/gallium/drivers/zink/zink_context.c
/*
 * Copyright 2018 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "zink_context.h"

#include "zink_batch.h"
#include "zink_compiler.h"
#include "zink_fence.h"
#include "zink_framebuffer.h"
#include "zink_pipeline.h"
#include "zink_program.h"
#include "zink_render_pass.h"
#include "zink_resource.h"
#include "zink_screen.h"
#include "zink_state.h"
#include "zink_surface.h"

#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
#include "util/u_debug.h"
#include "util/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"

#include "nir.h"

#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_upload_mgr.h"

static void
zink_context_destroy(struct pipe_context *pctx)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   if (vkQueueWaitIdle(ctx->queue) != VK_SUCCESS)
      debug_printf("vkQueueWaitIdle failed\n");

   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i)
      vkFreeCommandBuffers(screen->dev, ctx->cmdpool, 1, &ctx->batches[i].cmdbuf);
   vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);

   util_primconvert_destroy(ctx->primconvert);
   u_upload_destroy(pctx->stream_uploader);
   slab_destroy_child(&ctx->transfer_pool);
   util_blitter_destroy(ctx->blitter);
   FREE(ctx);
}

static VkFilter
filter(enum pipe_tex_filter filter)
{
   switch (filter) {
   case PIPE_TEX_FILTER_NEAREST: return VK_FILTER_NEAREST;
   case PIPE_TEX_FILTER_LINEAR: return VK_FILTER_LINEAR;
   }
   unreachable("unexpected filter");
}

static VkSamplerMipmapMode
sampler_mipmap_mode(enum pipe_tex_mipfilter filter)
{
   switch (filter) {
   case PIPE_TEX_MIPFILTER_NEAREST: return VK_SAMPLER_MIPMAP_MODE_NEAREST;
   case PIPE_TEX_MIPFILTER_LINEAR: return VK_SAMPLER_MIPMAP_MODE_LINEAR;
   case PIPE_TEX_MIPFILTER_NONE:
      unreachable("PIPE_TEX_MIPFILTER_NONE should be dealt with earlier");
   }
   unreachable("unexpected filter");
}

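/* Note: Gallium's PIPE_TEX_WRAP_CLAMP (GL_CLAMP) blends edge and border
 * texels, which has no exact Vulkan equivalent; the closest available modes
 * are substituted below, hence the "not technically correct" remarks.
 */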
static VkSamplerAddressMode
sampler_address_mode(enum pipe_tex_wrap filter)
{
   switch (filter) {
   case PIPE_TEX_WRAP_REPEAT: return VK_SAMPLER_ADDRESS_MODE_REPEAT;
   case PIPE_TEX_WRAP_CLAMP: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
   case PIPE_TEX_WRAP_MIRROR_REPEAT: return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
   case PIPE_TEX_WRAP_MIRROR_CLAMP: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   }
   unreachable("unexpected wrap");
}

static VkCompareOp
compare_op(enum pipe_compare_func op)
{
   switch (op) {
   case PIPE_FUNC_NEVER: return VK_COMPARE_OP_NEVER;
   case PIPE_FUNC_LESS: return VK_COMPARE_OP_LESS;
   case PIPE_FUNC_EQUAL: return VK_COMPARE_OP_EQUAL;
   case PIPE_FUNC_LEQUAL: return VK_COMPARE_OP_LESS_OR_EQUAL;
   case PIPE_FUNC_GREATER: return VK_COMPARE_OP_GREATER;
   case PIPE_FUNC_NOTEQUAL: return VK_COMPARE_OP_NOT_EQUAL;
   case PIPE_FUNC_GEQUAL: return VK_COMPARE_OP_GREATER_OR_EQUAL;
   case PIPE_FUNC_ALWAYS: return VK_COMPARE_OP_ALWAYS;
   }
   unreachable("unexpected compare");
}

static void *
zink_create_sampler_state(struct pipe_context *pctx,
                          const struct pipe_sampler_state *state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);

   VkSamplerCreateInfo sci = {};
   sci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
   sci.magFilter = filter(state->mag_img_filter);
   sci.minFilter = filter(state->min_img_filter);

   if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) {
      sci.mipmapMode = sampler_mipmap_mode(state->min_mip_filter);
      sci.minLod = state->min_lod;
      sci.maxLod = state->max_lod;
   } else {
      sci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
      sci.minLod = 0;
      sci.maxLod = 0;
   }

   sci.addressModeU = sampler_address_mode(state->wrap_s);
   sci.addressModeV = sampler_address_mode(state->wrap_t);
   sci.addressModeW = sampler_address_mode(state->wrap_r);
   sci.mipLodBias = state->lod_bias;

   if (state->compare_mode == PIPE_TEX_COMPARE_NONE)
      sci.compareOp = VK_COMPARE_OP_NEVER;
   else
      sci.compareOp = compare_op(state->compare_func);

   sci.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; // TODO
   sci.unnormalizedCoordinates = !state->normalized_coords;

   if (state->max_anisotropy > 1) {
      sci.maxAnisotropy = state->max_anisotropy;
      sci.anisotropyEnable = VK_TRUE;
   }

   VkSampler *sampler = CALLOC(1, sizeof(VkSampler));
   if (!sampler)
      return NULL;

   if (vkCreateSampler(screen->dev, &sci, NULL, sampler) != VK_SUCCESS) {
      FREE(sampler);
      return NULL;
   }

   return sampler;
}

static void
zink_bind_sampler_states(struct pipe_context *pctx,
                         enum pipe_shader_type shader,
                         unsigned start_slot,
                         unsigned num_samplers,
                         void **samplers)
{
   struct zink_context *ctx = zink_context(pctx);
   for (unsigned i = 0; i < num_samplers; ++i) {
      VkSampler *sampler = samplers[i];
      ctx->samplers[shader][start_slot + i] = sampler ? *sampler : VK_NULL_HANDLE;
   }
   ctx->num_samplers[shader] = start_slot + num_samplers;
}

static void
zink_delete_sampler_state(struct pipe_context *pctx,
                          void *sampler_state)
{
   struct zink_batch *batch = zink_curr_batch(zink_context(pctx));
   util_dynarray_append(&batch->zombie_samplers, VkSampler,
                        *(VkSampler *)sampler_state);
   FREE(sampler_state);
}

static VkImageViewType
image_view_type(enum pipe_texture_target target)
{
   switch (target) {
   case PIPE_TEXTURE_1D: return VK_IMAGE_VIEW_TYPE_1D;
   case PIPE_TEXTURE_1D_ARRAY: return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
   case PIPE_TEXTURE_2D: return VK_IMAGE_VIEW_TYPE_2D;
   case PIPE_TEXTURE_2D_ARRAY: return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
   case PIPE_TEXTURE_CUBE: return VK_IMAGE_VIEW_TYPE_CUBE;
   case PIPE_TEXTURE_CUBE_ARRAY: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
   case PIPE_TEXTURE_3D: return VK_IMAGE_VIEW_TYPE_3D;
   case PIPE_TEXTURE_RECT: return VK_IMAGE_VIEW_TYPE_2D; /* not sure */
   default:
      unreachable("unexpected target");
   }
}

static VkComponentSwizzle
component_mapping(enum pipe_swizzle swizzle)
{
   switch (swizzle) {
   case PIPE_SWIZZLE_X: return VK_COMPONENT_SWIZZLE_R;
   case PIPE_SWIZZLE_Y: return VK_COMPONENT_SWIZZLE_G;
   case PIPE_SWIZZLE_Z: return VK_COMPONENT_SWIZZLE_B;
   case PIPE_SWIZZLE_W: return VK_COMPONENT_SWIZZLE_A;
   case PIPE_SWIZZLE_0: return VK_COMPONENT_SWIZZLE_ZERO;
   case PIPE_SWIZZLE_1: return VK_COMPONENT_SWIZZLE_ONE;
   case PIPE_SWIZZLE_NONE: return VK_COMPONENT_SWIZZLE_IDENTITY; // ???
   default:
      unreachable("unexpected swizzle");
   }
}

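/* Vulkan requires views of depth/stencil images to select exactly one
 * aspect for sampling, so prefer depth when the format has it and fall
 * back to stencil otherwise.
 */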
static VkImageAspectFlags
sampler_aspect_from_format(enum pipe_format fmt)
{
   if (util_format_is_depth_or_stencil(fmt)) {
      const struct util_format_description *desc = util_format_description(fmt);
      if (util_format_has_depth(desc))
         return VK_IMAGE_ASPECT_DEPTH_BIT;
      assert(util_format_has_stencil(desc));
      return VK_IMAGE_ASPECT_STENCIL_BIT;
   } else
      return VK_IMAGE_ASPECT_COLOR_BIT;
}

static struct pipe_sampler_view *
zink_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *pres,
                         const struct pipe_sampler_view *state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_resource *res = zink_resource(pres);
   struct zink_sampler_view *sampler_view = CALLOC_STRUCT(zink_sampler_view);
   if (!sampler_view)
      return NULL;

   sampler_view->base = *state;
   sampler_view->base.texture = NULL;
   pipe_resource_reference(&sampler_view->base.texture, pres);
   sampler_view->base.reference.count = 1;
   sampler_view->base.context = pctx;

   VkImageViewCreateInfo ivci = {};
   ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
   ivci.image = res->image;
   ivci.viewType = image_view_type(state->target);
   ivci.format = zink_get_format(screen, state->format);
   ivci.components.r = component_mapping(state->swizzle_r);
   ivci.components.g = component_mapping(state->swizzle_g);
   ivci.components.b = component_mapping(state->swizzle_b);
   ivci.components.a = component_mapping(state->swizzle_a);

   ivci.subresourceRange.aspectMask = sampler_aspect_from_format(state->format);
   ivci.subresourceRange.baseMipLevel = state->u.tex.first_level;
   ivci.subresourceRange.baseArrayLayer = state->u.tex.first_layer;
   ivci.subresourceRange.levelCount = state->u.tex.last_level - state->u.tex.first_level + 1;
   ivci.subresourceRange.layerCount = state->u.tex.last_layer - state->u.tex.first_layer + 1;

   VkResult err = vkCreateImageView(screen->dev, &ivci, NULL, &sampler_view->image_view);
   if (err != VK_SUCCESS) {
      /* drop the texture reference taken above before freeing the view */
      pipe_resource_reference(&sampler_view->base.texture, NULL);
      FREE(sampler_view);
      return NULL;
   }

   return &sampler_view->base;
}

static void
zink_sampler_view_destroy(struct pipe_context *pctx,
                          struct pipe_sampler_view *pview)
{
   struct zink_sampler_view *view = zink_sampler_view(pview);
   vkDestroyImageView(zink_screen(pctx->screen)->dev, view->image_view, NULL);
   pipe_resource_reference(&pview->texture, NULL);
   FREE(view);
}

static void *
zink_create_vs_state(struct pipe_context *pctx,
                     const struct pipe_shader_state *shader)
{
   struct nir_shader *nir;
   if (shader->type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
   else
      nir = (struct nir_shader *)shader->ir.nir;

   return zink_compile_nir(zink_screen(pctx->screen), nir);
}

static void
bind_stage(struct zink_context *ctx, enum pipe_shader_type stage,
           struct zink_shader *shader)
{
   assert(stage < PIPE_SHADER_COMPUTE);
   ctx->gfx_stages[stage] = shader;
   ctx->dirty_program = true;
}

static void
zink_bind_vs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_VERTEX, cso);
}

static void
zink_delete_vs_state(struct pipe_context *pctx,
                     void *cso)
{
   zink_shader_free(zink_screen(pctx->screen), cso);
}

static void *
zink_create_fs_state(struct pipe_context *pctx,
                     const struct pipe_shader_state *shader)
{
   struct nir_shader *nir;
   if (shader->type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
   else
      nir = (struct nir_shader *)shader->ir.nir;

   return zink_compile_nir(zink_screen(pctx->screen), nir);
}

static void
zink_bind_fs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_FRAGMENT, cso);
}

static void
zink_delete_fs_state(struct pipe_context *pctx,
                     void *cso)
{
   zink_shader_free(zink_screen(pctx->screen), cso);
}

static void
zink_set_polygon_stipple(struct pipe_context *pctx,
                         const struct pipe_poly_stipple *ps)
{
}

static void
zink_set_vertex_buffers(struct pipe_context *pctx,
                        unsigned start_slot,
                        unsigned num_buffers,
                        const struct pipe_vertex_buffer *buffers)
{
   struct zink_context *ctx = zink_context(pctx);

   if (buffers) {
      for (unsigned i = 0; i < num_buffers; ++i) {
         const struct pipe_vertex_buffer *vb = buffers + i;
         ctx->gfx_pipeline_state.bindings[start_slot + i].stride = vb->stride;
      }
   }

   util_set_vertex_buffers_mask(ctx->buffers, &ctx->buffers_enabled_mask,
                                buffers, start_slot, num_buffers);
}

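/* Gallium expresses viewports as scale/translate pairs; convert that into
 * VkViewport's corner/extent form: x/y is translate - scale, width/height
 * is 2 * scale, and the depth range is translate[2] -/+ scale[2].
 */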
static void
zink_set_viewport_states(struct pipe_context *pctx,
                         unsigned start_slot,
                         unsigned num_viewports,
                         const struct pipe_viewport_state *state)
{
   struct zink_context *ctx = zink_context(pctx);

   for (unsigned i = 0; i < num_viewports; ++i) {
      VkViewport viewport = {
         state[i].translate[0] - state[i].scale[0],
         state[i].translate[1] - state[i].scale[1],
         state[i].scale[0] * 2,
         state[i].scale[1] * 2,
         state[i].translate[2] - state[i].scale[2],
         state[i].translate[2] + state[i].scale[2]
      };
      ctx->viewport_states[start_slot + i] = state[i];
      ctx->viewports[start_slot + i] = viewport;
   }
   ctx->num_viewports = start_slot + num_viewports;
}

static void
zink_set_scissor_states(struct pipe_context *pctx,
                        unsigned start_slot, unsigned num_scissors,
                        const struct pipe_scissor_state *states)
{
   struct zink_context *ctx = zink_context(pctx);

   for (unsigned i = 0; i < num_scissors; i++) {
      VkRect2D scissor;

      scissor.offset.x = states[i].minx;
      scissor.offset.y = states[i].miny;
      scissor.extent.width = states[i].maxx - states[i].minx;
      scissor.extent.height = states[i].maxy - states[i].miny;
      ctx->scissor_states[start_slot + i] = states[i];
      ctx->scissors[start_slot + i] = scissor;
   }
}

static void
zink_set_constant_buffer(struct pipe_context *pctx,
                         enum pipe_shader_type shader, uint index,
                         const struct pipe_constant_buffer *cb)
{
   struct zink_context *ctx = zink_context(pctx);

   if (cb) {
      struct pipe_resource *buffer = cb->buffer;
      unsigned offset = cb->buffer_offset;
      if (cb->user_buffer) {
         struct zink_screen *screen = zink_screen(pctx->screen);
         u_upload_data(ctx->base.const_uploader, 0, cb->buffer_size,
                       screen->props.limits.minUniformBufferOffsetAlignment,
                       cb->user_buffer, &offset, &buffer);
      }

      pipe_resource_reference(&ctx->ubos[shader][index].buffer, buffer);
      ctx->ubos[shader][index].buffer_offset = offset;
      ctx->ubos[shader][index].buffer_size = cb->buffer_size;
      ctx->ubos[shader][index].user_buffer = NULL;

      if (cb->user_buffer)
         pipe_resource_reference(&buffer, NULL);
   } else {
      pipe_resource_reference(&ctx->ubos[shader][index].buffer, NULL);
      ctx->ubos[shader][index].buffer_offset = 0;
      ctx->ubos[shader][index].buffer_size = 0;
      ctx->ubos[shader][index].user_buffer = NULL;
   }
}

static void
zink_set_sampler_views(struct pipe_context *pctx,
                       enum pipe_shader_type shader_type,
                       unsigned start_slot,
                       unsigned num_views,
                       struct pipe_sampler_view **views)
{
   struct zink_context *ctx = zink_context(pctx);
   assert(views);
   for (unsigned i = 0; i < num_views; ++i) {
      pipe_sampler_view_reference(
         &ctx->image_views[shader_type][start_slot + i],
         views[i]);
   }
   ctx->num_image_views[shader_type] = start_slot + num_views;
}

static void
zink_set_stencil_ref(struct pipe_context *pctx,
                     const struct pipe_stencil_ref *ref)
{
   struct zink_context *ctx = zink_context(pctx);
   ctx->stencil_ref = *ref;
}

static void
zink_set_clip_state(struct pipe_context *pctx,
                    const struct pipe_clip_state *pcs)
{
}

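/* Render passes are cached on a zink_render_pass_state key built from the
 * current framebuffer's attachment formats and sample counts, so revisiting
 * a previously seen configuration reuses the existing VkRenderPass.
 */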
static struct zink_render_pass *
get_render_pass(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   const struct pipe_framebuffer_state *fb = &ctx->fb_state;
   /* zero-initialize: the cache hashes and compares the whole struct */
   struct zink_render_pass_state state = {};

   for (int i = 0; i < fb->nr_cbufs; i++) {
      struct pipe_resource *res = fb->cbufs[i]->texture;
      state.rts[i].format = zink_get_format(screen, fb->cbufs[i]->format);
      state.rts[i].samples = res->nr_samples > 0 ? res->nr_samples :
                                                   VK_SAMPLE_COUNT_1_BIT;
   }
   state.num_cbufs = fb->nr_cbufs;

   if (fb->zsbuf) {
      struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
      state.rts[fb->nr_cbufs].format = zsbuf->format;
      state.rts[fb->nr_cbufs].samples = zsbuf->base.nr_samples > 0 ? zsbuf->base.nr_samples : VK_SAMPLE_COUNT_1_BIT;
   }
   state.have_zsbuf = fb->zsbuf != NULL;

   struct hash_entry *entry = _mesa_hash_table_search(ctx->render_pass_cache,
                                                      &state);
   if (!entry) {
      struct zink_render_pass *rp;
      rp = zink_create_render_pass(screen, &state);
      entry = _mesa_hash_table_insert(ctx->render_pass_cache, &state, rp);
      if (!entry)
         return NULL;
   }

   return entry->data;
}

static struct zink_framebuffer *
get_framebuffer(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);

   struct zink_framebuffer_state state = {};
   state.rp = get_render_pass(ctx);
   for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
      struct pipe_surface *psurf = ctx->fb_state.cbufs[i];
      state.attachments[i] = zink_surface(psurf);
   }

   state.num_attachments = ctx->fb_state.nr_cbufs;
   if (ctx->fb_state.zsbuf) {
      struct pipe_surface *psurf = ctx->fb_state.zsbuf;
      state.attachments[state.num_attachments++] = zink_surface(psurf);
   }

   state.width = ctx->fb_state.width;
   state.height = ctx->fb_state.height;
   state.layers = MAX2(ctx->fb_state.layers, 1);

   struct hash_entry *entry = _mesa_hash_table_search(ctx->framebuffer_cache,
                                                      &state);
   if (!entry) {
      struct zink_framebuffer *fb = zink_create_framebuffer(screen, &state);
      entry = _mesa_hash_table_insert(ctx->framebuffer_cache, &state, fb);
      if (!entry)
         return NULL;
   }

   return entry->data;
}

void
zink_begin_render_pass(struct zink_context *ctx, struct zink_batch *batch)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   assert(batch == zink_curr_batch(ctx));
   assert(ctx->gfx_pipeline_state.render_pass);

   struct pipe_framebuffer_state *fb_state = &ctx->fb_state;

   VkRenderPassBeginInfo rpbi = {};
   rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
   rpbi.renderPass = ctx->gfx_pipeline_state.render_pass->render_pass;
   rpbi.renderArea.offset.x = 0;
   rpbi.renderArea.offset.y = 0;
   rpbi.renderArea.extent.width = fb_state->width;
   rpbi.renderArea.extent.height = fb_state->height;
   rpbi.clearValueCount = 0;
   rpbi.pClearValues = NULL;
   rpbi.framebuffer = ctx->framebuffer->fb;

   assert(ctx->gfx_pipeline_state.render_pass && ctx->framebuffer);
   assert(!batch->rp || batch->rp == ctx->gfx_pipeline_state.render_pass);
   assert(!batch->fb || batch->fb == ctx->framebuffer);

   for (int i = 0; i < fb_state->nr_cbufs; i++) {
      struct zink_resource *res = zink_resource(fb_state->cbufs[i]->texture);
      if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
   }

   if (fb_state->zsbuf) {
      struct zink_resource *res = zink_resource(fb_state->zsbuf->texture);
      if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
   }

   zink_render_pass_reference(screen, &batch->rp, ctx->gfx_pipeline_state.render_pass);
   zink_framebuffer_reference(screen, &batch->fb, ctx->framebuffer);

   vkCmdBeginRenderPass(batch->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
}

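/* End any active render pass, submit the current batch and move on to the
 * next one in the ring so recording can continue immediately.
 */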
static void
flush_batch(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (batch->rp)
      vkCmdEndRenderPass(batch->cmdbuf);

   zink_end_batch(ctx, batch);

   ctx->curr_batch++;
   if (ctx->curr_batch == ARRAY_SIZE(ctx->batches))
      ctx->curr_batch = 0;

   zink_start_batch(ctx, zink_curr_batch(ctx));
}

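/* Returns the current batch with a render pass active, starting one if
 * needed. zink_batch_no_rp below is the counterpart for work that must be
 * recorded outside a render pass; since an in-progress pass can't simply
 * be suspended, it flushes the batch instead.
 */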
struct zink_batch *
zink_batch_rp(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (!batch->rp) {
      zink_begin_render_pass(ctx, batch);
      assert(batch->rp);
   }
   return batch;
}

struct zink_batch *
zink_batch_no_rp(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (batch->rp) {
      /* flush batch and get a new one */
      flush_batch(ctx);
      batch = zink_curr_batch(ctx);
      assert(!batch->rp);
   }
   return batch;
}

static void
zink_set_framebuffer_state(struct pipe_context *pctx,
                           const struct pipe_framebuffer_state *state)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   VkSampleCountFlagBits rast_samples = VK_SAMPLE_COUNT_1_BIT;
   for (int i = 0; i < state->nr_cbufs; i++)
      rast_samples = MAX2(rast_samples, state->cbufs[i]->texture->nr_samples);
   if (state->zsbuf && state->zsbuf->texture->nr_samples)
      rast_samples = MAX2(rast_samples, state->zsbuf->texture->nr_samples);

   util_copy_framebuffer_state(&ctx->fb_state, state);

   struct zink_framebuffer *fb = get_framebuffer(ctx);
   zink_framebuffer_reference(screen, &ctx->framebuffer, fb);
   zink_render_pass_reference(screen, &ctx->gfx_pipeline_state.render_pass, fb->rp);

   ctx->gfx_pipeline_state.rast_samples = rast_samples;
   ctx->gfx_pipeline_state.num_attachments = state->nr_cbufs;

   struct zink_batch *batch = zink_batch_no_rp(ctx);

   for (int i = 0; i < state->nr_cbufs; i++) {
      struct zink_resource *res = zink_resource(state->cbufs[i]->texture);
      if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
   }

   if (state->zsbuf) {
      struct zink_resource *res = zink_resource(state->zsbuf->texture);
      if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
   }
}

static void
zink_set_blend_color(struct pipe_context *pctx,
                     const struct pipe_blend_color *color)
{
   struct zink_context *ctx = zink_context(pctx);
   memcpy(ctx->blend_constants, color->color, sizeof(float) * 4);
}

static void
zink_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
   struct zink_context *ctx = zink_context(pctx);
   ctx->gfx_pipeline_state.sample_mask = sample_mask;
}

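/* The helpers below map image layouts to the access masks and pipeline
 * stages used for layout-transition barriers; the default cases fall back
 * to conservative top/bottom-of-pipe stages.
 */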
static VkAccessFlags
access_src_flags(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_UNDEFINED:
   case VK_IMAGE_LAYOUT_GENERAL:
      return 0;

   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;

   case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
      return VK_ACCESS_SHADER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_ACCESS_TRANSFER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_ACCESS_TRANSFER_WRITE_BIT;

   case VK_IMAGE_LAYOUT_PREINITIALIZED:
      return VK_ACCESS_HOST_WRITE_BIT;

   default:
      unreachable("unexpected layout");
   }
}

static VkAccessFlags
access_dst_flags(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_UNDEFINED:
   case VK_IMAGE_LAYOUT_GENERAL:
      return 0;

   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_ACCESS_TRANSFER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_ACCESS_TRANSFER_WRITE_BIT;

   default:
      unreachable("unexpected layout");
   }
}

static VkPipelineStageFlags
pipeline_dst_stage(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;
   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;

   default:
      return VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
   }
}

static VkPipelineStageFlags
pipeline_src_stage(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;
   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;

   default:
      return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
   }
}

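/* Transition all mip levels and array layers of the given aspect to
 * new_layout using a whole-image memory barrier, and record the new layout
 * on the resource.
 */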
void
zink_resource_barrier(VkCommandBuffer cmdbuf, struct zink_resource *res,
                      VkImageAspectFlags aspect, VkImageLayout new_layout)
{
   VkImageSubresourceRange isr = {
      aspect,
      0, VK_REMAINING_MIP_LEVELS,
      0, VK_REMAINING_ARRAY_LAYERS
   };

   VkImageMemoryBarrier imb = {
      VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
      NULL,
      access_src_flags(res->layout),
      access_dst_flags(new_layout),
      res->layout,
      new_layout,
      VK_QUEUE_FAMILY_IGNORED,
      VK_QUEUE_FAMILY_IGNORED,
      res->image,
      isr
   };
   vkCmdPipelineBarrier(
      cmdbuf,
      pipeline_src_stage(res->layout),
      pipeline_dst_stage(new_layout),
      0,
      0, NULL,
      0, NULL,
      1, &imb
   );

   res->layout = new_layout;
}

static void
zink_clear(struct pipe_context *pctx,
           unsigned buffers,
           const union pipe_color_union *pcolor,
           double depth, unsigned stencil)
{
   struct zink_context *ctx = zink_context(pctx);
   struct pipe_framebuffer_state *fb = &ctx->fb_state;

   /* FIXME: this is very inefficient; if no render pass has been started
    * yet, we should record the clear if it's full-screen, and apply it as
    * we start the render pass. Otherwise we can do a partial
    * out-of-render-pass clear.
    */
   struct zink_batch *batch = zink_batch_rp(ctx);

   VkClearAttachment attachments[1 + PIPE_MAX_COLOR_BUFS];
   int num_attachments = 0;

   if (buffers & PIPE_CLEAR_COLOR) {
      VkClearColorValue color;
      color.float32[0] = pcolor->f[0];
      color.float32[1] = pcolor->f[1];
      color.float32[2] = pcolor->f[2];
      color.float32[3] = pcolor->f[3];

      for (unsigned i = 0; i < fb->nr_cbufs; i++) {
         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
            continue;

         attachments[num_attachments].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
         attachments[num_attachments].colorAttachment = i;
         attachments[num_attachments].clearValue.color = color;
         ++num_attachments;
      }
   }

   if ((buffers & PIPE_CLEAR_DEPTHSTENCIL) && fb->zsbuf) {
      VkImageAspectFlags aspect = 0;
      if (buffers & PIPE_CLEAR_DEPTH)
         aspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
      if (buffers & PIPE_CLEAR_STENCIL)
         aspect |= VK_IMAGE_ASPECT_STENCIL_BIT;

      attachments[num_attachments].aspectMask = aspect;
      attachments[num_attachments].clearValue.depthStencil.depth = depth;
      attachments[num_attachments].clearValue.depthStencil.stencil = stencil;
      ++num_attachments;
   }

   VkClearRect cr;
   cr.rect.offset.x = 0;
   cr.rect.offset.y = 0;
   cr.rect.extent.width = fb->width;
   cr.rect.extent.height = fb->height;
   cr.baseArrayLayer = 0;
   cr.layerCount = util_framebuffer_get_num_layers(fb);
   vkCmdClearAttachments(batch->cmdbuf, num_attachments, attachments, 1, &cr);
}

VkShaderStageFlagBits
zink_shader_stage(enum pipe_shader_type type)
{
   VkShaderStageFlagBits stages[] = {
      [PIPE_SHADER_VERTEX] = VK_SHADER_STAGE_VERTEX_BIT,
      [PIPE_SHADER_FRAGMENT] = VK_SHADER_STAGE_FRAGMENT_BIT,
      [PIPE_SHADER_GEOMETRY] = VK_SHADER_STAGE_GEOMETRY_BIT,
      [PIPE_SHADER_TESS_CTRL] = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
      [PIPE_SHADER_TESS_EVAL] = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
      [PIPE_SHADER_COMPUTE] = VK_SHADER_STAGE_COMPUTE_BIT,
   };
   return stages[type];
}

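/* Descriptor sets are carved out of the current batch's descriptor pool;
 * callers are expected to have checked batch->descs_left and flushed
 * beforehand, hence the assert rather than a retry.
 */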
static VkDescriptorSet
allocate_descriptor_set(struct zink_screen *screen,
                        struct zink_batch *batch,
                        struct zink_gfx_program *prog)
{
   assert(batch->descs_left >= prog->num_descriptors);
   VkDescriptorSetAllocateInfo dsai;
   memset((void *)&dsai, 0, sizeof(dsai));
   dsai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
   dsai.pNext = NULL;
   dsai.descriptorPool = batch->descpool;
   dsai.descriptorSetCount = 1;
   dsai.pSetLayouts = &prog->dsl;

   VkDescriptorSet desc_set;
   if (vkAllocateDescriptorSets(screen->dev, &dsai, &desc_set) != VK_SUCCESS) {
      debug_printf("ZINK: failed to allocate descriptor set\n");
      return VK_NULL_HANDLE;
   }

   batch->descs_left -= prog->num_descriptors;
   return desc_set;
}

static void
zink_bind_vertex_buffers(struct zink_batch *batch, struct zink_context *ctx)
{
   VkBuffer buffers[PIPE_MAX_ATTRIBS];
   VkDeviceSize buffer_offsets[PIPE_MAX_ATTRIBS];
   const struct zink_vertex_elements_state *elems = ctx->element_state;
   for (unsigned i = 0; i < elems->hw_state.num_bindings; i++) {
      struct pipe_vertex_buffer *vb = ctx->buffers + ctx->element_state->binding_map[i];
      assert(vb && vb->buffer.resource);
      struct zink_resource *res = zink_resource(vb->buffer.resource);
      buffers[i] = res->buffer;
      buffer_offsets[i] = vb->buffer_offset;
      zink_batch_reference_resoure(batch, res);
   }

   if (elems->hw_state.num_bindings > 0)
      vkCmdBindVertexBuffers(batch->cmdbuf, 0,
                             elems->hw_state.num_bindings,
                             buffers, buffer_offsets);
}

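/* The state caches below hash and compare the raw state structs
 * byte-for-byte; the gfx program key is the array of shader stages,
 * excluding compute (hence PIPE_SHADER_TYPES - 1).
 */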
static uint32_t
hash_gfx_program(const void *key)
{
   return _mesa_hash_data(key, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1));
}

static bool
equals_gfx_program(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1)) == 0;
}

static uint32_t
hash_render_pass_state(const void *key)
{
   return _mesa_hash_data(key, sizeof(struct zink_render_pass_state));
}

static bool
equals_render_pass_state(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_render_pass_state)) == 0;
}

static uint32_t
hash_framebuffer_state(const void *key)
{
   struct zink_framebuffer_state *s = (struct zink_framebuffer_state *)key;
   return _mesa_hash_data(key, sizeof(struct zink_framebuffer_state) + sizeof(s->attachments) * s->num_attachments);
}

static bool
equals_framebuffer_state(const void *a, const void *b)
{
   struct zink_framebuffer_state *s = (struct zink_framebuffer_state *)a;
   return memcmp(a, b, sizeof(struct zink_framebuffer_state) + sizeof(s->attachments) * s->num_attachments) == 0;
}

static struct zink_gfx_program *
get_gfx_program(struct zink_context *ctx)
{
   if (ctx->dirty_program) {
      struct hash_entry *entry = _mesa_hash_table_search(ctx->program_cache,
                                                         ctx->gfx_stages);
      if (!entry) {
         struct zink_gfx_program *prog;
         prog = zink_create_gfx_program(zink_screen(ctx->base.screen),
                                        ctx->gfx_stages);
         entry = _mesa_hash_table_insert(ctx->program_cache, prog->stages, prog);
         if (!entry)
            return NULL;
      }
      ctx->curr_program = entry->data;
      ctx->dirty_program = false;
   }

   assert(ctx->curr_program);
   return ctx->curr_program;
}

static bool
line_width_needed(enum pipe_prim_type reduced_prim,
                  VkPolygonMode polygon_mode)
{
   switch (reduced_prim) {
   case PIPE_PRIM_POINTS:
      return false;

   case PIPE_PRIM_LINES:
      return true;

   case PIPE_PRIM_TRIANGLES:
      return polygon_mode == VK_POLYGON_MODE_LINE;

   default:
      unreachable("unexpected reduced prim");
   }
}

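/* Primitive topologies Vulkan can't draw directly (quads and other legacy
 * prims, line loops, 8-bit indices) are lowered through u_primconvert
 * before reaching the native draw path below.
 */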
static void
zink_draw_vbo(struct pipe_context *pctx,
              const struct pipe_draw_info *dinfo)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_rasterizer_state *rast_state = ctx->rast_state;

   if (dinfo->mode >= PIPE_PRIM_QUADS ||
       dinfo->mode == PIPE_PRIM_LINE_LOOP ||
       dinfo->index_size == 1) {
      if (!u_trim_pipe_prim(dinfo->mode, (unsigned *)&dinfo->count))
         return;

      util_primconvert_save_rasterizer_state(ctx->primconvert, &rast_state->base);
      util_primconvert_draw_vbo(ctx->primconvert, dinfo);
      return;
   }

   struct zink_gfx_program *gfx_program = get_gfx_program(ctx);
   if (!gfx_program)
      return;

   VkPipeline pipeline = zink_get_gfx_pipeline(screen, gfx_program,
                                               &ctx->gfx_pipeline_state,
                                               dinfo->mode);

   enum pipe_prim_type reduced_prim = u_reduced_prim(dinfo->mode);

   bool depth_bias = false;
   switch (reduced_prim) {
   case PIPE_PRIM_POINTS:
      depth_bias = rast_state->offset_point;
      break;

   case PIPE_PRIM_LINES:
      depth_bias = rast_state->offset_line;
      break;

   case PIPE_PRIM_TRIANGLES:
      depth_bias = rast_state->offset_tri;
      break;

   default:
      unreachable("unexpected reduced prim");
   }

   unsigned index_offset = 0;
   struct pipe_resource *index_buffer = NULL;
   if (dinfo->index_size > 0) {
      if (dinfo->has_user_indices) {
         if (!util_upload_index_buffer(pctx, dinfo, &index_buffer, &index_offset)) {
            debug_printf("util_upload_index_buffer() failed\n");
            return;
         }
      } else
         index_buffer = dinfo->index.resource;
   }

   VkWriteDescriptorSet wds[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS + PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
   VkDescriptorBufferInfo buffer_infos[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS];
   VkDescriptorImageInfo image_infos[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
   int num_wds = 0, num_buffer_info = 0, num_image_info = 0;

   struct zink_resource *transitions[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
   int num_transitions = 0;

   for (int i = 0; i < ARRAY_SIZE(ctx->gfx_stages); i++) {
      struct zink_shader *shader = ctx->gfx_stages[i];
      if (!shader)
         continue;

      for (int j = 0; j < shader->num_bindings; j++) {
         int index = shader->bindings[j].index;
         if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
            assert(ctx->ubos[i][index].buffer_size > 0);
            assert(ctx->ubos[i][index].buffer_size <= screen->props.limits.maxUniformBufferRange);
            assert(ctx->ubos[i][index].buffer);
            struct zink_resource *res = zink_resource(ctx->ubos[i][index].buffer);
            buffer_infos[num_buffer_info].buffer = res->buffer;
            buffer_infos[num_buffer_info].offset = ctx->ubos[i][index].buffer_offset;
            buffer_infos[num_buffer_info].range = ctx->ubos[i][index].buffer_size;
            wds[num_wds].pBufferInfo = buffer_infos + num_buffer_info;
            ++num_buffer_info;
         } else {
            struct pipe_sampler_view *psampler_view = ctx->image_views[i][index];
            assert(psampler_view);
            struct zink_sampler_view *sampler_view = zink_sampler_view(psampler_view);

            struct zink_resource *res = zink_resource(psampler_view->texture);
            VkImageLayout layout = res->layout;
            if (layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
                layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL &&
                layout != VK_IMAGE_LAYOUT_GENERAL) {
               transitions[num_transitions++] = res;
               layout = VK_IMAGE_LAYOUT_GENERAL;
            }
            image_infos[num_image_info].imageLayout = layout;
            image_infos[num_image_info].imageView = sampler_view->image_view;
            image_infos[num_image_info].sampler = ctx->samplers[i][index];
            wds[num_wds].pImageInfo = image_infos + num_image_info;
            ++num_image_info;
         }

         wds[num_wds].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
         wds[num_wds].pNext = NULL;
         wds[num_wds].dstBinding = shader->bindings[j].binding;
         wds[num_wds].dstArrayElement = 0;
         wds[num_wds].descriptorCount = 1;
         wds[num_wds].descriptorType = shader->bindings[j].type;
         ++num_wds;
      }
   }

   struct zink_batch *batch;
   if (num_transitions > 0) {
      batch = zink_batch_no_rp(ctx);

      for (int i = 0; i < num_transitions; ++i)
         zink_resource_barrier(batch->cmdbuf, transitions[i],
                               transitions[i]->aspect,
                               VK_IMAGE_LAYOUT_GENERAL);
   }

   batch = zink_batch_rp(ctx);

   if (batch->descs_left < gfx_program->num_descriptors) {
      flush_batch(ctx);
      batch = zink_batch_rp(ctx);
      assert(batch->descs_left >= gfx_program->num_descriptors);
   }

   VkDescriptorSet desc_set = allocate_descriptor_set(screen, batch,
                                                      gfx_program);
   assert(desc_set != VK_NULL_HANDLE);

   for (int i = 0; i < ARRAY_SIZE(ctx->gfx_stages); i++) {
      struct zink_shader *shader = ctx->gfx_stages[i];
      if (!shader)
         continue;

      for (int j = 0; j < shader->num_bindings; j++) {
         int index = shader->bindings[j].index;
         if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
            struct zink_resource *res = zink_resource(ctx->ubos[i][index].buffer);
            zink_batch_reference_resoure(batch, res);
         } else {
            struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->image_views[i][index]);
            zink_batch_reference_sampler_view(batch, sampler_view);
         }
      }
   }

   vkCmdSetViewport(batch->cmdbuf, 0, ctx->num_viewports, ctx->viewports);
   if (ctx->rast_state->base.scissor)
      vkCmdSetScissor(batch->cmdbuf, 0, ctx->num_viewports, ctx->scissors);
   else if (ctx->fb_state.width && ctx->fb_state.height) {
      VkRect2D fb_scissor = {};
      fb_scissor.extent.width = ctx->fb_state.width;
      fb_scissor.extent.height = ctx->fb_state.height;
      vkCmdSetScissor(batch->cmdbuf, 0, 1, &fb_scissor);
   }

   if (line_width_needed(reduced_prim, rast_state->hw_state.polygon_mode)) {
      if (screen->feats.wideLines || ctx->line_width == 1.0f)
         vkCmdSetLineWidth(batch->cmdbuf, ctx->line_width);
      else
         debug_printf("BUG: wide lines not supported, needs fallback!\n");
   }

   vkCmdSetStencilReference(batch->cmdbuf, VK_STENCIL_FACE_FRONT_BIT, ctx->stencil_ref.ref_value[0]);
   vkCmdSetStencilReference(batch->cmdbuf, VK_STENCIL_FACE_BACK_BIT, ctx->stencil_ref.ref_value[1]);

   if (depth_bias)
      vkCmdSetDepthBias(batch->cmdbuf, rast_state->offset_units, rast_state->offset_clamp, rast_state->offset_scale);
   else
      vkCmdSetDepthBias(batch->cmdbuf, 0.0f, 0.0f, 0.0f);

   if (ctx->gfx_pipeline_state.blend_state->need_blend_constants)
      vkCmdSetBlendConstants(batch->cmdbuf, ctx->blend_constants);

   if (num_wds > 0) {
      for (int i = 0; i < num_wds; ++i)
         wds[i].dstSet = desc_set;
      vkUpdateDescriptorSets(screen->dev, num_wds, wds, 0, NULL);
   }

   vkCmdBindPipeline(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
   vkCmdBindDescriptorSets(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS,
                           gfx_program->layout, 0, 1, &desc_set, 0, NULL);
   zink_bind_vertex_buffers(batch, ctx);

   if (dinfo->index_size > 0) {
      assert(dinfo->index_size != 1);
      VkIndexType index_type = dinfo->index_size == 2 ? VK_INDEX_TYPE_UINT16 : VK_INDEX_TYPE_UINT32;
      struct zink_resource *res = zink_resource(index_buffer);
      vkCmdBindIndexBuffer(batch->cmdbuf, res->buffer, index_offset, index_type);
      zink_batch_reference_resoure(batch, res);
      vkCmdDrawIndexed(batch->cmdbuf,
                       dinfo->count, dinfo->instance_count,
                       dinfo->start, dinfo->index_bias, dinfo->start_instance);
   } else
      vkCmdDraw(batch->cmdbuf, dinfo->count, dinfo->instance_count, dinfo->start, dinfo->start_instance);

   if (dinfo->index_size > 0 && dinfo->has_user_indices)
      pipe_resource_reference(&index_buffer, NULL);
}

static void
zink_flush(struct pipe_context *pctx,
           struct pipe_fence_handle **pfence,
           enum pipe_flush_flags flags)
{
   struct zink_context *ctx = zink_context(pctx);

   struct zink_batch *batch = zink_curr_batch(ctx);
   flush_batch(ctx);

   if (pfence)
      zink_fence_reference(zink_screen(pctx->screen),
                           (struct zink_fence **)pfence,
                           batch->fence);

   /* HACK:
    * For some strange reason, we need to finish before presenting, or else
    * we start rendering on top of the back-buffer for the next frame. This
    * seems like a bug in the DRI-driver to me, because we really should
    * be properly protected by fences here, and the back-buffer should
    * either be swapped with the front-buffer, or blitted from. But for
    * some strange reason, neither of these things happen.
    */
   if (flags & PIPE_FLUSH_END_OF_FRAME)
      pctx->screen->fence_finish(pctx->screen, pctx,
                                 (struct pipe_fence_handle *)batch->fence,
                                 PIPE_TIMEOUT_INFINITE);
}

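/* Multisample resolves can use vkCmdResolveImage directly, as long as the
 * full RGBA mask is written, no scissor or blending is requested, and the
 * pipe formats match the resources' native Vulkan formats.
 */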
static bool
blit_resolve(struct zink_context *ctx, const struct pipe_blit_info *info)
{
   if (info->mask != PIPE_MASK_RGBA ||
       info->scissor_enable ||
       info->alpha_blend)
      return false;

   struct zink_resource *src = zink_resource(info->src.resource);
   struct zink_resource *dst = zink_resource(info->dst.resource);

   struct zink_screen *screen = zink_screen(ctx->base.screen);
   if (src->format != zink_get_format(screen, info->src.format) ||
       dst->format != zink_get_format(screen, info->dst.format))
      return false;

   struct zink_batch *batch = zink_batch_no_rp(ctx);

   zink_batch_reference_resoure(batch, src);
   zink_batch_reference_resoure(batch, dst);

   if (src->layout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL)
      zink_resource_barrier(batch->cmdbuf, src, src->aspect,
                            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);

   if (dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
      zink_resource_barrier(batch->cmdbuf, dst, dst->aspect,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

   VkImageResolve region = {};

   region.srcSubresource.aspectMask = src->aspect;
   region.srcSubresource.mipLevel = info->src.level;
   region.srcSubresource.baseArrayLayer = 0; // no clue
   region.srcSubresource.layerCount = 1; // no clue
   region.srcOffset.x = info->src.box.x;
   region.srcOffset.y = info->src.box.y;
   region.srcOffset.z = info->src.box.z;

   region.dstSubresource.aspectMask = dst->aspect;
   region.dstSubresource.mipLevel = info->dst.level;
   region.dstSubresource.baseArrayLayer = 0; // no clue
   region.dstSubresource.layerCount = 1; // no clue
   region.dstOffset.x = info->dst.box.x;
   region.dstOffset.y = info->dst.box.y;
   region.dstOffset.z = info->dst.box.z;

   region.extent.width = info->dst.box.width;
   region.extent.height = info->dst.box.height;
   region.extent.depth = info->dst.box.depth;
   vkCmdResolveImage(batch->cmdbuf, src->image, src->layout,
                     dst->image, dst->layout,
                     1, &region);

   /* HACK: I have no idea why this is needed, but without it ioquake3
    * randomly keeps fading to black.
    */
   flush_batch(ctx);

   return true;
}

static bool
blit_native(struct zink_context *ctx, const struct pipe_blit_info *info)
{
   if (info->mask != PIPE_MASK_RGBA ||
       info->scissor_enable ||
       info->alpha_blend)
      return false;

   struct zink_resource *src = zink_resource(info->src.resource);
   struct zink_resource *dst = zink_resource(info->dst.resource);

   struct zink_screen *screen = zink_screen(ctx->base.screen);
   if (src->format != zink_get_format(screen, info->src.format) ||
       dst->format != zink_get_format(screen, info->dst.format))
      return false;

   struct zink_batch *batch = zink_batch_no_rp(ctx);
   zink_batch_reference_resoure(batch, src);
   zink_batch_reference_resoure(batch, dst);

   if (src->layout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL)
      zink_resource_barrier(batch->cmdbuf, src, src->aspect,
                            VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);

   if (dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
      zink_resource_barrier(batch->cmdbuf, dst, dst->aspect,
                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

   VkImageBlit region = {};
   region.srcSubresource.aspectMask = src->aspect;
   region.srcSubresource.mipLevel = info->src.level;
   region.srcOffsets[0].x = info->src.box.x;
   region.srcOffsets[0].y = info->src.box.y;
   region.srcOffsets[1].x = info->src.box.x + info->src.box.width;
   region.srcOffsets[1].y = info->src.box.y + info->src.box.height;

   if (src->base.array_size > 1) {
      region.srcOffsets[0].z = 0;
      region.srcOffsets[1].z = 1;
      region.srcSubresource.baseArrayLayer = info->src.box.z;
      region.srcSubresource.layerCount = info->src.box.depth;
   } else {
      region.srcOffsets[0].z = info->src.box.z;
      region.srcOffsets[1].z = info->src.box.z + info->src.box.depth;
      region.srcSubresource.baseArrayLayer = 0;
      region.srcSubresource.layerCount = 1;
   }

   region.dstSubresource.aspectMask = dst->aspect;
   region.dstSubresource.mipLevel = info->dst.level;
   region.dstOffsets[0].x = info->dst.box.x;
   region.dstOffsets[0].y = info->dst.box.y;
   region.dstOffsets[1].x = info->dst.box.x + info->dst.box.width;
   region.dstOffsets[1].y = info->dst.box.y + info->dst.box.height;

   if (dst->base.array_size > 1) {
      region.dstOffsets[0].z = 0;
      region.dstOffsets[1].z = 1;
      region.dstSubresource.baseArrayLayer = info->dst.box.z;
      region.dstSubresource.layerCount = info->dst.box.depth;
   } else {
      region.dstOffsets[0].z = info->dst.box.z;
      region.dstOffsets[1].z = info->dst.box.z + info->dst.box.depth;
      region.dstSubresource.baseArrayLayer = 0;
      region.dstSubresource.layerCount = 1;
   }

   vkCmdBlitImage(batch->cmdbuf, src->image, src->layout,
                  dst->image, dst->layout,
                  1, &region,
                  filter(info->filter));

   /* HACK: I have no idea why this is needed, but without it ioquake3
    * randomly keeps fading to black.
    */
   flush_batch(ctx);

   return true;
}

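/* Try the native Vulkan transfer paths first; when they can't handle the
 * blit, for example when a pipe format reinterprets the underlying resource
 * format, fall back to a shader-based blit through u_blitter.
 */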
static void
zink_blit(struct pipe_context *pctx,
          const struct pipe_blit_info *info)
{
   struct zink_context *ctx = zink_context(pctx);
   if (info->src.resource->nr_samples > 1 &&
       info->dst.resource->nr_samples <= 1) {
      if (blit_resolve(ctx, info))
         return;
   } else {
      if (blit_native(ctx, info))
         return;
   }

   if (!util_blitter_is_blit_supported(ctx->blitter, info)) {
      debug_printf("blit unsupported %s -> %s\n",
                   util_format_short_name(info->src.resource->format),
                   util_format_short_name(info->dst.resource->format));
      return;
   }

   util_blitter_save_blend(ctx->blitter, ctx->gfx_pipeline_state.blend_state);
   util_blitter_save_depth_stencil_alpha(ctx->blitter, ctx->gfx_pipeline_state.depth_stencil_alpha_state);
   util_blitter_save_vertex_elements(ctx->blitter, ctx->element_state);
   util_blitter_save_stencil_ref(ctx->blitter, &ctx->stencil_ref);
   util_blitter_save_rasterizer(ctx->blitter, ctx->rast_state);
   util_blitter_save_fragment_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_FRAGMENT]);
   util_blitter_save_vertex_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_VERTEX]);
   util_blitter_save_framebuffer(ctx->blitter, &ctx->fb_state);
   util_blitter_save_viewport(ctx->blitter, ctx->viewport_states);
   util_blitter_save_scissor(ctx->blitter, ctx->scissor_states);
   util_blitter_save_fragment_sampler_states(ctx->blitter,
                                             ctx->num_samplers[PIPE_SHADER_FRAGMENT],
                                             (void **)ctx->samplers[PIPE_SHADER_FRAGMENT]);
   util_blitter_save_fragment_sampler_views(ctx->blitter,
                                            ctx->num_image_views[PIPE_SHADER_FRAGMENT],
                                            ctx->image_views[PIPE_SHADER_FRAGMENT]);
   util_blitter_save_fragment_constant_buffer_slot(ctx->blitter, ctx->ubos[PIPE_SHADER_FRAGMENT]);
   util_blitter_save_vertex_buffer_slot(ctx->blitter, ctx->buffers);
   util_blitter_save_sample_mask(ctx->blitter, ctx->gfx_pipeline_state.sample_mask);

   util_blitter_blit(ctx->blitter, info);
}

static void
zink_flush_resource(struct pipe_context *pipe,
                    struct pipe_resource *resource)
{
}

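/* Image-to-image copies map onto vkCmdCopyImage; copies involving buffers
 * are not implemented yet (see the TODO below).
 */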
static void
zink_resource_copy_region(struct pipe_context *pctx,
                          struct pipe_resource *pdst,
                          unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz,
                          struct pipe_resource *psrc,
                          unsigned src_level, const struct pipe_box *src_box)
{
   struct zink_resource *dst = zink_resource(pdst);
   struct zink_resource *src = zink_resource(psrc);
   struct zink_context *ctx = zink_context(pctx);
   if (dst->base.target != PIPE_BUFFER && src->base.target != PIPE_BUFFER) {
      VkImageCopy region = {};

      region.srcSubresource.aspectMask = src->aspect;
      region.srcSubresource.mipLevel = src_level;
      if (src->base.array_size > 1) {
         region.srcSubresource.baseArrayLayer = src_box->z;
         region.srcSubresource.layerCount = src_box->depth;
         region.extent.depth = 1;
      } else {
         region.srcOffset.z = src_box->z;
         region.srcSubresource.layerCount = 1;
         region.extent.depth = src_box->depth;
      }

      region.srcOffset.x = src_box->x;
      region.srcOffset.y = src_box->y;

      region.dstSubresource.aspectMask = dst->aspect;
      region.dstSubresource.mipLevel = dst_level;
      if (dst->base.array_size > 1) {
         region.dstSubresource.baseArrayLayer = dstz;
         region.dstSubresource.layerCount = src_box->depth;
      } else {
         region.dstOffset.z = dstz;
         region.dstSubresource.layerCount = 1;
      }

      region.dstOffset.x = dstx;
      region.dstOffset.y = dsty;
      region.extent.width = src_box->width;
      region.extent.height = src_box->height;

      struct zink_batch *batch = zink_batch_no_rp(ctx);
      zink_batch_reference_resoure(batch, src);
      zink_batch_reference_resoure(batch, dst);

      if (src->layout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
         zink_resource_barrier(batch->cmdbuf, src, src->aspect,
                               VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
      }

      if (dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
         zink_resource_barrier(batch->cmdbuf, dst, dst->aspect,
                               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
      }

      vkCmdCopyImage(batch->cmdbuf, src->image, src->layout,
                     dst->image, dst->layout,
                     1, &region);
   } else
      debug_printf("zink: TODO resource copy\n");
}

struct pipe_context *
zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
   struct zink_screen *screen = zink_screen(pscreen);
   struct zink_context *ctx = CALLOC_STRUCT(zink_context);
   if (!ctx)
      return NULL;

   ctx->base.screen = pscreen;
   ctx->base.priv = priv;

   ctx->base.destroy = zink_context_destroy;

   zink_context_state_init(&ctx->base);

   ctx->base.create_sampler_state = zink_create_sampler_state;
   ctx->base.bind_sampler_states = zink_bind_sampler_states;
   ctx->base.delete_sampler_state = zink_delete_sampler_state;

   ctx->base.create_sampler_view = zink_create_sampler_view;
   ctx->base.set_sampler_views = zink_set_sampler_views;
   ctx->base.sampler_view_destroy = zink_sampler_view_destroy;

   ctx->base.create_vs_state = zink_create_vs_state;
   ctx->base.bind_vs_state = zink_bind_vs_state;
   ctx->base.delete_vs_state = zink_delete_vs_state;

   ctx->base.create_fs_state = zink_create_fs_state;
   ctx->base.bind_fs_state = zink_bind_fs_state;
   ctx->base.delete_fs_state = zink_delete_fs_state;

   ctx->base.set_polygon_stipple = zink_set_polygon_stipple;
   ctx->base.set_vertex_buffers = zink_set_vertex_buffers;
   ctx->base.set_viewport_states = zink_set_viewport_states;
   ctx->base.set_scissor_states = zink_set_scissor_states;
   ctx->base.set_constant_buffer = zink_set_constant_buffer;
   ctx->base.set_framebuffer_state = zink_set_framebuffer_state;
   ctx->base.set_stencil_ref = zink_set_stencil_ref;
   ctx->base.set_clip_state = zink_set_clip_state;
   ctx->base.set_blend_color = zink_set_blend_color;

   ctx->base.set_sample_mask = zink_set_sample_mask;

   ctx->base.clear = zink_clear;
   ctx->base.draw_vbo = zink_draw_vbo;
   ctx->base.flush = zink_flush;

   ctx->base.resource_copy_region = zink_resource_copy_region;
   ctx->base.blit = zink_blit;

   ctx->base.flush_resource = zink_flush_resource;
   zink_context_surface_init(&ctx->base);
   zink_context_resource_init(&ctx->base);
   zink_context_query_init(&ctx->base);

   slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);

   ctx->base.stream_uploader = u_upload_create_default(&ctx->base);
   ctx->base.const_uploader = ctx->base.stream_uploader;

   int prim_hwsupport = 1 << PIPE_PRIM_POINTS |
                        1 << PIPE_PRIM_LINES |
                        1 << PIPE_PRIM_LINE_STRIP |
                        1 << PIPE_PRIM_TRIANGLES |
                        1 << PIPE_PRIM_TRIANGLE_STRIP |
                        1 << PIPE_PRIM_TRIANGLE_FAN;

   ctx->primconvert = util_primconvert_create(&ctx->base, prim_hwsupport);
   if (!ctx->primconvert)
      goto fail;

   ctx->blitter = util_blitter_create(&ctx->base);
   if (!ctx->blitter)
      goto fail;

   VkCommandPoolCreateInfo cpci = {};
   cpci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
   cpci.queueFamilyIndex = screen->gfx_queue;
   cpci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
   if (vkCreateCommandPool(screen->dev, &cpci, NULL, &ctx->cmdpool) != VK_SUCCESS)
      goto fail;

   VkCommandBufferAllocateInfo cbai = {};
   cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
   cbai.commandPool = ctx->cmdpool;
   cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
   cbai.commandBufferCount = 1;

   VkDescriptorPoolSize sizes[] = {
      {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, ZINK_BATCH_DESC_SIZE}
   };
   VkDescriptorPoolCreateInfo dpci = {};
   dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
   dpci.pPoolSizes = sizes;
   dpci.poolSizeCount = ARRAY_SIZE(sizes);
   dpci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
   dpci.maxSets = ZINK_BATCH_DESC_SIZE;

   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i) {
      if (vkAllocateCommandBuffers(screen->dev, &cbai, &ctx->batches[i].cmdbuf) != VK_SUCCESS)
         goto fail;

      ctx->batches[i].resources = _mesa_set_create(NULL, _mesa_hash_pointer,
                                                   _mesa_key_pointer_equal);
      ctx->batches[i].sampler_views = _mesa_set_create(NULL,
                                                       _mesa_hash_pointer,
                                                       _mesa_key_pointer_equal);

      if (!ctx->batches[i].resources || !ctx->batches[i].sampler_views)
         goto fail;

      util_dynarray_init(&ctx->batches[i].zombie_samplers, NULL);

      if (vkCreateDescriptorPool(screen->dev, &dpci, 0,
                                 &ctx->batches[i].descpool) != VK_SUCCESS)
         goto fail;
   }

   vkGetDeviceQueue(screen->dev, screen->gfx_queue, 0, &ctx->queue);

   ctx->program_cache = _mesa_hash_table_create(NULL,
                                                hash_gfx_program,
                                                equals_gfx_program);
   ctx->render_pass_cache = _mesa_hash_table_create(NULL,
                                                    hash_render_pass_state,
                                                    equals_render_pass_state);
   ctx->framebuffer_cache = _mesa_hash_table_create(NULL,
                                                    hash_framebuffer_state,
                                                    equals_framebuffer_state);

   if (!ctx->program_cache || !ctx->render_pass_cache ||
       !ctx->framebuffer_cache)
      goto fail;

   ctx->dirty_program = true;

   /* start the first batch */
   zink_start_batch(ctx, zink_curr_batch(ctx));

   return &ctx->base;

fail:
   if (ctx) {
      vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);
      FREE(ctx);
   }
   return NULL;
}