panfrost: Fix panfrost_bo_access memory leak
[mesa.git] / src / gallium / drivers / zink / zink_context.c
1 /*
2 * Copyright 2018 Collabora Ltd.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "zink_context.h"
25
26 #include "zink_batch.h"
27 #include "zink_compiler.h"
28 #include "zink_fence.h"
29 #include "zink_framebuffer.h"
30 #include "zink_helpers.h"
31 #include "zink_pipeline.h"
32 #include "zink_render_pass.h"
33 #include "zink_resource.h"
34 #include "zink_screen.h"
35 #include "zink_state.h"
36 #include "zink_surface.h"
37
38 #include "indices/u_primconvert.h"
39 #include "util/u_blitter.h"
40 #include "util/u_debug.h"
41 #include "util/format/u_format.h"
42 #include "util/u_framebuffer.h"
43 #include "util/u_helpers.h"
44 #include "util/u_inlines.h"
45
46 #include "nir.h"
47
48 #include "util/u_memory.h"
49 #include "util/u_upload_mgr.h"
50
/* Tear down a zink context.
 *
 * Waits for the GPU to go idle first so none of the objects freed below can
 * still be referenced by in-flight work. Command buffers must be freed
 * before their pool is destroyed.
 */
static void
zink_context_destroy(struct pipe_context *pctx)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   /* drain the queue; failure is only logged since we are tearing down anyway */
   if (vkQueueWaitIdle(ctx->queue) != VK_SUCCESS)
      debug_printf("vkQueueWaitIdle failed\n");

   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i)
      vkFreeCommandBuffers(screen->dev, ctx->cmdpool, 1, &ctx->batches[i].cmdbuf);
   vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);

   util_primconvert_destroy(ctx->primconvert);
   u_upload_destroy(pctx->stream_uploader);
   slab_destroy_child(&ctx->transfer_pool);
   util_blitter_destroy(ctx->blitter);
   FREE(ctx);
}
70
71 static VkSamplerMipmapMode
72 sampler_mipmap_mode(enum pipe_tex_mipfilter filter)
73 {
74 switch (filter) {
75 case PIPE_TEX_MIPFILTER_NEAREST: return VK_SAMPLER_MIPMAP_MODE_NEAREST;
76 case PIPE_TEX_MIPFILTER_LINEAR: return VK_SAMPLER_MIPMAP_MODE_LINEAR;
77 case PIPE_TEX_MIPFILTER_NONE:
78 unreachable("PIPE_TEX_MIPFILTER_NONE should be dealt with earlier");
79 }
80 unreachable("unexpected filter");
81 }
82
83 static VkSamplerAddressMode
84 sampler_address_mode(enum pipe_tex_wrap filter)
85 {
86 switch (filter) {
87 case PIPE_TEX_WRAP_REPEAT: return VK_SAMPLER_ADDRESS_MODE_REPEAT;
88 case PIPE_TEX_WRAP_CLAMP: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
89 case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
90 case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
91 case PIPE_TEX_WRAP_MIRROR_REPEAT: return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
92 case PIPE_TEX_WRAP_MIRROR_CLAMP: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
93 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
94 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
95 }
96 unreachable("unexpected wrap");
97 }
98
99 static VkCompareOp
100 compare_op(enum pipe_compare_func op)
101 {
102 switch (op) {
103 case PIPE_FUNC_NEVER: return VK_COMPARE_OP_NEVER;
104 case PIPE_FUNC_LESS: return VK_COMPARE_OP_LESS;
105 case PIPE_FUNC_EQUAL: return VK_COMPARE_OP_EQUAL;
106 case PIPE_FUNC_LEQUAL: return VK_COMPARE_OP_LESS_OR_EQUAL;
107 case PIPE_FUNC_GREATER: return VK_COMPARE_OP_GREATER;
108 case PIPE_FUNC_NOTEQUAL: return VK_COMPARE_OP_NOT_EQUAL;
109 case PIPE_FUNC_GEQUAL: return VK_COMPARE_OP_GREATER_OR_EQUAL;
110 case PIPE_FUNC_ALWAYS: return VK_COMPARE_OP_ALWAYS;
111 }
112 unreachable("unexpected compare");
113 }
114
115 static void *
116 zink_create_sampler_state(struct pipe_context *pctx,
117 const struct pipe_sampler_state *state)
118 {
119 struct zink_screen *screen = zink_screen(pctx->screen);
120
121 VkSamplerCreateInfo sci = {};
122 sci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
123 sci.magFilter = zink_filter(state->mag_img_filter);
124 sci.minFilter = zink_filter(state->min_img_filter);
125
126 if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) {
127 sci.mipmapMode = sampler_mipmap_mode(state->min_mip_filter);
128 sci.minLod = state->min_lod;
129 sci.maxLod = state->max_lod;
130 } else {
131 sci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
132 sci.minLod = 0;
133 sci.maxLod = 0;
134 }
135
136 sci.addressModeU = sampler_address_mode(state->wrap_s);
137 sci.addressModeV = sampler_address_mode(state->wrap_t);
138 sci.addressModeW = sampler_address_mode(state->wrap_r);
139 sci.mipLodBias = state->lod_bias;
140
141 if (state->compare_mode == PIPE_TEX_COMPARE_NONE)
142 sci.compareOp = VK_COMPARE_OP_NEVER;
143 else
144 sci.compareOp = compare_op(state->compare_func);
145
146 sci.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; // TODO
147 sci.unnormalizedCoordinates = !state->normalized_coords;
148
149 if (state->max_anisotropy > 1) {
150 sci.maxAnisotropy = state->max_anisotropy;
151 sci.anisotropyEnable = VK_TRUE;
152 }
153
154 VkSampler *sampler = CALLOC(1, sizeof(VkSampler));
155 if (!sampler)
156 return NULL;
157
158 if (vkCreateSampler(screen->dev, &sci, NULL, sampler) != VK_SUCCESS) {
159 FREE(sampler);
160 return NULL;
161 }
162
163 return sampler;
164 }
165
166 static void
167 zink_bind_sampler_states(struct pipe_context *pctx,
168 enum pipe_shader_type shader,
169 unsigned start_slot,
170 unsigned num_samplers,
171 void **samplers)
172 {
173 struct zink_context *ctx = zink_context(pctx);
174 for (unsigned i = 0; i < num_samplers; ++i) {
175 VkSampler *sampler = samplers[i];
176 ctx->sampler_states[shader][start_slot + i] = sampler;
177 ctx->samplers[shader][start_slot + i] = sampler ? *sampler : VK_NULL_HANDLE;
178 }
179 ctx->num_samplers[shader] = start_slot + num_samplers;
180 }
181
182 static void
183 zink_delete_sampler_state(struct pipe_context *pctx,
184 void *sampler_state)
185 {
186 struct zink_batch *batch = zink_curr_batch(zink_context(pctx));
187 util_dynarray_append(&batch->zombie_samplers, VkSampler,
188 *(VkSampler *)sampler_state);
189 FREE(sampler_state);
190 }
191
192
193 static VkImageViewType
194 image_view_type(enum pipe_texture_target target)
195 {
196 switch (target) {
197 case PIPE_TEXTURE_1D: return VK_IMAGE_VIEW_TYPE_1D;
198 case PIPE_TEXTURE_1D_ARRAY: return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
199 case PIPE_TEXTURE_2D: return VK_IMAGE_VIEW_TYPE_2D;
200 case PIPE_TEXTURE_2D_ARRAY: return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
201 case PIPE_TEXTURE_CUBE: return VK_IMAGE_VIEW_TYPE_CUBE;
202 case PIPE_TEXTURE_CUBE_ARRAY: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
203 case PIPE_TEXTURE_3D: return VK_IMAGE_VIEW_TYPE_3D;
204 case PIPE_TEXTURE_RECT: return VK_IMAGE_VIEW_TYPE_2D; /* not sure */
205 default:
206 unreachable("unexpected target");
207 }
208 }
209
210 static VkComponentSwizzle
211 component_mapping(enum pipe_swizzle swizzle)
212 {
213 switch (swizzle) {
214 case PIPE_SWIZZLE_X: return VK_COMPONENT_SWIZZLE_R;
215 case PIPE_SWIZZLE_Y: return VK_COMPONENT_SWIZZLE_G;
216 case PIPE_SWIZZLE_Z: return VK_COMPONENT_SWIZZLE_B;
217 case PIPE_SWIZZLE_W: return VK_COMPONENT_SWIZZLE_A;
218 case PIPE_SWIZZLE_0: return VK_COMPONENT_SWIZZLE_ZERO;
219 case PIPE_SWIZZLE_1: return VK_COMPONENT_SWIZZLE_ONE;
220 case PIPE_SWIZZLE_NONE: return VK_COMPONENT_SWIZZLE_IDENTITY; // ???
221 default:
222 unreachable("unexpected swizzle");
223 }
224 }
225
226 static VkImageAspectFlags
227 sampler_aspect_from_format(enum pipe_format fmt)
228 {
229 if (util_format_is_depth_or_stencil(fmt)) {
230 const struct util_format_description *desc = util_format_description(fmt);
231 if (util_format_has_depth(desc))
232 return VK_IMAGE_ASPECT_DEPTH_BIT;
233 assert(util_format_has_stencil(desc));
234 return VK_IMAGE_ASPECT_STENCIL_BIT;
235 } else
236 return VK_IMAGE_ASPECT_COLOR_BIT;
237 }
238
239 static struct pipe_sampler_view *
240 zink_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *pres,
241 const struct pipe_sampler_view *state)
242 {
243 struct zink_screen *screen = zink_screen(pctx->screen);
244 struct zink_resource *res = zink_resource(pres);
245 struct zink_sampler_view *sampler_view = CALLOC_STRUCT(zink_sampler_view);
246
247 sampler_view->base = *state;
248 sampler_view->base.texture = NULL;
249 pipe_resource_reference(&sampler_view->base.texture, pres);
250 sampler_view->base.reference.count = 1;
251 sampler_view->base.context = pctx;
252
253 VkImageViewCreateInfo ivci = {};
254 ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
255 ivci.image = res->image;
256 ivci.viewType = image_view_type(state->target);
257 ivci.format = zink_get_format(screen, state->format);
258 ivci.components.r = component_mapping(state->swizzle_r);
259 ivci.components.g = component_mapping(state->swizzle_g);
260 ivci.components.b = component_mapping(state->swizzle_b);
261 ivci.components.a = component_mapping(state->swizzle_a);
262
263 ivci.subresourceRange.aspectMask = sampler_aspect_from_format(state->format);
264 ivci.subresourceRange.baseMipLevel = state->u.tex.first_level;
265 ivci.subresourceRange.baseArrayLayer = state->u.tex.first_layer;
266 ivci.subresourceRange.levelCount = state->u.tex.last_level - state->u.tex.first_level + 1;
267 ivci.subresourceRange.layerCount = state->u.tex.last_layer - state->u.tex.first_layer + 1;
268
269 VkResult err = vkCreateImageView(screen->dev, &ivci, NULL, &sampler_view->image_view);
270 if (err != VK_SUCCESS) {
271 FREE(sampler_view);
272 return NULL;
273 }
274
275 return &sampler_view->base;
276 }
277
278 static void
279 zink_sampler_view_destroy(struct pipe_context *pctx,
280 struct pipe_sampler_view *pview)
281 {
282 struct zink_sampler_view *view = zink_sampler_view(pview);
283 vkDestroyImageView(zink_screen(pctx->screen)->dev, view->image_view, NULL);
284 FREE(view);
285 }
286
287 static void *
288 zink_create_vs_state(struct pipe_context *pctx,
289 const struct pipe_shader_state *shader)
290 {
291 struct nir_shader *nir;
292 if (shader->type != PIPE_SHADER_IR_NIR)
293 nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
294 else
295 nir = (struct nir_shader *)shader->ir.nir;
296
297 return zink_compile_nir(zink_screen(pctx->screen), nir);
298 }
299
300 static void
301 bind_stage(struct zink_context *ctx, enum pipe_shader_type stage,
302 struct zink_shader *shader)
303 {
304 assert(stage < PIPE_SHADER_COMPUTE);
305 ctx->gfx_stages[stage] = shader;
306 ctx->dirty_program = true;
307 }
308
309 static void
310 zink_bind_vs_state(struct pipe_context *pctx,
311 void *cso)
312 {
313 bind_stage(zink_context(pctx), PIPE_SHADER_VERTEX, cso);
314 }
315
316 static void
317 zink_delete_vs_state(struct pipe_context *pctx,
318 void *cso)
319 {
320 zink_shader_free(zink_screen(pctx->screen), cso);
321 }
322
323 static void *
324 zink_create_fs_state(struct pipe_context *pctx,
325 const struct pipe_shader_state *shader)
326 {
327 struct nir_shader *nir;
328 if (shader->type != PIPE_SHADER_IR_NIR)
329 nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
330 else
331 nir = (struct nir_shader *)shader->ir.nir;
332
333 return zink_compile_nir(zink_screen(pctx->screen), nir);
334 }
335
336 static void
337 zink_bind_fs_state(struct pipe_context *pctx,
338 void *cso)
339 {
340 bind_stage(zink_context(pctx), PIPE_SHADER_FRAGMENT, cso);
341 }
342
343 static void
344 zink_delete_fs_state(struct pipe_context *pctx,
345 void *cso)
346 {
347 zink_shader_free(zink_screen(pctx->screen), cso);
348 }
349
/* No-op stub: polygon stipple is not implemented here. */
static void
zink_set_polygon_stipple(struct pipe_context *pctx,
                         const struct pipe_poly_stipple *ps)
{
}
355
356 static void
357 zink_set_vertex_buffers(struct pipe_context *pctx,
358 unsigned start_slot,
359 unsigned num_buffers,
360 const struct pipe_vertex_buffer *buffers)
361 {
362 struct zink_context *ctx = zink_context(pctx);
363
364 if (buffers) {
365 for (int i = 0; i < num_buffers; ++i) {
366 const struct pipe_vertex_buffer *vb = buffers + i;
367 ctx->gfx_pipeline_state.bindings[start_slot + i].stride = vb->stride;
368 }
369 }
370
371 util_set_vertex_buffers_mask(ctx->buffers, &ctx->buffers_enabled_mask,
372 buffers, start_slot, num_buffers);
373 }
374
375 static void
376 zink_set_viewport_states(struct pipe_context *pctx,
377 unsigned start_slot,
378 unsigned num_viewports,
379 const struct pipe_viewport_state *state)
380 {
381 struct zink_context *ctx = zink_context(pctx);
382
383 for (unsigned i = 0; i < num_viewports; ++i) {
384 VkViewport viewport = {
385 state[i].translate[0] - state[i].scale[0],
386 state[i].translate[1] - state[i].scale[1],
387 state[i].scale[0] * 2,
388 state[i].scale[1] * 2,
389 state[i].translate[2] - state[i].scale[2],
390 state[i].translate[2] + state[i].scale[2]
391 };
392 ctx->viewport_states[start_slot + i] = state[i];
393 ctx->viewports[start_slot + i] = viewport;
394 }
395 ctx->num_viewports = start_slot + num_viewports;
396 }
397
398 static void
399 zink_set_scissor_states(struct pipe_context *pctx,
400 unsigned start_slot, unsigned num_scissors,
401 const struct pipe_scissor_state *states)
402 {
403 struct zink_context *ctx = zink_context(pctx);
404
405 for (unsigned i = 0; i < num_scissors; i++) {
406 VkRect2D scissor;
407
408 scissor.offset.x = states[i].minx;
409 scissor.offset.y = states[i].miny;
410 scissor.extent.width = states[i].maxx - states[i].minx;
411 scissor.extent.height = states[i].maxy - states[i].miny;
412 ctx->scissor_states[start_slot + i] = states[i];
413 ctx->scissors[start_slot + i] = scissor;
414 }
415 }
416
/* Bind (or unbind, if cb is NULL) a uniform buffer for a shader stage.
 *
 * User-memory constant buffers are first copied into GPU-visible storage
 * through the context's const_uploader; the upload returns a temporary
 * resource reference which is dropped once ctx->ubos holds its own.
 */
static void
zink_set_constant_buffer(struct pipe_context *pctx,
                         enum pipe_shader_type shader, uint index,
                         const struct pipe_constant_buffer *cb)
{
   struct zink_context *ctx = zink_context(pctx);

   if (cb) {
      struct pipe_resource *buffer = cb->buffer;
      unsigned offset = cb->buffer_offset;
      if (cb->user_buffer) {
         /* copy user memory to the uploader; `buffer`/`offset` are
          * replaced with the upload destination */
         struct zink_screen *screen = zink_screen(pctx->screen);
         u_upload_data(ctx->base.const_uploader, 0, cb->buffer_size,
                       screen->props.limits.minUniformBufferOffsetAlignment,
                       cb->user_buffer, &offset, &buffer);
      }

      pipe_resource_reference(&ctx->ubos[shader][index].buffer, buffer);
      ctx->ubos[shader][index].buffer_offset = offset;
      ctx->ubos[shader][index].buffer_size = cb->buffer_size;
      ctx->ubos[shader][index].user_buffer = NULL;

      /* release the extra reference returned by u_upload_data */
      if (cb->user_buffer)
         pipe_resource_reference(&buffer, NULL);
   } else {
      pipe_resource_reference(&ctx->ubos[shader][index].buffer, NULL);
      ctx->ubos[shader][index].buffer_offset = 0;
      ctx->ubos[shader][index].buffer_size = 0;
      ctx->ubos[shader][index].user_buffer = NULL;
   }
}
448
449 static void
450 zink_set_sampler_views(struct pipe_context *pctx,
451 enum pipe_shader_type shader_type,
452 unsigned start_slot,
453 unsigned num_views,
454 struct pipe_sampler_view **views)
455 {
456 struct zink_context *ctx = zink_context(pctx);
457 assert(views);
458 for (unsigned i = 0; i < num_views; ++i) {
459 pipe_sampler_view_reference(
460 &ctx->image_views[shader_type][start_slot + i],
461 views[i]);
462 }
463 ctx->num_image_views[shader_type] = start_slot + num_views;
464 }
465
466 static void
467 zink_set_stencil_ref(struct pipe_context *pctx,
468 const struct pipe_stencil_ref *ref)
469 {
470 struct zink_context *ctx = zink_context(pctx);
471 ctx->stencil_ref = *ref;
472 }
473
/* No-op stub: nothing is recorded for clip state here. */
static void
zink_set_clip_state(struct pipe_context *pctx,
                    const struct pipe_clip_state *pcs)
{
}
479
480 static struct zink_render_pass *
481 get_render_pass(struct zink_context *ctx)
482 {
483 struct zink_screen *screen = zink_screen(ctx->base.screen);
484 const struct pipe_framebuffer_state *fb = &ctx->fb_state;
485 struct zink_render_pass_state state;
486
487 for (int i = 0; i < fb->nr_cbufs; i++) {
488 struct pipe_resource *res = fb->cbufs[i]->texture;
489 state.rts[i].format = zink_get_format(screen, fb->cbufs[i]->format);
490 state.rts[i].samples = res->nr_samples > 0 ? res->nr_samples :
491 VK_SAMPLE_COUNT_1_BIT;
492 }
493 state.num_cbufs = fb->nr_cbufs;
494
495 if (fb->zsbuf) {
496 struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
497 state.rts[fb->nr_cbufs].format = zsbuf->format;
498 state.rts[fb->nr_cbufs].samples = zsbuf->base.nr_samples > 0 ? zsbuf->base.nr_samples : VK_SAMPLE_COUNT_1_BIT;
499 }
500 state.have_zsbuf = fb->zsbuf != NULL;
501
502 struct hash_entry *entry = _mesa_hash_table_search(ctx->render_pass_cache,
503 &state);
504 if (!entry) {
505 struct zink_render_pass *rp;
506 rp = zink_create_render_pass(screen, &state);
507 entry = _mesa_hash_table_insert(ctx->render_pass_cache, &state, rp);
508 if (!entry)
509 return NULL;
510 }
511
512 return entry->data;
513 }
514
515 static struct zink_framebuffer *
516 get_framebuffer(struct zink_context *ctx)
517 {
518 struct zink_screen *screen = zink_screen(ctx->base.screen);
519
520 struct zink_framebuffer_state state = {};
521 state.rp = get_render_pass(ctx);
522 for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
523 struct pipe_surface *psurf = ctx->fb_state.cbufs[i];
524 state.attachments[i] = zink_surface(psurf);
525 }
526
527 state.num_attachments = ctx->fb_state.nr_cbufs;
528 if (ctx->fb_state.zsbuf) {
529 struct pipe_surface *psurf = ctx->fb_state.zsbuf;
530 state.attachments[state.num_attachments++] = zink_surface(psurf);
531 }
532
533 state.width = ctx->fb_state.width;
534 state.height = ctx->fb_state.height;
535 state.layers = MAX2(ctx->fb_state.layers, 1);
536
537 struct hash_entry *entry = _mesa_hash_table_search(ctx->framebuffer_cache,
538 &state);
539 if (!entry) {
540 struct zink_framebuffer *fb = zink_create_framebuffer(screen, &state);
541 entry = _mesa_hash_table_insert(ctx->framebuffer_cache, &state, fb);
542 if (!entry)
543 return NULL;
544 }
545
546 return entry->data;
547 }
548
/* Begin the current render pass on the given batch.
 *
 * Transitions every attachment to its renderable layout (barriers must be
 * recorded before vkCmdBeginRenderPass), references the render pass and
 * framebuffer from the batch so they outlive its execution, and records
 * the begin command. Requires batch == zink_curr_batch(ctx) and a valid
 * ctx->gfx_pipeline_state.render_pass / ctx->framebuffer.
 */
void
zink_begin_render_pass(struct zink_context *ctx, struct zink_batch *batch)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   assert(batch == zink_curr_batch(ctx));
   assert(ctx->gfx_pipeline_state.render_pass);

   struct pipe_framebuffer_state *fb_state = &ctx->fb_state;

   VkRenderPassBeginInfo rpbi = {};
   rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
   rpbi.renderPass = ctx->gfx_pipeline_state.render_pass->render_pass;
   rpbi.renderArea.offset.x = 0;
   rpbi.renderArea.offset.y = 0;
   rpbi.renderArea.extent.width = fb_state->width;
   rpbi.renderArea.extent.height = fb_state->height;
   /* no clears are requested here; zink_clear() uses vkCmdClearAttachments */
   rpbi.clearValueCount = 0;
   rpbi.pClearValues = NULL;
   rpbi.framebuffer = ctx->framebuffer->fb;

   assert(ctx->gfx_pipeline_state.render_pass && ctx->framebuffer);
   /* the batch may not already be inside a different render pass */
   assert(!batch->rp || batch->rp == ctx->gfx_pipeline_state.render_pass);
   assert(!batch->fb || batch->fb == ctx->framebuffer);

   for (int i = 0; i < fb_state->nr_cbufs; i++) {
      struct zink_resource *res = zink_resource(fb_state->cbufs[i]->texture);
      if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
   }

   if (fb_state->zsbuf) {
      struct zink_resource *res = zink_resource(fb_state->zsbuf->texture);
      if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
   }

   /* pin rp/fb to the batch so they survive until the batch completes */
   zink_render_pass_reference(screen, &batch->rp, ctx->gfx_pipeline_state.render_pass);
   zink_framebuffer_reference(screen, &batch->fb, ctx->framebuffer);

   vkCmdBeginRenderPass(batch->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
}
592
593 static void
594 flush_batch(struct zink_context *ctx)
595 {
596 struct zink_batch *batch = zink_curr_batch(ctx);
597 if (batch->rp)
598 vkCmdEndRenderPass(batch->cmdbuf);
599
600 zink_end_batch(ctx, batch);
601
602 ctx->curr_batch++;
603 if (ctx->curr_batch == ARRAY_SIZE(ctx->batches))
604 ctx->curr_batch = 0;
605
606 zink_start_batch(ctx, zink_curr_batch(ctx));
607 }
608
609 struct zink_batch *
610 zink_batch_rp(struct zink_context *ctx)
611 {
612 struct zink_batch *batch = zink_curr_batch(ctx);
613 if (!batch->rp) {
614 zink_begin_render_pass(ctx, batch);
615 assert(batch->rp);
616 }
617 return batch;
618 }
619
620 struct zink_batch *
621 zink_batch_no_rp(struct zink_context *ctx)
622 {
623 struct zink_batch *batch = zink_curr_batch(ctx);
624 if (batch->rp) {
625 /* flush batch and get a new one */
626 flush_batch(ctx);
627 batch = zink_curr_batch(ctx);
628 assert(!batch->rp);
629 }
630 return batch;
631 }
632
633 static void
634 zink_set_framebuffer_state(struct pipe_context *pctx,
635 const struct pipe_framebuffer_state *state)
636 {
637 struct zink_context *ctx = zink_context(pctx);
638 struct zink_screen *screen = zink_screen(pctx->screen);
639
640 VkSampleCountFlagBits rast_samples = VK_SAMPLE_COUNT_1_BIT;
641 for (int i = 0; i < state->nr_cbufs; i++)
642 rast_samples = MAX2(rast_samples, state->cbufs[i]->texture->nr_samples);
643 if (state->zsbuf && state->zsbuf->texture->nr_samples)
644 rast_samples = MAX2(rast_samples, state->zsbuf->texture->nr_samples);
645
646 util_copy_framebuffer_state(&ctx->fb_state, state);
647
648 struct zink_framebuffer *fb = get_framebuffer(ctx);
649 zink_framebuffer_reference(screen, &ctx->framebuffer, fb);
650 zink_render_pass_reference(screen, &ctx->gfx_pipeline_state.render_pass, fb->rp);
651
652 ctx->gfx_pipeline_state.rast_samples = rast_samples;
653 ctx->gfx_pipeline_state.num_attachments = state->nr_cbufs;
654
655 struct zink_batch *batch = zink_batch_no_rp(ctx);
656
657 for (int i = 0; i < state->nr_cbufs; i++) {
658 struct zink_resource *res = zink_resource(state->cbufs[i]->texture);
659 if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
660 zink_resource_barrier(batch->cmdbuf, res, res->aspect,
661 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
662 }
663
664 if (state->zsbuf) {
665 struct zink_resource *res = zink_resource(state->zsbuf->texture);
666 if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
667 zink_resource_barrier(batch->cmdbuf, res, res->aspect,
668 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
669 }
670 }
671
672 static void
673 zink_set_blend_color(struct pipe_context *pctx,
674 const struct pipe_blend_color *color)
675 {
676 struct zink_context *ctx = zink_context(pctx);
677 memcpy(ctx->blend_constants, color->color, sizeof(float) * 4);
678 }
679
680 static void
681 zink_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
682 {
683 struct zink_context *ctx = zink_context(pctx);
684 ctx->gfx_pipeline_state.sample_mask = sample_mask;
685 }
686
687 static VkAccessFlags
688 access_src_flags(VkImageLayout layout)
689 {
690 switch (layout) {
691 case VK_IMAGE_LAYOUT_UNDEFINED:
692 case VK_IMAGE_LAYOUT_GENERAL:
693 return 0;
694
695 case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
696 return VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
697 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
698 return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
699
700 case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
701 return VK_ACCESS_SHADER_READ_BIT;
702
703 case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
704 return VK_ACCESS_TRANSFER_READ_BIT;
705
706 case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
707 return VK_ACCESS_TRANSFER_WRITE_BIT;
708
709 case VK_IMAGE_LAYOUT_PREINITIALIZED:
710 return VK_ACCESS_HOST_WRITE_BIT;
711
712 default:
713 unreachable("unexpected layout");
714 }
715 }
716
717 static VkAccessFlags
718 access_dst_flags(VkImageLayout layout)
719 {
720 switch (layout) {
721 case VK_IMAGE_LAYOUT_UNDEFINED:
722 case VK_IMAGE_LAYOUT_GENERAL:
723 return 0;
724
725 case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
726 return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
727 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
728 return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
729
730 case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
731 return VK_ACCESS_TRANSFER_READ_BIT;
732
733 case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
734 return VK_ACCESS_TRANSFER_WRITE_BIT;
735
736 default:
737 unreachable("unexpected layout");
738 }
739 }
740
741 static VkPipelineStageFlags
742 pipeline_dst_stage(VkImageLayout layout)
743 {
744 switch (layout) {
745 case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
746 return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
747 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
748 return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
749
750 case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
751 return VK_PIPELINE_STAGE_TRANSFER_BIT;
752 case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
753 return VK_PIPELINE_STAGE_TRANSFER_BIT;
754
755 default:
756 return VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
757 }
758 }
759
760 static VkPipelineStageFlags
761 pipeline_src_stage(VkImageLayout layout)
762 {
763 switch (layout) {
764 case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
765 return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
766 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
767 return VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;
768
769 case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
770 return VK_PIPELINE_STAGE_TRANSFER_BIT;
771 case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
772 return VK_PIPELINE_STAGE_TRANSFER_BIT;
773
774 default:
775 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
776 }
777 }
778
779
780 void
781 zink_resource_barrier(VkCommandBuffer cmdbuf, struct zink_resource *res,
782 VkImageAspectFlags aspect, VkImageLayout new_layout)
783 {
784 VkImageSubresourceRange isr = {
785 aspect,
786 0, VK_REMAINING_MIP_LEVELS,
787 0, VK_REMAINING_ARRAY_LAYERS
788 };
789
790 VkImageMemoryBarrier imb = {
791 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
792 NULL,
793 access_src_flags(res->layout),
794 access_dst_flags(new_layout),
795 res->layout,
796 new_layout,
797 VK_QUEUE_FAMILY_IGNORED,
798 VK_QUEUE_FAMILY_IGNORED,
799 res->image,
800 isr
801 };
802 vkCmdPipelineBarrier(
803 cmdbuf,
804 pipeline_src_stage(res->layout),
805 pipeline_dst_stage(new_layout),
806 0,
807 0, NULL,
808 0, NULL,
809 1, &imb
810 );
811
812 res->layout = new_layout;
813 }
814
/* Clear the requested buffers of the bound framebuffer.
 *
 * Implemented with vkCmdClearAttachments inside an (possibly freshly
 * started) render pass, clearing the full framebuffer rectangle across
 * all layers.
 */
static void
zink_clear(struct pipe_context *pctx,
           unsigned buffers,
           const union pipe_color_union *pcolor,
           double depth, unsigned stencil)
{
   struct zink_context *ctx = zink_context(pctx);
   struct pipe_framebuffer_state *fb = &ctx->fb_state;

   /* FIXME: this is very inefficient; if no renderpass has been started yet,
    * we should record the clear if it's full-screen, and apply it as we
    * start the render-pass. Otherwise we can do a partial out-of-renderpass
    * clear.
    */
   struct zink_batch *batch = zink_batch_rp(ctx);

   /* one entry per color buffer plus one for depth/stencil */
   VkClearAttachment attachments[1 + PIPE_MAX_COLOR_BUFS];
   int num_attachments = 0;

   if (buffers & PIPE_CLEAR_COLOR) {
      /* the clear color is read as floats here; assumes float-renderable
       * targets — integer formats would need the i/ui union members */
      VkClearColorValue color;
      color.float32[0] = pcolor->f[0];
      color.float32[1] = pcolor->f[1];
      color.float32[2] = pcolor->f[2];
      color.float32[3] = pcolor->f[3];

      for (unsigned i = 0; i < fb->nr_cbufs; i++) {
         /* skip buffers not selected by the per-cbuf clear bits */
         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
            continue;

         attachments[num_attachments].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
         attachments[num_attachments].colorAttachment = i;
         attachments[num_attachments].clearValue.color = color;
         ++num_attachments;
      }
   }

   if (buffers & PIPE_CLEAR_DEPTHSTENCIL && fb->zsbuf) {
      VkImageAspectFlags aspect = 0;
      if (buffers & PIPE_CLEAR_DEPTH)
         aspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
      if (buffers & PIPE_CLEAR_STENCIL)
         aspect |= VK_IMAGE_ASPECT_STENCIL_BIT;

      /* note: depth narrows from double to float here */
      attachments[num_attachments].aspectMask = aspect;
      attachments[num_attachments].clearValue.depthStencil.depth = depth;
      attachments[num_attachments].clearValue.depthStencil.stencil = stencil;
      ++num_attachments;
   }

   /* clear the full framebuffer extent, all layers */
   VkClearRect cr;
   cr.rect.offset.x = 0;
   cr.rect.offset.y = 0;
   cr.rect.extent.width = fb->width;
   cr.rect.extent.height = fb->height;
   cr.baseArrayLayer = 0;
   cr.layerCount = util_framebuffer_get_num_layers(fb);
   vkCmdClearAttachments(batch->cmdbuf, num_attachments, attachments, 1, &cr);
}
874
875 VkShaderStageFlagBits
876 zink_shader_stage(enum pipe_shader_type type)
877 {
878 VkShaderStageFlagBits stages[] = {
879 [PIPE_SHADER_VERTEX] = VK_SHADER_STAGE_VERTEX_BIT,
880 [PIPE_SHADER_FRAGMENT] = VK_SHADER_STAGE_FRAGMENT_BIT,
881 [PIPE_SHADER_GEOMETRY] = VK_SHADER_STAGE_GEOMETRY_BIT,
882 [PIPE_SHADER_TESS_CTRL] = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
883 [PIPE_SHADER_TESS_EVAL] = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
884 [PIPE_SHADER_COMPUTE] = VK_SHADER_STAGE_COMPUTE_BIT,
885 };
886 return stages[type];
887 }
888
889 static uint32_t
890 hash_gfx_program(const void *key)
891 {
892 return _mesa_hash_data(key, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1));
893 }
894
895 static bool
896 equals_gfx_program(const void *a, const void *b)
897 {
898 return memcmp(a, b, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1)) == 0;
899 }
900
901 static uint32_t
902 hash_render_pass_state(const void *key)
903 {
904 return _mesa_hash_data(key, sizeof(struct zink_render_pass_state));
905 }
906
907 static bool
908 equals_render_pass_state(const void *a, const void *b)
909 {
910 return memcmp(a, b, sizeof(struct zink_render_pass_state)) == 0;
911 }
912
/* Hash a zink_framebuffer_state key.
 *
 * NOTE(review): the hashed size is
 * sizeof(struct) + sizeof(s->attachments) * num_attachments, which is only
 * in-bounds if `attachments` is a zero-sized trailing array whose entries
 * are allocated after the struct. If it is a fixed-size in-struct array,
 * this reads past the end of the key — confirm against zink_framebuffer.h.
 */
static uint32_t
hash_framebuffer_state(const void *key)
{
   struct zink_framebuffer_state *s = (struct zink_framebuffer_state*)key;
   return _mesa_hash_data(key, sizeof(struct zink_framebuffer_state) + sizeof(s->attachments) * s->num_attachments);
}
919
/* Byte-wise equality of two zink_framebuffer_state keys.
 *
 * NOTE(review): compares the same variable-length span as
 * hash_framebuffer_state(); see the bounds concern documented there.
 */
static bool
equals_framebuffer_state(const void *a, const void *b)
{
   struct zink_framebuffer_state *s = (struct zink_framebuffer_state*)a;
   return memcmp(a, b, sizeof(struct zink_framebuffer_state) + sizeof(s->attachments) * s->num_attachments) == 0;
}
926
/* Flush the context: submit the current batch and optionally hand the
 * caller a fence for it.
 *
 * Note: batch->fence is read after flush_batch(); assumes the fence for
 * the just-submitted batch remains valid on the batch slot at that point.
 */
static void
zink_flush(struct pipe_context *pctx,
           struct pipe_fence_handle **pfence,
           enum pipe_flush_flags flags)
{
   struct zink_context *ctx = zink_context(pctx);

   struct zink_batch *batch = zink_curr_batch(ctx);
   flush_batch(ctx);

   if (pfence)
      zink_fence_reference(zink_screen(pctx->screen),
                           (struct zink_fence **)pfence,
                           batch->fence);

   /* HACK:
    * For some strange reason, we need to finish before presenting, or else
    * we start rendering on top of the back-buffer for the next frame. This
    * seems like a bug in the DRI-driver to me, because we really should
    * be properly protected by fences here, and the back-buffer should
    * either be swapped with the front-buffer, or blitted from. But for
    * some strange reason, neither of these things happen.
    */
   if (flags & PIPE_FLUSH_END_OF_FRAME)
      pctx->screen->fence_finish(pctx->screen, pctx,
                                 (struct pipe_fence_handle *)batch->fence,
                                 PIPE_TIMEOUT_INFINITE);
}
955
/* No-op stub: nothing is required to flush an individual resource here. */
static void
zink_flush_resource(struct pipe_context *pipe,
                    struct pipe_resource *resource)
{
}
961
962 static void
963 zink_resource_copy_region(struct pipe_context *pctx,
964 struct pipe_resource *pdst,
965 unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz,
966 struct pipe_resource *psrc,
967 unsigned src_level, const struct pipe_box *src_box)
968 {
969 struct zink_resource *dst = zink_resource(pdst);
970 struct zink_resource *src = zink_resource(psrc);
971 struct zink_context *ctx = zink_context(pctx);
972 if (dst->base.target != PIPE_BUFFER && src->base.target != PIPE_BUFFER) {
973 VkImageCopy region = {};
974
975 region.srcSubresource.aspectMask = src->aspect;
976 region.srcSubresource.mipLevel = src_level;
977 region.srcSubresource.layerCount = 1;
978 if (src->base.array_size > 1) {
979 region.srcSubresource.baseArrayLayer = src_box->z;
980 region.srcSubresource.layerCount = src_box->depth;
981 region.extent.depth = 1;
982 } else {
983 region.srcOffset.z = src_box->z;
984 region.srcSubresource.layerCount = 1;
985 region.extent.depth = src_box->depth;
986 }
987
988 region.srcOffset.x = src_box->x;
989 region.srcOffset.y = src_box->y;
990
991 region.dstSubresource.aspectMask = dst->aspect;
992 region.dstSubresource.mipLevel = dst_level;
993 if (dst->base.array_size > 1) {
994 region.dstSubresource.baseArrayLayer = dstz;
995 region.dstSubresource.layerCount = src_box->depth;
996 } else {
997 region.dstOffset.z = dstz;
998 region.dstSubresource.layerCount = 1;
999 }
1000
1001 region.dstOffset.x = dstx;
1002 region.dstOffset.y = dsty;
1003 region.extent.width = src_box->width;
1004 region.extent.height = src_box->height;
1005
1006 struct zink_batch *batch = zink_batch_no_rp(ctx);
1007 zink_batch_reference_resoure(batch, src);
1008 zink_batch_reference_resoure(batch, dst);
1009
1010 if (src->layout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
1011 zink_resource_barrier(batch->cmdbuf, src, src->aspect,
1012 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
1013 }
1014
1015 if (dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
1016 zink_resource_barrier(batch->cmdbuf, dst, dst->aspect,
1017 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
1018 }
1019
1020 vkCmdCopyImage(batch->cmdbuf, src->image, src->layout,
1021 dst->image, dst->layout,
1022 1, &region);
1023 } else if (dst->base.target == PIPE_BUFFER &&
1024 src->base.target == PIPE_BUFFER) {
1025 VkBufferCopy region;
1026 region.srcOffset = src_box->x;
1027 region.dstOffset = dstx;
1028 region.size = src_box->width;
1029
1030 struct zink_batch *batch = zink_batch_no_rp(ctx);
1031 zink_batch_reference_resoure(batch, src);
1032 zink_batch_reference_resoure(batch, dst);
1033
1034 vkCmdCopyBuffer(batch->cmdbuf, src->buffer, dst->buffer, 1, &region);
1035 } else
1036 debug_printf("zink: TODO resource copy\n");
1037 }
1038
1039 struct pipe_context *
1040 zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
1041 {
1042 struct zink_screen *screen = zink_screen(pscreen);
1043 struct zink_context *ctx = CALLOC_STRUCT(zink_context);
1044 if (!ctx)
1045 goto fail;
1046
1047 ctx->base.screen = pscreen;
1048 ctx->base.priv = priv;
1049
1050 ctx->base.destroy = zink_context_destroy;
1051
1052 zink_context_state_init(&ctx->base);
1053
1054 ctx->base.create_sampler_state = zink_create_sampler_state;
1055 ctx->base.bind_sampler_states = zink_bind_sampler_states;
1056 ctx->base.delete_sampler_state = zink_delete_sampler_state;
1057
1058 ctx->base.create_sampler_view = zink_create_sampler_view;
1059 ctx->base.set_sampler_views = zink_set_sampler_views;
1060 ctx->base.sampler_view_destroy = zink_sampler_view_destroy;
1061
1062 ctx->base.create_vs_state = zink_create_vs_state;
1063 ctx->base.bind_vs_state = zink_bind_vs_state;
1064 ctx->base.delete_vs_state = zink_delete_vs_state;
1065
1066 ctx->base.create_fs_state = zink_create_fs_state;
1067 ctx->base.bind_fs_state = zink_bind_fs_state;
1068 ctx->base.delete_fs_state = zink_delete_fs_state;
1069
1070 ctx->base.set_polygon_stipple = zink_set_polygon_stipple;
1071 ctx->base.set_vertex_buffers = zink_set_vertex_buffers;
1072 ctx->base.set_viewport_states = zink_set_viewport_states;
1073 ctx->base.set_scissor_states = zink_set_scissor_states;
1074 ctx->base.set_constant_buffer = zink_set_constant_buffer;
1075 ctx->base.set_framebuffer_state = zink_set_framebuffer_state;
1076 ctx->base.set_stencil_ref = zink_set_stencil_ref;
1077 ctx->base.set_clip_state = zink_set_clip_state;
1078 ctx->base.set_blend_color = zink_set_blend_color;
1079
1080 ctx->base.set_sample_mask = zink_set_sample_mask;
1081
1082 ctx->base.clear = zink_clear;
1083 ctx->base.draw_vbo = zink_draw_vbo;
1084 ctx->base.flush = zink_flush;
1085
1086 ctx->base.resource_copy_region = zink_resource_copy_region;
1087 ctx->base.blit = zink_blit;
1088
1089 ctx->base.flush_resource = zink_flush_resource;
1090 zink_context_surface_init(&ctx->base);
1091 zink_context_resource_init(&ctx->base);
1092 zink_context_query_init(&ctx->base);
1093
1094 slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);
1095
1096 ctx->base.stream_uploader = u_upload_create_default(&ctx->base);
1097 ctx->base.const_uploader = ctx->base.stream_uploader;
1098
1099 int prim_hwsupport = 1 << PIPE_PRIM_POINTS |
1100 1 << PIPE_PRIM_LINES |
1101 1 << PIPE_PRIM_LINE_STRIP |
1102 1 << PIPE_PRIM_TRIANGLES |
1103 1 << PIPE_PRIM_TRIANGLE_STRIP |
1104 1 << PIPE_PRIM_TRIANGLE_FAN;
1105
1106 ctx->primconvert = util_primconvert_create(&ctx->base, prim_hwsupport);
1107 if (!ctx->primconvert)
1108 goto fail;
1109
1110 ctx->blitter = util_blitter_create(&ctx->base);
1111 if (!ctx->blitter)
1112 goto fail;
1113
1114 VkCommandPoolCreateInfo cpci = {};
1115 cpci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
1116 cpci.queueFamilyIndex = screen->gfx_queue;
1117 cpci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
1118 if (vkCreateCommandPool(screen->dev, &cpci, NULL, &ctx->cmdpool) != VK_SUCCESS)
1119 goto fail;
1120
1121 VkCommandBufferAllocateInfo cbai = {};
1122 cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
1123 cbai.commandPool = ctx->cmdpool;
1124 cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
1125 cbai.commandBufferCount = 1;
1126
1127 VkDescriptorPoolSize sizes[] = {
1128 {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, ZINK_BATCH_DESC_SIZE}
1129 };
1130 VkDescriptorPoolCreateInfo dpci = {};
1131 dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
1132 dpci.pPoolSizes = sizes;
1133 dpci.poolSizeCount = ARRAY_SIZE(sizes);
1134 dpci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
1135 dpci.maxSets = ZINK_BATCH_DESC_SIZE;
1136
1137 for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i) {
1138 if (vkAllocateCommandBuffers(screen->dev, &cbai, &ctx->batches[i].cmdbuf) != VK_SUCCESS)
1139 goto fail;
1140
1141 ctx->batches[i].resources = _mesa_set_create(NULL, _mesa_hash_pointer,
1142 _mesa_key_pointer_equal);
1143 ctx->batches[i].sampler_views = _mesa_set_create(NULL,
1144 _mesa_hash_pointer,
1145 _mesa_key_pointer_equal);
1146
1147 if (!ctx->batches[i].resources || !ctx->batches[i].sampler_views)
1148 goto fail;
1149
1150 util_dynarray_init(&ctx->batches[i].zombie_samplers, NULL);
1151
1152 if (vkCreateDescriptorPool(screen->dev, &dpci, 0,
1153 &ctx->batches[i].descpool) != VK_SUCCESS)
1154 goto fail;
1155 }
1156
1157 vkGetDeviceQueue(screen->dev, screen->gfx_queue, 0, &ctx->queue);
1158
1159 ctx->program_cache = _mesa_hash_table_create(NULL,
1160 hash_gfx_program,
1161 equals_gfx_program);
1162 ctx->render_pass_cache = _mesa_hash_table_create(NULL,
1163 hash_render_pass_state,
1164 equals_render_pass_state);
1165 ctx->framebuffer_cache = _mesa_hash_table_create(NULL,
1166 hash_framebuffer_state,
1167 equals_framebuffer_state);
1168
1169 if (!ctx->program_cache || !ctx->render_pass_cache ||
1170 !ctx->framebuffer_cache)
1171 goto fail;
1172
1173 ctx->dirty_program = true;
1174
1175 /* start the first batch */
1176 zink_start_batch(ctx, zink_curr_batch(ctx));
1177
1178 return &ctx->base;
1179
1180 fail:
1181 if (ctx) {
1182 vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);
1183 FREE(ctx);
1184 }
1185 return NULL;
1186 }