/*
 * Copyright 2018 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
23
24 #include "zink_context.h"
25
26 #include "zink_batch.h"
27 #include "zink_compiler.h"
28 #include "zink_fence.h"
29 #include "zink_framebuffer.h"
30 #include "zink_helpers.h"
31 #include "zink_pipeline.h"
32 #include "zink_render_pass.h"
33 #include "zink_resource.h"
34 #include "zink_screen.h"
35 #include "zink_state.h"
36 #include "zink_surface.h"
37
38 #include "indices/u_primconvert.h"
39 #include "util/u_blitter.h"
40 #include "util/u_debug.h"
41 #include "util/format/u_format.h"
42 #include "util/u_framebuffer.h"
43 #include "util/u_helpers.h"
44 #include "util/u_inlines.h"
45
46 #include "nir.h"
47
48 #include "util/u_memory.h"
49 #include "util/u_upload_mgr.h"
50
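/* Wait for the hardware to go idle before tearing down batch state; command
 * buffers must not be freed while they may still be executing. */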
static void
zink_context_destroy(struct pipe_context *pctx)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   if (vkQueueWaitIdle(ctx->queue) != VK_SUCCESS)
      debug_printf("vkQueueWaitIdle failed\n");

   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i)
      vkFreeCommandBuffers(screen->dev, ctx->cmdpool, 1, &ctx->batches[i].cmdbuf);
   vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);

   util_primconvert_destroy(ctx->primconvert);
   u_upload_destroy(pctx->stream_uploader);
   slab_destroy_child(&ctx->transfer_pool);
   util_blitter_destroy(ctx->blitter);
   FREE(ctx);
}

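/* The pipe_tex_* enums map almost 1:1 onto their Vulkan counterparts.
 * PIPE_TEX_MIPFILTER_NONE has no Vulkan equivalent; it is emulated in
 * zink_create_sampler_state by clamping the LOD range to [0, 0]. */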
static VkSamplerMipmapMode
sampler_mipmap_mode(enum pipe_tex_mipfilter filter)
{
   switch (filter) {
   case PIPE_TEX_MIPFILTER_NEAREST: return VK_SAMPLER_MIPMAP_MODE_NEAREST;
   case PIPE_TEX_MIPFILTER_LINEAR: return VK_SAMPLER_MIPMAP_MODE_LINEAR;
   case PIPE_TEX_MIPFILTER_NONE:
      unreachable("PIPE_TEX_MIPFILTER_NONE should be dealt with earlier");
   }
   unreachable("unexpected filter");
}

static VkSamplerAddressMode
sampler_address_mode(enum pipe_tex_wrap filter)
{
   switch (filter) {
   case PIPE_TEX_WRAP_REPEAT: return VK_SAMPLER_ADDRESS_MODE_REPEAT;
   case PIPE_TEX_WRAP_CLAMP: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
   case PIPE_TEX_WRAP_MIRROR_REPEAT: return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
   case PIPE_TEX_WRAP_MIRROR_CLAMP: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   }
   unreachable("unexpected wrap");
}

static VkCompareOp
compare_op(enum pipe_compare_func op)
{
   switch (op) {
   case PIPE_FUNC_NEVER: return VK_COMPARE_OP_NEVER;
   case PIPE_FUNC_LESS: return VK_COMPARE_OP_LESS;
   case PIPE_FUNC_EQUAL: return VK_COMPARE_OP_EQUAL;
   case PIPE_FUNC_LEQUAL: return VK_COMPARE_OP_LESS_OR_EQUAL;
   case PIPE_FUNC_GREATER: return VK_COMPARE_OP_GREATER;
   case PIPE_FUNC_NOTEQUAL: return VK_COMPARE_OP_NOT_EQUAL;
   case PIPE_FUNC_GEQUAL: return VK_COMPARE_OP_GREATER_OR_EQUAL;
   case PIPE_FUNC_ALWAYS: return VK_COMPARE_OP_ALWAYS;
   }
   unreachable("unexpected compare");
}

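/* Builds a VkSampler from a gallium sampler state; the handle is returned
 * heap-allocated so it can serve as a CSO. */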
static void *
zink_create_sampler_state(struct pipe_context *pctx,
                          const struct pipe_sampler_state *state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);

   VkSamplerCreateInfo sci = {};
   sci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
   sci.magFilter = zink_filter(state->mag_img_filter);
   sci.minFilter = zink_filter(state->min_img_filter);

   if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) {
      sci.mipmapMode = sampler_mipmap_mode(state->min_mip_filter);
      sci.minLod = state->min_lod;
      sci.maxLod = state->max_lod;
   } else {
      sci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
      sci.minLod = 0;
      sci.maxLod = 0;
   }

   sci.addressModeU = sampler_address_mode(state->wrap_s);
   sci.addressModeV = sampler_address_mode(state->wrap_t);
   sci.addressModeW = sampler_address_mode(state->wrap_r);
   sci.mipLodBias = state->lod_bias;

   if (state->compare_mode == PIPE_TEX_COMPARE_NONE)
      sci.compareOp = VK_COMPARE_OP_NEVER;
   else {
      sci.compareOp = compare_op(state->compare_func);
      sci.compareEnable = VK_TRUE; /* compareOp is ignored unless this is set */
   }

   sci.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; // TODO
   sci.unnormalizedCoordinates = !state->normalized_coords;

   if (state->max_anisotropy > 1) {
      sci.maxAnisotropy = state->max_anisotropy;
      sci.anisotropyEnable = VK_TRUE;
   }

   VkSampler *sampler = CALLOC(1, sizeof(VkSampler));
   if (!sampler)
      return NULL;

   if (vkCreateSampler(screen->dev, &sci, NULL, sampler) != VK_SUCCESS) {
      FREE(sampler);
      return NULL;
   }

   return sampler;
}

static void
zink_bind_sampler_states(struct pipe_context *pctx,
                         enum pipe_shader_type shader,
                         unsigned start_slot,
                         unsigned num_samplers,
                         void **samplers)
{
   struct zink_context *ctx = zink_context(pctx);
   for (unsigned i = 0; i < num_samplers; ++i) {
      VkSampler *sampler = samplers[i];
      ctx->samplers[shader][start_slot + i] = sampler ? *sampler : VK_NULL_HANDLE;
   }
   ctx->num_samplers[shader] = start_slot + num_samplers;
}

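/* A sampler can still be referenced by command buffers in flight, so instead
 * of destroying it immediately we park the handle on the current batch's
 * zombie list and let the batch code destroy it once it is safe. */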
static void
zink_delete_sampler_state(struct pipe_context *pctx,
                          void *sampler_state)
{
   struct zink_batch *batch = zink_curr_batch(zink_context(pctx));
   util_dynarray_append(&batch->zombie_samplers, VkSampler,
                        *(VkSampler *)sampler_state);
   FREE(sampler_state);
}

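/* Vulkan has no rectangle-texture view type; RECT gets a plain 2D view, with
 * unnormalized coordinates expressed through the sampler state instead. */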
static VkImageViewType
image_view_type(enum pipe_texture_target target)
{
   switch (target) {
   case PIPE_TEXTURE_1D: return VK_IMAGE_VIEW_TYPE_1D;
   case PIPE_TEXTURE_1D_ARRAY: return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
   case PIPE_TEXTURE_2D: return VK_IMAGE_VIEW_TYPE_2D;
   case PIPE_TEXTURE_2D_ARRAY: return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
   case PIPE_TEXTURE_CUBE: return VK_IMAGE_VIEW_TYPE_CUBE;
   case PIPE_TEXTURE_CUBE_ARRAY: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
   case PIPE_TEXTURE_3D: return VK_IMAGE_VIEW_TYPE_3D;
   case PIPE_TEXTURE_RECT: return VK_IMAGE_VIEW_TYPE_2D; /* not sure */
   default:
      unreachable("unexpected target");
   }
}

static VkComponentSwizzle
component_mapping(enum pipe_swizzle swizzle)
{
   switch (swizzle) {
   case PIPE_SWIZZLE_X: return VK_COMPONENT_SWIZZLE_R;
   case PIPE_SWIZZLE_Y: return VK_COMPONENT_SWIZZLE_G;
   case PIPE_SWIZZLE_Z: return VK_COMPONENT_SWIZZLE_B;
   case PIPE_SWIZZLE_W: return VK_COMPONENT_SWIZZLE_A;
   case PIPE_SWIZZLE_0: return VK_COMPONENT_SWIZZLE_ZERO;
   case PIPE_SWIZZLE_1: return VK_COMPONENT_SWIZZLE_ONE;
   case PIPE_SWIZZLE_NONE: return VK_COMPONENT_SWIZZLE_IDENTITY; // ???
   default:
      unreachable("unexpected swizzle");
   }
}

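/* Image views used for sampling must select a single aspect; for combined
 * depth+stencil formats we pick depth, and stencil only for stencil-only
 * formats. */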
static VkImageAspectFlags
sampler_aspect_from_format(enum pipe_format fmt)
{
   if (util_format_is_depth_or_stencil(fmt)) {
      const struct util_format_description *desc = util_format_description(fmt);
      if (util_format_has_depth(desc))
         return VK_IMAGE_ASPECT_DEPTH_BIT;
      assert(util_format_has_stencil(desc));
      return VK_IMAGE_ASPECT_STENCIL_BIT;
   } else
      return VK_IMAGE_ASPECT_COLOR_BIT;
}

static struct pipe_sampler_view *
zink_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *pres,
                         const struct pipe_sampler_view *state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_resource *res = zink_resource(pres);
   struct zink_sampler_view *sampler_view = CALLOC_STRUCT(zink_sampler_view);
   if (!sampler_view)
      return NULL;

   sampler_view->base = *state;
   sampler_view->base.texture = NULL;
   pipe_resource_reference(&sampler_view->base.texture, pres);
   sampler_view->base.reference.count = 1;
   sampler_view->base.context = pctx;

   VkImageViewCreateInfo ivci = {};
   ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
   ivci.image = res->image;
   ivci.viewType = image_view_type(state->target);
   ivci.format = zink_get_format(screen, state->format);
   ivci.components.r = component_mapping(state->swizzle_r);
   ivci.components.g = component_mapping(state->swizzle_g);
   ivci.components.b = component_mapping(state->swizzle_b);
   ivci.components.a = component_mapping(state->swizzle_a);

   ivci.subresourceRange.aspectMask = sampler_aspect_from_format(state->format);
   ivci.subresourceRange.baseMipLevel = state->u.tex.first_level;
   ivci.subresourceRange.baseArrayLayer = state->u.tex.first_layer;
   ivci.subresourceRange.levelCount = state->u.tex.last_level - state->u.tex.first_level + 1;
   ivci.subresourceRange.layerCount = state->u.tex.last_layer - state->u.tex.first_layer + 1;

   VkResult err = vkCreateImageView(screen->dev, &ivci, NULL, &sampler_view->image_view);
   if (err != VK_SUCCESS) {
      /* drop the texture reference taken above so we don't leak it */
      pipe_resource_reference(&sampler_view->base.texture, NULL);
      FREE(sampler_view);
      return NULL;
   }

   return &sampler_view->base;
}

static void
zink_sampler_view_destroy(struct pipe_context *pctx,
                          struct pipe_sampler_view *pview)
{
   struct zink_sampler_view *view = zink_sampler_view(pview);
   vkDestroyImageView(zink_screen(pctx->screen)->dev, view->image_view, NULL);
   pipe_resource_reference(&pview->texture, NULL);
   FREE(view);
}

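/* Shaders arrive either as NIR or as TGSI tokens; TGSI is translated first so
 * the rest of the driver only has to deal with NIR. */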
static void *
zink_create_vs_state(struct pipe_context *pctx,
                     const struct pipe_shader_state *shader)
{
   struct nir_shader *nir;
   if (shader->type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
   else
      nir = (struct nir_shader *)shader->ir.nir;

   return zink_compile_nir(zink_screen(pctx->screen), nir);
}

static void
bind_stage(struct zink_context *ctx, enum pipe_shader_type stage,
           struct zink_shader *shader)
{
   assert(stage < PIPE_SHADER_COMPUTE);
   ctx->gfx_stages[stage] = shader;
   ctx->dirty_program = true;
}

static void
zink_bind_vs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_VERTEX, cso);
}

static void
zink_delete_vs_state(struct pipe_context *pctx,
                     void *cso)
{
   zink_shader_free(zink_screen(pctx->screen), cso);
}

static void *
zink_create_fs_state(struct pipe_context *pctx,
                     const struct pipe_shader_state *shader)
{
   struct nir_shader *nir;
   if (shader->type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
   else
      nir = (struct nir_shader *)shader->ir.nir;

   return zink_compile_nir(zink_screen(pctx->screen), nir);
}

static void
zink_bind_fs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_FRAGMENT, cso);
}

static void
zink_delete_fs_state(struct pipe_context *pctx,
                     void *cso)
{
   zink_shader_free(zink_screen(pctx->screen), cso);
}

static void
zink_set_polygon_stipple(struct pipe_context *pctx,
                         const struct pipe_poly_stipple *ps)
{
}

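/* Only the per-binding strides feed the Vulkan pipeline state here; the
 * buffers themselves are tracked with the shared gallium helper and bound at
 * draw time. */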
static void
zink_set_vertex_buffers(struct pipe_context *pctx,
                        unsigned start_slot,
                        unsigned num_buffers,
                        const struct pipe_vertex_buffer *buffers)
{
   struct zink_context *ctx = zink_context(pctx);

   if (buffers) {
      for (int i = 0; i < num_buffers; ++i) {
         const struct pipe_vertex_buffer *vb = buffers + i;
         ctx->gfx_pipeline_state.bindings[start_slot + i].stride = vb->stride;
      }
   }

   util_set_vertex_buffers_mask(ctx->buffers, &ctx->buffers_enabled_mask,
                                buffers, start_slot, num_buffers);
}

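/* Gallium expresses viewports as scale/translate pairs; convert them to the
 * origin/size (plus depth range) form that VkViewport expects. */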
static void
zink_set_viewport_states(struct pipe_context *pctx,
                         unsigned start_slot,
                         unsigned num_viewports,
                         const struct pipe_viewport_state *state)
{
   struct zink_context *ctx = zink_context(pctx);

   for (unsigned i = 0; i < num_viewports; ++i) {
      VkViewport viewport = {
         state[i].translate[0] - state[i].scale[0],
         state[i].translate[1] - state[i].scale[1],
         state[i].scale[0] * 2,
         state[i].scale[1] * 2,
         state[i].translate[2] - state[i].scale[2],
         state[i].translate[2] + state[i].scale[2]
      };
      ctx->viewport_states[start_slot + i] = state[i];
      ctx->viewports[start_slot + i] = viewport;
   }
   ctx->num_viewports = start_slot + num_viewports;
}

static void
zink_set_scissor_states(struct pipe_context *pctx,
                        unsigned start_slot, unsigned num_scissors,
                        const struct pipe_scissor_state *states)
{
   struct zink_context *ctx = zink_context(pctx);

   for (unsigned i = 0; i < num_scissors; i++) {
      VkRect2D scissor;

      scissor.offset.x = states[i].minx;
      scissor.offset.y = states[i].miny;
      scissor.extent.width = states[i].maxx - states[i].minx;
      scissor.extent.height = states[i].maxy - states[i].miny;
      ctx->scissor_states[start_slot + i] = states[i];
      ctx->scissors[start_slot + i] = scissor;
   }
}

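/* User (CPU-side) constant buffers are first copied into GPU-visible memory
 * with u_upload_mgr, respecting minUniformBufferOffsetAlignment; only real
 * buffer bindings are remembered in ctx->ubos. */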
static void
zink_set_constant_buffer(struct pipe_context *pctx,
                         enum pipe_shader_type shader, uint index,
                         const struct pipe_constant_buffer *cb)
{
   struct zink_context *ctx = zink_context(pctx);

   if (cb) {
      struct pipe_resource *buffer = cb->buffer;
      unsigned offset = cb->buffer_offset;
      if (cb->user_buffer) {
         struct zink_screen *screen = zink_screen(pctx->screen);
         u_upload_data(ctx->base.const_uploader, 0, cb->buffer_size,
                       screen->props.limits.minUniformBufferOffsetAlignment,
                       cb->user_buffer, &offset, &buffer);
      }

      pipe_resource_reference(&ctx->ubos[shader][index].buffer, buffer);
      ctx->ubos[shader][index].buffer_offset = offset;
      ctx->ubos[shader][index].buffer_size = cb->buffer_size;
      ctx->ubos[shader][index].user_buffer = NULL;

      if (cb->user_buffer)
         pipe_resource_reference(&buffer, NULL);
   } else {
      pipe_resource_reference(&ctx->ubos[shader][index].buffer, NULL);
      ctx->ubos[shader][index].buffer_offset = 0;
      ctx->ubos[shader][index].buffer_size = 0;
      ctx->ubos[shader][index].user_buffer = NULL;
   }
}

static void
zink_set_sampler_views(struct pipe_context *pctx,
                       enum pipe_shader_type shader_type,
                       unsigned start_slot,
                       unsigned num_views,
                       struct pipe_sampler_view **views)
{
   struct zink_context *ctx = zink_context(pctx);
   assert(views);
   for (unsigned i = 0; i < num_views; ++i) {
      pipe_sampler_view_reference(
         &ctx->image_views[shader_type][start_slot + i],
         views[i]);
   }
   ctx->num_image_views[shader_type] = start_slot + num_views;
}

static void
zink_set_stencil_ref(struct pipe_context *pctx,
                     const struct pipe_stencil_ref *ref)
{
   struct zink_context *ctx = zink_context(pctx);
   ctx->stencil_ref = *ref;
}

static void
zink_set_clip_state(struct pipe_context *pctx,
                    const struct pipe_clip_state *pcs)
{
}

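/* Render passes and framebuffers are deduplicated through context-local hash
 * tables keyed on their state structs, so re-binding an identical framebuffer
 * configuration reuses the existing Vulkan objects. */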
static struct zink_render_pass *
get_render_pass(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   const struct pipe_framebuffer_state *fb = &ctx->fb_state;
   struct zink_render_pass_state state = {}; /* zero-init: the cache hashes the whole struct */

   for (int i = 0; i < fb->nr_cbufs; i++) {
      struct pipe_resource *res = fb->cbufs[i]->texture;
      state.rts[i].format = zink_get_format(screen, fb->cbufs[i]->format);
      state.rts[i].samples = res->nr_samples > 0 ? res->nr_samples :
                                                   VK_SAMPLE_COUNT_1_BIT;
   }
   state.num_cbufs = fb->nr_cbufs;

   if (fb->zsbuf) {
      struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
      state.rts[fb->nr_cbufs].format = zsbuf->format;
      state.rts[fb->nr_cbufs].samples = zsbuf->base.nr_samples > 0 ? zsbuf->base.nr_samples : VK_SAMPLE_COUNT_1_BIT;
   }
   state.have_zsbuf = fb->zsbuf != NULL;

   struct hash_entry *entry = _mesa_hash_table_search(ctx->render_pass_cache,
                                                      &state);
   if (!entry) {
      struct zink_render_pass *rp;
      rp = zink_create_render_pass(screen, &state);
      entry = _mesa_hash_table_insert(ctx->render_pass_cache, &state, rp);
      if (!entry)
         return NULL;
   }

   return entry->data;
}

static struct zink_framebuffer *
get_framebuffer(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);

   struct zink_framebuffer_state state = {};
   state.rp = get_render_pass(ctx);
   for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
      struct pipe_surface *psurf = ctx->fb_state.cbufs[i];
      state.attachments[i] = zink_surface(psurf);
   }

   state.num_attachments = ctx->fb_state.nr_cbufs;
   if (ctx->fb_state.zsbuf) {
      struct pipe_surface *psurf = ctx->fb_state.zsbuf;
      state.attachments[state.num_attachments++] = zink_surface(psurf);
   }

   state.width = ctx->fb_state.width;
   state.height = ctx->fb_state.height;
   state.layers = MAX2(ctx->fb_state.layers, 1);

   struct hash_entry *entry = _mesa_hash_table_search(ctx->framebuffer_cache,
                                                      &state);
   if (!entry) {
      struct zink_framebuffer *fb = zink_create_framebuffer(screen, &state);
      entry = _mesa_hash_table_insert(ctx->framebuffer_cache, &state, fb);
      if (!entry)
         return NULL;
   }

   return entry->data;
}

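/* Starts the current render pass on a batch. Attachments are first
 * transitioned to their attachment-optimal layouts, since these barriers
 * cannot be recorded inside a render pass. */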
void
zink_begin_render_pass(struct zink_context *ctx, struct zink_batch *batch)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   assert(batch == zink_curr_batch(ctx));
   assert(ctx->gfx_pipeline_state.render_pass);

   struct pipe_framebuffer_state *fb_state = &ctx->fb_state;

   VkRenderPassBeginInfo rpbi = {};
   rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
   rpbi.renderPass = ctx->gfx_pipeline_state.render_pass->render_pass;
   rpbi.renderArea.offset.x = 0;
   rpbi.renderArea.offset.y = 0;
   rpbi.renderArea.extent.width = fb_state->width;
   rpbi.renderArea.extent.height = fb_state->height;
   rpbi.clearValueCount = 0;
   rpbi.pClearValues = NULL;
   rpbi.framebuffer = ctx->framebuffer->fb;

   assert(ctx->gfx_pipeline_state.render_pass && ctx->framebuffer);
   assert(!batch->rp || batch->rp == ctx->gfx_pipeline_state.render_pass);
   assert(!batch->fb || batch->fb == ctx->framebuffer);

   for (int i = 0; i < fb_state->nr_cbufs; i++) {
      struct zink_resource *res = zink_resource(fb_state->cbufs[i]->texture);
      if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
   }

   if (fb_state->zsbuf) {
      struct zink_resource *res = zink_resource(fb_state->zsbuf->texture);
      if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
   }

   zink_render_pass_reference(screen, &batch->rp, ctx->gfx_pipeline_state.render_pass);
   zink_framebuffer_reference(screen, &batch->fb, ctx->framebuffer);

   vkCmdBeginRenderPass(batch->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
}

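/* Ends the active render pass (if any), submits the current batch and starts
 * recording into the next batch in the ring. */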
static void
flush_batch(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (batch->rp)
      vkCmdEndRenderPass(batch->cmdbuf);

   zink_end_batch(ctx, batch);

   ctx->curr_batch++;
   if (ctx->curr_batch == ARRAY_SIZE(ctx->batches))
      ctx->curr_batch = 0;

   zink_start_batch(ctx, zink_curr_batch(ctx));
}

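/* Returns the current batch with a render pass active, beginning one if
 * needed; use this before commands that require an active render pass. */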
struct zink_batch *
zink_batch_rp(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (!batch->rp) {
      zink_begin_render_pass(ctx, batch);
      assert(batch->rp);
   }
   return batch;
}

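/* Returns a batch that is guaranteed to be outside a render pass; if one is
 * active, the whole batch is flushed to get out of it. */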
struct zink_batch *
zink_batch_no_rp(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (batch->rp) {
      /* flush batch and get a new one */
      flush_batch(ctx);
      batch = zink_curr_batch(ctx);
      assert(!batch->rp);
   }
   return batch;
}

static void
zink_set_framebuffer_state(struct pipe_context *pctx,
                           const struct pipe_framebuffer_state *state)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   VkSampleCountFlagBits rast_samples = VK_SAMPLE_COUNT_1_BIT;
   for (int i = 0; i < state->nr_cbufs; i++)
      rast_samples = MAX2(rast_samples, state->cbufs[i]->texture->nr_samples);
   if (state->zsbuf && state->zsbuf->texture->nr_samples)
      rast_samples = MAX2(rast_samples, state->zsbuf->texture->nr_samples);

   util_copy_framebuffer_state(&ctx->fb_state, state);

   struct zink_framebuffer *fb = get_framebuffer(ctx);
   zink_framebuffer_reference(screen, &ctx->framebuffer, fb);
   zink_render_pass_reference(screen, &ctx->gfx_pipeline_state.render_pass, fb->rp);

   ctx->gfx_pipeline_state.rast_samples = rast_samples;
   ctx->gfx_pipeline_state.num_attachments = state->nr_cbufs;

   struct zink_batch *batch = zink_batch_no_rp(ctx);

   for (int i = 0; i < state->nr_cbufs; i++) {
      struct zink_resource *res = zink_resource(state->cbufs[i]->texture);
      if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
   }

   if (state->zsbuf) {
      struct zink_resource *res = zink_resource(state->zsbuf->texture);
      if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
   }
}

static void
zink_set_blend_color(struct pipe_context *pctx,
                     const struct pipe_blend_color *color)
{
   struct zink_context *ctx = zink_context(pctx);
   memcpy(ctx->blend_constants, color->color, sizeof(float) * 4);
}

static void
zink_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
   struct zink_context *ctx = zink_context(pctx);
   ctx->gfx_pipeline_state.sample_mask = sample_mask;
}

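/* Conservative per-layout access and stage masks used to build the image
 * barriers in zink_resource_barrier below. */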
static VkAccessFlags
access_src_flags(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_UNDEFINED:
   case VK_IMAGE_LAYOUT_GENERAL:
      return 0;

   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;

   case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
      return VK_ACCESS_SHADER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_ACCESS_TRANSFER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_ACCESS_TRANSFER_WRITE_BIT;

   case VK_IMAGE_LAYOUT_PREINITIALIZED:
      return VK_ACCESS_HOST_WRITE_BIT;

   default:
      unreachable("unexpected layout");
   }
}

static VkAccessFlags
access_dst_flags(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_UNDEFINED:
   case VK_IMAGE_LAYOUT_GENERAL:
      return 0;

   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_ACCESS_TRANSFER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_ACCESS_TRANSFER_WRITE_BIT;

   default:
      unreachable("unexpected layout");
   }
}

static VkPipelineStageFlags
pipeline_dst_stage(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;
   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;

   default:
      return VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
   }
}

static VkPipelineStageFlags
pipeline_src_stage(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;
   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;

   default:
      return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
   }
}

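/* Transitions every mip level and array layer of a resource to new_layout and
 * records the matching pipeline barrier. */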
void
zink_resource_barrier(VkCommandBuffer cmdbuf, struct zink_resource *res,
                      VkImageAspectFlags aspect, VkImageLayout new_layout)
{
   VkImageSubresourceRange isr = {
      aspect,
      0, VK_REMAINING_MIP_LEVELS,
      0, VK_REMAINING_ARRAY_LAYERS
   };

   VkImageMemoryBarrier imb = {
      VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
      NULL,
      access_src_flags(res->layout),
      access_dst_flags(new_layout),
      res->layout,
      new_layout,
      VK_QUEUE_FAMILY_IGNORED,
      VK_QUEUE_FAMILY_IGNORED,
      res->image,
      isr
   };
   vkCmdPipelineBarrier(
      cmdbuf,
      pipeline_src_stage(res->layout),
      pipeline_dst_stage(new_layout),
      0,
      0, NULL,
      0, NULL,
      1, &imb
   );

   res->layout = new_layout;
}

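/* Clears go through vkCmdClearAttachments, which is only valid inside a
 * render pass; hence the zink_batch_rp() call below. */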
static void
zink_clear(struct pipe_context *pctx,
           unsigned buffers,
           const union pipe_color_union *pcolor,
           double depth, unsigned stencil)
{
   struct zink_context *ctx = zink_context(pctx);
   struct pipe_framebuffer_state *fb = &ctx->fb_state;

   /* FIXME: this is very inefficient; if no renderpass has been started yet,
    * we should record the clear if it's full-screen, and apply it as we
    * start the render-pass. Otherwise we can do a partial out-of-renderpass
    * clear.
    */
   struct zink_batch *batch = zink_batch_rp(ctx);

   VkClearAttachment attachments[1 + PIPE_MAX_COLOR_BUFS];
   int num_attachments = 0;

   if (buffers & PIPE_CLEAR_COLOR) {
      VkClearColorValue color;
      color.float32[0] = pcolor->f[0];
      color.float32[1] = pcolor->f[1];
      color.float32[2] = pcolor->f[2];
      color.float32[3] = pcolor->f[3];

      for (unsigned i = 0; i < fb->nr_cbufs; i++) {
         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
            continue;

         attachments[num_attachments].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
         attachments[num_attachments].colorAttachment = i;
         attachments[num_attachments].clearValue.color = color;
         ++num_attachments;
      }
   }

   if (buffers & PIPE_CLEAR_DEPTHSTENCIL && fb->zsbuf) {
      VkImageAspectFlags aspect = 0;
      if (buffers & PIPE_CLEAR_DEPTH)
         aspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
      if (buffers & PIPE_CLEAR_STENCIL)
         aspect |= VK_IMAGE_ASPECT_STENCIL_BIT;

      attachments[num_attachments].aspectMask = aspect;
      attachments[num_attachments].clearValue.depthStencil.depth = depth;
      attachments[num_attachments].clearValue.depthStencil.stencil = stencil;
      ++num_attachments;
   }

   VkClearRect cr;
   cr.rect.offset.x = 0;
   cr.rect.offset.y = 0;
   cr.rect.extent.width = fb->width;
   cr.rect.extent.height = fb->height;
   cr.baseArrayLayer = 0;
   cr.layerCount = util_framebuffer_get_num_layers(fb);
   vkCmdClearAttachments(batch->cmdbuf, num_attachments, attachments, 1, &cr);
}

VkShaderStageFlagBits
zink_shader_stage(enum pipe_shader_type type)
{
   VkShaderStageFlagBits stages[] = {
      [PIPE_SHADER_VERTEX] = VK_SHADER_STAGE_VERTEX_BIT,
      [PIPE_SHADER_FRAGMENT] = VK_SHADER_STAGE_FRAGMENT_BIT,
      [PIPE_SHADER_GEOMETRY] = VK_SHADER_STAGE_GEOMETRY_BIT,
      [PIPE_SHADER_TESS_CTRL] = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
      [PIPE_SHADER_TESS_EVAL] = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
      [PIPE_SHADER_COMPUTE] = VK_SHADER_STAGE_COMPUTE_BIT,
   };
   return stages[type];
}

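/* The caches below hash and compare their keys bytewise, so key structs must
 * be fully initialized, padding included. */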
static uint32_t
hash_gfx_program(const void *key)
{
   return _mesa_hash_data(key, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1));
}

static bool
equals_gfx_program(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1)) == 0;
}

static uint32_t
hash_render_pass_state(const void *key)
{
   return _mesa_hash_data(key, sizeof(struct zink_render_pass_state));
}

static bool
equals_render_pass_state(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_render_pass_state)) == 0;
}

static uint32_t
hash_framebuffer_state(const void *key)
{
   struct zink_framebuffer_state *s = (struct zink_framebuffer_state *)key;
   return _mesa_hash_data(key, sizeof(struct zink_framebuffer_state) + sizeof(s->attachments) * s->num_attachments);
}

static bool
equals_framebuffer_state(const void *a, const void *b)
{
   struct zink_framebuffer_state *s = (struct zink_framebuffer_state *)a;
   return memcmp(a, b, sizeof(struct zink_framebuffer_state) + sizeof(s->attachments) * s->num_attachments) == 0;
}

static void
zink_flush(struct pipe_context *pctx,
           struct pipe_fence_handle **pfence,
           enum pipe_flush_flags flags)
{
   struct zink_context *ctx = zink_context(pctx);

   struct zink_batch *batch = zink_curr_batch(ctx);
   flush_batch(ctx);

   if (pfence)
      zink_fence_reference(zink_screen(pctx->screen),
                           (struct zink_fence **)pfence,
                           batch->fence);

   /* HACK:
    * For some strange reason, we need to finish before presenting, or else
    * we start rendering on top of the back-buffer for the next frame. This
    * seems like a bug in the DRI-driver to me, because we really should
    * be properly protected by fences here, and the back-buffer should
    * either be swapped with the front-buffer, or blitted from. But for
    * some strange reason, neither of these things happen.
    */
   if (flags & PIPE_FLUSH_END_OF_FRAME)
      pctx->screen->fence_finish(pctx->screen, pctx,
                                 (struct pipe_fence_handle *)batch->fence,
                                 PIPE_TIMEOUT_INFINITE);
}

static void
zink_flush_resource(struct pipe_context *pipe,
                    struct pipe_resource *resource)
{
}

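/* Image-to-image and buffer-to-buffer copies map directly onto vkCmdCopyImage
 * and vkCmdCopyBuffer; mixed image/buffer copies are still a TODO. */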
static void
zink_resource_copy_region(struct pipe_context *pctx,
                          struct pipe_resource *pdst,
                          unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz,
                          struct pipe_resource *psrc,
                          unsigned src_level, const struct pipe_box *src_box)
{
   struct zink_resource *dst = zink_resource(pdst);
   struct zink_resource *src = zink_resource(psrc);
   struct zink_context *ctx = zink_context(pctx);
   if (dst->base.target != PIPE_BUFFER && src->base.target != PIPE_BUFFER) {
      VkImageCopy region = {};

      region.srcSubresource.aspectMask = src->aspect;
      region.srcSubresource.mipLevel = src_level;
      region.srcSubresource.layerCount = 1;
      if (src->base.array_size > 1) {
         region.srcSubresource.baseArrayLayer = src_box->z;
         region.srcSubresource.layerCount = src_box->depth;
         region.extent.depth = 1;
      } else {
         region.srcOffset.z = src_box->z;
         region.srcSubresource.layerCount = 1;
         region.extent.depth = src_box->depth;
      }

      region.srcOffset.x = src_box->x;
      region.srcOffset.y = src_box->y;

      region.dstSubresource.aspectMask = dst->aspect;
      region.dstSubresource.mipLevel = dst_level;
      if (dst->base.array_size > 1) {
         region.dstSubresource.baseArrayLayer = dstz;
         region.dstSubresource.layerCount = src_box->depth;
      } else {
         region.dstOffset.z = dstz;
         region.dstSubresource.layerCount = 1;
      }

      region.dstOffset.x = dstx;
      region.dstOffset.y = dsty;
      region.extent.width = src_box->width;
      region.extent.height = src_box->height;

      struct zink_batch *batch = zink_batch_no_rp(ctx);
      zink_batch_reference_resoure(batch, src);
      zink_batch_reference_resoure(batch, dst);

      if (src->layout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
         zink_resource_barrier(batch->cmdbuf, src, src->aspect,
                               VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
      }

      if (dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
         zink_resource_barrier(batch->cmdbuf, dst, dst->aspect,
                               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
      }

      vkCmdCopyImage(batch->cmdbuf, src->image, src->layout,
                     dst->image, dst->layout,
                     1, &region);
   } else if (dst->base.target == PIPE_BUFFER &&
              src->base.target == PIPE_BUFFER) {
      VkBufferCopy region;
      region.srcOffset = src_box->x;
      region.dstOffset = dstx;
      region.size = src_box->width;

      struct zink_batch *batch = zink_batch_no_rp(ctx);
      zink_batch_reference_resoure(batch, src);
      zink_batch_reference_resoure(batch, dst);

      vkCmdCopyBuffer(batch->cmdbuf, src->buffer, dst->buffer, 1, &region);
   } else
      debug_printf("zink: TODO resource copy\n");
}

struct pipe_context *
zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
   struct zink_screen *screen = zink_screen(pscreen);
   struct zink_context *ctx = CALLOC_STRUCT(zink_context);
   if (!ctx)
      goto fail;

   ctx->base.screen = pscreen;
   ctx->base.priv = priv;

   ctx->base.destroy = zink_context_destroy;

   zink_context_state_init(&ctx->base);

   ctx->base.create_sampler_state = zink_create_sampler_state;
   ctx->base.bind_sampler_states = zink_bind_sampler_states;
   ctx->base.delete_sampler_state = zink_delete_sampler_state;

   ctx->base.create_sampler_view = zink_create_sampler_view;
   ctx->base.set_sampler_views = zink_set_sampler_views;
   ctx->base.sampler_view_destroy = zink_sampler_view_destroy;

   ctx->base.create_vs_state = zink_create_vs_state;
   ctx->base.bind_vs_state = zink_bind_vs_state;
   ctx->base.delete_vs_state = zink_delete_vs_state;

   ctx->base.create_fs_state = zink_create_fs_state;
   ctx->base.bind_fs_state = zink_bind_fs_state;
   ctx->base.delete_fs_state = zink_delete_fs_state;

   ctx->base.set_polygon_stipple = zink_set_polygon_stipple;
   ctx->base.set_vertex_buffers = zink_set_vertex_buffers;
   ctx->base.set_viewport_states = zink_set_viewport_states;
   ctx->base.set_scissor_states = zink_set_scissor_states;
   ctx->base.set_constant_buffer = zink_set_constant_buffer;
   ctx->base.set_framebuffer_state = zink_set_framebuffer_state;
   ctx->base.set_stencil_ref = zink_set_stencil_ref;
   ctx->base.set_clip_state = zink_set_clip_state;
   ctx->base.set_blend_color = zink_set_blend_color;

   ctx->base.set_sample_mask = zink_set_sample_mask;

   ctx->base.clear = zink_clear;
   ctx->base.draw_vbo = zink_draw_vbo;
   ctx->base.flush = zink_flush;

   ctx->base.resource_copy_region = zink_resource_copy_region;
   ctx->base.blit = zink_blit;

   ctx->base.flush_resource = zink_flush_resource;
   zink_context_surface_init(&ctx->base);
   zink_context_resource_init(&ctx->base);
   zink_context_query_init(&ctx->base);

   slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);

   ctx->base.stream_uploader = u_upload_create_default(&ctx->base);
   ctx->base.const_uploader = ctx->base.stream_uploader;

   /* primitive types we pass through natively; everything else goes through
    * u_primconvert */
   int prim_hwsupport = 1 << PIPE_PRIM_POINTS |
                        1 << PIPE_PRIM_LINES |
                        1 << PIPE_PRIM_LINE_STRIP |
                        1 << PIPE_PRIM_TRIANGLES |
                        1 << PIPE_PRIM_TRIANGLE_STRIP |
                        1 << PIPE_PRIM_TRIANGLE_FAN;

   ctx->primconvert = util_primconvert_create(&ctx->base, prim_hwsupport);
   if (!ctx->primconvert)
      goto fail;

   ctx->blitter = util_blitter_create(&ctx->base);
   if (!ctx->blitter)
      goto fail;

   VkCommandPoolCreateInfo cpci = {};
   cpci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
   cpci.queueFamilyIndex = screen->gfx_queue;
   cpci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
   if (vkCreateCommandPool(screen->dev, &cpci, NULL, &ctx->cmdpool) != VK_SUCCESS)
      goto fail;

   VkCommandBufferAllocateInfo cbai = {};
   cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
   cbai.commandPool = ctx->cmdpool;
   cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
   cbai.commandBufferCount = 1;

   VkDescriptorPoolSize sizes[] = {
      {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, ZINK_BATCH_DESC_SIZE}
   };
   VkDescriptorPoolCreateInfo dpci = {};
   dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
   dpci.pPoolSizes = sizes;
   dpci.poolSizeCount = ARRAY_SIZE(sizes);
   dpci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
   dpci.maxSets = ZINK_BATCH_DESC_SIZE;

   /* each batch gets its own command buffer, resource/sampler-view tracking
    * sets and descriptor pool */
   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i) {
      if (vkAllocateCommandBuffers(screen->dev, &cbai, &ctx->batches[i].cmdbuf) != VK_SUCCESS)
         goto fail;

      ctx->batches[i].resources = _mesa_set_create(NULL, _mesa_hash_pointer,
                                                   _mesa_key_pointer_equal);
      ctx->batches[i].sampler_views = _mesa_set_create(NULL,
                                                       _mesa_hash_pointer,
                                                       _mesa_key_pointer_equal);

      if (!ctx->batches[i].resources || !ctx->batches[i].sampler_views)
         goto fail;

      util_dynarray_init(&ctx->batches[i].zombie_samplers, NULL);

      if (vkCreateDescriptorPool(screen->dev, &dpci, 0,
                                 &ctx->batches[i].descpool) != VK_SUCCESS)
         goto fail;
   }

   vkGetDeviceQueue(screen->dev, screen->gfx_queue, 0, &ctx->queue);

   ctx->program_cache = _mesa_hash_table_create(NULL,
                                                hash_gfx_program,
                                                equals_gfx_program);
   ctx->render_pass_cache = _mesa_hash_table_create(NULL,
                                                    hash_render_pass_state,
                                                    equals_render_pass_state);
   ctx->framebuffer_cache = _mesa_hash_table_create(NULL,
                                                    hash_framebuffer_state,
                                                    equals_framebuffer_state);

   if (!ctx->program_cache || !ctx->render_pass_cache ||
       !ctx->framebuffer_cache)
      goto fail;

   ctx->dirty_program = true;

   /* start the first batch */
   zink_start_batch(ctx, zink_curr_batch(ctx));

   return &ctx->base;

fail:
   if (ctx) {
      vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);
      FREE(ctx);
   }
   return NULL;
}