/*
 * Copyright 2018 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "zink_context.h"

#include "zink_batch.h"
#include "zink_compiler.h"
#include "zink_fence.h"
#include "zink_framebuffer.h"
#include "zink_helpers.h"
#include "zink_pipeline.h"
#include "zink_query.h"
#include "zink_render_pass.h"
#include "zink_resource.h"
#include "zink_screen.h"
#include "zink_state.h"
#include "zink_surface.h"

#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
#include "util/u_debug.h"
#include "util/format/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"

#include "nir.h"

#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

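/* Context teardown: drain the queue so no batch is still executing, then
 * destroy the per-batch Vulkan objects before freeing the context itself.
 */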
static void
zink_context_destroy(struct pipe_context *pctx)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   if (vkQueueWaitIdle(ctx->queue) != VK_SUCCESS)
      debug_printf("vkQueueWaitIdle failed\n");

   for (unsigned i = 0; i < ARRAY_SIZE(ctx->null_buffers); i++)
      pipe_resource_reference(&ctx->null_buffers[i], NULL);

   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i) {
      vkDestroyDescriptorPool(screen->dev, ctx->batches[i].descpool, NULL);
      vkFreeCommandBuffers(screen->dev, ctx->cmdpool, 1, &ctx->batches[i].cmdbuf);
   }
   vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);

   util_primconvert_destroy(ctx->primconvert);
   u_upload_destroy(pctx->stream_uploader);
   slab_destroy_child(&ctx->transfer_pool);
   util_blitter_destroy(ctx->blitter);
   FREE(ctx);
}

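/* The next few helpers translate gallium state enums into their Vulkan
 * equivalents; cases without an exact Vulkan match are approximated and
 * marked inline below.
 */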
static VkSamplerMipmapMode
sampler_mipmap_mode(enum pipe_tex_mipfilter filter)
{
   switch (filter) {
   case PIPE_TEX_MIPFILTER_NEAREST: return VK_SAMPLER_MIPMAP_MODE_NEAREST;
   case PIPE_TEX_MIPFILTER_LINEAR: return VK_SAMPLER_MIPMAP_MODE_LINEAR;
   case PIPE_TEX_MIPFILTER_NONE:
      unreachable("PIPE_TEX_MIPFILTER_NONE should be dealt with earlier");
   }
   unreachable("unexpected filter");
}

static VkSamplerAddressMode
sampler_address_mode(enum pipe_tex_wrap filter)
{
   switch (filter) {
   case PIPE_TEX_WRAP_REPEAT: return VK_SAMPLER_ADDRESS_MODE_REPEAT;
   case PIPE_TEX_WRAP_CLAMP: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
   case PIPE_TEX_WRAP_MIRROR_REPEAT: return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
   case PIPE_TEX_WRAP_MIRROR_CLAMP: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   }
   unreachable("unexpected wrap");
}

static VkCompareOp
compare_op(enum pipe_compare_func op)
{
   switch (op) {
   case PIPE_FUNC_NEVER: return VK_COMPARE_OP_NEVER;
   case PIPE_FUNC_LESS: return VK_COMPARE_OP_LESS;
   case PIPE_FUNC_EQUAL: return VK_COMPARE_OP_EQUAL;
   case PIPE_FUNC_LEQUAL: return VK_COMPARE_OP_LESS_OR_EQUAL;
   case PIPE_FUNC_GREATER: return VK_COMPARE_OP_GREATER;
   case PIPE_FUNC_NOTEQUAL: return VK_COMPARE_OP_NOT_EQUAL;
   case PIPE_FUNC_GEQUAL: return VK_COMPARE_OP_GREATER_OR_EQUAL;
   case PIPE_FUNC_ALWAYS: return VK_COMPARE_OP_ALWAYS;
   }
   unreachable("unexpected compare");
}

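/* Sampler-state CSOs are just heap-allocated VkSampler handles; the CSO
 * framework hands the same pointer back to bind/delete, so no wrapper
 * struct is needed.
 */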
static void *
zink_create_sampler_state(struct pipe_context *pctx,
                          const struct pipe_sampler_state *state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);

   VkSamplerCreateInfo sci = {};
   sci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
   sci.magFilter = zink_filter(state->mag_img_filter);
   sci.minFilter = zink_filter(state->min_img_filter);

   if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) {
      sci.mipmapMode = sampler_mipmap_mode(state->min_mip_filter);
      sci.minLod = state->min_lod;
      sci.maxLod = state->max_lod;
   } else {
      sci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
      sci.minLod = 0;
      sci.maxLod = 0;
   }

   sci.addressModeU = sampler_address_mode(state->wrap_s);
   sci.addressModeV = sampler_address_mode(state->wrap_t);
   sci.addressModeW = sampler_address_mode(state->wrap_r);
   sci.mipLodBias = state->lod_bias;

   if (state->compare_mode == PIPE_TEX_COMPARE_NONE)
      sci.compareOp = VK_COMPARE_OP_NEVER;
   else {
      sci.compareOp = compare_op(state->compare_func);
      sci.compareEnable = VK_TRUE;
   }

   sci.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; // TODO
   sci.unnormalizedCoordinates = !state->normalized_coords;

   if (state->max_anisotropy > 1) {
      sci.maxAnisotropy = state->max_anisotropy;
      sci.anisotropyEnable = VK_TRUE;
   }

   VkSampler *sampler = CALLOC(1, sizeof(VkSampler));
   if (!sampler)
      return NULL;

   if (vkCreateSampler(screen->dev, &sci, NULL, sampler) != VK_SUCCESS) {
      FREE(sampler);
      return NULL;
   }

   return sampler;
}

static void
zink_bind_sampler_states(struct pipe_context *pctx,
                         enum pipe_shader_type shader,
                         unsigned start_slot,
                         unsigned num_samplers,
                         void **samplers)
{
   struct zink_context *ctx = zink_context(pctx);
   for (unsigned i = 0; i < num_samplers; ++i) {
      VkSampler *sampler = samplers[i];
      ctx->sampler_states[shader][start_slot + i] = sampler;
      ctx->samplers[shader][start_slot + i] = sampler ? *sampler : VK_NULL_HANDLE;
   }
   ctx->num_samplers[shader] = start_slot + num_samplers;
}

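/* A deleted sampler may still be referenced by command buffers in flight,
 * so deletion only queues the handle on the current batch's zombie list;
 * the batch destroys it once its fence has signaled.
 */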
static void
zink_delete_sampler_state(struct pipe_context *pctx,
                          void *sampler_state)
{
   struct zink_batch *batch = zink_curr_batch(zink_context(pctx));
   util_dynarray_append(&batch->zombie_samplers, VkSampler,
                        *(VkSampler *)sampler_state);
   FREE(sampler_state);
}


static VkImageViewType
image_view_type(enum pipe_texture_target target)
{
   switch (target) {
   case PIPE_TEXTURE_1D: return VK_IMAGE_VIEW_TYPE_1D;
   case PIPE_TEXTURE_1D_ARRAY: return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
   case PIPE_TEXTURE_2D: return VK_IMAGE_VIEW_TYPE_2D;
   case PIPE_TEXTURE_2D_ARRAY: return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
   case PIPE_TEXTURE_CUBE: return VK_IMAGE_VIEW_TYPE_CUBE;
   case PIPE_TEXTURE_CUBE_ARRAY: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
   case PIPE_TEXTURE_3D: return VK_IMAGE_VIEW_TYPE_3D;
   case PIPE_TEXTURE_RECT: return VK_IMAGE_VIEW_TYPE_2D;
   default:
      unreachable("unexpected target");
   }
}

static VkComponentSwizzle
component_mapping(enum pipe_swizzle swizzle)
{
   switch (swizzle) {
   case PIPE_SWIZZLE_X: return VK_COMPONENT_SWIZZLE_R;
   case PIPE_SWIZZLE_Y: return VK_COMPONENT_SWIZZLE_G;
   case PIPE_SWIZZLE_Z: return VK_COMPONENT_SWIZZLE_B;
   case PIPE_SWIZZLE_W: return VK_COMPONENT_SWIZZLE_A;
   case PIPE_SWIZZLE_0: return VK_COMPONENT_SWIZZLE_ZERO;
   case PIPE_SWIZZLE_1: return VK_COMPONENT_SWIZZLE_ONE;
   case PIPE_SWIZZLE_NONE: return VK_COMPONENT_SWIZZLE_IDENTITY; // ???
   default:
      unreachable("unexpected swizzle");
   }
}

static VkImageAspectFlags
sampler_aspect_from_format(enum pipe_format fmt)
{
   if (util_format_is_depth_or_stencil(fmt)) {
      const struct util_format_description *desc = util_format_description(fmt);
      if (util_format_has_depth(desc))
         return VK_IMAGE_ASPECT_DEPTH_BIT;
      assert(util_format_has_stencil(desc));
      return VK_IMAGE_ASPECT_STENCIL_BIT;
   } else
      return VK_IMAGE_ASPECT_COLOR_BIT;
}

static struct pipe_sampler_view *
zink_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *pres,
                         const struct pipe_sampler_view *state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_resource *res = zink_resource(pres);
   struct zink_sampler_view *sampler_view = CALLOC_STRUCT(zink_sampler_view);
   if (!sampler_view)
      return NULL;

   sampler_view->base = *state;
   sampler_view->base.texture = NULL;
   pipe_resource_reference(&sampler_view->base.texture, pres);
   sampler_view->base.reference.count = 1;
   sampler_view->base.context = pctx;

   VkImageViewCreateInfo ivci = {};
   ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
   ivci.image = res->image;
   ivci.viewType = image_view_type(state->target);
   ivci.format = zink_get_format(screen, state->format);
   ivci.components.r = component_mapping(state->swizzle_r);
   ivci.components.g = component_mapping(state->swizzle_g);
   ivci.components.b = component_mapping(state->swizzle_b);
   ivci.components.a = component_mapping(state->swizzle_a);

   ivci.subresourceRange.aspectMask = sampler_aspect_from_format(state->format);
   ivci.subresourceRange.baseMipLevel = state->u.tex.first_level;
   ivci.subresourceRange.baseArrayLayer = state->u.tex.first_layer;
   ivci.subresourceRange.levelCount = state->u.tex.last_level - state->u.tex.first_level + 1;
   ivci.subresourceRange.layerCount = state->u.tex.last_layer - state->u.tex.first_layer + 1;

   VkResult err = vkCreateImageView(screen->dev, &ivci, NULL, &sampler_view->image_view);
   if (err != VK_SUCCESS) {
      /* drop the texture reference taken above before freeing */
      pipe_resource_reference(&sampler_view->base.texture, NULL);
      FREE(sampler_view);
      return NULL;
   }

   return &sampler_view->base;
}

static void
zink_sampler_view_destroy(struct pipe_context *pctx,
                          struct pipe_sampler_view *pview)
{
   struct zink_sampler_view *view = zink_sampler_view(pview);
   vkDestroyImageView(zink_screen(pctx->screen)->dev, view->image_view, NULL);
   /* release the reference on the backing texture taken at creation */
   pipe_resource_reference(&pview->texture, NULL);
   FREE(view);
}

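/* Shader CSOs: gallium may hand us TGSI or NIR; anything that isn't already
 * NIR goes through the TGSI-to-NIR translator before being passed on to
 * zink_compile_nir.
 */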
static void *
zink_create_vs_state(struct pipe_context *pctx,
                     const struct pipe_shader_state *shader)
{
   struct nir_shader *nir;
   if (shader->type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
   else
      nir = (struct nir_shader *)shader->ir.nir;

   return zink_compile_nir(zink_screen(pctx->screen), nir, &shader->stream_output);
}

static void
bind_stage(struct zink_context *ctx, enum pipe_shader_type stage,
           struct zink_shader *shader)
{
   assert(stage < PIPE_SHADER_COMPUTE);
   ctx->gfx_stages[stage] = shader;
   ctx->dirty_program = true;
}

static void
zink_bind_vs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_VERTEX, cso);
}

static void
zink_delete_vs_state(struct pipe_context *pctx,
                     void *cso)
{
   zink_shader_free(zink_context(pctx), cso);
}

static void *
zink_create_fs_state(struct pipe_context *pctx,
                     const struct pipe_shader_state *shader)
{
   struct nir_shader *nir;
   if (shader->type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
   else
      nir = (struct nir_shader *)shader->ir.nir;

   return zink_compile_nir(zink_screen(pctx->screen), nir, NULL);
}

static void
zink_bind_fs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_FRAGMENT, cso);
}

static void
zink_delete_fs_state(struct pipe_context *pctx,
                     void *cso)
{
   zink_shader_free(zink_context(pctx), cso);
}

static void
zink_set_polygon_stipple(struct pipe_context *pctx,
                         const struct pipe_poly_stipple *ps)
{
}

static void
zink_set_vertex_buffers(struct pipe_context *pctx,
                        unsigned start_slot,
                        unsigned num_buffers,
                        const struct pipe_vertex_buffer *buffers)
{
   struct zink_context *ctx = zink_context(pctx);

   if (buffers) {
      for (int i = 0; i < num_buffers; ++i) {
         const struct pipe_vertex_buffer *vb = buffers + i;
         struct zink_resource *res = zink_resource(vb->buffer.resource);

         ctx->gfx_pipeline_state.bindings[start_slot + i].stride = vb->stride;
         if (res && res->needs_xfb_barrier) {
            /* if we're binding a previously-used xfb buffer, we need cmd buffer synchronization to ensure
             * that we use the right buffer data
             */
            pctx->flush(pctx, NULL, 0);
            res->needs_xfb_barrier = false;
         }
      }
   }

   util_set_vertex_buffers_mask(ctx->buffers, &ctx->buffers_enabled_mask,
                                buffers, start_slot, num_buffers);
}

static void
zink_set_viewport_states(struct pipe_context *pctx,
                         unsigned start_slot,
                         unsigned num_viewports,
                         const struct pipe_viewport_state *state)
{
   struct zink_context *ctx = zink_context(pctx);

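   /* gallium expresses the viewport transform as scale/translate pairs
    * (position = translate + ndc * scale), while Vulkan wants an origin
    * plus extents and an explicit depth range, so:
    *   x/y   = translate - scale        (upper-left corner)
    *   w/h   = 2 * scale                (full extent)
    *   depth = [translate_z - scale_z, translate_z + scale_z]
    */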
   for (unsigned i = 0; i < num_viewports; ++i) {
      VkViewport viewport = {
         state[i].translate[0] - state[i].scale[0],
         state[i].translate[1] - state[i].scale[1],
         state[i].scale[0] * 2,
         state[i].scale[1] * 2,
         state[i].translate[2] - state[i].scale[2],
         state[i].translate[2] + state[i].scale[2]
      };
      ctx->viewport_states[start_slot + i] = state[i];
      ctx->viewports[start_slot + i] = viewport;
   }
   ctx->num_viewports = start_slot + num_viewports;
}

static void
zink_set_scissor_states(struct pipe_context *pctx,
                        unsigned start_slot, unsigned num_scissors,
                        const struct pipe_scissor_state *states)
{
   struct zink_context *ctx = zink_context(pctx);

   for (unsigned i = 0; i < num_scissors; i++) {
      VkRect2D scissor;

      scissor.offset.x = states[i].minx;
      scissor.offset.y = states[i].miny;
      scissor.extent.width = states[i].maxx - states[i].minx;
      scissor.extent.height = states[i].maxy - states[i].miny;
      ctx->scissor_states[start_slot + i] = states[i];
      ctx->scissors[start_slot + i] = scissor;
   }
}

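/* Constant buffers backed by user memory are first copied into a GPU buffer
 * via u_upload_mgr, honoring the device's minimum UBO offset alignment; the
 * context then holds a reference to whichever buffer ends up bound.
 */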
static void
zink_set_constant_buffer(struct pipe_context *pctx,
                         enum pipe_shader_type shader, uint index,
                         const struct pipe_constant_buffer *cb)
{
   struct zink_context *ctx = zink_context(pctx);

   if (cb) {
      struct pipe_resource *buffer = cb->buffer;
      unsigned offset = cb->buffer_offset;
      if (cb->user_buffer) {
         struct zink_screen *screen = zink_screen(pctx->screen);
         u_upload_data(ctx->base.const_uploader, 0, cb->buffer_size,
                       screen->props.limits.minUniformBufferOffsetAlignment,
                       cb->user_buffer, &offset, &buffer);
      }

      pipe_resource_reference(&ctx->ubos[shader][index].buffer, buffer);
      ctx->ubos[shader][index].buffer_offset = offset;
      ctx->ubos[shader][index].buffer_size = cb->buffer_size;
      ctx->ubos[shader][index].user_buffer = NULL;

      if (cb->user_buffer)
         pipe_resource_reference(&buffer, NULL);
   } else {
      pipe_resource_reference(&ctx->ubos[shader][index].buffer, NULL);
      ctx->ubos[shader][index].buffer_offset = 0;
      ctx->ubos[shader][index].buffer_size = 0;
      ctx->ubos[shader][index].user_buffer = NULL;
   }
}

static void
zink_set_sampler_views(struct pipe_context *pctx,
                       enum pipe_shader_type shader_type,
                       unsigned start_slot,
                       unsigned num_views,
                       struct pipe_sampler_view **views)
{
   struct zink_context *ctx = zink_context(pctx);
   assert(views);
   for (unsigned i = 0; i < num_views; ++i) {
      pipe_sampler_view_reference(
         &ctx->image_views[shader_type][start_slot + i],
         views[i]);
   }
   ctx->num_image_views[shader_type] = start_slot + num_views;
}

static void
zink_set_stencil_ref(struct pipe_context *pctx,
                     const struct pipe_stencil_ref *ref)
{
   struct zink_context *ctx = zink_context(pctx);
   ctx->stencil_ref = *ref;
}

static void
zink_set_clip_state(struct pipe_context *pctx,
                    const struct pipe_clip_state *pcs)
{
}

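/* Render passes are cached on the context, keyed by the attachment formats
 * and sample counts; unused color slots get a dummy R8_UINT attachment so
 * the render-pass layout stays compatible with the framebuffer.
 */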
static struct zink_render_pass *
get_render_pass(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   const struct pipe_framebuffer_state *fb = &ctx->fb_state;
   struct zink_render_pass_state state = { 0 };

   for (int i = 0; i < fb->nr_cbufs; i++) {
      struct pipe_surface *surf = fb->cbufs[i];
      if (surf) {
         state.rts[i].format = zink_get_format(screen, surf->format);
         state.rts[i].samples = surf->nr_samples > 0 ? surf->nr_samples :
                                                       VK_SAMPLE_COUNT_1_BIT;
      } else {
         state.rts[i].format = VK_FORMAT_R8_UINT;
         state.rts[i].samples = MAX2(fb->samples, 1);
      }
   }
   state.num_cbufs = fb->nr_cbufs;

   if (fb->zsbuf) {
      struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
      state.rts[fb->nr_cbufs].format = zsbuf->format;
      state.rts[fb->nr_cbufs].samples = zsbuf->base.nr_samples > 0 ? zsbuf->base.nr_samples : VK_SAMPLE_COUNT_1_BIT;
   }
   state.have_zsbuf = fb->zsbuf != NULL;

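   /* FIXME: the hash table stores the key pointer, and `state` lives on this
    * stack frame; once we return, later lookups memcmp against a dangling
    * pointer. The key should be copied (or taken from the render-pass
    * object) when inserting.
    */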
   struct hash_entry *entry = _mesa_hash_table_search(ctx->render_pass_cache,
                                                      &state);
   if (!entry) {
      struct zink_render_pass *rp;
      rp = zink_create_render_pass(screen, &state);
      entry = _mesa_hash_table_insert(ctx->render_pass_cache, &state, rp);
      if (!entry)
         return NULL;
   }

   return entry->data;
}

static struct zink_framebuffer *
create_framebuffer(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);

   struct zink_framebuffer_state state = {};
   state.rp = get_render_pass(ctx);
   for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
      struct pipe_surface *psurf = ctx->fb_state.cbufs[i];
      state.attachments[i] = zink_surface(psurf);
      state.has_null_attachments |= !state.attachments[i];
   }

   state.num_attachments = ctx->fb_state.nr_cbufs;
   if (ctx->fb_state.zsbuf) {
      struct pipe_surface *psurf = ctx->fb_state.zsbuf;
      state.attachments[state.num_attachments++] = zink_surface(psurf);
   }

   state.width = ctx->fb_state.width;
   state.height = ctx->fb_state.height;
   state.layers = MAX2(ctx->fb_state.layers, 1);
   state.samples = ctx->fb_state.samples;

   return zink_create_framebuffer(ctx, screen, &state);
}

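/* Transition every framebuffer attachment into the layout the render pass
 * expects: color attachments to COLOR_ATTACHMENT_OPTIMAL, the depth/stencil
 * buffer to DEPTH_STENCIL_ATTACHMENT_OPTIMAL. Null color slots fall back to
 * the framebuffer's shared null surface.
 */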
static void
framebuffer_state_buffer_barriers_setup(struct zink_context *ctx,
                                        const struct pipe_framebuffer_state *state, struct zink_batch *batch)
{
   for (int i = 0; i < state->nr_cbufs; i++) {
      struct pipe_surface *surf = state->cbufs[i];
      if (!surf)
         surf = ctx->framebuffer->null_surface;
      struct zink_resource *res = zink_resource(surf->texture);
      if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
   }

   if (state->zsbuf) {
      struct zink_resource *res = zink_resource(state->zsbuf->texture);
      if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
   }
}

void
zink_begin_render_pass(struct zink_context *ctx, struct zink_batch *batch)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   assert(batch == zink_curr_batch(ctx));
   assert(ctx->gfx_pipeline_state.render_pass);

   struct pipe_framebuffer_state *fb_state = &ctx->fb_state;

   VkRenderPassBeginInfo rpbi = {};
   rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
   rpbi.renderPass = ctx->gfx_pipeline_state.render_pass->render_pass;
   rpbi.renderArea.offset.x = 0;
   rpbi.renderArea.offset.y = 0;
   rpbi.renderArea.extent.width = fb_state->width;
   rpbi.renderArea.extent.height = fb_state->height;
   rpbi.clearValueCount = 0;
   rpbi.pClearValues = NULL;
   rpbi.framebuffer = ctx->framebuffer->fb;

   assert(ctx->gfx_pipeline_state.render_pass && ctx->framebuffer);
   assert(!batch->rp || batch->rp == ctx->gfx_pipeline_state.render_pass);
   assert(!batch->fb || batch->fb == ctx->framebuffer);

   framebuffer_state_buffer_barriers_setup(ctx, fb_state, batch);

   zink_render_pass_reference(screen, &batch->rp, ctx->gfx_pipeline_state.render_pass);
   zink_framebuffer_reference(screen, &batch->fb, ctx->framebuffer);

   vkCmdBeginRenderPass(batch->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
}

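/* Batches form a small ring: flushing ends any active render pass, submits
 * the current batch, and starts recording into the next one (which may stall
 * on its fence if that batch is still in flight). zink_batch_rp/
 * zink_batch_no_rp let callers demand a batch that is inside, or outside,
 * a render pass.
 */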
static void
flush_batch(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (batch->rp)
      vkCmdEndRenderPass(batch->cmdbuf);

   zink_end_batch(ctx, batch);

   ctx->curr_batch++;
   if (ctx->curr_batch == ARRAY_SIZE(ctx->batches))
      ctx->curr_batch = 0;

   zink_start_batch(ctx, zink_curr_batch(ctx));
}

struct zink_batch *
zink_batch_rp(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (!batch->rp) {
      zink_begin_render_pass(ctx, batch);
      assert(batch->rp);
   }
   return batch;
}

struct zink_batch *
zink_batch_no_rp(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (batch->rp) {
      /* flush batch and get a new one */
      flush_batch(ctx);
      batch = zink_curr_batch(ctx);
      assert(!batch->rp);
   }
   return batch;
}

static void
zink_set_framebuffer_state(struct pipe_context *pctx,
                           const struct pipe_framebuffer_state *state)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   util_copy_framebuffer_state(&ctx->fb_state, state);

   struct zink_framebuffer *fb = ctx->framebuffer;
   /* explicitly unref previous fb to ensure it gets destroyed */
   if (fb)
      zink_framebuffer_reference(screen, &fb, NULL);
   fb = create_framebuffer(ctx);
   zink_framebuffer_reference(screen, &ctx->framebuffer, fb);
   zink_render_pass_reference(screen, &ctx->gfx_pipeline_state.render_pass, fb->rp);

   ctx->gfx_pipeline_state.rast_samples = MAX2(state->samples, 1);
   ctx->gfx_pipeline_state.num_attachments = state->nr_cbufs;

   struct zink_batch *batch = zink_batch_no_rp(ctx);

   framebuffer_state_buffer_barriers_setup(ctx, state, batch);
}

static void
zink_set_blend_color(struct pipe_context *pctx,
                     const struct pipe_blend_color *color)
{
   struct zink_context *ctx = zink_context(pctx);
   memcpy(ctx->blend_constants, color->color, sizeof(float) * 4);
}

static void
zink_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
   struct zink_context *ctx = zink_context(pctx);
   ctx->gfx_pipeline_state.sample_mask = sample_mask;
}

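/* Map an image layout to the access masks and pipeline stages a barrier
 * against that layout needs. The access helpers reject layouts zink never
 * uses; the stage helpers fall back to TOP/BOTTOM_OF_PIPE, which is
 * maximally conservative.
 */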
static VkAccessFlags
access_src_flags(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_UNDEFINED:
   case VK_IMAGE_LAYOUT_GENERAL:
      return 0;

   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;

   case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
      return VK_ACCESS_SHADER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_ACCESS_TRANSFER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_ACCESS_TRANSFER_WRITE_BIT;

   case VK_IMAGE_LAYOUT_PREINITIALIZED:
      return VK_ACCESS_HOST_WRITE_BIT;

   default:
      unreachable("unexpected layout");
   }
}

static VkAccessFlags
access_dst_flags(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_UNDEFINED:
   case VK_IMAGE_LAYOUT_GENERAL:
      return 0;

   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_ACCESS_TRANSFER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_ACCESS_TRANSFER_WRITE_BIT;

   default:
      unreachable("unexpected layout");
   }
}

static VkPipelineStageFlags
pipeline_dst_stage(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;
   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;

   default:
      return VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
   }
}

static VkPipelineStageFlags
pipeline_src_stage(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;
   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;

   default:
      return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
   }
}

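/* Record a full-image layout transition: the barrier covers all mip levels
 * and array layers of the given aspect, then the resource's tracked layout
 * is updated so subsequent barriers use the right source state.
 */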
void
zink_resource_barrier(VkCommandBuffer cmdbuf, struct zink_resource *res,
                      VkImageAspectFlags aspect, VkImageLayout new_layout)
{
   VkImageSubresourceRange isr = {
      aspect,
      0, VK_REMAINING_MIP_LEVELS,
      0, VK_REMAINING_ARRAY_LAYERS
   };

   VkImageMemoryBarrier imb = {
      VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
      NULL,
      access_src_flags(res->layout),
      access_dst_flags(new_layout),
      res->layout,
      new_layout,
      VK_QUEUE_FAMILY_IGNORED,
      VK_QUEUE_FAMILY_IGNORED,
      res->image,
      isr
   };
   vkCmdPipelineBarrier(
      cmdbuf,
      pipeline_src_stage(res->layout),
      pipeline_dst_stage(new_layout),
      0,
      0, NULL,
      0, NULL,
      1, &imb
   );

   res->layout = new_layout;
}

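/* vkCmdClearAttachments only works inside a render pass, hence the
 * zink_batch_rp() below even for clears that could otherwise use the
 * cheaper vkCmdClear{Color,DepthStencil}Image paths.
 */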
static void
zink_clear(struct pipe_context *pctx,
           unsigned buffers,
           const struct pipe_scissor_state *scissor_state,
           const union pipe_color_union *pcolor,
           double depth, unsigned stencil)
{
   struct zink_context *ctx = zink_context(pctx);
   struct pipe_framebuffer_state *fb = &ctx->fb_state;

   /* FIXME: this is very inefficient; if no renderpass has been started yet,
    * we should record the clear if it's full-screen, and apply it as we
    * start the render-pass. Otherwise we can do a partial out-of-renderpass
    * clear.
    */
   struct zink_batch *batch = zink_batch_rp(ctx);

   VkClearAttachment attachments[1 + PIPE_MAX_COLOR_BUFS];
   int num_attachments = 0;

   if (buffers & PIPE_CLEAR_COLOR) {
      VkClearColorValue color;
      color.float32[0] = pcolor->f[0];
      color.float32[1] = pcolor->f[1];
      color.float32[2] = pcolor->f[2];
      color.float32[3] = pcolor->f[3];

      for (unsigned i = 0; i < fb->nr_cbufs; i++) {
         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
            continue;

         attachments[num_attachments].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
         attachments[num_attachments].colorAttachment = i;
         attachments[num_attachments].clearValue.color = color;
         ++num_attachments;
      }
   }

   if (buffers & PIPE_CLEAR_DEPTHSTENCIL && fb->zsbuf) {
      VkImageAspectFlags aspect = 0;
      if (buffers & PIPE_CLEAR_DEPTH)
         aspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
      if (buffers & PIPE_CLEAR_STENCIL)
         aspect |= VK_IMAGE_ASPECT_STENCIL_BIT;

      attachments[num_attachments].aspectMask = aspect;
      attachments[num_attachments].clearValue.depthStencil.depth = depth;
      attachments[num_attachments].clearValue.depthStencil.stencil = stencil;
      ++num_attachments;
   }

   VkClearRect cr;
   cr.rect.offset.x = 0;
   cr.rect.offset.y = 0;
   cr.rect.extent.width = fb->width;
   cr.rect.extent.height = fb->height;
   cr.baseArrayLayer = 0;
   cr.layerCount = util_framebuffer_get_num_layers(fb);
   vkCmdClearAttachments(batch->cmdbuf, num_attachments, attachments, 1, &cr);
}

VkShaderStageFlagBits
zink_shader_stage(enum pipe_shader_type type)
{
   VkShaderStageFlagBits stages[] = {
      [PIPE_SHADER_VERTEX] = VK_SHADER_STAGE_VERTEX_BIT,
      [PIPE_SHADER_FRAGMENT] = VK_SHADER_STAGE_FRAGMENT_BIT,
      [PIPE_SHADER_GEOMETRY] = VK_SHADER_STAGE_GEOMETRY_BIT,
      [PIPE_SHADER_TESS_CTRL] = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
      [PIPE_SHADER_TESS_EVAL] = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
      [PIPE_SHADER_COMPUTE] = VK_SHADER_STAGE_COMPUTE_BIT,
   };
   return stages[type];
}

static uint32_t
hash_gfx_program(const void *key)
{
   return _mesa_hash_data(key, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1));
}

static bool
equals_gfx_program(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1)) == 0;
}

static uint32_t
hash_render_pass_state(const void *key)
{
   return _mesa_hash_data(key, sizeof(struct zink_render_pass_state));
}

static bool
equals_render_pass_state(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_render_pass_state)) == 0;
}

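/* Submit the current batch. Note that `batch` is captured before
 * flush_batch() rotates to the next one, so the fence handed back to the
 * caller belongs to the work that was just submitted.
 */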
static void
zink_flush(struct pipe_context *pctx,
           struct pipe_fence_handle **pfence,
           enum pipe_flush_flags flags)
{
   struct zink_context *ctx = zink_context(pctx);

   struct zink_batch *batch = zink_curr_batch(ctx);
   flush_batch(ctx);

   if (zink_screen(pctx->screen)->have_EXT_transform_feedback && ctx->num_so_targets)
      ctx->dirty_so_targets = true;

   if (pfence)
      zink_fence_reference(zink_screen(pctx->screen),
                           (struct zink_fence **)pfence,
                           batch->fence);

   /* HACK:
    * For some strange reason, we need to finish before presenting, or else
    * we start rendering on top of the back-buffer for the next frame. This
    * seems like a bug in the DRI-driver to me, because we really should
    * be properly protected by fences here, and the back-buffer should
    * either be swapped with the front-buffer, or blitted from. But for
    * some strange reason, neither of these things happen.
    */
   if (flags & PIPE_FLUSH_END_OF_FRAME)
      pctx->screen->fence_finish(pctx->screen, pctx,
                                 (struct pipe_fence_handle *)batch->fence,
                                 PIPE_TIMEOUT_INFINITE);
}

static void
zink_flush_resource(struct pipe_context *pipe,
                    struct pipe_resource *resource)
{
}

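/* Copies take the vkCmdCopyImage path for image-to-image (transitioning both
 * resources to the appropriate TRANSFER layouts first) and vkCmdCopyBuffer
 * for buffer-to-buffer; mixed buffer/image copies are not implemented yet.
 */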
static void
zink_resource_copy_region(struct pipe_context *pctx,
                          struct pipe_resource *pdst,
                          unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz,
                          struct pipe_resource *psrc,
                          unsigned src_level, const struct pipe_box *src_box)
{
   struct zink_resource *dst = zink_resource(pdst);
   struct zink_resource *src = zink_resource(psrc);
   struct zink_context *ctx = zink_context(pctx);
   if (dst->base.target != PIPE_BUFFER && src->base.target != PIPE_BUFFER) {
      VkImageCopy region = {};

      region.srcSubresource.aspectMask = src->aspect;
      region.srcSubresource.mipLevel = src_level;
      region.srcSubresource.layerCount = 1;
      if (src->base.array_size > 1) {
         region.srcSubresource.baseArrayLayer = src_box->z;
         region.srcSubresource.layerCount = src_box->depth;
         region.extent.depth = 1;
      } else {
         region.srcOffset.z = src_box->z;
         region.srcSubresource.layerCount = 1;
         region.extent.depth = src_box->depth;
      }

      region.srcOffset.x = src_box->x;
      region.srcOffset.y = src_box->y;

      region.dstSubresource.aspectMask = dst->aspect;
      region.dstSubresource.mipLevel = dst_level;
      if (dst->base.array_size > 1) {
         region.dstSubresource.baseArrayLayer = dstz;
         region.dstSubresource.layerCount = src_box->depth;
      } else {
         region.dstOffset.z = dstz;
         region.dstSubresource.layerCount = 1;
      }

      region.dstOffset.x = dstx;
      region.dstOffset.y = dsty;
      region.extent.width = src_box->width;
      region.extent.height = src_box->height;

      struct zink_batch *batch = zink_batch_no_rp(ctx);
      zink_batch_reference_resoure(batch, src);
      zink_batch_reference_resoure(batch, dst);

      if (src->layout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
         zink_resource_barrier(batch->cmdbuf, src, src->aspect,
                               VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
      }

      if (dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
         zink_resource_barrier(batch->cmdbuf, dst, dst->aspect,
                               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
      }

      vkCmdCopyImage(batch->cmdbuf, src->image, src->layout,
                     dst->image, dst->layout,
                     1, &region);
   } else if (dst->base.target == PIPE_BUFFER &&
              src->base.target == PIPE_BUFFER) {
      VkBufferCopy region;
      region.srcOffset = src_box->x;
      region.dstOffset = dstx;
      region.size = src_box->width;

      struct zink_batch *batch = zink_batch_no_rp(ctx);
      zink_batch_reference_resoure(batch, src);
      zink_batch_reference_resoure(batch, dst);

      vkCmdCopyBuffer(batch->cmdbuf, src->buffer, dst->buffer, 1, &region);
   } else
      debug_printf("zink: TODO resource copy\n");
}

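/* Stream-output targets carry a small side buffer holding the 4-byte counter
 * that VK_EXT_transform_feedback uses to track how much data has been
 * written, which is what makes pause/resume work.
 */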
static struct pipe_stream_output_target *
zink_create_stream_output_target(struct pipe_context *pctx,
                                 struct pipe_resource *pres,
                                 unsigned buffer_offset,
                                 unsigned buffer_size)
{
   struct zink_so_target *t;
   t = CALLOC_STRUCT(zink_so_target);
   if (!t)
      return NULL;

   t->base.reference.count = 1;
   t->base.context = pctx;
   pipe_resource_reference(&t->base.buffer, pres);
   t->base.buffer_offset = buffer_offset;
   t->base.buffer_size = buffer_size;

   /* using PIPE_BIND_CUSTOM here lets us create a custom pipe buffer resource,
    * which allows us to differentiate and use VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT
    * as we must for this case
    */
   t->counter_buffer = pipe_buffer_create(pctx->screen, PIPE_BIND_STREAM_OUTPUT | PIPE_BIND_CUSTOM, PIPE_USAGE_DEFAULT, 4);
   if (!t->counter_buffer) {
      /* drop the buffer reference taken above before freeing */
      pipe_resource_reference(&t->base.buffer, NULL);
      FREE(t);
      return NULL;
   }

   return &t->base;
}

static void
zink_stream_output_target_destroy(struct pipe_context *pctx,
                                  struct pipe_stream_output_target *psot)
{
   struct zink_so_target *t = (struct zink_so_target *)psot;
   pipe_resource_reference(&t->counter_buffer, NULL);
   pipe_resource_reference(&t->base.buffer, NULL);
   FREE(t);
}

static void
zink_set_stream_output_targets(struct pipe_context *pctx,
                               unsigned num_targets,
                               struct pipe_stream_output_target **targets,
                               const unsigned *offsets)
{
   struct zink_context *ctx = zink_context(pctx);

   if (num_targets == 0) {
      for (unsigned i = 0; i < ctx->num_so_targets; i++)
         pipe_so_target_reference(&ctx->so_targets[i], NULL);
      ctx->num_so_targets = 0;
   } else {
      for (unsigned i = 0; i < num_targets; i++)
         pipe_so_target_reference(&ctx->so_targets[i], targets[i]);
      for (unsigned i = num_targets; i < ctx->num_so_targets; i++)
         pipe_so_target_reference(&ctx->so_targets[i], NULL);
      ctx->num_so_targets = num_targets;

      /* an offset of -1 means we're resuming streamout, so the counter
       * buffers already hold valid offsets; emit a memory barrier on the
       * next draw for synchronization
       */
      if (offsets[0] == (unsigned)-1)
         ctx->xfb_barrier = true;
      /* TODO: possibly avoid rebinding on resume if resuming from same buffers? */
      ctx->dirty_so_targets = true;
   }
}

struct pipe_context *
zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
   struct zink_screen *screen = zink_screen(pscreen);
   struct zink_context *ctx = CALLOC_STRUCT(zink_context);
   if (!ctx)
      goto fail;

   ctx->base.screen = pscreen;
   ctx->base.priv = priv;

   ctx->base.destroy = zink_context_destroy;

   zink_context_state_init(&ctx->base);

   ctx->base.create_sampler_state = zink_create_sampler_state;
   ctx->base.bind_sampler_states = zink_bind_sampler_states;
   ctx->base.delete_sampler_state = zink_delete_sampler_state;

   ctx->base.create_sampler_view = zink_create_sampler_view;
   ctx->base.set_sampler_views = zink_set_sampler_views;
   ctx->base.sampler_view_destroy = zink_sampler_view_destroy;

   ctx->base.create_vs_state = zink_create_vs_state;
   ctx->base.bind_vs_state = zink_bind_vs_state;
   ctx->base.delete_vs_state = zink_delete_vs_state;

   ctx->base.create_fs_state = zink_create_fs_state;
   ctx->base.bind_fs_state = zink_bind_fs_state;
   ctx->base.delete_fs_state = zink_delete_fs_state;

   ctx->base.set_polygon_stipple = zink_set_polygon_stipple;
   ctx->base.set_vertex_buffers = zink_set_vertex_buffers;
   ctx->base.set_viewport_states = zink_set_viewport_states;
   ctx->base.set_scissor_states = zink_set_scissor_states;
   ctx->base.set_constant_buffer = zink_set_constant_buffer;
   ctx->base.set_framebuffer_state = zink_set_framebuffer_state;
   ctx->base.set_stencil_ref = zink_set_stencil_ref;
   ctx->base.set_clip_state = zink_set_clip_state;
   ctx->base.set_blend_color = zink_set_blend_color;

   ctx->base.set_sample_mask = zink_set_sample_mask;

   ctx->base.clear = zink_clear;
   ctx->base.draw_vbo = zink_draw_vbo;
   ctx->base.flush = zink_flush;

   ctx->base.resource_copy_region = zink_resource_copy_region;
   ctx->base.blit = zink_blit;
   ctx->base.create_stream_output_target = zink_create_stream_output_target;
   ctx->base.stream_output_target_destroy = zink_stream_output_target_destroy;

   ctx->base.set_stream_output_targets = zink_set_stream_output_targets;
   ctx->base.flush_resource = zink_flush_resource;
   zink_context_surface_init(&ctx->base);
   zink_context_resource_init(&ctx->base);
   zink_context_query_init(&ctx->base);

   slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);

   ctx->base.stream_uploader = u_upload_create_default(&ctx->base);
   ctx->base.const_uploader = ctx->base.stream_uploader;

   int prim_hwsupport = 1 << PIPE_PRIM_POINTS |
                        1 << PIPE_PRIM_LINES |
                        1 << PIPE_PRIM_LINE_STRIP |
                        1 << PIPE_PRIM_TRIANGLES |
                        1 << PIPE_PRIM_TRIANGLE_STRIP |
                        1 << PIPE_PRIM_TRIANGLE_FAN;

   ctx->primconvert = util_primconvert_create(&ctx->base, prim_hwsupport);
   if (!ctx->primconvert)
      goto fail;

   ctx->blitter = util_blitter_create(&ctx->base);
   if (!ctx->blitter)
      goto fail;

   VkCommandPoolCreateInfo cpci = {};
   cpci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
   cpci.queueFamilyIndex = screen->gfx_queue;
   cpci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
   if (vkCreateCommandPool(screen->dev, &cpci, NULL, &ctx->cmdpool) != VK_SUCCESS)
      goto fail;

   VkCommandBufferAllocateInfo cbai = {};
   cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
   cbai.commandPool = ctx->cmdpool;
   cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
   cbai.commandBufferCount = 1;

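   /* every batch gets its own descriptor pool (sized for ZINK_BATCH_DESC_SIZE
    * sets of UBO and combined image/sampler descriptors), so a batch's
    * descriptors can be released in bulk when it is recycled
    */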
   VkDescriptorPoolSize sizes[] = {
      {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, ZINK_BATCH_DESC_SIZE},
      {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, ZINK_BATCH_DESC_SIZE}
   };
   VkDescriptorPoolCreateInfo dpci = {};
   dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
   dpci.pPoolSizes = sizes;
   dpci.poolSizeCount = ARRAY_SIZE(sizes);
   dpci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
   dpci.maxSets = ZINK_BATCH_DESC_SIZE;

   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i) {
      if (vkAllocateCommandBuffers(screen->dev, &cbai, &ctx->batches[i].cmdbuf) != VK_SUCCESS)
         goto fail;

      ctx->batches[i].resources = _mesa_set_create(NULL, _mesa_hash_pointer,
                                                   _mesa_key_pointer_equal);
      ctx->batches[i].sampler_views = _mesa_set_create(NULL,
                                                       _mesa_hash_pointer,
                                                       _mesa_key_pointer_equal);

      if (!ctx->batches[i].resources || !ctx->batches[i].sampler_views)
         goto fail;

      util_dynarray_init(&ctx->batches[i].zombie_samplers, NULL);

      if (vkCreateDescriptorPool(screen->dev, &dpci, 0,
                                 &ctx->batches[i].descpool) != VK_SUCCESS)
         goto fail;
   }

   vkGetDeviceQueue(screen->dev, screen->gfx_queue, 0, &ctx->queue);

   ctx->program_cache = _mesa_hash_table_create(NULL,
                                                hash_gfx_program,
                                                equals_gfx_program);
   ctx->render_pass_cache = _mesa_hash_table_create(NULL,
                                                    hash_render_pass_state,
                                                    equals_render_pass_state);
   if (!ctx->program_cache || !ctx->render_pass_cache)
      goto fail;

   const uint8_t data[] = { 0 };
   ctx->dummy_buffer = pipe_buffer_create_with_data(&ctx->base,
      PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_IMMUTABLE, sizeof(data), data);
   if (!ctx->dummy_buffer)
      goto fail;

   ctx->dirty_program = true;

   /* start the first batch */
   zink_start_batch(ctx, zink_curr_batch(ctx));

   return &ctx->base;

fail:
   if (ctx) {
      vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);
      FREE(ctx);
   }
   return NULL;
}