zink: implement transform feedback support to finish off opengl 3.0
[mesa.git] src/gallium/drivers/zink/zink_context.c
/*
 * Copyright 2018 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "zink_context.h"

#include "zink_batch.h"
#include "zink_compiler.h"
#include "zink_fence.h"
#include "zink_framebuffer.h"
#include "zink_helpers.h"
#include "zink_pipeline.h"
#include "zink_render_pass.h"
#include "zink_resource.h"
#include "zink_screen.h"
#include "zink_state.h"
#include "zink_surface.h"

#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
#include "util/u_debug.h"
#include "util/format/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"

#include "nir.h"

#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

static void
zink_context_destroy(struct pipe_context *pctx)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   if (vkQueueWaitIdle(ctx->queue) != VK_SUCCESS)
      debug_printf("vkQueueWaitIdle failed\n");

   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i)
      vkFreeCommandBuffers(screen->dev, ctx->cmdpool, 1, &ctx->batches[i].cmdbuf);
   vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);

   util_primconvert_destroy(ctx->primconvert);
   u_upload_destroy(pctx->stream_uploader);
   slab_destroy_child(&ctx->transfer_pool);
   util_blitter_destroy(ctx->blitter);
   FREE(ctx);
}

static VkSamplerMipmapMode
sampler_mipmap_mode(enum pipe_tex_mipfilter filter)
{
   switch (filter) {
   case PIPE_TEX_MIPFILTER_NEAREST: return VK_SAMPLER_MIPMAP_MODE_NEAREST;
   case PIPE_TEX_MIPFILTER_LINEAR: return VK_SAMPLER_MIPMAP_MODE_LINEAR;
   case PIPE_TEX_MIPFILTER_NONE:
      unreachable("PIPE_TEX_MIPFILTER_NONE should be dealt with earlier");
   }
   unreachable("unexpected filter");
}

static VkSamplerAddressMode
sampler_address_mode(enum pipe_tex_wrap filter)
{
   switch (filter) {
   case PIPE_TEX_WRAP_REPEAT: return VK_SAMPLER_ADDRESS_MODE_REPEAT;
   case PIPE_TEX_WRAP_CLAMP: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
   case PIPE_TEX_WRAP_MIRROR_REPEAT: return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
   case PIPE_TEX_WRAP_MIRROR_CLAMP: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   }
   unreachable("unexpected wrap");
}

static VkCompareOp
compare_op(enum pipe_compare_func op)
{
   switch (op) {
   case PIPE_FUNC_NEVER: return VK_COMPARE_OP_NEVER;
   case PIPE_FUNC_LESS: return VK_COMPARE_OP_LESS;
   case PIPE_FUNC_EQUAL: return VK_COMPARE_OP_EQUAL;
   case PIPE_FUNC_LEQUAL: return VK_COMPARE_OP_LESS_OR_EQUAL;
   case PIPE_FUNC_GREATER: return VK_COMPARE_OP_GREATER;
   case PIPE_FUNC_NOTEQUAL: return VK_COMPARE_OP_NOT_EQUAL;
   case PIPE_FUNC_GEQUAL: return VK_COMPARE_OP_GREATER_OR_EQUAL;
   case PIPE_FUNC_ALWAYS: return VK_COMPARE_OP_ALWAYS;
   }
   unreachable("unexpected compare");
}

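/* translate a gallium pipe_sampler_state into a VkSamplerCreateInfo; Vulkan
 * has no direct equivalent of PIPE_TEX_MIPFILTER_NONE, so mipmapping is
 * effectively disabled below by clamping the LOD range to zero
 */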
static void *
zink_create_sampler_state(struct pipe_context *pctx,
                          const struct pipe_sampler_state *state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);

   VkSamplerCreateInfo sci = {};
   sci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
   sci.magFilter = zink_filter(state->mag_img_filter);
   sci.minFilter = zink_filter(state->min_img_filter);

   if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) {
      sci.mipmapMode = sampler_mipmap_mode(state->min_mip_filter);
      sci.minLod = state->min_lod;
      sci.maxLod = state->max_lod;
   } else {
      sci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
      sci.minLod = 0;
      sci.maxLod = 0;
   }

   sci.addressModeU = sampler_address_mode(state->wrap_s);
   sci.addressModeV = sampler_address_mode(state->wrap_t);
   sci.addressModeW = sampler_address_mode(state->wrap_r);
   sci.mipLodBias = state->lod_bias;

   if (state->compare_mode == PIPE_TEX_COMPARE_NONE)
      sci.compareOp = VK_COMPARE_OP_NEVER;
   else {
      sci.compareOp = compare_op(state->compare_func);
      sci.compareEnable = VK_TRUE;
   }

   sci.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; // TODO
   sci.unnormalizedCoordinates = !state->normalized_coords;

   if (state->max_anisotropy > 1) {
      sci.maxAnisotropy = state->max_anisotropy;
      sci.anisotropyEnable = VK_TRUE;
   }

   VkSampler *sampler = CALLOC(1, sizeof(VkSampler));
   if (!sampler)
      return NULL;

   if (vkCreateSampler(screen->dev, &sci, NULL, sampler) != VK_SUCCESS) {
      FREE(sampler);
      return NULL;
   }

   return sampler;
}

static void
zink_bind_sampler_states(struct pipe_context *pctx,
                         enum pipe_shader_type shader,
                         unsigned start_slot,
                         unsigned num_samplers,
                         void **samplers)
{
   struct zink_context *ctx = zink_context(pctx);
   for (unsigned i = 0; i < num_samplers; ++i) {
      VkSampler *sampler = samplers[i];
      ctx->sampler_states[shader][start_slot + i] = sampler;
      ctx->samplers[shader][start_slot + i] = sampler ? *sampler : VK_NULL_HANDLE;
   }
   ctx->num_samplers[shader] = start_slot + num_samplers;
}

static void
zink_delete_sampler_state(struct pipe_context *pctx,
                          void *sampler_state)
{
   struct zink_batch *batch = zink_curr_batch(zink_context(pctx));
   util_dynarray_append(&batch->zombie_samplers, VkSampler,
                        *(VkSampler *)sampler_state);
   FREE(sampler_state);
}


static VkImageViewType
image_view_type(enum pipe_texture_target target)
{
   switch (target) {
   case PIPE_TEXTURE_1D: return VK_IMAGE_VIEW_TYPE_1D;
   case PIPE_TEXTURE_1D_ARRAY: return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
   case PIPE_TEXTURE_2D: return VK_IMAGE_VIEW_TYPE_2D;
   case PIPE_TEXTURE_2D_ARRAY: return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
   case PIPE_TEXTURE_CUBE: return VK_IMAGE_VIEW_TYPE_CUBE;
   case PIPE_TEXTURE_CUBE_ARRAY: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
   case PIPE_TEXTURE_3D: return VK_IMAGE_VIEW_TYPE_3D;
   case PIPE_TEXTURE_RECT: return VK_IMAGE_VIEW_TYPE_2D;
   default:
      unreachable("unexpected target");
   }
}

static VkComponentSwizzle
component_mapping(enum pipe_swizzle swizzle)
{
   switch (swizzle) {
   case PIPE_SWIZZLE_X: return VK_COMPONENT_SWIZZLE_R;
   case PIPE_SWIZZLE_Y: return VK_COMPONENT_SWIZZLE_G;
   case PIPE_SWIZZLE_Z: return VK_COMPONENT_SWIZZLE_B;
   case PIPE_SWIZZLE_W: return VK_COMPONENT_SWIZZLE_A;
   case PIPE_SWIZZLE_0: return VK_COMPONENT_SWIZZLE_ZERO;
   case PIPE_SWIZZLE_1: return VK_COMPONENT_SWIZZLE_ONE;
   case PIPE_SWIZZLE_NONE: return VK_COMPONENT_SWIZZLE_IDENTITY; // ???
   default:
      unreachable("unexpected swizzle");
   }
}

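/* an image view used for sampling may only select a single aspect of a
 * depth/stencil format; prefer depth, and use stencil only when the format
 * has no depth component
 */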
static VkImageAspectFlags
sampler_aspect_from_format(enum pipe_format fmt)
{
   if (util_format_is_depth_or_stencil(fmt)) {
      const struct util_format_description *desc = util_format_description(fmt);
      if (util_format_has_depth(desc))
         return VK_IMAGE_ASPECT_DEPTH_BIT;
      assert(util_format_has_stencil(desc));
      return VK_IMAGE_ASPECT_STENCIL_BIT;
   } else
      return VK_IMAGE_ASPECT_COLOR_BIT;
}

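/* wrap a VkImageView in a pipe_sampler_view, translating the requested
 * format, swizzle and mip/layer window; the view is destroyed again in
 * zink_sampler_view_destroy
 */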
static struct pipe_sampler_view *
zink_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *pres,
                         const struct pipe_sampler_view *state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_resource *res = zink_resource(pres);
   struct zink_sampler_view *sampler_view = CALLOC_STRUCT(zink_sampler_view);
   if (!sampler_view)
      return NULL;

   sampler_view->base = *state;
   sampler_view->base.texture = NULL;
   pipe_resource_reference(&sampler_view->base.texture, pres);
   sampler_view->base.reference.count = 1;
   sampler_view->base.context = pctx;

   VkImageViewCreateInfo ivci = {};
   ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
   ivci.image = res->image;
   ivci.viewType = image_view_type(state->target);
   ivci.format = zink_get_format(screen, state->format);
   ivci.components.r = component_mapping(state->swizzle_r);
   ivci.components.g = component_mapping(state->swizzle_g);
   ivci.components.b = component_mapping(state->swizzle_b);
   ivci.components.a = component_mapping(state->swizzle_a);

   ivci.subresourceRange.aspectMask = sampler_aspect_from_format(state->format);
   ivci.subresourceRange.baseMipLevel = state->u.tex.first_level;
   ivci.subresourceRange.baseArrayLayer = state->u.tex.first_layer;
   ivci.subresourceRange.levelCount = state->u.tex.last_level - state->u.tex.first_level + 1;
   ivci.subresourceRange.layerCount = state->u.tex.last_layer - state->u.tex.first_layer + 1;

   VkResult err = vkCreateImageView(screen->dev, &ivci, NULL, &sampler_view->image_view);
   if (err != VK_SUCCESS) {
      FREE(sampler_view);
      return NULL;
   }

   return &sampler_view->base;
}

static void
zink_sampler_view_destroy(struct pipe_context *pctx,
                          struct pipe_sampler_view *pview)
{
   struct zink_sampler_view *view = zink_sampler_view(pview);
   vkDestroyImageView(zink_screen(pctx->screen)->dev, view->image_view, NULL);
   FREE(view);
}

static void *
zink_create_vs_state(struct pipe_context *pctx,
                     const struct pipe_shader_state *shader)
{
   struct nir_shader *nir;
   if (shader->type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
   else
      nir = (struct nir_shader *)shader->ir.nir;

   return zink_compile_nir(zink_screen(pctx->screen), nir, &shader->stream_output);
}

static void
bind_stage(struct zink_context *ctx, enum pipe_shader_type stage,
           struct zink_shader *shader)
{
   assert(stage < PIPE_SHADER_COMPUTE);
   ctx->gfx_stages[stage] = shader;
   ctx->dirty_program = true;
}

static void
zink_bind_vs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_VERTEX, cso);
}

static void
zink_delete_vs_state(struct pipe_context *pctx,
                     void *cso)
{
   zink_shader_free(zink_screen(pctx->screen), cso);
}

static void *
zink_create_fs_state(struct pipe_context *pctx,
                     const struct pipe_shader_state *shader)
{
   struct nir_shader *nir;
   if (shader->type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
   else
      nir = (struct nir_shader *)shader->ir.nir;

   return zink_compile_nir(zink_screen(pctx->screen), nir, NULL);
}

static void
zink_bind_fs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_FRAGMENT, cso);
}

static void
zink_delete_fs_state(struct pipe_context *pctx,
                     void *cso)
{
   zink_shader_free(zink_screen(pctx->screen), cso);
}

static void
zink_set_polygon_stipple(struct pipe_context *pctx,
                         const struct pipe_poly_stipple *ps)
{
}

static void
zink_set_vertex_buffers(struct pipe_context *pctx,
                        unsigned start_slot,
                        unsigned num_buffers,
                        const struct pipe_vertex_buffer *buffers)
{
   struct zink_context *ctx = zink_context(pctx);

   if (buffers) {
      for (unsigned i = 0; i < num_buffers; ++i) {
         const struct pipe_vertex_buffer *vb = buffers + i;
         struct zink_resource *res = zink_resource(vb->buffer.resource);

         ctx->gfx_pipeline_state.bindings[start_slot + i].stride = vb->stride;
         if (res && res->needs_xfb_barrier) {
            /* if we're binding a previously-used xfb buffer, we need cmd
             * buffer synchronization to ensure that we use the right buffer
             * data
             */
            pctx->flush(pctx, NULL, 0);
            res->needs_xfb_barrier = false;
         }
      }
   }

   util_set_vertex_buffers_mask(ctx->buffers, &ctx->buffers_enabled_mask,
                                buffers, start_slot, num_buffers);
}

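/* gallium hands us viewports in scale/translate form; recover the VkViewport
 * rectangle from it:
 *    x = translate.x - scale.x      width  = 2 * scale.x
 *    y = translate.y - scale.y      height = 2 * scale.y
 *    minDepth/maxDepth = translate.z -/+ scale.z
 */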
static void
zink_set_viewport_states(struct pipe_context *pctx,
                         unsigned start_slot,
                         unsigned num_viewports,
                         const struct pipe_viewport_state *state)
{
   struct zink_context *ctx = zink_context(pctx);

   for (unsigned i = 0; i < num_viewports; ++i) {
      VkViewport viewport = {
         state[i].translate[0] - state[i].scale[0],
         state[i].translate[1] - state[i].scale[1],
         state[i].scale[0] * 2,
         state[i].scale[1] * 2,
         state[i].translate[2] - state[i].scale[2],
         state[i].translate[2] + state[i].scale[2]
      };
      ctx->viewport_states[start_slot + i] = state[i];
      ctx->viewports[start_slot + i] = viewport;
   }
   ctx->num_viewports = start_slot + num_viewports;
}

static void
zink_set_scissor_states(struct pipe_context *pctx,
                        unsigned start_slot, unsigned num_scissors,
                        const struct pipe_scissor_state *states)
{
   struct zink_context *ctx = zink_context(pctx);

   for (unsigned i = 0; i < num_scissors; i++) {
      VkRect2D scissor;

      scissor.offset.x = states[i].minx;
      scissor.offset.y = states[i].miny;
      scissor.extent.width = states[i].maxx - states[i].minx;
      scissor.extent.height = states[i].maxy - states[i].miny;
      ctx->scissor_states[start_slot + i] = states[i];
      ctx->scissors[start_slot + i] = scissor;
   }
}

static void
zink_set_constant_buffer(struct pipe_context *pctx,
                         enum pipe_shader_type shader, uint index,
                         const struct pipe_constant_buffer *cb)
{
   struct zink_context *ctx = zink_context(pctx);

   if (cb) {
      struct pipe_resource *buffer = cb->buffer;
      unsigned offset = cb->buffer_offset;
      if (cb->user_buffer) {
         struct zink_screen *screen = zink_screen(pctx->screen);
         u_upload_data(ctx->base.const_uploader, 0, cb->buffer_size,
                       screen->props.limits.minUniformBufferOffsetAlignment,
                       cb->user_buffer, &offset, &buffer);
      }

      pipe_resource_reference(&ctx->ubos[shader][index].buffer, buffer);
      ctx->ubos[shader][index].buffer_offset = offset;
      ctx->ubos[shader][index].buffer_size = cb->buffer_size;
      ctx->ubos[shader][index].user_buffer = NULL;

      if (cb->user_buffer)
         pipe_resource_reference(&buffer, NULL);
   } else {
      pipe_resource_reference(&ctx->ubos[shader][index].buffer, NULL);
      ctx->ubos[shader][index].buffer_offset = 0;
      ctx->ubos[shader][index].buffer_size = 0;
      ctx->ubos[shader][index].user_buffer = NULL;
   }
}

static void
zink_set_sampler_views(struct pipe_context *pctx,
                       enum pipe_shader_type shader_type,
                       unsigned start_slot,
                       unsigned num_views,
                       struct pipe_sampler_view **views)
{
   struct zink_context *ctx = zink_context(pctx);
   assert(views);
   for (unsigned i = 0; i < num_views; ++i) {
      pipe_sampler_view_reference(
         &ctx->image_views[shader_type][start_slot + i],
         views[i]);
   }
   ctx->num_image_views[shader_type] = start_slot + num_views;
}

static void
zink_set_stencil_ref(struct pipe_context *pctx,
                     const struct pipe_stencil_ref *ref)
{
   struct zink_context *ctx = zink_context(pctx);
   ctx->stencil_ref = *ref;
}

static void
zink_set_clip_state(struct pipe_context *pctx,
                    const struct pipe_clip_state *pcs)
{
}

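/* render passes are looked up in a hash table keyed on a
 * zink_render_pass_state built from the current framebuffer's formats and
 * sample counts, so framebuffer states with matching attachments can share
 * a single VkRenderPass
 */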
static struct zink_render_pass *
get_render_pass(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   const struct pipe_framebuffer_state *fb = &ctx->fb_state;
   struct zink_render_pass_state state = { 0 };

   for (int i = 0; i < fb->nr_cbufs; i++) {
      struct pipe_surface *surf = fb->cbufs[i];
      state.rts[i].format = zink_get_format(screen, surf->format);
      state.rts[i].samples = surf->nr_samples > 0 ? surf->nr_samples :
                                                    VK_SAMPLE_COUNT_1_BIT;
   }
   state.num_cbufs = fb->nr_cbufs;

   if (fb->zsbuf) {
      struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
      state.rts[fb->nr_cbufs].format = zsbuf->format;
      state.rts[fb->nr_cbufs].samples = zsbuf->base.nr_samples > 0 ? zsbuf->base.nr_samples : VK_SAMPLE_COUNT_1_BIT;
   }
   state.have_zsbuf = fb->zsbuf != NULL;

   struct hash_entry *entry = _mesa_hash_table_search(ctx->render_pass_cache,
                                                      &state);
   if (!entry) {
      struct zink_render_pass *rp;
      rp = zink_create_render_pass(screen, &state);
      entry = _mesa_hash_table_insert(ctx->render_pass_cache, &state, rp);
      if (!entry)
         return NULL;
   }

   return entry->data;
}

static struct zink_framebuffer *
create_framebuffer(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);

   struct zink_framebuffer_state state = {};
   state.rp = get_render_pass(ctx);
   for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
      struct pipe_surface *psurf = ctx->fb_state.cbufs[i];
      state.attachments[i] = zink_surface(psurf);
   }

   state.num_attachments = ctx->fb_state.nr_cbufs;
   if (ctx->fb_state.zsbuf) {
      struct pipe_surface *psurf = ctx->fb_state.zsbuf;
      state.attachments[state.num_attachments++] = zink_surface(psurf);
   }

   state.width = ctx->fb_state.width;
   state.height = ctx->fb_state.height;
   state.layers = MAX2(ctx->fb_state.layers, 1);

   return zink_create_framebuffer(screen, &state);
}

void
zink_begin_render_pass(struct zink_context *ctx, struct zink_batch *batch)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   assert(batch == zink_curr_batch(ctx));
   assert(ctx->gfx_pipeline_state.render_pass);

   struct pipe_framebuffer_state *fb_state = &ctx->fb_state;

   VkRenderPassBeginInfo rpbi = {};
   rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
   rpbi.renderPass = ctx->gfx_pipeline_state.render_pass->render_pass;
   rpbi.renderArea.offset.x = 0;
   rpbi.renderArea.offset.y = 0;
   rpbi.renderArea.extent.width = fb_state->width;
   rpbi.renderArea.extent.height = fb_state->height;
   rpbi.clearValueCount = 0;
   rpbi.pClearValues = NULL;
   rpbi.framebuffer = ctx->framebuffer->fb;

   assert(ctx->gfx_pipeline_state.render_pass && ctx->framebuffer);
   assert(!batch->rp || batch->rp == ctx->gfx_pipeline_state.render_pass);
   assert(!batch->fb || batch->fb == ctx->framebuffer);

   for (int i = 0; i < fb_state->nr_cbufs; i++) {
      struct zink_resource *res = zink_resource(fb_state->cbufs[i]->texture);
      if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
   }

   if (fb_state->zsbuf) {
      struct zink_resource *res = zink_resource(fb_state->zsbuf->texture);
      if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
   }

   zink_render_pass_reference(screen, &batch->rp, ctx->gfx_pipeline_state.render_pass);
   zink_framebuffer_reference(screen, &batch->fb, ctx->framebuffer);

   vkCmdBeginRenderPass(batch->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
}

static void
flush_batch(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (batch->rp)
      vkCmdEndRenderPass(batch->cmdbuf);

   zink_end_batch(ctx, batch);

   ctx->curr_batch++;
   if (ctx->curr_batch == ARRAY_SIZE(ctx->batches))
      ctx->curr_batch = 0;

   zink_start_batch(ctx, zink_curr_batch(ctx));
}

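/* get the current batch with a render pass guaranteed to be active,
 * beginning one if necessary
 */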
struct zink_batch *
zink_batch_rp(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (!batch->rp) {
      zink_begin_render_pass(ctx, batch);
      assert(batch->rp);
   }
   return batch;
}

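/* get a batch with no active render pass; if one is active, the current
 * batch is flushed and a fresh one is started
 */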
struct zink_batch *
zink_batch_no_rp(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (batch->rp) {
      /* flush batch and get a new one */
      flush_batch(ctx);
      batch = zink_curr_batch(ctx);
      assert(!batch->rp);
   }
   return batch;
}

static void
zink_set_framebuffer_state(struct pipe_context *pctx,
                           const struct pipe_framebuffer_state *state)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   util_copy_framebuffer_state(&ctx->fb_state, state);

   struct zink_framebuffer *fb = ctx->framebuffer;
   /* explicitly unref previous fb to ensure it gets destroyed */
   if (fb)
      zink_framebuffer_reference(screen, &fb, NULL);
   fb = create_framebuffer(ctx);
   zink_framebuffer_reference(screen, &ctx->framebuffer, fb);
   zink_render_pass_reference(screen, &ctx->gfx_pipeline_state.render_pass, fb->rp);

   ctx->gfx_pipeline_state.rast_samples = MAX2(state->samples, 1);
   ctx->gfx_pipeline_state.num_attachments = state->nr_cbufs;

   struct zink_batch *batch = zink_batch_no_rp(ctx);

   for (int i = 0; i < state->nr_cbufs; i++) {
      struct zink_resource *res = zink_resource(state->cbufs[i]->texture);
      if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
   }

   if (state->zsbuf) {
      struct zink_resource *res = zink_resource(state->zsbuf->texture);
      if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
   }
}

static void
zink_set_blend_color(struct pipe_context *pctx,
                     const struct pipe_blend_color *color)
{
   struct zink_context *ctx = zink_context(pctx);
   memcpy(ctx->blend_constants, color->color, sizeof(float) * 4);
}

static void
zink_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
   struct zink_context *ctx = zink_context(pctx);
   ctx->gfx_pipeline_state.sample_mask = sample_mask;
}

static VkAccessFlags
access_src_flags(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_UNDEFINED:
   case VK_IMAGE_LAYOUT_GENERAL:
      return 0;

   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;

   case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
      return VK_ACCESS_SHADER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_ACCESS_TRANSFER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_ACCESS_TRANSFER_WRITE_BIT;

   case VK_IMAGE_LAYOUT_PREINITIALIZED:
      return VK_ACCESS_HOST_WRITE_BIT;

   default:
      unreachable("unexpected layout");
   }
}

static VkAccessFlags
access_dst_flags(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_UNDEFINED:
   case VK_IMAGE_LAYOUT_GENERAL:
      return 0;

   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_ACCESS_TRANSFER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_ACCESS_TRANSFER_WRITE_BIT;

   default:
      unreachable("unexpected layout");
   }
}

static VkPipelineStageFlags
pipeline_dst_stage(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;
   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;

   default:
      return VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
   }
}

static VkPipelineStageFlags
pipeline_src_stage(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;
   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;

   default:
      return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
   }
}

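/* transition an image to new_layout across all of its mip levels and array
 * layers; the access and stage masks are derived conservatively from the
 * old and new layouts using the helpers above
 */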
void
zink_resource_barrier(VkCommandBuffer cmdbuf, struct zink_resource *res,
                      VkImageAspectFlags aspect, VkImageLayout new_layout)
{
   VkImageSubresourceRange isr = {
      aspect,
      0, VK_REMAINING_MIP_LEVELS,
      0, VK_REMAINING_ARRAY_LAYERS
   };

   VkImageMemoryBarrier imb = {
      VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
      NULL,
      access_src_flags(res->layout),
      access_dst_flags(new_layout),
      res->layout,
      new_layout,
      VK_QUEUE_FAMILY_IGNORED,
      VK_QUEUE_FAMILY_IGNORED,
      res->image,
      isr
   };
   vkCmdPipelineBarrier(
      cmdbuf,
      pipeline_src_stage(res->layout),
      pipeline_dst_stage(new_layout),
      0,
      0, NULL,
      0, NULL,
      1, &imb
   );

   res->layout = new_layout;
}

static void
zink_clear(struct pipe_context *pctx,
           unsigned buffers,
           const struct pipe_scissor_state *scissor_state,
           const union pipe_color_union *pcolor,
           double depth, unsigned stencil)
{
   struct zink_context *ctx = zink_context(pctx);
   struct pipe_framebuffer_state *fb = &ctx->fb_state;

   /* FIXME: this is very inefficient; if no renderpass has been started yet,
    * we should record the clear if it's full-screen, and apply it as we
    * start the render-pass. Otherwise we can do a partial out-of-renderpass
    * clear.
    */
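   /* vkCmdClearAttachments is only valid inside a render pass, so make sure
    * one has been started
    */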
   struct zink_batch *batch = zink_batch_rp(ctx);

   VkClearAttachment attachments[1 + PIPE_MAX_COLOR_BUFS];
   int num_attachments = 0;

   if (buffers & PIPE_CLEAR_COLOR) {
      VkClearColorValue color;
      color.float32[0] = pcolor->f[0];
      color.float32[1] = pcolor->f[1];
      color.float32[2] = pcolor->f[2];
      color.float32[3] = pcolor->f[3];

      for (unsigned i = 0; i < fb->nr_cbufs; i++) {
         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
            continue;

         attachments[num_attachments].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
         attachments[num_attachments].colorAttachment = i;
         attachments[num_attachments].clearValue.color = color;
         ++num_attachments;
      }
   }

   if ((buffers & PIPE_CLEAR_DEPTHSTENCIL) && fb->zsbuf) {
      VkImageAspectFlags aspect = 0;
      if (buffers & PIPE_CLEAR_DEPTH)
         aspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
      if (buffers & PIPE_CLEAR_STENCIL)
         aspect |= VK_IMAGE_ASPECT_STENCIL_BIT;

      attachments[num_attachments].aspectMask = aspect;
      attachments[num_attachments].clearValue.depthStencil.depth = depth;
      attachments[num_attachments].clearValue.depthStencil.stencil = stencil;
      ++num_attachments;
   }

   VkClearRect cr;
   cr.rect.offset.x = 0;
   cr.rect.offset.y = 0;
   cr.rect.extent.width = fb->width;
   cr.rect.extent.height = fb->height;
   cr.baseArrayLayer = 0;
   cr.layerCount = util_framebuffer_get_num_layers(fb);
   vkCmdClearAttachments(batch->cmdbuf, num_attachments, attachments, 1, &cr);
}

VkShaderStageFlagBits
zink_shader_stage(enum pipe_shader_type type)
{
   VkShaderStageFlagBits stages[] = {
      [PIPE_SHADER_VERTEX] = VK_SHADER_STAGE_VERTEX_BIT,
      [PIPE_SHADER_FRAGMENT] = VK_SHADER_STAGE_FRAGMENT_BIT,
      [PIPE_SHADER_GEOMETRY] = VK_SHADER_STAGE_GEOMETRY_BIT,
      [PIPE_SHADER_TESS_CTRL] = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
      [PIPE_SHADER_TESS_EVAL] = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
      [PIPE_SHADER_COMPUTE] = VK_SHADER_STAGE_COMPUTE_BIT,
   };
   return stages[type];
}

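/* gfx programs are keyed on the array of per-stage shader pointers;
 * PIPE_SHADER_TYPES - 1 leaves out the compute stage, which never
 * participates in a graphics program
 */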
static uint32_t
hash_gfx_program(const void *key)
{
   return _mesa_hash_data(key, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1));
}

static bool
equals_gfx_program(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1)) == 0;
}

static uint32_t
hash_render_pass_state(const void *key)
{
   return _mesa_hash_data(key, sizeof(struct zink_render_pass_state));
}

static bool
equals_render_pass_state(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_render_pass_state)) == 0;
}

static void
zink_flush(struct pipe_context *pctx,
           struct pipe_fence_handle **pfence,
           enum pipe_flush_flags flags)
{
   struct zink_context *ctx = zink_context(pctx);

   struct zink_batch *batch = zink_curr_batch(ctx);
   flush_batch(ctx);

   if (zink_screen(pctx->screen)->have_EXT_transform_feedback && ctx->num_so_targets)
      ctx->dirty_so_targets = true;

   if (pfence)
      zink_fence_reference(zink_screen(pctx->screen),
                           (struct zink_fence **)pfence,
                           batch->fence);

   /* HACK:
    * For some strange reason, we need to finish before presenting, or else
    * we start rendering on top of the back-buffer for the next frame. This
    * seems like a bug in the DRI-driver to me, because we really should
    * be properly protected by fences here, and the back-buffer should
    * either be swapped with the front-buffer, or blitted from. But for
    * some strange reason, neither of these things happen.
    */
   if (flags & PIPE_FLUSH_END_OF_FRAME)
      pctx->screen->fence_finish(pctx->screen, pctx,
                                 (struct pipe_fence_handle *)batch->fence,
                                 PIPE_TIMEOUT_INFINITE);
}

static void
zink_flush_resource(struct pipe_context *pipe,
                    struct pipe_resource *resource)
{
}

static void
zink_resource_copy_region(struct pipe_context *pctx,
                          struct pipe_resource *pdst,
                          unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz,
                          struct pipe_resource *psrc,
                          unsigned src_level, const struct pipe_box *src_box)
{
   struct zink_resource *dst = zink_resource(pdst);
   struct zink_resource *src = zink_resource(psrc);
   struct zink_context *ctx = zink_context(pctx);
   if (dst->base.target != PIPE_BUFFER && src->base.target != PIPE_BUFFER) {
      VkImageCopy region = {};

      region.srcSubresource.aspectMask = src->aspect;
      region.srcSubresource.mipLevel = src_level;
      region.srcSubresource.layerCount = 1;
      if (src->base.array_size > 1) {
         region.srcSubresource.baseArrayLayer = src_box->z;
         region.srcSubresource.layerCount = src_box->depth;
         region.extent.depth = 1;
      } else {
         region.srcOffset.z = src_box->z;
         region.srcSubresource.layerCount = 1;
         region.extent.depth = src_box->depth;
      }

      region.srcOffset.x = src_box->x;
      region.srcOffset.y = src_box->y;

      region.dstSubresource.aspectMask = dst->aspect;
      region.dstSubresource.mipLevel = dst_level;
      if (dst->base.array_size > 1) {
         region.dstSubresource.baseArrayLayer = dstz;
         region.dstSubresource.layerCount = src_box->depth;
      } else {
         region.dstOffset.z = dstz;
         region.dstSubresource.layerCount = 1;
      }

      region.dstOffset.x = dstx;
      region.dstOffset.y = dsty;
      region.extent.width = src_box->width;
      region.extent.height = src_box->height;

      struct zink_batch *batch = zink_batch_no_rp(ctx);
      zink_batch_reference_resoure(batch, src);
      zink_batch_reference_resoure(batch, dst);

      if (src->layout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
         zink_resource_barrier(batch->cmdbuf, src, src->aspect,
                               VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
      }

      if (dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
         zink_resource_barrier(batch->cmdbuf, dst, dst->aspect,
                               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
      }

      vkCmdCopyImage(batch->cmdbuf, src->image, src->layout,
                     dst->image, dst->layout,
                     1, &region);
   } else if (dst->base.target == PIPE_BUFFER &&
              src->base.target == PIPE_BUFFER) {
      VkBufferCopy region;
      region.srcOffset = src_box->x;
      region.dstOffset = dstx;
      region.size = src_box->width;

      struct zink_batch *batch = zink_batch_no_rp(ctx);
      zink_batch_reference_resoure(batch, src);
      zink_batch_reference_resoure(batch, dst);

      vkCmdCopyBuffer(batch->cmdbuf, src->buffer, dst->buffer, 1, &region);
   } else
      debug_printf("zink: TODO resource copy\n");
}

static struct pipe_stream_output_target *
zink_create_stream_output_target(struct pipe_context *pctx,
                                 struct pipe_resource *pres,
                                 unsigned buffer_offset,
                                 unsigned buffer_size)
{
   struct zink_so_target *t;
   t = CALLOC_STRUCT(zink_so_target);
   if (!t)
      return NULL;

   t->base.reference.count = 1;
   t->base.context = pctx;
   pipe_resource_reference(&t->base.buffer, pres);
   t->base.buffer_offset = buffer_offset;
   t->base.buffer_size = buffer_size;

   /* using PIPE_BIND_CUSTOM here lets us create a custom pipe buffer resource,
    * which allows us to differentiate and use VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT
    * as we must for this case
    */
   t->counter_buffer = pipe_buffer_create(pctx->screen, PIPE_BIND_STREAM_OUTPUT | PIPE_BIND_CUSTOM, PIPE_USAGE_DEFAULT, 4);
   if (!t->counter_buffer) {
      FREE(t);
      return NULL;
   }

   return &t->base;
}

static void
zink_stream_output_target_destroy(struct pipe_context *pctx,
                                  struct pipe_stream_output_target *psot)
{
   struct zink_so_target *t = (struct zink_so_target *)psot;
   pipe_resource_reference(&t->counter_buffer, NULL);
   pipe_resource_reference(&t->base.buffer, NULL);
   FREE(t);
}

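/* gallium passes an offset of (unsigned)-1 to resume transform feedback,
 * i.e. to keep appending at whatever counter value the buffer was paused
 * at, which is why only that case requests the extra barrier below
 */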
static void
zink_set_stream_output_targets(struct pipe_context *pctx,
                               unsigned num_targets,
                               struct pipe_stream_output_target **targets,
                               const unsigned *offsets)
{
   struct zink_context *ctx = zink_context(pctx);

   if (num_targets == 0) {
      for (unsigned i = 0; i < ctx->num_so_targets; i++)
         pipe_so_target_reference(&ctx->so_targets[i], NULL);
      ctx->num_so_targets = 0;
   } else {
      for (unsigned i = 0; i < num_targets; i++)
         pipe_so_target_reference(&ctx->so_targets[i], targets[i]);
      for (unsigned i = num_targets; i < ctx->num_so_targets; i++)
         pipe_so_target_reference(&ctx->so_targets[i], NULL);
      ctx->num_so_targets = num_targets;

      /* emit memory barrier on next draw for synchronization */
      if (offsets[0] == (unsigned)-1)
         ctx->xfb_barrier = true;
      /* TODO: possibly avoid rebinding on resume if resuming from same buffers? */
      ctx->dirty_so_targets = true;
   }
}

struct pipe_context *
zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
   struct zink_screen *screen = zink_screen(pscreen);
   struct zink_context *ctx = CALLOC_STRUCT(zink_context);
   if (!ctx)
      goto fail;

   ctx->base.screen = pscreen;
   ctx->base.priv = priv;

   ctx->base.destroy = zink_context_destroy;

   zink_context_state_init(&ctx->base);

   ctx->base.create_sampler_state = zink_create_sampler_state;
   ctx->base.bind_sampler_states = zink_bind_sampler_states;
   ctx->base.delete_sampler_state = zink_delete_sampler_state;

   ctx->base.create_sampler_view = zink_create_sampler_view;
   ctx->base.set_sampler_views = zink_set_sampler_views;
   ctx->base.sampler_view_destroy = zink_sampler_view_destroy;

   ctx->base.create_vs_state = zink_create_vs_state;
   ctx->base.bind_vs_state = zink_bind_vs_state;
   ctx->base.delete_vs_state = zink_delete_vs_state;

   ctx->base.create_fs_state = zink_create_fs_state;
   ctx->base.bind_fs_state = zink_bind_fs_state;
   ctx->base.delete_fs_state = zink_delete_fs_state;

   ctx->base.set_polygon_stipple = zink_set_polygon_stipple;
   ctx->base.set_vertex_buffers = zink_set_vertex_buffers;
   ctx->base.set_viewport_states = zink_set_viewport_states;
   ctx->base.set_scissor_states = zink_set_scissor_states;
   ctx->base.set_constant_buffer = zink_set_constant_buffer;
   ctx->base.set_framebuffer_state = zink_set_framebuffer_state;
   ctx->base.set_stencil_ref = zink_set_stencil_ref;
   ctx->base.set_clip_state = zink_set_clip_state;
   ctx->base.set_blend_color = zink_set_blend_color;

   ctx->base.set_sample_mask = zink_set_sample_mask;

   ctx->base.clear = zink_clear;
   ctx->base.draw_vbo = zink_draw_vbo;
   ctx->base.flush = zink_flush;

   ctx->base.resource_copy_region = zink_resource_copy_region;
   ctx->base.blit = zink_blit;
   ctx->base.create_stream_output_target = zink_create_stream_output_target;
   ctx->base.stream_output_target_destroy = zink_stream_output_target_destroy;

   ctx->base.set_stream_output_targets = zink_set_stream_output_targets;
   ctx->base.flush_resource = zink_flush_resource;
   zink_context_surface_init(&ctx->base);
   zink_context_resource_init(&ctx->base);
   zink_context_query_init(&ctx->base);

   slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);

   ctx->base.stream_uploader = u_upload_create_default(&ctx->base);
   ctx->base.const_uploader = ctx->base.stream_uploader;

   int prim_hwsupport = 1 << PIPE_PRIM_POINTS |
                        1 << PIPE_PRIM_LINES |
                        1 << PIPE_PRIM_LINE_STRIP |
                        1 << PIPE_PRIM_TRIANGLES |
                        1 << PIPE_PRIM_TRIANGLE_STRIP |
                        1 << PIPE_PRIM_TRIANGLE_FAN;

   ctx->primconvert = util_primconvert_create(&ctx->base, prim_hwsupport);
   if (!ctx->primconvert)
      goto fail;

   ctx->blitter = util_blitter_create(&ctx->base);
   if (!ctx->blitter)
      goto fail;

   VkCommandPoolCreateInfo cpci = {};
   cpci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
   cpci.queueFamilyIndex = screen->gfx_queue;
   cpci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
   if (vkCreateCommandPool(screen->dev, &cpci, NULL, &ctx->cmdpool) != VK_SUCCESS)
      goto fail;

   VkCommandBufferAllocateInfo cbai = {};
   cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
   cbai.commandPool = ctx->cmdpool;
   cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
   cbai.commandBufferCount = 1;

   VkDescriptorPoolSize sizes[] = {
      {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, ZINK_BATCH_DESC_SIZE},
      {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, ZINK_BATCH_DESC_SIZE}
   };
   VkDescriptorPoolCreateInfo dpci = {};
   dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
   dpci.pPoolSizes = sizes;
   dpci.poolSizeCount = ARRAY_SIZE(sizes);
   dpci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
   dpci.maxSets = ZINK_BATCH_DESC_SIZE;

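   /* zink cycles through a small fixed ring of batches; give each one its
    * own command buffer, descriptor pool and sets for tracking referenced
    * objects
    */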
   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i) {
      if (vkAllocateCommandBuffers(screen->dev, &cbai, &ctx->batches[i].cmdbuf) != VK_SUCCESS)
         goto fail;

      ctx->batches[i].resources = _mesa_set_create(NULL, _mesa_hash_pointer,
                                                   _mesa_key_pointer_equal);
      ctx->batches[i].sampler_views = _mesa_set_create(NULL,
                                                       _mesa_hash_pointer,
                                                       _mesa_key_pointer_equal);

      if (!ctx->batches[i].resources || !ctx->batches[i].sampler_views)
         goto fail;

      util_dynarray_init(&ctx->batches[i].zombie_samplers, NULL);

      if (vkCreateDescriptorPool(screen->dev, &dpci, 0,
                                 &ctx->batches[i].descpool) != VK_SUCCESS)
         goto fail;
   }

   vkGetDeviceQueue(screen->dev, screen->gfx_queue, 0, &ctx->queue);

   ctx->program_cache = _mesa_hash_table_create(NULL,
                                                hash_gfx_program,
                                                equals_gfx_program);
   ctx->render_pass_cache = _mesa_hash_table_create(NULL,
                                                    hash_render_pass_state,
                                                    equals_render_pass_state);
   if (!ctx->program_cache || !ctx->render_pass_cache)
      goto fail;

   const uint8_t data[] = { 0 };
   ctx->dummy_buffer = pipe_buffer_create_with_data(&ctx->base,
      PIPE_BIND_VERTEX_BUFFER, PIPE_USAGE_IMMUTABLE, sizeof(data), data);
   if (!ctx->dummy_buffer)
      goto fail;

   ctx->dirty_program = true;

   /* start the first batch */
   zink_start_batch(ctx, zink_curr_batch(ctx));

   return &ctx->base;

fail:
   if (ctx) {
      vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);
      FREE(ctx);
   }
   return NULL;
}