zink: add sample mask support
src/gallium/drivers/zink/zink_context.c
/*
 * Copyright 2018 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "zink_context.h"

#include "zink_batch.h"
#include "zink_compiler.h"
#include "zink_fence.h"
#include "zink_framebuffer.h"
#include "zink_pipeline.h"
#include "zink_program.h"
#include "zink_render_pass.h"
#include "zink_resource.h"
#include "zink_screen.h"
#include "zink_state.h"
#include "zink_surface.h"

#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
#include "util/u_debug.h"
#include "util/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"

#include "nir.h"

#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_upload_mgr.h"

static void
zink_context_destroy(struct pipe_context *pctx)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   if (vkQueueWaitIdle(ctx->queue) != VK_SUCCESS)
      debug_printf("vkQueueWaitIdle failed\n");

   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i)
      vkFreeCommandBuffers(screen->dev, ctx->cmdpool, 1, &ctx->batches[i].cmdbuf);
   vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);

   util_primconvert_destroy(ctx->primconvert);
   u_upload_destroy(pctx->stream_uploader);
   slab_destroy_child(&ctx->transfer_pool);
   util_blitter_destroy(ctx->blitter);
   FREE(ctx);
}

static VkFilter
filter(enum pipe_tex_filter filter)
{
   switch (filter) {
   case PIPE_TEX_FILTER_NEAREST: return VK_FILTER_NEAREST;
   case PIPE_TEX_FILTER_LINEAR: return VK_FILTER_LINEAR;
   }
   unreachable("unexpected filter");
}

static VkSamplerMipmapMode
sampler_mipmap_mode(enum pipe_tex_mipfilter filter)
{
   switch (filter) {
   case PIPE_TEX_MIPFILTER_NEAREST: return VK_SAMPLER_MIPMAP_MODE_NEAREST;
   case PIPE_TEX_MIPFILTER_LINEAR: return VK_SAMPLER_MIPMAP_MODE_LINEAR;
   case PIPE_TEX_MIPFILTER_NONE:
      unreachable("PIPE_TEX_MIPFILTER_NONE should be dealt with earlier");
   }
   unreachable("unexpected filter");
}

static VkSamplerAddressMode
sampler_address_mode(enum pipe_tex_wrap filter)
{
   switch (filter) {
   case PIPE_TEX_WRAP_REPEAT: return VK_SAMPLER_ADDRESS_MODE_REPEAT;
   case PIPE_TEX_WRAP_CLAMP: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
   case PIPE_TEX_WRAP_MIRROR_REPEAT: return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
   case PIPE_TEX_WRAP_MIRROR_CLAMP: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   }
   unreachable("unexpected wrap");
}

static VkCompareOp
compare_op(enum pipe_compare_func op)
{
   switch (op) {
   case PIPE_FUNC_NEVER: return VK_COMPARE_OP_NEVER;
   case PIPE_FUNC_LESS: return VK_COMPARE_OP_LESS;
   case PIPE_FUNC_EQUAL: return VK_COMPARE_OP_EQUAL;
   case PIPE_FUNC_LEQUAL: return VK_COMPARE_OP_LESS_OR_EQUAL;
   case PIPE_FUNC_GREATER: return VK_COMPARE_OP_GREATER;
   case PIPE_FUNC_NOTEQUAL: return VK_COMPARE_OP_NOT_EQUAL;
   case PIPE_FUNC_GEQUAL: return VK_COMPARE_OP_GREATER_OR_EQUAL;
   case PIPE_FUNC_ALWAYS: return VK_COMPARE_OP_ALWAYS;
   }
   unreachable("unexpected compare");
}

static void *
zink_create_sampler_state(struct pipe_context *pctx,
                          const struct pipe_sampler_state *state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);

   VkSamplerCreateInfo sci = {};
   sci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
   sci.magFilter = filter(state->mag_img_filter);
   sci.minFilter = filter(state->min_img_filter);

   if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) {
      sci.mipmapMode = sampler_mipmap_mode(state->min_mip_filter);
      sci.minLod = state->min_lod;
      sci.maxLod = state->max_lod;
   } else {
      sci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
      sci.minLod = 0;
      sci.maxLod = 0;
   }

   sci.addressModeU = sampler_address_mode(state->wrap_s);
   sci.addressModeV = sampler_address_mode(state->wrap_t);
   sci.addressModeW = sampler_address_mode(state->wrap_r);
   sci.mipLodBias = state->lod_bias;

   if (state->compare_mode == PIPE_TEX_COMPARE_NONE)
      sci.compareOp = VK_COMPARE_OP_NEVER;
   else {
      /* compareOp is ignored unless compareEnable is set */
      sci.compareEnable = VK_TRUE;
      sci.compareOp = compare_op(state->compare_func);
   }

   sci.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; // TODO
   sci.unnormalizedCoordinates = !state->normalized_coords;

   if (state->max_anisotropy > 1) {
      sci.maxAnisotropy = state->max_anisotropy;
      sci.anisotropyEnable = VK_TRUE;
   }

   VkSampler sampler;
   VkResult err = vkCreateSampler(screen->dev, &sci, NULL, &sampler);
   if (err != VK_SUCCESS)
      return NULL;

   return sampler;
}

static void
zink_bind_sampler_states(struct pipe_context *pctx,
                         enum pipe_shader_type shader,
                         unsigned start_slot,
                         unsigned num_samplers,
                         void **samplers)
{
   struct zink_context *ctx = zink_context(pctx);
   for (unsigned i = 0; i < num_samplers; ++i)
      ctx->samplers[shader][start_slot + i] = (VkSampler)samplers[i];
}

static void
zink_delete_sampler_state(struct pipe_context *pctx,
                          void *sampler_state)
{
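   /* the sampler may still be referenced by commands in an in-flight batch,
    * so destruction is deferred until that batch's fence has signaled */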
   struct zink_batch *batch = zink_curr_batch(zink_context(pctx));
   util_dynarray_append(&batch->zombie_samplers,
                        VkSampler, sampler_state);
}


static VkImageViewType
image_view_type(enum pipe_texture_target target)
{
   switch (target) {
   case PIPE_TEXTURE_1D: return VK_IMAGE_VIEW_TYPE_1D;
   case PIPE_TEXTURE_1D_ARRAY: return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
   case PIPE_TEXTURE_2D: return VK_IMAGE_VIEW_TYPE_2D;
   case PIPE_TEXTURE_2D_ARRAY: return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
   case PIPE_TEXTURE_CUBE: return VK_IMAGE_VIEW_TYPE_CUBE;
   case PIPE_TEXTURE_CUBE_ARRAY: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
   case PIPE_TEXTURE_3D: return VK_IMAGE_VIEW_TYPE_3D;
   case PIPE_TEXTURE_RECT: return VK_IMAGE_VIEW_TYPE_2D; /* not sure */
   default:
      unreachable("unexpected target");
   }
}

static VkComponentSwizzle
component_mapping(enum pipe_swizzle swizzle)
{
   switch (swizzle) {
   case PIPE_SWIZZLE_X: return VK_COMPONENT_SWIZZLE_R;
   case PIPE_SWIZZLE_Y: return VK_COMPONENT_SWIZZLE_G;
   case PIPE_SWIZZLE_Z: return VK_COMPONENT_SWIZZLE_B;
   case PIPE_SWIZZLE_W: return VK_COMPONENT_SWIZZLE_A;
   case PIPE_SWIZZLE_0: return VK_COMPONENT_SWIZZLE_ZERO;
   case PIPE_SWIZZLE_1: return VK_COMPONENT_SWIZZLE_ONE;
   case PIPE_SWIZZLE_NONE: return VK_COMPONENT_SWIZZLE_IDENTITY; // ???
   default:
      unreachable("unexpected swizzle");
   }
}

static struct pipe_sampler_view *
zink_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *pres,
                         const struct pipe_sampler_view *state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_resource *res = zink_resource(pres);
   struct zink_sampler_view *sampler_view = CALLOC_STRUCT(zink_sampler_view);

   sampler_view->base = *state;
   sampler_view->base.texture = NULL;
   pipe_resource_reference(&sampler_view->base.texture, pres);
   sampler_view->base.reference.count = 1;
   sampler_view->base.context = pctx;

   VkImageViewCreateInfo ivci = {};
   ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
   ivci.image = res->image;
   ivci.viewType = image_view_type(state->target);
   ivci.format = zink_get_format(state->format);
   ivci.components.r = component_mapping(state->swizzle_r);
   ivci.components.g = component_mapping(state->swizzle_g);
   ivci.components.b = component_mapping(state->swizzle_b);
   ivci.components.a = component_mapping(state->swizzle_a);
   ivci.subresourceRange.aspectMask = zink_aspect_from_format(state->format);
   ivci.subresourceRange.baseMipLevel = state->u.tex.first_level;
   ivci.subresourceRange.baseArrayLayer = state->u.tex.first_layer;
   ivci.subresourceRange.levelCount = state->u.tex.last_level - state->u.tex.first_level + 1;
   ivci.subresourceRange.layerCount = state->u.tex.last_layer - state->u.tex.first_layer + 1;

   VkResult err = vkCreateImageView(screen->dev, &ivci, NULL, &sampler_view->image_view);
   if (err != VK_SUCCESS) {
      FREE(sampler_view);
      return NULL;
   }

   return &sampler_view->base;
}

static void
zink_sampler_view_destroy(struct pipe_context *pctx,
                          struct pipe_sampler_view *pview)
{
   struct zink_sampler_view *view = zink_sampler_view(pview);
   vkDestroyImageView(zink_screen(pctx->screen)->dev, view->image_view, NULL);
   FREE(view);
}

static void *
zink_create_vs_state(struct pipe_context *pctx,
                     const struct pipe_shader_state *shader)
{
   struct nir_shader *nir;
   if (shader->type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
   else
      nir = (struct nir_shader *)shader->ir.nir;

   return zink_compile_nir(zink_screen(pctx->screen), nir);
}

static void
bind_stage(struct zink_context *ctx, enum pipe_shader_type stage,
           struct zink_shader *shader)
{
   assert(stage < PIPE_SHADER_COMPUTE);
   ctx->gfx_stages[stage] = shader;
   ctx->dirty |= ZINK_DIRTY_PROGRAM;
}

static void
zink_bind_vs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_VERTEX, cso);
}

static void
zink_delete_vs_state(struct pipe_context *pctx,
                     void *cso)
{
   zink_shader_free(zink_screen(pctx->screen), cso);
}

static void *
zink_create_fs_state(struct pipe_context *pctx,
                     const struct pipe_shader_state *shader)
{
   struct nir_shader *nir;
   if (shader->type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
   else
      nir = (struct nir_shader *)shader->ir.nir;

   return zink_compile_nir(zink_screen(pctx->screen), nir);
}

static void
zink_bind_fs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_FRAGMENT, cso);
}

static void
zink_delete_fs_state(struct pipe_context *pctx,
                     void *cso)
{
   zink_shader_free(zink_screen(pctx->screen), cso);
}

static void
zink_set_polygon_stipple(struct pipe_context *pctx,
                         const struct pipe_poly_stipple *ps)
{
}

static void
zink_set_vertex_buffers(struct pipe_context *pctx,
                        unsigned start_slot,
                        unsigned num_buffers,
                        const struct pipe_vertex_buffer *buffers)
{
   struct zink_context *ctx = zink_context(pctx);

   if (buffers) {
      for (int i = 0; i < num_buffers; ++i) {
         const struct pipe_vertex_buffer *vb = buffers + i;
         ctx->gfx_pipeline_state.bindings[start_slot + i].stride = vb->stride;
      }
   }

   util_set_vertex_buffers_mask(ctx->buffers, &ctx->buffers_enabled_mask,
                                buffers, start_slot, num_buffers);
}

static void
zink_set_viewport_states(struct pipe_context *pctx,
                         unsigned start_slot,
                         unsigned num_viewports,
                         const struct pipe_viewport_state *state)
{
   struct zink_context *ctx = zink_context(pctx);

   for (unsigned i = 0; i < num_viewports; ++i) {
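      /* gallium gives viewports as a scale and translate applied to NDC;
       * recover the VkViewport rectangle (upper-left origin, width/height)
       * and depth range from those */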
      VkViewport viewport = {
         state[i].translate[0] - state[i].scale[0],
         state[i].translate[1] - state[i].scale[1],
         state[i].scale[0] * 2,
         state[i].scale[1] * 2,
         state[i].translate[2] - state[i].scale[2],
         state[i].translate[2] + state[i].scale[2]
      };
      ctx->viewports[start_slot + i] = viewport;
   }
   ctx->num_viewports = start_slot + num_viewports;
}

static void
zink_set_scissor_states(struct pipe_context *pctx,
                        unsigned start_slot, unsigned num_scissors,
                        const struct pipe_scissor_state *states)
{
   struct zink_context *ctx = zink_context(pctx);

   for (unsigned i = 0; i < num_scissors; i++) {
      VkRect2D scissor;

      scissor.offset.x = states[i].minx;
      scissor.offset.y = states[i].miny;
      scissor.extent.width = states[i].maxx - states[i].minx;
      scissor.extent.height = states[i].maxy - states[i].miny;
      ctx->scissors[start_slot + i] = scissor;
   }
   ctx->num_scissors = start_slot + num_scissors;
}

static void
zink_set_constant_buffer(struct pipe_context *pctx,
                         enum pipe_shader_type shader, uint index,
                         const struct pipe_constant_buffer *cb)
{
   struct zink_context *ctx = zink_context(pctx);

   if (cb) {
      struct pipe_resource *buffer = cb->buffer;
      unsigned offset = cb->buffer_offset;
      if (cb->user_buffer)
         u_upload_data(ctx->base.const_uploader, 0, cb->buffer_size, 64,
                       cb->user_buffer, &offset, &buffer);

      pipe_resource_reference(&ctx->ubos[shader][index].buffer, buffer);
      ctx->ubos[shader][index].buffer_offset = offset;
      ctx->ubos[shader][index].buffer_size = cb->buffer_size;
      ctx->ubos[shader][index].user_buffer = NULL;

      if (cb->user_buffer)
         pipe_resource_reference(&buffer, NULL);
   } else {
      pipe_resource_reference(&ctx->ubos[shader][index].buffer, NULL);
      ctx->ubos[shader][index].buffer_offset = 0;
      ctx->ubos[shader][index].buffer_size = 0;
      ctx->ubos[shader][index].user_buffer = NULL;
   }
}

static void
zink_set_sampler_views(struct pipe_context *pctx,
                       enum pipe_shader_type shader_type,
                       unsigned start_slot,
                       unsigned num_views,
                       struct pipe_sampler_view **views)
{
   struct zink_context *ctx = zink_context(pctx);
   assert(views);
   for (unsigned i = 0; i < num_views; ++i) {
      pipe_sampler_view_reference(
         &ctx->image_views[shader_type][start_slot + i],
         views[i]);
   }
}

static void
zink_set_stencil_ref(struct pipe_context *pctx,
                     const struct pipe_stencil_ref *ref)
{
   struct zink_context *ctx = zink_context(pctx);
   ctx->stencil_ref[0] = ref->ref_value[0];
   ctx->stencil_ref[1] = ref->ref_value[1];
}

static void
zink_set_clip_state(struct pipe_context *pctx,
                    const struct pipe_clip_state *pcs)
{
}

static struct zink_render_pass *
get_render_pass(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   const struct pipe_framebuffer_state *fb = &ctx->fb_state;

   /* zero-initialize so unused rts and any padding hash consistently */
   struct zink_render_pass_state state = {};

   for (int i = 0; i < fb->nr_cbufs; i++) {
      struct zink_resource *cbuf = zink_resource(fb->cbufs[i]->texture);
      state.rts[i].format = cbuf->format;
   }
   state.num_cbufs = fb->nr_cbufs;

   if (fb->zsbuf) {
      struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
      state.rts[fb->nr_cbufs].format = zsbuf->format;
   }
   state.have_zsbuf = fb->zsbuf != NULL;

   struct hash_entry *entry = _mesa_hash_table_search(ctx->render_pass_cache,
                                                      &state);
   if (!entry) {
      struct zink_render_pass *rp = zink_create_render_pass(screen, &state);
      if (!rp)
         return NULL;

      /* the hash table keeps the key pointer, so insert a heap copy rather
       * than the stack variable */
      struct zink_render_pass_state *key = MALLOC(sizeof(state));
      if (!key)
         return NULL;
      memcpy(key, &state, sizeof(state));

      entry = _mesa_hash_table_insert(ctx->render_pass_cache, key, rp);
      if (!entry)
         return NULL;
   }

   return entry->data;
}

static struct zink_framebuffer *
get_framebuffer(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);

   struct zink_framebuffer_state state = {};
   state.rp = get_render_pass(ctx);
   for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
      struct pipe_surface *psurf = ctx->fb_state.cbufs[i];
      state.attachments[i] = zink_surface(psurf);
   }

   state.num_attachments = ctx->fb_state.nr_cbufs;
   if (ctx->fb_state.zsbuf) {
      struct pipe_surface *psurf = ctx->fb_state.zsbuf;
      state.attachments[state.num_attachments++] = zink_surface(psurf);
   }

   state.width = ctx->fb_state.width;
   state.height = ctx->fb_state.height;
   state.layers = MAX2(ctx->fb_state.layers, 1);

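   /* FIXME: the table keeps the key pointer, and this key lives on the
    * stack, so the stored key dangles once we return; good enough for
    * bring-up, but this wants a stable heap-allocated copy, like
    * get_render_pass uses above */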
   struct hash_entry *entry = _mesa_hash_table_search(ctx->framebuffer_cache,
                                                      &state);
   if (!entry) {
      struct zink_framebuffer *fb = zink_create_framebuffer(screen, &state);
      entry = _mesa_hash_table_insert(ctx->framebuffer_cache, &state, fb);
      if (!entry)
         return NULL;
   }

   return entry->data;
}

void
zink_begin_render_pass(struct zink_context *ctx, struct zink_batch *batch)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   assert(batch == zink_curr_batch(ctx));
   assert(ctx->gfx_pipeline_state.render_pass);

   VkRenderPassBeginInfo rpbi = {};
   rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
   rpbi.renderPass = ctx->gfx_pipeline_state.render_pass->render_pass;
   rpbi.renderArea.offset.x = 0;
   rpbi.renderArea.offset.y = 0;
   rpbi.renderArea.extent.width = ctx->fb_state.width;
   rpbi.renderArea.extent.height = ctx->fb_state.height;
   rpbi.clearValueCount = 0;
   rpbi.pClearValues = NULL;
   rpbi.framebuffer = ctx->framebuffer->fb;

   assert(ctx->gfx_pipeline_state.render_pass && ctx->framebuffer);
   assert(!batch->rp || batch->rp == ctx->gfx_pipeline_state.render_pass);
   assert(!batch->fb || batch->fb == ctx->framebuffer);

   zink_render_pass_reference(screen, &batch->rp, ctx->gfx_pipeline_state.render_pass);
   zink_framebuffer_reference(screen, &batch->fb, ctx->framebuffer);

   vkCmdBeginRenderPass(batch->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
}

static void
flush_batch(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (batch->rp)
      vkCmdEndRenderPass(batch->cmdbuf);

   zink_end_batch(ctx, batch);

   ctx->curr_batch++;
   if (ctx->curr_batch == ARRAY_SIZE(ctx->batches))
      ctx->curr_batch = 0;

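   /* the batches form a small ring; starting a previously-used batch is
    * expected to wait on its old fence first, so we never record into a
    * command buffer that is still in flight */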
   zink_start_batch(ctx, zink_curr_batch(ctx));
}

struct zink_batch *
zink_batch_rp(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (!batch->rp) {
      zink_begin_render_pass(ctx, batch);
      assert(batch->rp);
   }
   return batch;
}

struct zink_batch *
zink_batch_no_rp(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (batch->rp) {
      /* flush batch and get a new one */
      flush_batch(ctx);
      batch = zink_curr_batch(ctx);
      assert(!batch->rp);
   }
   return batch;
}

static void
zink_set_framebuffer_state(struct pipe_context *pctx,
                           const struct pipe_framebuffer_state *state)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   util_copy_framebuffer_state(&ctx->fb_state, state);

   struct zink_framebuffer *fb = get_framebuffer(ctx);
   zink_framebuffer_reference(screen, &ctx->framebuffer, fb);
   zink_render_pass_reference(screen, &ctx->gfx_pipeline_state.render_pass, fb->rp);

   ctx->gfx_pipeline_state.num_attachments = state->nr_cbufs;

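   /* image layout transitions must be recorded outside of a render pass,
    * so grab a batch that is not inside one */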
   struct zink_batch *batch = zink_batch_no_rp(ctx);

   for (int i = 0; i < state->nr_cbufs; i++) {
      struct zink_resource *res = zink_resource(state->cbufs[i]->texture);
      if (res->layout != VK_IMAGE_LAYOUT_GENERAL &&
          res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_GENERAL);
   }

   if (state->zsbuf) {
      struct zink_resource *res = zink_resource(state->zsbuf->texture);
      if (res->layout != VK_IMAGE_LAYOUT_GENERAL &&
          res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_GENERAL);
   }
}

static void
zink_set_active_query_state(struct pipe_context *pctx, bool enable)
{
}

static void
zink_set_blend_color(struct pipe_context *pctx,
                     const struct pipe_blend_color *color)
{
   struct zink_context *ctx = zink_context(pctx);
   memcpy(ctx->blend_constants, color->color, sizeof(float) * 4);
}

static void
zink_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
   struct zink_context *ctx = zink_context(pctx);
   ctx->gfx_pipeline_state.sample_mask = sample_mask;
}

static VkAccessFlags
access_flags(VkImageLayout layout)
{
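   /* map an image layout to the access flags used when transitioning into
    * or out of it; UNDEFINED and GENERAL deliberately map to 0 */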
   switch (layout) {
   case VK_IMAGE_LAYOUT_UNDEFINED:
   case VK_IMAGE_LAYOUT_GENERAL:
      return 0;

   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;

   case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
      return VK_ACCESS_SHADER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_ACCESS_TRANSFER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_ACCESS_TRANSFER_WRITE_BIT;

   case VK_IMAGE_LAYOUT_PREINITIALIZED:
      return VK_ACCESS_HOST_WRITE_BIT;

   default:
      unreachable("unexpected layout");
   }
}

void
zink_resource_barrier(VkCommandBuffer cmdbuf, struct zink_resource *res,
                      VkImageAspectFlags aspect, VkImageLayout new_layout)
{
   VkImageSubresourceRange isr = {
      aspect,
      0, VK_REMAINING_MIP_LEVELS,
      0, VK_REMAINING_ARRAY_LAYERS
   };

   VkImageMemoryBarrier imb = {
      VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
      NULL,
      access_flags(res->layout),
      access_flags(new_layout),
      res->layout,
      new_layout,
      VK_QUEUE_FAMILY_IGNORED,
      VK_QUEUE_FAMILY_IGNORED,
      res->image,
      isr
   };

   /* a full ALL_COMMANDS -> ALL_COMMANDS dependency: maximally conservative,
    * but it guarantees the transition (and the access masks above, which
    * need stages that actually support them) is ordered against all
    * surrounding work */
   vkCmdPipelineBarrier(
      cmdbuf,
      VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
      VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
      0,
      0, NULL,
      0, NULL,
      1, &imb
   );

   res->layout = new_layout;
}

static void
zink_clear(struct pipe_context *pctx,
           unsigned buffers,
           const union pipe_color_union *pcolor,
           double depth, unsigned stencil)
{
   struct zink_context *ctx = zink_context(pctx);
   struct pipe_framebuffer_state *fb = &ctx->fb_state;

   /* FIXME: this is very inefficient; if no renderpass has been started yet,
    * we should record the clear if it's full-screen, and apply it as we
    * start the render-pass. Otherwise we can do a partial out-of-renderpass
    * clear.
    */
   struct zink_batch *batch = zink_batch_rp(ctx);

   VkClearAttachment attachments[1 + PIPE_MAX_COLOR_BUFS];
   int num_attachments = 0;

   if (buffers & PIPE_CLEAR_COLOR) {
      VkClearColorValue color;
      color.float32[0] = pcolor->f[0];
      color.float32[1] = pcolor->f[1];
      color.float32[2] = pcolor->f[2];
      color.float32[3] = pcolor->f[3];

      for (unsigned i = 0; i < fb->nr_cbufs; i++) {
         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
            continue;

         attachments[num_attachments].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
         attachments[num_attachments].colorAttachment = i;
         attachments[num_attachments].clearValue.color = color;
         ++num_attachments;
      }
   }

   if (buffers & PIPE_CLEAR_DEPTHSTENCIL && fb->zsbuf) {
      VkImageAspectFlags aspect = 0;
      if (buffers & PIPE_CLEAR_DEPTH)
         aspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
      if (buffers & PIPE_CLEAR_STENCIL)
         aspect |= VK_IMAGE_ASPECT_STENCIL_BIT;

      attachments[num_attachments].aspectMask = aspect;
      attachments[num_attachments].clearValue.depthStencil.depth = depth;
      attachments[num_attachments].clearValue.depthStencil.stencil = stencil;
      ++num_attachments;
   }

   unsigned num_layers = util_framebuffer_get_num_layers(fb);
   VkClearRect rects[PIPE_MAX_VIEWPORTS];
   uint32_t num_rects;
   if (ctx->num_scissors) {
      for (unsigned i = 0; i < ctx->num_scissors; ++i) {
         rects[i].rect = ctx->scissors[i];
         rects[i].rect.extent.width = MIN2(rects[i].rect.extent.width,
                                           fb->width);
         rects[i].rect.extent.height = MIN2(rects[i].rect.extent.height,
                                            fb->height);
         rects[i].baseArrayLayer = 0;
         rects[i].layerCount = num_layers;
      }
      num_rects = ctx->num_scissors;
   } else {
      rects[0].rect.offset.x = 0;
      rects[0].rect.offset.y = 0;
      rects[0].rect.extent.width = fb->width;
      rects[0].rect.extent.height = fb->height;
      rects[0].baseArrayLayer = 0;
      rects[0].layerCount = num_layers;
      num_rects = 1;
   }

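   /* vkCmdClearAttachments must be recorded inside a render pass, and the
    * clear is clipped to the rects above: the active scissors if any,
    * otherwise the full framebuffer */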
   vkCmdClearAttachments(batch->cmdbuf,
                         num_attachments, attachments,
                         num_rects, rects);
}

VkShaderStageFlagBits
zink_shader_stage(enum pipe_shader_type type)
{
   VkShaderStageFlagBits stages[] = {
      [PIPE_SHADER_VERTEX] = VK_SHADER_STAGE_VERTEX_BIT,
      [PIPE_SHADER_FRAGMENT] = VK_SHADER_STAGE_FRAGMENT_BIT,
      [PIPE_SHADER_GEOMETRY] = VK_SHADER_STAGE_GEOMETRY_BIT,
      [PIPE_SHADER_TESS_CTRL] = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
      [PIPE_SHADER_TESS_EVAL] = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
      [PIPE_SHADER_COMPUTE] = VK_SHADER_STAGE_COMPUTE_BIT,
   };
   return stages[type];
}

static VkDescriptorSet
allocate_descriptor_set(struct zink_screen *screen,
                        struct zink_batch *batch,
                        struct zink_gfx_program *prog)
{
   assert(batch->descs_left >= prog->num_descriptors);
   VkDescriptorSetAllocateInfo dsai;
   memset((void *)&dsai, 0, sizeof(dsai));
   dsai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
   dsai.pNext = NULL;
   dsai.descriptorPool = batch->descpool;
   dsai.descriptorSetCount = 1;
   dsai.pSetLayouts = &prog->dsl;

   VkDescriptorSet desc_set;
   if (vkAllocateDescriptorSets(screen->dev, &dsai, &desc_set) != VK_SUCCESS) {
      debug_printf("ZINK: failed to allocate descriptor set :/\n");
      return VK_NULL_HANDLE;
   }

   batch->descs_left -= prog->num_descriptors;
   return desc_set;
}

static void
zink_bind_vertex_buffers(struct zink_batch *batch, struct zink_context *ctx)
{
   VkBuffer buffers[PIPE_MAX_ATTRIBS];
   VkDeviceSize buffer_offsets[PIPE_MAX_ATTRIBS];
   const struct zink_vertex_elements_state *elems = ctx->element_state;
   for (unsigned i = 0; i < elems->hw_state.num_bindings; i++) {
      struct pipe_vertex_buffer *vb = ctx->buffers + ctx->element_state->binding_map[i];
      assert(vb && vb->buffer.resource);
      struct zink_resource *res = zink_resource(vb->buffer.resource);
      buffers[i] = res->buffer;
      buffer_offsets[i] = vb->buffer_offset;
      zink_batch_reference_resoure(batch, res);
   }

   if (elems->hw_state.num_bindings > 0)
      vkCmdBindVertexBuffers(batch->cmdbuf, 0,
                             elems->hw_state.num_bindings,
                             buffers, buffer_offsets);
}

static uint32_t
hash_gfx_program(const void *key)
{
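   /* only the graphics stages form the key; the last slot
    * (PIPE_SHADER_COMPUTE) is not part of a gfx program */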
   return _mesa_hash_data(key, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1));
}

static bool
equals_gfx_program(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1)) == 0;
}

static uint32_t
hash_render_pass_state(const void *key)
{
   return _mesa_hash_data(key, sizeof(struct zink_render_pass_state));
}

static bool
equals_render_pass_state(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_render_pass_state)) == 0;
}

static uint32_t
hash_framebuffer_state(const void *key)
{
   struct zink_framebuffer_state *s = (struct zink_framebuffer_state*)key;
   return _mesa_hash_data(key, sizeof(struct zink_framebuffer_state) + sizeof(s->attachments) * s->num_attachments);
}

static bool
equals_framebuffer_state(const void *a, const void *b)
{
   struct zink_framebuffer_state *s = (struct zink_framebuffer_state*)a;
   return memcmp(a, b, sizeof(struct zink_framebuffer_state) + sizeof(s->attachments) * s->num_attachments) == 0;
}

static struct zink_gfx_program *
get_gfx_program(struct zink_context *ctx)
{
   if (ctx->dirty & ZINK_DIRTY_PROGRAM) {
      struct hash_entry *entry = _mesa_hash_table_search(ctx->program_cache,
                                                         ctx->gfx_stages);
      if (!entry) {
         struct zink_gfx_program *prog;
         prog = zink_create_gfx_program(zink_screen(ctx->base.screen),
                                        ctx->gfx_stages);
         entry = _mesa_hash_table_insert(ctx->program_cache, prog->stages, prog);
         if (!entry)
            return NULL;
      }
      ctx->curr_program = entry->data;
      ctx->dirty &= ~ZINK_DIRTY_PROGRAM;
   }

   assert(ctx->curr_program);
   return ctx->curr_program;
}

static void
zink_draw_vbo(struct pipe_context *pctx,
              const struct pipe_draw_info *dinfo)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_rasterizer_state *rast_state = ctx->rast_state;

   if (dinfo->mode >= PIPE_PRIM_QUADS ||
       dinfo->mode == PIPE_PRIM_LINE_LOOP) {
      if (!u_trim_pipe_prim(dinfo->mode, (unsigned *)&dinfo->count))
         return;

      util_primconvert_save_rasterizer_state(ctx->primconvert, &rast_state->base);
      util_primconvert_draw_vbo(ctx->primconvert, dinfo);
      return;
   }

   struct zink_gfx_program *gfx_program = get_gfx_program(ctx);
   if (!gfx_program)
      return;

   VkPipeline pipeline = zink_get_gfx_pipeline(screen, gfx_program,
                                               &ctx->gfx_pipeline_state,
                                               dinfo->mode);

   bool depth_bias = false;
   switch (u_reduced_prim(dinfo->mode)) {
   case PIPE_PRIM_POINTS:
      depth_bias = rast_state->offset_point;
      break;

   case PIPE_PRIM_LINES:
      depth_bias = rast_state->offset_line;
      break;

   case PIPE_PRIM_TRIANGLES:
      depth_bias = rast_state->offset_tri;
      break;

   default:
      unreachable("unexpected reduced prim");
   }

   unsigned index_offset = 0;
   struct pipe_resource *index_buffer = NULL;
   if (dinfo->index_size > 0) {
      if (dinfo->has_user_indices) {
         if (!util_upload_index_buffer(pctx, dinfo, &index_buffer, &index_offset)) {
            debug_printf("util_upload_index_buffer() failed\n");
            return;
         }
      } else
         index_buffer = dinfo->index.resource;
   }

   VkWriteDescriptorSet wds[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS + PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
   VkDescriptorBufferInfo buffer_infos[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS];
   VkDescriptorImageInfo image_infos[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
   int num_wds = 0, num_buffer_info = 0, num_image_info = 0;

   struct zink_resource *transitions[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
   int num_transitions = 0;

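   /* first pass over the shader bindings: build the descriptor writes and
    * collect sampled images that still need a layout transition; the
    * transitions are recorded below, outside of any render pass */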
   for (int i = 0; i < ARRAY_SIZE(ctx->gfx_stages); i++) {
      struct zink_shader *shader = ctx->gfx_stages[i];
      if (!shader)
         continue;

      for (int j = 0; j < shader->num_bindings; j++) {
         int index = shader->bindings[j].index;
         if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
            assert(ctx->ubos[i][index].buffer_size > 0);
            assert(ctx->ubos[i][index].buffer);
            struct zink_resource *res = zink_resource(ctx->ubos[i][index].buffer);
            buffer_infos[num_buffer_info].buffer = res->buffer;
            buffer_infos[num_buffer_info].offset = ctx->ubos[i][index].buffer_offset;
            buffer_infos[num_buffer_info].range = VK_WHOLE_SIZE;
            wds[num_wds].pBufferInfo = buffer_infos + num_buffer_info;
            ++num_buffer_info;
         } else {
            struct pipe_sampler_view *psampler_view = ctx->image_views[i][index];
            assert(psampler_view);
            struct zink_sampler_view *sampler_view = zink_sampler_view(psampler_view);

            struct zink_resource *res = zink_resource(psampler_view->texture);
            VkImageLayout layout = res->layout;
            if (layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
                layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL &&
                layout != VK_IMAGE_LAYOUT_GENERAL) {
               transitions[num_transitions++] = res;
               layout = VK_IMAGE_LAYOUT_GENERAL;
            }
            image_infos[num_image_info].imageLayout = layout;
            image_infos[num_image_info].imageView = sampler_view->image_view;
            image_infos[num_image_info].sampler = ctx->samplers[i][index];
            wds[num_wds].pImageInfo = image_infos + num_image_info;
            ++num_image_info;
         }

         wds[num_wds].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
         wds[num_wds].pNext = NULL;
         wds[num_wds].dstBinding = shader->bindings[j].binding;
         wds[num_wds].dstArrayElement = 0;
         wds[num_wds].descriptorCount = 1;
         wds[num_wds].descriptorType = shader->bindings[j].type;
         ++num_wds;
      }
   }

   struct zink_batch *batch;
   if (num_transitions > 0) {
      batch = zink_batch_no_rp(ctx);

      for (int i = 0; i < num_transitions; ++i)
         zink_resource_barrier(batch->cmdbuf, transitions[i],
                               transitions[i]->aspect,
                               VK_IMAGE_LAYOUT_GENERAL);
   }

   batch = zink_batch_rp(ctx);

   if (batch->descs_left < gfx_program->num_descriptors) {
      flush_batch(ctx);
      batch = zink_batch_rp(ctx);
      assert(batch->descs_left >= gfx_program->num_descriptors);
   }

   VkDescriptorSet desc_set = allocate_descriptor_set(screen, batch,
                                                      gfx_program);
   assert(desc_set != VK_NULL_HANDLE);

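   /* second pass: pin every resource referenced by the descriptors to the
    * batch, so it stays alive until the batch's fence signals */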
   for (int i = 0; i < ARRAY_SIZE(ctx->gfx_stages); i++) {
      struct zink_shader *shader = ctx->gfx_stages[i];
      if (!shader)
         continue;

      for (int j = 0; j < shader->num_bindings; j++) {
         int index = shader->bindings[j].index;
         if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
            struct zink_resource *res = zink_resource(ctx->ubos[i][index].buffer);
            zink_batch_reference_resoure(batch, res);
         } else {
            struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->image_views[i][index]);
            zink_batch_reference_sampler_view(batch, sampler_view);
         }
      }
   }

   vkCmdSetViewport(batch->cmdbuf, 0, ctx->num_viewports, ctx->viewports);

   if (ctx->num_scissors)
      vkCmdSetScissor(batch->cmdbuf, 0, ctx->num_scissors, ctx->scissors);
   else if (ctx->fb_state.width && ctx->fb_state.height) {
      VkRect2D fb_scissor = {};
      fb_scissor.extent.width = ctx->fb_state.width;
      fb_scissor.extent.height = ctx->fb_state.height;
      vkCmdSetScissor(batch->cmdbuf, 0, 1, &fb_scissor);
   }

   vkCmdSetStencilReference(batch->cmdbuf, VK_STENCIL_FACE_FRONT_BIT, ctx->stencil_ref[0]);
   vkCmdSetStencilReference(batch->cmdbuf, VK_STENCIL_FACE_BACK_BIT, ctx->stencil_ref[1]);

   if (depth_bias)
      vkCmdSetDepthBias(batch->cmdbuf, rast_state->offset_units, rast_state->offset_clamp, rast_state->offset_scale);
   else
      vkCmdSetDepthBias(batch->cmdbuf, 0.0f, 0.0f, 0.0f);

   if (ctx->gfx_pipeline_state.blend_state->need_blend_constants)
      vkCmdSetBlendConstants(batch->cmdbuf, ctx->blend_constants);

   for (int i = 0; i < num_wds; ++i)
      wds[i].dstSet = desc_set;

   vkUpdateDescriptorSets(screen->dev, num_wds, wds, 0, NULL);

   vkCmdBindPipeline(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
   vkCmdBindDescriptorSets(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS,
                           gfx_program->layout, 0, 1, &desc_set, 0, NULL);
   zink_bind_vertex_buffers(batch, ctx);

   if (dinfo->index_size > 0) {
      assert(dinfo->index_size != 1);
      VkIndexType index_type = dinfo->index_size == 2 ? VK_INDEX_TYPE_UINT16 : VK_INDEX_TYPE_UINT32;
      struct zink_resource *res = zink_resource(index_buffer);
      vkCmdBindIndexBuffer(batch->cmdbuf, res->buffer, index_offset, index_type);
      zink_batch_reference_resoure(batch, res);
      vkCmdDrawIndexed(batch->cmdbuf,
                       dinfo->count, dinfo->instance_count,
                       dinfo->start, dinfo->index_bias, dinfo->start_instance);
   } else
      vkCmdDraw(batch->cmdbuf, dinfo->count, dinfo->instance_count, dinfo->start, dinfo->start_instance);

   if (dinfo->index_size > 0 && dinfo->has_user_indices)
      pipe_resource_reference(&index_buffer, NULL);
}

static void
zink_flush(struct pipe_context *pctx,
           struct pipe_fence_handle **pfence,
           enum pipe_flush_flags flags)
{
   struct zink_context *ctx = zink_context(pctx);

   struct zink_batch *batch = zink_curr_batch(ctx);
   flush_batch(ctx);

   if (pfence)
      zink_fence_reference(zink_screen(pctx->screen),
                           (struct zink_fence **)pfence,
                           batch->fence);

   if (flags & PIPE_FLUSH_END_OF_FRAME)
      pctx->screen->fence_finish(pctx->screen, pctx,
                                 (struct pipe_fence_handle *)batch->fence,
                                 PIPE_TIMEOUT_INFINITE);
}

static void
zink_blit(struct pipe_context *pctx,
          const struct pipe_blit_info *info)
{
   struct zink_context *ctx = zink_context(pctx);
   bool is_resolve = false;
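   /* blits the copy paths below cannot express (partial channel masks,
    * scissors, blending) go through the generic u_blitter fallback, which
    * redraws using the context's own state */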
   if (info->mask != PIPE_MASK_RGBA ||
       info->scissor_enable ||
       info->alpha_blend) {
      if (!util_blitter_is_blit_supported(ctx->blitter, info)) {
         debug_printf("blit unsupported %s -> %s\n",
                      util_format_short_name(info->src.resource->format),
                      util_format_short_name(info->dst.resource->format));
         return;
      }

      util_blitter_save_fragment_constant_buffer_slot(ctx->blitter, ctx->ubos[PIPE_SHADER_FRAGMENT]);
      util_blitter_save_vertex_buffer_slot(ctx->blitter, ctx->buffers);
      util_blitter_save_vertex_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_VERTEX]);
      util_blitter_save_fragment_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_FRAGMENT]);
      util_blitter_save_rasterizer(ctx->blitter, ctx->gfx_pipeline_state.rast_state);

      util_blitter_blit(ctx->blitter, info);
      return;
   }

   struct zink_resource *src = zink_resource(info->src.resource);
   struct zink_resource *dst = zink_resource(info->dst.resource);

   if (src->base.nr_samples > 1 && dst->base.nr_samples <= 1)
      is_resolve = true;

   struct zink_batch *batch = zink_batch_no_rp(ctx);

   zink_batch_reference_resoure(batch, src);
   zink_batch_reference_resoure(batch, dst);

   if (is_resolve) {
      VkImageResolve region = {};
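      /* note: vkCmdResolveImage requires both images to be in GENERAL or
       * the matching TRANSFER_*_OPTIMAL layout; no transition is attempted
       * here yet */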

      region.srcSubresource.aspectMask = src->aspect;
      region.srcSubresource.mipLevel = info->src.level;
      region.srcSubresource.baseArrayLayer = 0; // no clue
      region.srcSubresource.layerCount = 1; // no clue
      region.srcOffset.x = info->src.box.x;
      region.srcOffset.y = info->src.box.y;
      region.srcOffset.z = info->src.box.z;

      region.dstSubresource.aspectMask = dst->aspect;
      region.dstSubresource.mipLevel = info->dst.level;
      region.dstSubresource.baseArrayLayer = 0; // no clue
      region.dstSubresource.layerCount = 1; // no clue
      region.dstOffset.x = info->dst.box.x;
      region.dstOffset.y = info->dst.box.y;
      region.dstOffset.z = info->dst.box.z;

      region.extent.width = info->dst.box.width;
      region.extent.height = info->dst.box.height;
      region.extent.depth = info->dst.box.depth;
      vkCmdResolveImage(batch->cmdbuf, src->image, src->layout,
                        dst->image, dst->layout,
                        1, &region);

   } else {
      /* vkCmdBlitImage also needs the source in a transfer-readable layout,
       * so transition both images if necessary */
      if (src->layout != VK_IMAGE_LAYOUT_GENERAL &&
          src->layout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, src, src->aspect,
                               VK_IMAGE_LAYOUT_GENERAL);

      if (dst->layout != VK_IMAGE_LAYOUT_GENERAL &&
          dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, dst, dst->aspect,
                               VK_IMAGE_LAYOUT_GENERAL);

      VkImageBlit region = {};
      region.srcSubresource.aspectMask = src->aspect;
      region.srcSubresource.mipLevel = info->src.level;
      region.srcOffsets[0].x = info->src.box.x;
      region.srcOffsets[0].y = info->src.box.y;
      region.srcOffsets[1].x = info->src.box.x + info->src.box.width;
      region.srcOffsets[1].y = info->src.box.y + info->src.box.height;

      if (src->base.array_size > 1) {
         region.srcOffsets[0].z = 0;
         region.srcOffsets[1].z = 1;
         region.srcSubresource.baseArrayLayer = info->src.box.z;
         region.srcSubresource.layerCount = info->src.box.depth;
      } else {
         region.srcOffsets[0].z = info->src.box.z;
         region.srcOffsets[1].z = info->src.box.z + info->src.box.depth;
         region.srcSubresource.baseArrayLayer = 0;
         region.srcSubresource.layerCount = 1;
      }

      region.dstSubresource.aspectMask = dst->aspect;
      region.dstSubresource.mipLevel = info->dst.level;
      region.dstOffsets[0].x = info->dst.box.x;
      region.dstOffsets[0].y = info->dst.box.y;
      region.dstOffsets[1].x = info->dst.box.x + info->dst.box.width;
      region.dstOffsets[1].y = info->dst.box.y + info->dst.box.height;

      if (dst->base.array_size > 1) {
         region.dstOffsets[0].z = 0;
         region.dstOffsets[1].z = 1;
         region.dstSubresource.baseArrayLayer = info->dst.box.z;
         region.dstSubresource.layerCount = info->dst.box.depth;
      } else {
         region.dstOffsets[0].z = info->dst.box.z;
         region.dstOffsets[1].z = info->dst.box.z + info->dst.box.depth;
         region.dstSubresource.baseArrayLayer = 0;
         region.dstSubresource.layerCount = 1;
      }

      vkCmdBlitImage(batch->cmdbuf, src->image, src->layout,
                     dst->image, dst->layout,
                     1, &region,
                     filter(info->filter));
   }

   /* HACK: I have no idea why this is needed, but without it ioquake3
    * randomly keeps fading to black.
    */
   flush_batch(ctx);
}

static void
zink_flush_resource(struct pipe_context *pipe,
                    struct pipe_resource *resource)
{
}

static void
zink_resource_copy_region(struct pipe_context *pctx,
                          struct pipe_resource *pdst,
                          unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz,
                          struct pipe_resource *psrc,
                          unsigned src_level, const struct pipe_box *src_box)
{
   struct zink_resource *dst = zink_resource(pdst);
   struct zink_resource *src = zink_resource(psrc);
   struct zink_context *ctx = zink_context(pctx);
   if (dst->base.target != PIPE_BUFFER && src->base.target != PIPE_BUFFER) {
      VkImageCopy region = {};

      region.srcSubresource.aspectMask = src->aspect;
      region.srcSubresource.mipLevel = src_level;
      if (src->base.array_size > 1) {
         region.srcSubresource.baseArrayLayer = src_box->z;
         region.srcSubresource.layerCount = src_box->depth;
         region.extent.depth = 1;
      } else {
         region.srcOffset.z = src_box->z;
         region.srcSubresource.layerCount = 1;
         region.extent.depth = src_box->depth;
      }

      region.srcOffset.x = src_box->x;
      region.srcOffset.y = src_box->y;

      region.dstSubresource.aspectMask = dst->aspect;
      region.dstSubresource.mipLevel = dst_level;
      if (dst->base.array_size > 1) {
         region.dstSubresource.baseArrayLayer = dstz;
         region.dstSubresource.layerCount = src_box->depth;
      } else {
         region.dstOffset.z = dstz;
         region.dstSubresource.layerCount = 1;
      }

      region.dstOffset.x = dstx;
      region.dstOffset.y = dsty;
      region.extent.width = src_box->width;
      region.extent.height = src_box->height;

      struct zink_batch *batch = zink_batch_no_rp(ctx);
      zink_batch_reference_resoure(batch, src);
      zink_batch_reference_resoure(batch, dst);

      vkCmdCopyImage(batch->cmdbuf, src->image, src->layout,
                     dst->image, dst->layout,
                     1, &region);
   } else
      debug_printf("zink: TODO resource copy\n");
}

struct pipe_context *
zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
   struct zink_screen *screen = zink_screen(pscreen);
   struct zink_context *ctx = CALLOC_STRUCT(zink_context);
   if (!ctx)
      goto fail;

   ctx->base.screen = pscreen;
   ctx->base.priv = priv;

   ctx->base.destroy = zink_context_destroy;

   zink_context_state_init(&ctx->base);

   ctx->base.create_sampler_state = zink_create_sampler_state;
   ctx->base.bind_sampler_states = zink_bind_sampler_states;
   ctx->base.delete_sampler_state = zink_delete_sampler_state;

   ctx->base.create_sampler_view = zink_create_sampler_view;
   ctx->base.set_sampler_views = zink_set_sampler_views;
   ctx->base.sampler_view_destroy = zink_sampler_view_destroy;

   ctx->base.create_vs_state = zink_create_vs_state;
   ctx->base.bind_vs_state = zink_bind_vs_state;
   ctx->base.delete_vs_state = zink_delete_vs_state;

   ctx->base.create_fs_state = zink_create_fs_state;
   ctx->base.bind_fs_state = zink_bind_fs_state;
   ctx->base.delete_fs_state = zink_delete_fs_state;

   ctx->base.set_polygon_stipple = zink_set_polygon_stipple;
   ctx->base.set_vertex_buffers = zink_set_vertex_buffers;
   ctx->base.set_viewport_states = zink_set_viewport_states;
   ctx->base.set_scissor_states = zink_set_scissor_states;
   ctx->base.set_constant_buffer = zink_set_constant_buffer;
   ctx->base.set_framebuffer_state = zink_set_framebuffer_state;
   ctx->base.set_stencil_ref = zink_set_stencil_ref;
   ctx->base.set_clip_state = zink_set_clip_state;
   ctx->base.set_active_query_state = zink_set_active_query_state;
   ctx->base.set_blend_color = zink_set_blend_color;

   ctx->base.set_sample_mask = zink_set_sample_mask;

   ctx->base.clear = zink_clear;
   ctx->base.draw_vbo = zink_draw_vbo;
   ctx->base.flush = zink_flush;

   ctx->base.resource_copy_region = zink_resource_copy_region;
   ctx->base.blit = zink_blit;

   ctx->base.flush_resource = zink_flush_resource;
   zink_context_surface_init(&ctx->base);
   zink_context_resource_init(&ctx->base);
   zink_context_query_init(&ctx->base);

   slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);

   ctx->base.stream_uploader = u_upload_create_default(&ctx->base);
   ctx->base.const_uploader = ctx->base.stream_uploader;

   int prim_hwsupport = 1 << PIPE_PRIM_POINTS |
                        1 << PIPE_PRIM_LINES |
                        1 << PIPE_PRIM_LINE_STRIP |
                        1 << PIPE_PRIM_TRIANGLES |
                        1 << PIPE_PRIM_TRIANGLE_STRIP |
                        1 << PIPE_PRIM_TRIANGLE_FAN;

   ctx->primconvert = util_primconvert_create(&ctx->base, prim_hwsupport);
   if (!ctx->primconvert)
      goto fail;

   ctx->blitter = util_blitter_create(&ctx->base);
   if (!ctx->blitter)
      goto fail;

   VkCommandPoolCreateInfo cpci = {};
   cpci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
   cpci.queueFamilyIndex = screen->gfx_queue;
   cpci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
   if (vkCreateCommandPool(screen->dev, &cpci, NULL, &ctx->cmdpool) != VK_SUCCESS)
      goto fail;

   VkCommandBufferAllocateInfo cbai = {};
   cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
   cbai.commandPool = ctx->cmdpool;
   cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
   cbai.commandBufferCount = 1;

   VkDescriptorPoolSize sizes[] = {
      {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, ZINK_BATCH_DESC_SIZE},
      /* the descriptor sets allocated from these pools also contain
       * combined image/sampler descriptors, so the pool has to budget
       * for them as well */
      {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, ZINK_BATCH_DESC_SIZE}
   };
   VkDescriptorPoolCreateInfo dpci = {};
   dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
   dpci.pPoolSizes = sizes;
   dpci.poolSizeCount = ARRAY_SIZE(sizes);
   dpci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
   dpci.maxSets = ZINK_BATCH_DESC_SIZE;

   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i) {
      if (vkAllocateCommandBuffers(screen->dev, &cbai, &ctx->batches[i].cmdbuf) != VK_SUCCESS)
         goto fail;

      ctx->batches[i].resources = _mesa_set_create(NULL, _mesa_hash_pointer,
                                                   _mesa_key_pointer_equal);
      ctx->batches[i].sampler_views = _mesa_set_create(NULL,
                                                       _mesa_hash_pointer,
                                                       _mesa_key_pointer_equal);

      if (!ctx->batches[i].resources || !ctx->batches[i].sampler_views)
         goto fail;

      util_dynarray_init(&ctx->batches[i].zombie_samplers, NULL);

      if (vkCreateDescriptorPool(screen->dev, &dpci, 0,
                                 &ctx->batches[i].descpool) != VK_SUCCESS)
         goto fail;
   }

   vkGetDeviceQueue(screen->dev, screen->gfx_queue, 0, &ctx->queue);

   ctx->program_cache = _mesa_hash_table_create(NULL,
                                                hash_gfx_program,
                                                equals_gfx_program);
   ctx->render_pass_cache = _mesa_hash_table_create(NULL,
                                                    hash_render_pass_state,
                                                    equals_render_pass_state);
   ctx->framebuffer_cache = _mesa_hash_table_create(NULL,
                                                    hash_framebuffer_state,
                                                    equals_framebuffer_state);

   if (!ctx->program_cache || !ctx->render_pass_cache ||
       !ctx->framebuffer_cache)
      goto fail;

   ctx->dirty = ZINK_DIRTY_PROGRAM;

   /* start the first batch */
   zink_start_batch(ctx, zink_curr_batch(ctx));

   return &ctx->base;

fail:
   if (ctx) {
      vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);
      FREE(ctx);
   }
   return NULL;
}