/*
 * Copyright 2018 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "zink_context.h"

#include "zink_batch.h"
#include "zink_compiler.h"
#include "zink_fence.h"
#include "zink_framebuffer.h"
#include "zink_pipeline.h"
#include "zink_program.h"
#include "zink_render_pass.h"
#include "zink_resource.h"
#include "zink_screen.h"
#include "zink_state.h"
#include "zink_surface.h"

#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
#include "util/u_debug.h"
#include "util/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"

#include "nir.h"

#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_upload_mgr.h"

static void
zink_context_destroy(struct pipe_context *pctx)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   if (vkQueueWaitIdle(ctx->queue) != VK_SUCCESS)
      debug_printf("vkQueueWaitIdle failed\n");

   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i)
      vkFreeCommandBuffers(screen->dev, ctx->cmdpool, 1, &ctx->batches[i].cmdbuf);
   vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);

   util_primconvert_destroy(ctx->primconvert);
   u_upload_destroy(pctx->stream_uploader);
   slab_destroy_child(&ctx->transfer_pool);
   util_blitter_destroy(ctx->blitter);
   FREE(ctx);
}

static VkFilter
filter(enum pipe_tex_filter filter)
{
   switch (filter) {
   case PIPE_TEX_FILTER_NEAREST: return VK_FILTER_NEAREST;
   case PIPE_TEX_FILTER_LINEAR: return VK_FILTER_LINEAR;
   }
   unreachable("unexpected filter");
}

static VkSamplerMipmapMode
sampler_mipmap_mode(enum pipe_tex_mipfilter filter)
{
   switch (filter) {
   case PIPE_TEX_MIPFILTER_NEAREST: return VK_SAMPLER_MIPMAP_MODE_NEAREST;
   case PIPE_TEX_MIPFILTER_LINEAR: return VK_SAMPLER_MIPMAP_MODE_LINEAR;
   case PIPE_TEX_MIPFILTER_NONE:
      unreachable("PIPE_TEX_MIPFILTER_NONE should be dealt with earlier");
   }
   unreachable("unexpected filter");
}

static VkSamplerAddressMode
sampler_address_mode(enum pipe_tex_wrap filter)
{
   switch (filter) {
   case PIPE_TEX_WRAP_REPEAT: return VK_SAMPLER_ADDRESS_MODE_REPEAT;
   case PIPE_TEX_WRAP_CLAMP: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
   case PIPE_TEX_WRAP_MIRROR_REPEAT: return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
   case PIPE_TEX_WRAP_MIRROR_CLAMP: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   }
   unreachable("unexpected wrap");
}

static VkCompareOp
compare_op(enum pipe_compare_func op)
{
   switch (op) {
   case PIPE_FUNC_NEVER: return VK_COMPARE_OP_NEVER;
   case PIPE_FUNC_LESS: return VK_COMPARE_OP_LESS;
   case PIPE_FUNC_EQUAL: return VK_COMPARE_OP_EQUAL;
   case PIPE_FUNC_LEQUAL: return VK_COMPARE_OP_LESS_OR_EQUAL;
   case PIPE_FUNC_GREATER: return VK_COMPARE_OP_GREATER;
   case PIPE_FUNC_NOTEQUAL: return VK_COMPARE_OP_NOT_EQUAL;
   case PIPE_FUNC_GEQUAL: return VK_COMPARE_OP_GREATER_OR_EQUAL;
   case PIPE_FUNC_ALWAYS: return VK_COMPARE_OP_ALWAYS;
   }
   unreachable("unexpected compare");
}

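/* The sampler CSO is the VkSampler handle itself: on a 64-bit build a
 * non-dispatchable Vulkan handle is pointer-sized, so it can be passed
 * through the void pointer gallium expects (see the cast back in
 * zink_bind_sampler_states()). This shortcut assumes a 64-bit target.
 */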
static void *
zink_create_sampler_state(struct pipe_context *pctx,
                          const struct pipe_sampler_state *state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);

   VkSamplerCreateInfo sci = {};
   sci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
   sci.magFilter = filter(state->mag_img_filter);
   sci.minFilter = filter(state->min_img_filter);

   if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) {
      sci.mipmapMode = sampler_mipmap_mode(state->min_mip_filter);
      sci.minLod = state->min_lod;
      sci.maxLod = state->max_lod;
   } else {
      sci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
      sci.minLod = 0;
      sci.maxLod = 0;
   }

   sci.addressModeU = sampler_address_mode(state->wrap_s);
   sci.addressModeV = sampler_address_mode(state->wrap_t);
   sci.addressModeW = sampler_address_mode(state->wrap_r);
   sci.mipLodBias = state->lod_bias;

   if (state->compare_mode == PIPE_TEX_COMPARE_NONE)
      sci.compareOp = VK_COMPARE_OP_NEVER;
   else
      sci.compareOp = compare_op(state->compare_func);

   sci.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; // TODO
   sci.unnormalizedCoordinates = !state->normalized_coords;

   if (state->max_anisotropy > 1) {
      sci.maxAnisotropy = state->max_anisotropy;
      sci.anisotropyEnable = VK_TRUE;
   }

   VkSampler sampler;
   VkResult err = vkCreateSampler(screen->dev, &sci, NULL, &sampler);
   if (err != VK_SUCCESS)
      return NULL;

   return sampler;
}

static void
zink_bind_sampler_states(struct pipe_context *pctx,
                         enum pipe_shader_type shader,
                         unsigned start_slot,
                         unsigned num_samplers,
                         void **samplers)
{
   struct zink_context *ctx = zink_context(pctx);
   for (unsigned i = 0; i < num_samplers; ++i)
      ctx->samplers[shader][start_slot + i] = (VkSampler)samplers[i];
}

static void
zink_delete_sampler_state(struct pipe_context *pctx,
                          void *sampler_state)
{
   struct zink_batch *batch = zink_curr_batch(zink_context(pctx));
   util_dynarray_append(&batch->zombie_samplers,
                        VkSampler, sampler_state);
}

static VkImageViewType
image_view_type(enum pipe_texture_target target)
{
   switch (target) {
   case PIPE_TEXTURE_1D: return VK_IMAGE_VIEW_TYPE_1D;
   case PIPE_TEXTURE_1D_ARRAY: return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
   case PIPE_TEXTURE_2D: return VK_IMAGE_VIEW_TYPE_2D;
   case PIPE_TEXTURE_2D_ARRAY: return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
   case PIPE_TEXTURE_CUBE: return VK_IMAGE_VIEW_TYPE_CUBE;
   case PIPE_TEXTURE_CUBE_ARRAY: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
   case PIPE_TEXTURE_3D: return VK_IMAGE_VIEW_TYPE_3D;
   case PIPE_TEXTURE_RECT: return VK_IMAGE_VIEW_TYPE_2D; /* not sure */
   default:
      unreachable("unexpected target");
   }
}

static VkComponentSwizzle
component_mapping(enum pipe_swizzle swizzle)
{
   switch (swizzle) {
   case PIPE_SWIZZLE_X: return VK_COMPONENT_SWIZZLE_R;
   case PIPE_SWIZZLE_Y: return VK_COMPONENT_SWIZZLE_G;
   case PIPE_SWIZZLE_Z: return VK_COMPONENT_SWIZZLE_B;
   case PIPE_SWIZZLE_W: return VK_COMPONENT_SWIZZLE_A;
   case PIPE_SWIZZLE_0: return VK_COMPONENT_SWIZZLE_ZERO;
   case PIPE_SWIZZLE_1: return VK_COMPONENT_SWIZZLE_ONE;
   case PIPE_SWIZZLE_NONE: return VK_COMPONENT_SWIZZLE_IDENTITY; // ???
   default:
      unreachable("unexpected swizzle");
   }
}

static struct pipe_sampler_view *
zink_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *pres,
                         const struct pipe_sampler_view *state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_resource *res = zink_resource(pres);
   struct zink_sampler_view *sampler_view = CALLOC_STRUCT(zink_sampler_view);

   sampler_view->base = *state;
   sampler_view->base.texture = NULL;
   pipe_resource_reference(&sampler_view->base.texture, pres);
   sampler_view->base.reference.count = 1;
   sampler_view->base.context = pctx;

   VkImageViewCreateInfo ivci = {};
   ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
   ivci.image = res->image;
   ivci.viewType = image_view_type(state->target);
   ivci.format = zink_get_format(state->format);
   ivci.components.r = component_mapping(state->swizzle_r);
   ivci.components.g = component_mapping(state->swizzle_g);
   ivci.components.b = component_mapping(state->swizzle_b);
   ivci.components.a = component_mapping(state->swizzle_a);
   ivci.subresourceRange.aspectMask = zink_aspect_from_format(state->format);
   ivci.subresourceRange.baseMipLevel = state->u.tex.first_level;
   ivci.subresourceRange.baseArrayLayer = state->u.tex.first_layer;
   ivci.subresourceRange.levelCount = state->u.tex.last_level - state->u.tex.first_level + 1;
   ivci.subresourceRange.layerCount = state->u.tex.last_layer - state->u.tex.first_layer + 1;

   VkResult err = vkCreateImageView(screen->dev, &ivci, NULL, &sampler_view->image_view);
   if (err != VK_SUCCESS) {
      FREE(sampler_view);
      return NULL;
   }

   return &sampler_view->base;
}

static void
zink_sampler_view_destroy(struct pipe_context *pctx,
                          struct pipe_sampler_view *pview)
{
   struct zink_sampler_view *view = zink_sampler_view(pview);
   vkDestroyImageView(zink_screen(pctx->screen)->dev, view->image_view, NULL);
   FREE(view);
}

static void *
zink_create_vs_state(struct pipe_context *pctx,
                     const struct pipe_shader_state *shader)
{
   struct nir_shader *nir;
   if (shader->type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
   else
      nir = (struct nir_shader *)shader->ir.nir;

   return zink_compile_nir(zink_screen(pctx->screen), nir);
}

static void
bind_stage(struct zink_context *ctx, enum pipe_shader_type stage,
           struct zink_shader *shader)
{
   assert(stage < PIPE_SHADER_COMPUTE);
   ctx->gfx_stages[stage] = shader;
   ctx->dirty |= ZINK_DIRTY_PROGRAM;
}

static void
zink_bind_vs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_VERTEX, cso);
}

static void
zink_delete_vs_state(struct pipe_context *pctx,
                     void *cso)
{
   zink_shader_free(zink_screen(pctx->screen), cso);
}

static void *
zink_create_fs_state(struct pipe_context *pctx,
                     const struct pipe_shader_state *shader)
{
   struct nir_shader *nir;
   if (shader->type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
   else
      nir = (struct nir_shader *)shader->ir.nir;

   return zink_compile_nir(zink_screen(pctx->screen), nir);
}

static void
zink_bind_fs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_FRAGMENT, cso);
}

static void
zink_delete_fs_state(struct pipe_context *pctx,
                     void *cso)
{
   zink_shader_free(zink_screen(pctx->screen), cso);
}

static void
zink_set_polygon_stipple(struct pipe_context *pctx,
                         const struct pipe_poly_stipple *ps)
{
}

static void
zink_set_vertex_buffers(struct pipe_context *pctx,
                        unsigned start_slot,
                        unsigned num_buffers,
                        const struct pipe_vertex_buffer *buffers)
{
   struct zink_context *ctx = zink_context(pctx);

   if (buffers) {
      for (int i = 0; i < num_buffers; ++i) {
         const struct pipe_vertex_buffer *vb = buffers + i;
         ctx->gfx_pipeline_state.bindings[start_slot + i].stride = vb->stride;
      }
   }

   util_set_vertex_buffers_mask(ctx->buffers, &ctx->buffers_enabled_mask,
                                buffers, start_slot, num_buffers);
}

static void
zink_set_viewport_states(struct pipe_context *pctx,
                         unsigned start_slot,
                         unsigned num_viewports,
                         const struct pipe_viewport_state *state)
{
   struct zink_context *ctx = zink_context(pctx);

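   /* Gallium hands us the viewport as a scale/translate pair, while Vulkan
    * wants origin/extent plus a depth range, so invert the transform:
    *   x = translate - scale, width = 2 * scale (likewise for y),
    *   minDepth = translate_z - scale_z, maxDepth = translate_z + scale_z.
    */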
   for (unsigned i = 0; i < num_viewports; ++i) {
      VkViewport viewport = {
         state[i].translate[0] - state[i].scale[0],
         state[i].translate[1] - state[i].scale[1],
         state[i].scale[0] * 2,
         state[i].scale[1] * 2,
         state[i].translate[2] - state[i].scale[2],
         state[i].translate[2] + state[i].scale[2]
      };
      ctx->viewports[start_slot + i] = viewport;
   }
   ctx->num_viewports = start_slot + num_viewports;
}

static void
zink_set_scissor_states(struct pipe_context *pctx,
                        unsigned start_slot, unsigned num_scissors,
                        const struct pipe_scissor_state *states)
{
   struct zink_context *ctx = zink_context(pctx);

   for (unsigned i = 0; i < num_scissors; i++) {
      VkRect2D scissor;

      scissor.offset.x = states[i].minx;
      scissor.offset.y = states[i].miny;
      scissor.extent.width = states[i].maxx - states[i].minx;
      scissor.extent.height = states[i].maxy - states[i].miny;
      ctx->scissors[start_slot + i] = scissor;
   }
   ctx->num_scissors = start_slot + num_scissors;
}

static void
zink_set_constant_buffer(struct pipe_context *pctx,
                         enum pipe_shader_type shader, uint index,
                         const struct pipe_constant_buffer *cb)
{
   struct zink_context *ctx = zink_context(pctx);

   if (cb) {
      struct pipe_resource *buffer = cb->buffer;
      unsigned offset = cb->buffer_offset;
      if (cb->user_buffer)
         u_upload_data(ctx->base.const_uploader, 0, cb->buffer_size, 64,
                       cb->user_buffer, &offset, &buffer);

      pipe_resource_reference(&ctx->ubos[shader][index].buffer, buffer);
      ctx->ubos[shader][index].buffer_offset = offset;
      ctx->ubos[shader][index].buffer_size = cb->buffer_size;
      ctx->ubos[shader][index].user_buffer = NULL;

      if (cb->user_buffer)
         pipe_resource_reference(&buffer, NULL);
   } else {
      pipe_resource_reference(&ctx->ubos[shader][index].buffer, NULL);
      ctx->ubos[shader][index].buffer_offset = 0;
      ctx->ubos[shader][index].buffer_size = 0;
      ctx->ubos[shader][index].user_buffer = NULL;
   }
}

static void
zink_set_sampler_views(struct pipe_context *pctx,
                       enum pipe_shader_type shader_type,
                       unsigned start_slot,
                       unsigned num_views,
                       struct pipe_sampler_view **views)
{
   struct zink_context *ctx = zink_context(pctx);
   assert(views);
   for (unsigned i = 0; i < num_views; ++i) {
      pipe_sampler_view_reference(
         &ctx->image_views[shader_type][start_slot + i],
         views[i]);
   }
}

static void
zink_set_stencil_ref(struct pipe_context *pctx,
                     const struct pipe_stencil_ref *ref)
{
   struct zink_context *ctx = zink_context(pctx);
   ctx->stencil_ref[0] = ref->ref_value[0];
   ctx->stencil_ref[1] = ref->ref_value[1];
}

static void
zink_set_clip_state(struct pipe_context *pctx,
                    const struct pipe_clip_state *pcs)
{
}

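/* Render passes and framebuffers are cached on their hashed state structs so
 * compatible objects get shared across framebuffer changes (see
 * get_framebuffer() below for the VkFramebuffer side). FIXME: both caches
 * insert a pointer to a stack-local state as the hash key, which dangles
 * once this function returns; the key should be a copy, or point into the
 * cached object itself.
 */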
static struct zink_render_pass *
get_render_pass(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   const struct pipe_framebuffer_state *fb = &ctx->fb_state;
   struct zink_render_pass_state state;

   for (int i = 0; i < fb->nr_cbufs; i++) {
      struct zink_resource *cbuf = zink_resource(fb->cbufs[i]->texture);
      state.rts[i].format = cbuf->format;
   }
   state.num_cbufs = fb->nr_cbufs;

   if (fb->zsbuf) {
      struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
      state.rts[fb->nr_cbufs].format = zsbuf->format;
   }
   state.have_zsbuf = fb->zsbuf != NULL;

   struct hash_entry *entry = _mesa_hash_table_search(ctx->render_pass_cache,
                                                      &state);
   if (!entry) {
      struct zink_render_pass *rp;
      rp = zink_create_render_pass(screen, &state);
      entry = _mesa_hash_table_insert(ctx->render_pass_cache, &state, rp);
      if (!entry)
         return NULL;
   }

   return entry->data;
}

static struct zink_framebuffer *
get_framebuffer(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);

   struct zink_framebuffer_state state = {};
   state.rp = get_render_pass(ctx);
   for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
      struct pipe_surface *psurf = ctx->fb_state.cbufs[i];
      state.attachments[i] = zink_surface(psurf);
   }

   state.num_attachments = ctx->fb_state.nr_cbufs;
   if (ctx->fb_state.zsbuf) {
      struct pipe_surface *psurf = ctx->fb_state.zsbuf;
      state.attachments[state.num_attachments++] = zink_surface(psurf);
   }

   state.width = ctx->fb_state.width;
   state.height = ctx->fb_state.height;
   state.layers = MAX2(ctx->fb_state.layers, 1);

   struct hash_entry *entry = _mesa_hash_table_search(ctx->framebuffer_cache,
                                                      &state);
   if (!entry) {
      struct zink_framebuffer *fb = zink_create_framebuffer(screen, &state);
      entry = _mesa_hash_table_insert(ctx->framebuffer_cache, &state, fb);
      if (!entry)
         return NULL;
   }

   return entry->data;
}

void
zink_begin_render_pass(struct zink_context *ctx, struct zink_batch *batch)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   assert(batch == zink_curr_batch(ctx));
   assert(ctx->gfx_pipeline_state.render_pass);

   VkRenderPassBeginInfo rpbi = {};
   rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
   rpbi.renderPass = ctx->gfx_pipeline_state.render_pass->render_pass;
   rpbi.renderArea.offset.x = 0;
   rpbi.renderArea.offset.y = 0;
   rpbi.renderArea.extent.width = ctx->fb_state.width;
   rpbi.renderArea.extent.height = ctx->fb_state.height;
   rpbi.clearValueCount = 0;
   rpbi.pClearValues = NULL;
   rpbi.framebuffer = ctx->framebuffer->fb;

   assert(ctx->gfx_pipeline_state.render_pass && ctx->framebuffer);
   assert(!batch->rp || batch->rp == ctx->gfx_pipeline_state.render_pass);
   assert(!batch->fb || batch->fb == ctx->framebuffer);

   zink_render_pass_reference(screen, &batch->rp, ctx->gfx_pipeline_state.render_pass);
   zink_framebuffer_reference(screen, &batch->fb, ctx->framebuffer);

   vkCmdBeginRenderPass(batch->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
}

static void
flush_batch(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (batch->rp)
      vkCmdEndRenderPass(batch->cmdbuf);

   zink_end_batch(ctx, batch);

   ctx->curr_batch++;
   if (ctx->curr_batch == ARRAY_SIZE(ctx->batches))
      ctx->curr_batch = 0;

   zink_start_batch(ctx, zink_curr_batch(ctx));
}

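/* Commands that must execute inside a render pass go through zink_batch_rp(),
 * which begins one on the current batch if needed; transfer-style commands
 * use zink_batch_no_rp(), which flushes the whole batch instead if a pass is
 * already open, since a core-Vulkan render pass cannot simply be suspended.
 */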
struct zink_batch *
zink_batch_rp(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (!batch->rp) {
      zink_begin_render_pass(ctx, batch);
      assert(batch->rp);
   }
   return batch;
}

struct zink_batch *
zink_batch_no_rp(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (batch->rp) {
      /* flush batch and get a new one */
      flush_batch(ctx);
      batch = zink_curr_batch(ctx);
      assert(!batch->rp);
   }
   return batch;
}

static void
zink_set_framebuffer_state(struct pipe_context *pctx,
                           const struct pipe_framebuffer_state *state)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   util_copy_framebuffer_state(&ctx->fb_state, state);

   struct zink_framebuffer *fb = get_framebuffer(ctx);
   zink_framebuffer_reference(screen, &ctx->framebuffer, fb);
   zink_render_pass_reference(screen, &ctx->gfx_pipeline_state.render_pass, fb->rp);

   ctx->gfx_pipeline_state.num_attachments = state->nr_cbufs;

   struct zink_batch *batch = zink_batch_no_rp(ctx);

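   /* Move attachments in an unusable layout over to GENERAL, which is always
    * valid for rendering; the OPTIMAL layouts are accepted when already in
    * place but never produced here, trading some performance for simplicity.
    */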
   for (int i = 0; i < state->nr_cbufs; i++) {
      struct zink_resource *res = zink_resource(state->cbufs[i]->texture);
      if (res->layout != VK_IMAGE_LAYOUT_GENERAL &&
          res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_GENERAL);
   }

   if (state->zsbuf) {
      struct zink_resource *res = zink_resource(state->zsbuf->texture);
      if (res->layout != VK_IMAGE_LAYOUT_GENERAL &&
          res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_GENERAL);
   }
}

static void
zink_set_active_query_state(struct pipe_context *pctx, bool enable)
{
}

static void
zink_set_blend_color(struct pipe_context *pctx,
                     const struct pipe_blend_color *color)
{
   struct zink_context *ctx = zink_context(pctx);
   memcpy(ctx->blend_constants, color->color, sizeof(float) * 4);
}

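/* Derive the access mask a layout transition needs from the layout alone.
 * UNDEFINED has no contents worth making visible; returning 0 for GENERAL as
 * well is a simplification that keeps these barriers cheap at the cost of
 * precision.
 */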
static VkAccessFlags
access_flags(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_UNDEFINED:
   case VK_IMAGE_LAYOUT_GENERAL:
      return 0;

   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;

   case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
      return VK_ACCESS_SHADER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_ACCESS_TRANSFER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_ACCESS_TRANSFER_WRITE_BIT;

   case VK_IMAGE_LAYOUT_PREINITIALIZED:
      return VK_ACCESS_HOST_WRITE_BIT;

   default:
      unreachable("unexpected layout");
   }
}

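/* Transition a whole image (every mip level and array layer) to new_layout
 * with a maximally conservative barrier: TOP_OF_PIPE to BOTTOM_OF_PIPE
 * serializes against all stages, giving correctness at the expense of any
 * pipeline overlap.
 */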
void
zink_resource_barrier(VkCommandBuffer cmdbuf, struct zink_resource *res,
                      VkImageAspectFlags aspect, VkImageLayout new_layout)
{
   VkImageSubresourceRange isr = {
      aspect,
      0, VK_REMAINING_MIP_LEVELS,
      0, VK_REMAINING_ARRAY_LAYERS
   };

   VkImageMemoryBarrier imb = {
      VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
      NULL,
      access_flags(res->layout),
      access_flags(new_layout),
      res->layout,
      new_layout,
      VK_QUEUE_FAMILY_IGNORED,
      VK_QUEUE_FAMILY_IGNORED,
      res->image,
      isr
   };
   vkCmdPipelineBarrier(
      cmdbuf,
      VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
      0,
      0, NULL,
      0, NULL,
      1, &imb
   );

   res->layout = new_layout;
}

static void
zink_clear(struct pipe_context *pctx,
           unsigned buffers,
           const union pipe_color_union *pcolor,
           double depth, unsigned stencil)
{
   struct zink_context *ctx = zink_context(pctx);
   struct pipe_framebuffer_state *fb = &ctx->fb_state;

   /* FIXME: this is very inefficient; if no renderpass has been started yet,
    * we should record the clear if it's full-screen, and apply it as we
    * start the render-pass. Otherwise we can do a partial out-of-renderpass
    * clear.
    */
   struct zink_batch *batch = zink_batch_rp(ctx);

   VkClearAttachment attachments[1 + PIPE_MAX_COLOR_BUFS];
   int num_attachments = 0;

   if (buffers & PIPE_CLEAR_COLOR) {
      VkClearColorValue color;
      color.float32[0] = pcolor->f[0];
      color.float32[1] = pcolor->f[1];
      color.float32[2] = pcolor->f[2];
      color.float32[3] = pcolor->f[3];

      for (unsigned i = 0; i < fb->nr_cbufs; i++) {
         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
            continue;

         attachments[num_attachments].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
         attachments[num_attachments].colorAttachment = i;
         attachments[num_attachments].clearValue.color = color;
         ++num_attachments;
      }
   }

   if (buffers & PIPE_CLEAR_DEPTHSTENCIL && fb->zsbuf) {
      VkImageAspectFlags aspect = 0;
      if (buffers & PIPE_CLEAR_DEPTH)
         aspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
      if (buffers & PIPE_CLEAR_STENCIL)
         aspect |= VK_IMAGE_ASPECT_STENCIL_BIT;

      attachments[num_attachments].aspectMask = aspect;
      attachments[num_attachments].clearValue.depthStencil.depth = depth;
      attachments[num_attachments].clearValue.depthStencil.stencil = stencil;
      ++num_attachments;
   }

   unsigned num_layers = util_framebuffer_get_num_layers(fb);
   VkClearRect rects[PIPE_MAX_VIEWPORTS];
   uint32_t num_rects;
   if (ctx->num_scissors) {
      for (unsigned i = 0; i < ctx->num_scissors; ++i) {
         rects[i].rect = ctx->scissors[i];
         rects[i].rect.extent.width = MIN2(rects[i].rect.extent.width,
                                           fb->width);
         rects[i].rect.extent.height = MIN2(rects[i].rect.extent.height,
                                            fb->height);
         rects[i].baseArrayLayer = 0;
         rects[i].layerCount = num_layers;
      }
      num_rects = ctx->num_scissors;
   } else {
      rects[0].rect.offset.x = 0;
      rects[0].rect.offset.y = 0;
      rects[0].rect.extent.width = fb->width;
      rects[0].rect.extent.height = fb->height;
      rects[0].baseArrayLayer = 0;
      rects[0].layerCount = num_layers;
      num_rects = 1;
   }

   vkCmdClearAttachments(batch->cmdbuf,
                         num_attachments, attachments,
                         num_rects, rects);
}

VkShaderStageFlagBits
zink_shader_stage(enum pipe_shader_type type)
{
   VkShaderStageFlagBits stages[] = {
      [PIPE_SHADER_VERTEX] = VK_SHADER_STAGE_VERTEX_BIT,
      [PIPE_SHADER_FRAGMENT] = VK_SHADER_STAGE_FRAGMENT_BIT,
      [PIPE_SHADER_GEOMETRY] = VK_SHADER_STAGE_GEOMETRY_BIT,
      [PIPE_SHADER_TESS_CTRL] = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
      [PIPE_SHADER_TESS_EVAL] = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
      [PIPE_SHADER_COMPUTE] = VK_SHADER_STAGE_COMPUTE_BIT,
   };
   return stages[type];
}

static VkDescriptorSet
allocate_descriptor_set(struct zink_context *ctx, VkDescriptorSetLayout dsl)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   VkDescriptorSetAllocateInfo dsai;
   memset((void *)&dsai, 0, sizeof(dsai));
   dsai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
   dsai.pNext = NULL;
   dsai.descriptorPool = ctx->descpool;
   dsai.descriptorSetCount = 1;
   dsai.pSetLayouts = &dsl;

   VkDescriptorSet desc_set;
   if (vkAllocateDescriptorSets(screen->dev, &dsai, &desc_set) != VK_SUCCESS) {
      /* if we run out of descriptor sets we either need to create a bunch
       * more... or flush and wait. For simplicity, let's flush for now.
       */
      struct pipe_fence_handle *fence = NULL;
      ctx->base.flush(&ctx->base, &fence, 0);
      ctx->base.screen->fence_finish(ctx->base.screen, &ctx->base, fence,
                                     PIPE_TIMEOUT_INFINITE);

      if (vkResetDescriptorPool(screen->dev, ctx->descpool, 0) != VK_SUCCESS) {
         fprintf(stderr, "vkResetDescriptorPool failed\n");
         return VK_NULL_HANDLE;
      }
      if (vkAllocateDescriptorSets(screen->dev, &dsai, &desc_set) != VK_SUCCESS) {
         fprintf(stderr, "vkAllocateDescriptorSets failed\n");
         return VK_NULL_HANDLE;
      }
   }

   return desc_set;
}

static void
zink_bind_vertex_buffers(struct zink_batch *batch, struct zink_context *ctx)
{
   VkBuffer buffers[PIPE_MAX_ATTRIBS];
   VkDeviceSize buffer_offsets[PIPE_MAX_ATTRIBS];
   const struct zink_vertex_elements_state *elems = ctx->element_state;
   for (unsigned i = 0; i < elems->hw_state.num_bindings; i++) {
      struct pipe_vertex_buffer *vb = ctx->buffers + ctx->element_state->binding_map[i];
      assert(vb && vb->buffer.resource);
      struct zink_resource *res = zink_resource(vb->buffer.resource);
      buffers[i] = res->buffer;
      buffer_offsets[i] = vb->buffer_offset;
      zink_batch_reference_resoure(batch, res);
   }

   if (elems->hw_state.num_bindings > 0)
      vkCmdBindVertexBuffers(batch->cmdbuf, 0,
                             elems->hw_state.num_bindings,
                             buffers, buffer_offsets);
}

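/* Graphics programs are keyed on the whole gfx_stages array; the
 * PIPE_SHADER_TYPES - 1 sizing drops PIPE_SHADER_COMPUTE (the last enum
 * value), which can never be part of a graphics program.
 */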
static uint32_t
hash_gfx_program(const void *key)
{
   return _mesa_hash_data(key, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1));
}

static bool
equals_gfx_program(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1)) == 0;
}

static uint32_t
hash_render_pass_state(const void *key)
{
   return _mesa_hash_data(key, sizeof(struct zink_render_pass_state));
}

static bool
equals_render_pass_state(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_render_pass_state)) == 0;
}

static uint32_t
hash_framebuffer_state(const void *key)
{
   struct zink_framebuffer_state *s = (struct zink_framebuffer_state*)key;
   return _mesa_hash_data(key, sizeof(struct zink_framebuffer_state) + sizeof(s->attachments) * s->num_attachments);
}

static bool
equals_framebuffer_state(const void *a, const void *b)
{
   struct zink_framebuffer_state *s = (struct zink_framebuffer_state*)a;
   return memcmp(a, b, sizeof(struct zink_framebuffer_state) + sizeof(s->attachments) * s->num_attachments) == 0;
}

static struct zink_gfx_program *
get_gfx_program(struct zink_context *ctx)
{
   if (ctx->dirty & ZINK_DIRTY_PROGRAM) {
      struct hash_entry *entry = _mesa_hash_table_search(ctx->program_cache,
                                                         ctx->gfx_stages);
      if (!entry) {
         struct zink_gfx_program *prog;
         prog = zink_create_gfx_program(zink_screen(ctx->base.screen),
                                        ctx->gfx_stages);
         entry = _mesa_hash_table_insert(ctx->program_cache, prog->stages, prog);
         if (!entry)
            return NULL;
      }
      ctx->curr_program = entry->data;
      ctx->dirty &= ~ZINK_DIRTY_PROGRAM;
   }

   assert(ctx->curr_program);
   return ctx->curr_program;
}

static void
zink_draw_vbo(struct pipe_context *pctx,
              const struct pipe_draw_info *dinfo)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_rasterizer_state *rast_state = ctx->rast_state;

   if (dinfo->mode >= PIPE_PRIM_QUADS ||
       dinfo->mode == PIPE_PRIM_LINE_LOOP) {
      if (!u_trim_pipe_prim(dinfo->mode, (unsigned *)&dinfo->count))
         return;

      util_primconvert_save_rasterizer_state(ctx->primconvert, &rast_state->base);
      util_primconvert_draw_vbo(ctx->primconvert, dinfo);
      return;
   }

   struct zink_gfx_program *gfx_program = get_gfx_program(ctx);
   if (!gfx_program)
      return;

   VkPipeline pipeline = zink_get_gfx_pipeline(screen, gfx_program,
                                               &ctx->gfx_pipeline_state,
                                               dinfo->mode);

   bool depth_bias = false;
   switch (u_reduced_prim(dinfo->mode)) {
   case PIPE_PRIM_POINTS:
      depth_bias = rast_state->offset_point;
      break;

   case PIPE_PRIM_LINES:
      depth_bias = rast_state->offset_line;
      break;

   case PIPE_PRIM_TRIANGLES:
      depth_bias = rast_state->offset_tri;
      break;

   default:
      unreachable("unexpected reduced prim");
   }

   unsigned index_offset = 0;
   struct pipe_resource *index_buffer = NULL;
   if (dinfo->index_size > 0) {
      if (dinfo->has_user_indices) {
         if (!util_upload_index_buffer(pctx, dinfo, &index_buffer, &index_offset)) {
            debug_printf("util_upload_index_buffer() failed\n");
            return;
         }
      } else
         index_buffer = dinfo->index.resource;
   }

   VkWriteDescriptorSet wds[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS + PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
   VkDescriptorBufferInfo buffer_infos[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS];
   VkDescriptorImageInfo image_infos[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
   int num_wds = 0, num_buffer_info = 0, num_image_info = 0;

   struct zink_resource *transitions[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
   int num_transitions = 0;

   for (int i = 0; i < ARRAY_SIZE(ctx->gfx_stages); i++) {
      struct zink_shader *shader = ctx->gfx_stages[i];
      if (!shader)
         continue;

      for (int j = 0; j < shader->num_bindings; j++) {
         int index = shader->bindings[j].index;
         if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
            assert(ctx->ubos[i][index].buffer_size > 0);
            assert(ctx->ubos[i][index].buffer);
            struct zink_resource *res = zink_resource(ctx->ubos[i][index].buffer);
            buffer_infos[num_buffer_info].buffer = res->buffer;
            buffer_infos[num_buffer_info].offset = ctx->ubos[i][index].buffer_offset;
            buffer_infos[num_buffer_info].range = VK_WHOLE_SIZE;
            wds[num_wds].pBufferInfo = buffer_infos + num_buffer_info;
            ++num_buffer_info;
         } else {
            struct pipe_sampler_view *psampler_view = ctx->image_views[i][index];
            assert(psampler_view);
            struct zink_sampler_view *sampler_view = zink_sampler_view(psampler_view);

            struct zink_resource *res = zink_resource(psampler_view->texture);
            VkImageLayout layout = res->layout;
            if (layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
                layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL &&
                layout != VK_IMAGE_LAYOUT_GENERAL) {
               transitions[num_transitions++] = res;
               layout = VK_IMAGE_LAYOUT_GENERAL;
            }
            image_infos[num_image_info].imageLayout = layout;
            image_infos[num_image_info].imageView = sampler_view->image_view;
            image_infos[num_image_info].sampler = ctx->samplers[i][index];
            wds[num_wds].pImageInfo = image_infos + num_image_info;
            ++num_image_info;
         }

         wds[num_wds].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
         wds[num_wds].pNext = NULL;
         wds[num_wds].dstBinding = shader->bindings[j].binding;
         wds[num_wds].dstArrayElement = 0;
         wds[num_wds].descriptorCount = 1;
         wds[num_wds].descriptorType = shader->bindings[j].type;
         ++num_wds;
      }
   }

   struct zink_batch *batch;
   if (num_transitions > 0) {
      batch = zink_batch_no_rp(ctx);

      for (int i = 0; i < num_transitions; ++i)
         zink_resource_barrier(batch->cmdbuf, transitions[i],
                               transitions[i]->aspect,
                               VK_IMAGE_LAYOUT_GENERAL);
   }

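   /* A fresh descriptor set is allocated for every draw; sets are only
    * reclaimed wholesale via vkResetDescriptorPool once the pool runs dry
    * (see allocate_descriptor_set() above).
    */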
   VkDescriptorSet desc_set = allocate_descriptor_set(ctx, gfx_program->dsl);

   batch = zink_batch_rp(ctx);

   for (int i = 0; i < ARRAY_SIZE(ctx->gfx_stages); i++) {
      struct zink_shader *shader = ctx->gfx_stages[i];
      if (!shader)
         continue;

      for (int j = 0; j < shader->num_bindings; j++) {
         int index = shader->bindings[j].index;
         if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
            struct zink_resource *res = zink_resource(ctx->ubos[i][index].buffer);
            zink_batch_reference_resoure(batch, res);
         } else {
            struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->image_views[i][index]);
            zink_batch_reference_sampler_view(batch, sampler_view);
         }
      }
   }

   vkCmdSetViewport(batch->cmdbuf, 0, ctx->num_viewports, ctx->viewports);

   if (ctx->num_scissors)
      vkCmdSetScissor(batch->cmdbuf, 0, ctx->num_scissors, ctx->scissors);
   else if (ctx->fb_state.width && ctx->fb_state.height) {
      VkRect2D fb_scissor = {};
      fb_scissor.extent.width = ctx->fb_state.width;
      fb_scissor.extent.height = ctx->fb_state.height;
      vkCmdSetScissor(batch->cmdbuf, 0, 1, &fb_scissor);
   }

   vkCmdSetStencilReference(batch->cmdbuf, VK_STENCIL_FACE_FRONT_BIT, ctx->stencil_ref[0]);
   vkCmdSetStencilReference(batch->cmdbuf, VK_STENCIL_FACE_BACK_BIT, ctx->stencil_ref[1]);

   if (depth_bias)
      vkCmdSetDepthBias(batch->cmdbuf, rast_state->offset_units, rast_state->offset_clamp, rast_state->offset_scale);
   else
      vkCmdSetDepthBias(batch->cmdbuf, 0.0f, 0.0f, 0.0f);

   if (ctx->gfx_pipeline_state.blend_state->need_blend_constants)
      vkCmdSetBlendConstants(batch->cmdbuf, ctx->blend_constants);

   for (int i = 0; i < num_wds; ++i)
      wds[i].dstSet = desc_set;

   vkUpdateDescriptorSets(screen->dev, num_wds, wds, 0, NULL);

   vkCmdBindPipeline(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
   vkCmdBindDescriptorSets(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS,
                           gfx_program->layout, 0, 1, &desc_set, 0, NULL);
   zink_bind_vertex_buffers(batch, ctx);

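   /* Core Vulkan only has 16- and 32-bit index types; byte indices are
    * presumably lowered before they reach the driver, hence the assert
    * below.
    */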
   if (dinfo->index_size > 0) {
      assert(dinfo->index_size != 1);
      VkIndexType index_type = dinfo->index_size == 2 ? VK_INDEX_TYPE_UINT16 : VK_INDEX_TYPE_UINT32;
      struct zink_resource *res = zink_resource(index_buffer);
      vkCmdBindIndexBuffer(batch->cmdbuf, res->buffer, index_offset, index_type);
      zink_batch_reference_resoure(batch, res);
      vkCmdDrawIndexed(batch->cmdbuf,
                       dinfo->count, dinfo->instance_count,
                       dinfo->start, dinfo->index_bias, dinfo->start_instance);
   } else
      vkCmdDraw(batch->cmdbuf, dinfo->count, dinfo->instance_count, dinfo->start, dinfo->start_instance);

   if (dinfo->index_size > 0 && dinfo->has_user_indices)
      pipe_resource_reference(&index_buffer, NULL);
}

static void
zink_flush(struct pipe_context *pctx,
           struct pipe_fence_handle **pfence,
           enum pipe_flush_flags flags)
{
   struct zink_context *ctx = zink_context(pctx);

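   /* Grab the batch before flushing: flush_batch() advances curr_batch, and
    * the fence handed back to the caller lives on the batch just submitted.
    */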
   struct zink_batch *batch = zink_curr_batch(ctx);
   flush_batch(ctx);

   if (pfence)
      zink_fence_reference(zink_screen(pctx->screen),
                           (struct zink_fence **)pfence,
                           batch->fence);

   if (flags & PIPE_FLUSH_END_OF_FRAME)
      pctx->screen->fence_finish(pctx->screen, pctx,
                                 (struct pipe_fence_handle *)batch->fence,
                                 PIPE_TIMEOUT_INFINITE);
}

static void
zink_blit(struct pipe_context *pctx,
          const struct pipe_blit_info *info)
{
   struct zink_context *ctx = zink_context(pctx);
   bool is_resolve = false;
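   /* Blits that need shader emulation (partial write masks, scissors, alpha
    * blending) go through util_blitter; everything else maps directly onto
    * vkCmdResolveImage or vkCmdBlitImage below.
    */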
   if (info->mask != PIPE_MASK_RGBA ||
       info->scissor_enable ||
       info->alpha_blend) {
      if (!util_blitter_is_blit_supported(ctx->blitter, info)) {
         debug_printf("blit unsupported %s -> %s\n",
                      util_format_short_name(info->src.resource->format),
                      util_format_short_name(info->dst.resource->format));
         return;
      }

      util_blitter_save_fragment_constant_buffer_slot(ctx->blitter, ctx->ubos[PIPE_SHADER_FRAGMENT]);
      util_blitter_save_vertex_buffer_slot(ctx->blitter, ctx->buffers);
      util_blitter_save_vertex_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_VERTEX]);
      util_blitter_save_fragment_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_FRAGMENT]);
      util_blitter_save_rasterizer(ctx->blitter, ctx->gfx_pipeline_state.rast_state);

      util_blitter_blit(ctx->blitter, info);
      return;
   }

   struct zink_resource *src = zink_resource(info->src.resource);
   struct zink_resource *dst = zink_resource(info->dst.resource);

   if (src->base.nr_samples > 1 && dst->base.nr_samples <= 1)
      is_resolve = true;

   struct zink_batch *batch = zink_batch_no_rp(ctx);

   zink_batch_reference_resoure(batch, src);
   zink_batch_reference_resoure(batch, dst);

   if (is_resolve) {
      VkImageResolve region = {};

      region.srcSubresource.aspectMask = src->aspect;
      region.srcSubresource.mipLevel = info->src.level;
      region.srcSubresource.baseArrayLayer = 0; // no clue
      region.srcSubresource.layerCount = 1; // no clue
      region.srcOffset.x = info->src.box.x;
      region.srcOffset.y = info->src.box.y;
      region.srcOffset.z = info->src.box.z;

      region.dstSubresource.aspectMask = dst->aspect;
      region.dstSubresource.mipLevel = info->dst.level;
      region.dstSubresource.baseArrayLayer = 0; // no clue
      region.dstSubresource.layerCount = 1; // no clue
      region.dstOffset.x = info->dst.box.x;
      region.dstOffset.y = info->dst.box.y;
      region.dstOffset.z = info->dst.box.z;

      region.extent.width = info->dst.box.width;
      region.extent.height = info->dst.box.height;
      region.extent.depth = info->dst.box.depth;
      vkCmdResolveImage(batch->cmdbuf, src->image, src->layout,
                        dst->image, dst->layout,
                        1, &region);

   } else {
      if (dst->layout != VK_IMAGE_LAYOUT_GENERAL &&
          dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, dst, dst->aspect,
                               VK_IMAGE_LAYOUT_GENERAL);

      VkImageBlit region = {};
      region.srcSubresource.aspectMask = src->aspect;
      region.srcSubresource.mipLevel = info->src.level;
      region.srcOffsets[0].x = info->src.box.x;
      region.srcOffsets[0].y = info->src.box.y;
      region.srcOffsets[1].x = info->src.box.x + info->src.box.width;
      region.srcOffsets[1].y = info->src.box.y + info->src.box.height;

      if (src->base.array_size > 1) {
         region.srcOffsets[0].z = 0;
         region.srcOffsets[1].z = 1;
         region.srcSubresource.baseArrayLayer = info->src.box.z;
         region.srcSubresource.layerCount = info->src.box.depth;
      } else {
         region.srcOffsets[0].z = info->src.box.z;
         region.srcOffsets[1].z = info->src.box.z + info->src.box.depth;
         region.srcSubresource.baseArrayLayer = 0;
         region.srcSubresource.layerCount = 1;
      }

      region.dstSubresource.aspectMask = dst->aspect;
      region.dstSubresource.mipLevel = info->dst.level;
      region.dstOffsets[0].x = info->dst.box.x;
      region.dstOffsets[0].y = info->dst.box.y;
      region.dstOffsets[1].x = info->dst.box.x + info->dst.box.width;
      region.dstOffsets[1].y = info->dst.box.y + info->dst.box.height;

      if (dst->base.array_size > 1) {
         region.dstOffsets[0].z = 0;
         region.dstOffsets[1].z = 1;
         region.dstSubresource.baseArrayLayer = info->dst.box.z;
         region.dstSubresource.layerCount = info->dst.box.depth;
      } else {
         region.dstOffsets[0].z = info->dst.box.z;
         region.dstOffsets[1].z = info->dst.box.z + info->dst.box.depth;
         region.dstSubresource.baseArrayLayer = 0;
         region.dstSubresource.layerCount = 1;
      }

      vkCmdBlitImage(batch->cmdbuf, src->image, src->layout,
                     dst->image, dst->layout,
                     1, &region,
                     filter(info->filter));
   }

   /* HACK: I have no idea why this is needed, but without it ioquake3
    * randomly keeps fading to black.
    */
   flush_batch(ctx);
}

static void
zink_flush_resource(struct pipe_context *pipe,
                    struct pipe_resource *resource)
{
}

static void
zink_resource_copy_region(struct pipe_context *pctx,
                          struct pipe_resource *pdst,
                          unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz,
                          struct pipe_resource *psrc,
                          unsigned src_level, const struct pipe_box *src_box)
{
   struct zink_resource *dst = zink_resource(pdst);
   struct zink_resource *src = zink_resource(psrc);
   struct zink_context *ctx = zink_context(pctx);
   if (dst->base.target != PIPE_BUFFER && src->base.target != PIPE_BUFFER) {
      VkImageCopy region = {};

      region.srcSubresource.aspectMask = src->aspect;
      region.srcSubresource.mipLevel = src_level;
      region.srcSubresource.layerCount = 1;
      if (src->base.array_size > 1) {
         region.srcSubresource.baseArrayLayer = src_box->z;
         region.srcSubresource.layerCount = src_box->depth;
         region.extent.depth = 1;
      } else {
         region.srcOffset.z = src_box->z;
         region.srcSubresource.layerCount = 1;
         region.extent.depth = src_box->depth;
      }

      region.srcOffset.x = src_box->x;
      region.srcOffset.y = src_box->y;

      region.dstSubresource.aspectMask = dst->aspect;
      region.dstSubresource.mipLevel = dst_level;
      if (dst->base.array_size > 1) {
         region.dstSubresource.baseArrayLayer = dstz;
         region.dstSubresource.layerCount = src_box->depth;
      } else {
         region.dstOffset.z = dstz;
         region.dstSubresource.layerCount = 1;
      }

      region.dstOffset.x = dstx;
      region.dstOffset.y = dsty;
      region.extent.width = src_box->width;
      region.extent.height = src_box->height;

      struct zink_batch *batch = zink_batch_no_rp(ctx);
      zink_batch_reference_resoure(batch, src);
      zink_batch_reference_resoure(batch, dst);

      vkCmdCopyImage(batch->cmdbuf, src->image, src->layout,
                     dst->image, dst->layout,
                     1, &region);
   } else
      debug_printf("zink: TODO resource copy\n");
}

struct pipe_context *
zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
   struct zink_screen *screen = zink_screen(pscreen);
   struct zink_context *ctx = CALLOC_STRUCT(zink_context);

   ctx->base.screen = pscreen;
   ctx->base.priv = priv;

   ctx->base.destroy = zink_context_destroy;

   zink_context_state_init(&ctx->base);

   ctx->base.create_sampler_state = zink_create_sampler_state;
   ctx->base.bind_sampler_states = zink_bind_sampler_states;
   ctx->base.delete_sampler_state = zink_delete_sampler_state;

   ctx->base.create_sampler_view = zink_create_sampler_view;
   ctx->base.set_sampler_views = zink_set_sampler_views;
   ctx->base.sampler_view_destroy = zink_sampler_view_destroy;

   ctx->base.create_vs_state = zink_create_vs_state;
   ctx->base.bind_vs_state = zink_bind_vs_state;
   ctx->base.delete_vs_state = zink_delete_vs_state;

   ctx->base.create_fs_state = zink_create_fs_state;
   ctx->base.bind_fs_state = zink_bind_fs_state;
   ctx->base.delete_fs_state = zink_delete_fs_state;

   ctx->base.set_polygon_stipple = zink_set_polygon_stipple;
   ctx->base.set_vertex_buffers = zink_set_vertex_buffers;
   ctx->base.set_viewport_states = zink_set_viewport_states;
   ctx->base.set_scissor_states = zink_set_scissor_states;
   ctx->base.set_constant_buffer = zink_set_constant_buffer;
   ctx->base.set_framebuffer_state = zink_set_framebuffer_state;
   ctx->base.set_stencil_ref = zink_set_stencil_ref;
   ctx->base.set_clip_state = zink_set_clip_state;
   ctx->base.set_active_query_state = zink_set_active_query_state;
   ctx->base.set_blend_color = zink_set_blend_color;

   ctx->base.clear = zink_clear;
   ctx->base.draw_vbo = zink_draw_vbo;
   ctx->base.flush = zink_flush;

   ctx->base.resource_copy_region = zink_resource_copy_region;
   ctx->base.blit = zink_blit;

   ctx->base.flush_resource = zink_flush_resource;
   zink_context_surface_init(&ctx->base);
   zink_context_resource_init(&ctx->base);
   zink_context_query_init(&ctx->base);

   slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);

   ctx->base.stream_uploader = u_upload_create_default(&ctx->base);
   ctx->base.const_uploader = ctx->base.stream_uploader;

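   /* Vulkan has no quads, quad strips, polygons or line loops; anything
    * outside this mask gets lowered by u_primconvert in zink_draw_vbo().
    */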
   int prim_hwsupport = 1 << PIPE_PRIM_POINTS |
                        1 << PIPE_PRIM_LINES |
                        1 << PIPE_PRIM_LINE_STRIP |
                        1 << PIPE_PRIM_TRIANGLES |
                        1 << PIPE_PRIM_TRIANGLE_STRIP |
                        1 << PIPE_PRIM_TRIANGLE_FAN;

   ctx->primconvert = util_primconvert_create(&ctx->base, prim_hwsupport);
   if (!ctx->primconvert)
      goto fail;

   ctx->blitter = util_blitter_create(&ctx->base);
   if (!ctx->blitter)
      goto fail;

   VkCommandPoolCreateInfo cpci = {};
   cpci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
   cpci.queueFamilyIndex = screen->gfx_queue;
   cpci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
   if (vkCreateCommandPool(screen->dev, &cpci, NULL, &ctx->cmdpool) != VK_SUCCESS)
      goto fail;

   VkCommandBufferAllocateInfo cbai = {};
   cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
   cbai.commandPool = ctx->cmdpool;
   cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
   cbai.commandBufferCount = 1;
   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i) {
      if (vkAllocateCommandBuffers(screen->dev, &cbai, &ctx->batches[i].cmdbuf) != VK_SUCCESS)
         goto fail;

      ctx->batches[i].resources = _mesa_set_create(NULL, _mesa_hash_pointer,
                                                   _mesa_key_pointer_equal);
      ctx->batches[i].sampler_views = _mesa_set_create(NULL,
                                                       _mesa_hash_pointer,
                                                       _mesa_key_pointer_equal);

      if (!ctx->batches[i].resources || !ctx->batches[i].sampler_views)
         goto fail;

      util_dynarray_init(&ctx->batches[i].zombie_samplers, NULL);
   }

   VkDescriptorPoolSize sizes[] = {
      {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1000}
   };
   VkDescriptorPoolCreateInfo dpci = {};
   dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
   dpci.pPoolSizes = sizes;
   dpci.poolSizeCount = ARRAY_SIZE(sizes);
   dpci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
   dpci.maxSets = 1000;

   if (vkCreateDescriptorPool(screen->dev, &dpci, 0, &ctx->descpool) != VK_SUCCESS)
      goto fail;

   vkGetDeviceQueue(screen->dev, screen->gfx_queue, 0, &ctx->queue);

   ctx->program_cache = _mesa_hash_table_create(NULL,
                                                hash_gfx_program,
                                                equals_gfx_program);
   ctx->render_pass_cache = _mesa_hash_table_create(NULL,
                                                    hash_render_pass_state,
                                                    equals_render_pass_state);
   ctx->framebuffer_cache = _mesa_hash_table_create(NULL,
                                                    hash_framebuffer_state,
                                                    equals_framebuffer_state);

   if (!ctx->program_cache || !ctx->render_pass_cache ||
       !ctx->framebuffer_cache)
      goto fail;

   ctx->dirty = ZINK_DIRTY_PROGRAM;

   /* start the first batch */
   zink_start_batch(ctx, zink_curr_batch(ctx));

   return &ctx->base;

fail:
   if (ctx) {
      vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);
      FREE(ctx);
   }
   return NULL;
}