zink: move blitting to separate source
[mesa.git] / src/gallium/drivers/zink/zink_context.c
/*
 * Copyright 2018 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "zink_context.h"

#include "zink_batch.h"
#include "zink_compiler.h"
#include "zink_fence.h"
#include "zink_framebuffer.h"
#include "zink_helpers.h"
#include "zink_pipeline.h"
#include "zink_program.h"
#include "zink_render_pass.h"
#include "zink_resource.h"
#include "zink_screen.h"
#include "zink_state.h"
#include "zink_surface.h"

#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
#include "util/u_debug.h"
#include "util/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"

#include "nir.h"

#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_upload_mgr.h"

static void
zink_context_destroy(struct pipe_context *pctx)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   if (vkQueueWaitIdle(ctx->queue) != VK_SUCCESS)
      debug_printf("vkQueueWaitIdle failed\n");

   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i)
      vkFreeCommandBuffers(screen->dev, ctx->cmdpool, 1, &ctx->batches[i].cmdbuf);
   vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);

   util_primconvert_destroy(ctx->primconvert);
   u_upload_destroy(pctx->stream_uploader);
   slab_destroy_child(&ctx->transfer_pool);
   util_blitter_destroy(ctx->blitter);
   FREE(ctx);
}

static VkSamplerMipmapMode
sampler_mipmap_mode(enum pipe_tex_mipfilter filter)
{
   switch (filter) {
   case PIPE_TEX_MIPFILTER_NEAREST: return VK_SAMPLER_MIPMAP_MODE_NEAREST;
   case PIPE_TEX_MIPFILTER_LINEAR: return VK_SAMPLER_MIPMAP_MODE_LINEAR;
   case PIPE_TEX_MIPFILTER_NONE:
      unreachable("PIPE_TEX_MIPFILTER_NONE should be dealt with earlier");
   }
   unreachable("unexpected filter");
}

static VkSamplerAddressMode
sampler_address_mode(enum pipe_tex_wrap filter)
{
   switch (filter) {
   case PIPE_TEX_WRAP_REPEAT: return VK_SAMPLER_ADDRESS_MODE_REPEAT;
   case PIPE_TEX_WRAP_CLAMP: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
   case PIPE_TEX_WRAP_MIRROR_REPEAT: return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
   case PIPE_TEX_WRAP_MIRROR_CLAMP: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   }
   unreachable("unexpected wrap");
}

static VkCompareOp
compare_op(enum pipe_compare_func op)
{
   switch (op) {
   case PIPE_FUNC_NEVER: return VK_COMPARE_OP_NEVER;
   case PIPE_FUNC_LESS: return VK_COMPARE_OP_LESS;
   case PIPE_FUNC_EQUAL: return VK_COMPARE_OP_EQUAL;
   case PIPE_FUNC_LEQUAL: return VK_COMPARE_OP_LESS_OR_EQUAL;
   case PIPE_FUNC_GREATER: return VK_COMPARE_OP_GREATER;
   case PIPE_FUNC_NOTEQUAL: return VK_COMPARE_OP_NOT_EQUAL;
   case PIPE_FUNC_GEQUAL: return VK_COMPARE_OP_GREATER_OR_EQUAL;
   case PIPE_FUNC_ALWAYS: return VK_COMPARE_OP_ALWAYS;
   }
   unreachable("unexpected compare");
}

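/* Creates a VkSampler from a gallium sampler-state CSO. When mipmapping is
 * disabled, minLod/maxLod are pinned to 0 so that only the base level can
 * ever be sampled, which is how PIPE_TEX_MIPFILTER_NONE maps onto Vulkan.
 */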
static void *
zink_create_sampler_state(struct pipe_context *pctx,
                          const struct pipe_sampler_state *state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);

   VkSamplerCreateInfo sci = {};
   sci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
   sci.magFilter = zink_filter(state->mag_img_filter);
   sci.minFilter = zink_filter(state->min_img_filter);

   if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) {
      sci.mipmapMode = sampler_mipmap_mode(state->min_mip_filter);
      sci.minLod = state->min_lod;
      sci.maxLod = state->max_lod;
   } else {
      sci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
      sci.minLod = 0;
      sci.maxLod = 0;
   }

   sci.addressModeU = sampler_address_mode(state->wrap_s);
   sci.addressModeV = sampler_address_mode(state->wrap_t);
   sci.addressModeW = sampler_address_mode(state->wrap_r);
   sci.mipLodBias = state->lod_bias;

   if (state->compare_mode == PIPE_TEX_COMPARE_NONE)
      sci.compareOp = VK_COMPARE_OP_NEVER;
   else
      sci.compareOp = compare_op(state->compare_func);

   sci.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; // TODO
   sci.unnormalizedCoordinates = !state->normalized_coords;

   if (state->max_anisotropy > 1) {
      sci.maxAnisotropy = state->max_anisotropy;
      sci.anisotropyEnable = VK_TRUE;
   }

   VkSampler *sampler = CALLOC(1, sizeof(VkSampler));
   if (!sampler)
      return NULL;

   if (vkCreateSampler(screen->dev, &sci, NULL, sampler) != VK_SUCCESS) {
      FREE(sampler);
      return NULL;
   }

   return sampler;
}

static void
zink_bind_sampler_states(struct pipe_context *pctx,
                         enum pipe_shader_type shader,
                         unsigned start_slot,
                         unsigned num_samplers,
                         void **samplers)
{
   struct zink_context *ctx = zink_context(pctx);
   for (unsigned i = 0; i < num_samplers; ++i) {
      VkSampler *sampler = samplers[i];
      ctx->samplers[shader][start_slot + i] = sampler ? *sampler : VK_NULL_HANDLE;
   }
   ctx->num_samplers[shader] = start_slot + num_samplers;
}

static void
zink_delete_sampler_state(struct pipe_context *pctx,
                          void *sampler_state)
{
   struct zink_batch *batch = zink_curr_batch(zink_context(pctx));
   util_dynarray_append(&batch->zombie_samplers, VkSampler,
                        *(VkSampler *)sampler_state);
   FREE(sampler_state);
}

static VkImageViewType
image_view_type(enum pipe_texture_target target)
{
   switch (target) {
   case PIPE_TEXTURE_1D: return VK_IMAGE_VIEW_TYPE_1D;
   case PIPE_TEXTURE_1D_ARRAY: return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
   case PIPE_TEXTURE_2D: return VK_IMAGE_VIEW_TYPE_2D;
   case PIPE_TEXTURE_2D_ARRAY: return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
   case PIPE_TEXTURE_CUBE: return VK_IMAGE_VIEW_TYPE_CUBE;
   case PIPE_TEXTURE_CUBE_ARRAY: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
   case PIPE_TEXTURE_3D: return VK_IMAGE_VIEW_TYPE_3D;
   case PIPE_TEXTURE_RECT: return VK_IMAGE_VIEW_TYPE_2D; /* not sure */
   default:
      unreachable("unexpected target");
   }
}

static VkComponentSwizzle
component_mapping(enum pipe_swizzle swizzle)
{
   switch (swizzle) {
   case PIPE_SWIZZLE_X: return VK_COMPONENT_SWIZZLE_R;
   case PIPE_SWIZZLE_Y: return VK_COMPONENT_SWIZZLE_G;
   case PIPE_SWIZZLE_Z: return VK_COMPONENT_SWIZZLE_B;
   case PIPE_SWIZZLE_W: return VK_COMPONENT_SWIZZLE_A;
   case PIPE_SWIZZLE_0: return VK_COMPONENT_SWIZZLE_ZERO;
   case PIPE_SWIZZLE_1: return VK_COMPONENT_SWIZZLE_ONE;
   case PIPE_SWIZZLE_NONE: return VK_COMPONENT_SWIZZLE_IDENTITY; // ???
   default:
      unreachable("unexpected swizzle");
   }
}

static VkImageAspectFlags
sampler_aspect_from_format(enum pipe_format fmt)
{
   if (util_format_is_depth_or_stencil(fmt)) {
      const struct util_format_description *desc = util_format_description(fmt);
      if (util_format_has_depth(desc))
         return VK_IMAGE_ASPECT_DEPTH_BIT;
      assert(util_format_has_stencil(desc));
      return VK_IMAGE_ASPECT_STENCIL_BIT;
   } else
      return VK_IMAGE_ASPECT_COLOR_BIT;
}

static struct pipe_sampler_view *
zink_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *pres,
                         const struct pipe_sampler_view *state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_resource *res = zink_resource(pres);
   struct zink_sampler_view *sampler_view = CALLOC_STRUCT(zink_sampler_view);
   if (!sampler_view)
      return NULL;

   sampler_view->base = *state;
   sampler_view->base.texture = NULL;
   pipe_resource_reference(&sampler_view->base.texture, pres);
   sampler_view->base.reference.count = 1;
   sampler_view->base.context = pctx;

   VkImageViewCreateInfo ivci = {};
   ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
   ivci.image = res->image;
   ivci.viewType = image_view_type(state->target);
   ivci.format = zink_get_format(screen, state->format);
   ivci.components.r = component_mapping(state->swizzle_r);
   ivci.components.g = component_mapping(state->swizzle_g);
   ivci.components.b = component_mapping(state->swizzle_b);
   ivci.components.a = component_mapping(state->swizzle_a);

   ivci.subresourceRange.aspectMask = sampler_aspect_from_format(state->format);
   ivci.subresourceRange.baseMipLevel = state->u.tex.first_level;
   ivci.subresourceRange.baseArrayLayer = state->u.tex.first_layer;
   ivci.subresourceRange.levelCount = state->u.tex.last_level - state->u.tex.first_level + 1;
   ivci.subresourceRange.layerCount = state->u.tex.last_layer - state->u.tex.first_layer + 1;

   VkResult err = vkCreateImageView(screen->dev, &ivci, NULL, &sampler_view->image_view);
   if (err != VK_SUCCESS) {
      /* drop the texture reference taken above before freeing the view */
      pipe_resource_reference(&sampler_view->base.texture, NULL);
      FREE(sampler_view);
      return NULL;
   }

   return &sampler_view->base;
}

static void
zink_sampler_view_destroy(struct pipe_context *pctx,
                          struct pipe_sampler_view *pview)
{
   struct zink_sampler_view *view = zink_sampler_view(pview);
   vkDestroyImageView(zink_screen(pctx->screen)->dev, view->image_view, NULL);
   FREE(view);
}

static void *
zink_create_vs_state(struct pipe_context *pctx,
                     const struct pipe_shader_state *shader)
{
   struct nir_shader *nir;
   if (shader->type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
   else
      nir = (struct nir_shader *)shader->ir.nir;

   return zink_compile_nir(zink_screen(pctx->screen), nir);
}

static void
bind_stage(struct zink_context *ctx, enum pipe_shader_type stage,
           struct zink_shader *shader)
{
   assert(stage < PIPE_SHADER_COMPUTE);
   ctx->gfx_stages[stage] = shader;
   ctx->dirty_program = true;
}

static void
zink_bind_vs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_VERTEX, cso);
}

static void
zink_delete_vs_state(struct pipe_context *pctx,
                     void *cso)
{
   zink_shader_free(zink_screen(pctx->screen), cso);
}

static void *
zink_create_fs_state(struct pipe_context *pctx,
                     const struct pipe_shader_state *shader)
{
   struct nir_shader *nir;
   if (shader->type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
   else
      nir = (struct nir_shader *)shader->ir.nir;

   return zink_compile_nir(zink_screen(pctx->screen), nir);
}

static void
zink_bind_fs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_FRAGMENT, cso);
}

static void
zink_delete_fs_state(struct pipe_context *pctx,
                     void *cso)
{
   zink_shader_free(zink_screen(pctx->screen), cso);
}

static void
zink_set_polygon_stipple(struct pipe_context *pctx,
                         const struct pipe_poly_stipple *ps)
{
}

static void
zink_set_vertex_buffers(struct pipe_context *pctx,
                        unsigned start_slot,
                        unsigned num_buffers,
                        const struct pipe_vertex_buffer *buffers)
{
   struct zink_context *ctx = zink_context(pctx);

   if (buffers) {
      for (int i = 0; i < num_buffers; ++i) {
         const struct pipe_vertex_buffer *vb = buffers + i;
         ctx->gfx_pipeline_state.bindings[start_slot + i].stride = vb->stride;
      }
   }

   util_set_vertex_buffers_mask(ctx->buffers, &ctx->buffers_enabled_mask,
                                buffers, start_slot, num_buffers);
}

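/* Gallium hands us viewports as a scale/translate transform, while Vulkan
 * wants an origin, extent and depth range. The corner is recovered as
 * translate - scale and the size as 2 * scale; for example, a full 640x480
 * viewport arrives here as scale = (320, 240), translate = (320, 240).
 */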
static void
zink_set_viewport_states(struct pipe_context *pctx,
                         unsigned start_slot,
                         unsigned num_viewports,
                         const struct pipe_viewport_state *state)
{
   struct zink_context *ctx = zink_context(pctx);

   for (unsigned i = 0; i < num_viewports; ++i) {
      VkViewport viewport = {
         state[i].translate[0] - state[i].scale[0],
         state[i].translate[1] - state[i].scale[1],
         state[i].scale[0] * 2,
         state[i].scale[1] * 2,
         state[i].translate[2] - state[i].scale[2],
         state[i].translate[2] + state[i].scale[2]
      };
      ctx->viewport_states[start_slot + i] = state[i];
      ctx->viewports[start_slot + i] = viewport;
   }
   ctx->num_viewports = start_slot + num_viewports;
}

static void
zink_set_scissor_states(struct pipe_context *pctx,
                        unsigned start_slot, unsigned num_scissors,
                        const struct pipe_scissor_state *states)
{
   struct zink_context *ctx = zink_context(pctx);

   for (unsigned i = 0; i < num_scissors; i++) {
      VkRect2D scissor;

      scissor.offset.x = states[i].minx;
      scissor.offset.y = states[i].miny;
      scissor.extent.width = states[i].maxx - states[i].minx;
      scissor.extent.height = states[i].maxy - states[i].miny;
      ctx->scissor_states[start_slot + i] = states[i];
      ctx->scissors[start_slot + i] = scissor;
   }
}

static void
zink_set_constant_buffer(struct pipe_context *pctx,
                         enum pipe_shader_type shader, uint index,
                         const struct pipe_constant_buffer *cb)
{
   struct zink_context *ctx = zink_context(pctx);

   if (cb) {
      struct pipe_resource *buffer = cb->buffer;
      unsigned offset = cb->buffer_offset;
      if (cb->user_buffer) {
         struct zink_screen *screen = zink_screen(pctx->screen);
         u_upload_data(ctx->base.const_uploader, 0, cb->buffer_size,
                       screen->props.limits.minUniformBufferOffsetAlignment,
                       cb->user_buffer, &offset, &buffer);
      }

      pipe_resource_reference(&ctx->ubos[shader][index].buffer, buffer);
      ctx->ubos[shader][index].buffer_offset = offset;
      ctx->ubos[shader][index].buffer_size = cb->buffer_size;
      ctx->ubos[shader][index].user_buffer = NULL;

      if (cb->user_buffer)
         pipe_resource_reference(&buffer, NULL);
   } else {
      pipe_resource_reference(&ctx->ubos[shader][index].buffer, NULL);
      ctx->ubos[shader][index].buffer_offset = 0;
      ctx->ubos[shader][index].buffer_size = 0;
      ctx->ubos[shader][index].user_buffer = NULL;
   }
}

static void
zink_set_sampler_views(struct pipe_context *pctx,
                       enum pipe_shader_type shader_type,
                       unsigned start_slot,
                       unsigned num_views,
                       struct pipe_sampler_view **views)
{
   struct zink_context *ctx = zink_context(pctx);
   assert(views);
   for (unsigned i = 0; i < num_views; ++i) {
      pipe_sampler_view_reference(
         &ctx->image_views[shader_type][start_slot + i],
         views[i]);
   }
   ctx->num_image_views[shader_type] = start_slot + num_views;
}

static void
zink_set_stencil_ref(struct pipe_context *pctx,
                     const struct pipe_stencil_ref *ref)
{
   struct zink_context *ctx = zink_context(pctx);
   ctx->stencil_ref = *ref;
}

static void
zink_set_clip_state(struct pipe_context *pctx,
                    const struct pipe_clip_state *pcs)
{
}

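/* Render-passes are cached on a zink_render_pass_state key (attachment
 * formats and sample-counts), so framebuffers with compatible layouts end
 * up sharing the same VkRenderPass.
 */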
static struct zink_render_pass *
get_render_pass(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   const struct pipe_framebuffer_state *fb = &ctx->fb_state;
   struct zink_render_pass_state state = {}; /* zero unused fields; the whole struct is hashed */

   for (int i = 0; i < fb->nr_cbufs; i++) {
      struct pipe_resource *res = fb->cbufs[i]->texture;
      state.rts[i].format = zink_get_format(screen, fb->cbufs[i]->format);
      state.rts[i].samples = res->nr_samples > 0 ? res->nr_samples :
                             VK_SAMPLE_COUNT_1_BIT;
   }
   state.num_cbufs = fb->nr_cbufs;

   if (fb->zsbuf) {
      struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
      state.rts[fb->nr_cbufs].format = zsbuf->format;
      state.rts[fb->nr_cbufs].samples = zsbuf->base.nr_samples > 0 ? zsbuf->base.nr_samples : VK_SAMPLE_COUNT_1_BIT;
   }
   state.have_zsbuf = fb->zsbuf != NULL;

   struct hash_entry *entry = _mesa_hash_table_search(ctx->render_pass_cache,
                                                      &state);
   if (!entry) {
      struct zink_render_pass *rp;
      rp = zink_create_render_pass(screen, &state);
      entry = _mesa_hash_table_insert(ctx->render_pass_cache, &state, rp);
      if (!entry)
         return NULL;
   }

   return entry->data;
}

static struct zink_framebuffer *
get_framebuffer(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);

   struct zink_framebuffer_state state = {};
   state.rp = get_render_pass(ctx);
   for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
      struct pipe_surface *psurf = ctx->fb_state.cbufs[i];
      state.attachments[i] = zink_surface(psurf);
   }

   state.num_attachments = ctx->fb_state.nr_cbufs;
   if (ctx->fb_state.zsbuf) {
      struct pipe_surface *psurf = ctx->fb_state.zsbuf;
      state.attachments[state.num_attachments++] = zink_surface(psurf);
   }

   state.width = ctx->fb_state.width;
   state.height = ctx->fb_state.height;
   state.layers = MAX2(ctx->fb_state.layers, 1);

   struct hash_entry *entry = _mesa_hash_table_search(ctx->framebuffer_cache,
                                                      &state);
   if (!entry) {
      struct zink_framebuffer *fb = zink_create_framebuffer(screen, &state);
      entry = _mesa_hash_table_insert(ctx->framebuffer_cache, &state, fb);
      if (!entry)
         return NULL;
   }

   return entry->data;
}

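/* Transitions all attachments to their attachment-optimal layouts and then
 * begins the current render-pass on the batch's command buffer; the layout
 * barriers have to be recorded before the pass begins.
 */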
void
zink_begin_render_pass(struct zink_context *ctx, struct zink_batch *batch)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   assert(batch == zink_curr_batch(ctx));
   assert(ctx->gfx_pipeline_state.render_pass);

   struct pipe_framebuffer_state *fb_state = &ctx->fb_state;

   VkRenderPassBeginInfo rpbi = {};
   rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
   rpbi.renderPass = ctx->gfx_pipeline_state.render_pass->render_pass;
   rpbi.renderArea.offset.x = 0;
   rpbi.renderArea.offset.y = 0;
   rpbi.renderArea.extent.width = fb_state->width;
   rpbi.renderArea.extent.height = fb_state->height;
   rpbi.clearValueCount = 0;
   rpbi.pClearValues = NULL;
   rpbi.framebuffer = ctx->framebuffer->fb;

   assert(ctx->gfx_pipeline_state.render_pass && ctx->framebuffer);
   assert(!batch->rp || batch->rp == ctx->gfx_pipeline_state.render_pass);
   assert(!batch->fb || batch->fb == ctx->framebuffer);

   for (int i = 0; i < fb_state->nr_cbufs; i++) {
      struct zink_resource *res = zink_resource(fb_state->cbufs[i]->texture);
      if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
   }

   if (fb_state->zsbuf) {
      struct zink_resource *res = zink_resource(fb_state->zsbuf->texture);
      if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
   }

   zink_render_pass_reference(screen, &batch->rp, ctx->gfx_pipeline_state.render_pass);
   zink_framebuffer_reference(screen, &batch->fb, ctx->framebuffer);

   vkCmdBeginRenderPass(batch->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
}

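/* Ends any active render-pass, submits the current batch, and cycles to the
 * next batch in the ring so a fresh command buffer is ready for recording.
 */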
static void
flush_batch(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (batch->rp)
      vkCmdEndRenderPass(batch->cmdbuf);

   zink_end_batch(ctx, batch);

   ctx->curr_batch++;
   if (ctx->curr_batch == ARRAY_SIZE(ctx->batches))
      ctx->curr_batch = 0;

   zink_start_batch(ctx, zink_curr_batch(ctx));
}

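/* Returns the current batch with a render-pass active, beginning one if
 * needed; for work that must be recorded inside a render-pass.
 */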
struct zink_batch *
zink_batch_rp(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (!batch->rp) {
      zink_begin_render_pass(ctx, batch);
      assert(batch->rp);
   }
   return batch;
}

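/* Returns a batch with no render-pass active, flushing the current batch if
 * one is in progress; for transfer operations and layout transitions.
 */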
struct zink_batch *
zink_batch_no_rp(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (batch->rp) {
      /* flush batch and get a new one */
      flush_batch(ctx);
      batch = zink_curr_batch(ctx);
      assert(!batch->rp);
   }
   return batch;
}

static void
zink_set_framebuffer_state(struct pipe_context *pctx,
                           const struct pipe_framebuffer_state *state)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   VkSampleCountFlagBits rast_samples = VK_SAMPLE_COUNT_1_BIT;
   for (int i = 0; i < state->nr_cbufs; i++)
      rast_samples = MAX2(rast_samples, state->cbufs[i]->texture->nr_samples);
   if (state->zsbuf && state->zsbuf->texture->nr_samples)
      rast_samples = MAX2(rast_samples, state->zsbuf->texture->nr_samples);

   util_copy_framebuffer_state(&ctx->fb_state, state);

   struct zink_framebuffer *fb = get_framebuffer(ctx);
   zink_framebuffer_reference(screen, &ctx->framebuffer, fb);
   zink_render_pass_reference(screen, &ctx->gfx_pipeline_state.render_pass, fb->rp);

   ctx->gfx_pipeline_state.rast_samples = rast_samples;
   ctx->gfx_pipeline_state.num_attachments = state->nr_cbufs;

   struct zink_batch *batch = zink_batch_no_rp(ctx);

   for (int i = 0; i < state->nr_cbufs; i++) {
      struct zink_resource *res = zink_resource(state->cbufs[i]->texture);
      if (res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
   }

   if (state->zsbuf) {
      struct zink_resource *res = zink_resource(state->zsbuf->texture);
      if (res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
   }
}

static void
zink_set_blend_color(struct pipe_context *pctx,
                     const struct pipe_blend_color *color)
{
   struct zink_context *ctx = zink_context(pctx);
   memcpy(ctx->blend_constants, color->color, sizeof(float) * 4);
}

static void
zink_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
   struct zink_context *ctx = zink_context(pctx);
   ctx->gfx_pipeline_state.sample_mask = sample_mask;
}

static VkAccessFlags
access_src_flags(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_UNDEFINED:
   case VK_IMAGE_LAYOUT_GENERAL:
      return 0;

   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;

   case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
      return VK_ACCESS_SHADER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_ACCESS_TRANSFER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_ACCESS_TRANSFER_WRITE_BIT;

   case VK_IMAGE_LAYOUT_PREINITIALIZED:
      return VK_ACCESS_HOST_WRITE_BIT;

   default:
      unreachable("unexpected layout");
   }
}

static VkAccessFlags
access_dst_flags(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_UNDEFINED:
   case VK_IMAGE_LAYOUT_GENERAL:
      return 0;

   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_ACCESS_TRANSFER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_ACCESS_TRANSFER_WRITE_BIT;

   default:
      unreachable("unexpected layout");
   }
}

static VkPipelineStageFlags
pipeline_dst_stage(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;
   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;

   default:
      return VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
   }
}

static VkPipelineStageFlags
pipeline_src_stage(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;
   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_PIPELINE_STAGE_TRANSFER_BIT;

   default:
      return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
   }
}

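/* Records a layout transition covering all mip-levels and array-layers of
 * the image. Access masks and pipeline stages are derived solely from the
 * old and new layouts, which is conservative but keeps callers simple.
 */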
void
zink_resource_barrier(VkCommandBuffer cmdbuf, struct zink_resource *res,
                      VkImageAspectFlags aspect, VkImageLayout new_layout)
{
   VkImageSubresourceRange isr = {
      aspect,
      0, VK_REMAINING_MIP_LEVELS,
      0, VK_REMAINING_ARRAY_LAYERS
   };

   VkImageMemoryBarrier imb = {
      VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
      NULL,
      access_src_flags(res->layout),
      access_dst_flags(new_layout),
      res->layout,
      new_layout,
      VK_QUEUE_FAMILY_IGNORED,
      VK_QUEUE_FAMILY_IGNORED,
      res->image,
      isr
   };
   vkCmdPipelineBarrier(
      cmdbuf,
      pipeline_src_stage(res->layout),
      pipeline_dst_stage(new_layout),
      0,
      0, NULL,
      0, NULL,
      1, &imb
   );

   res->layout = new_layout;
}

static void
zink_clear(struct pipe_context *pctx,
           unsigned buffers,
           const union pipe_color_union *pcolor,
           double depth, unsigned stencil)
{
   struct zink_context *ctx = zink_context(pctx);
   struct pipe_framebuffer_state *fb = &ctx->fb_state;

   /* FIXME: this is very inefficient; if no renderpass has been started yet,
    * we should record the clear if it's full-screen, and apply it as we
    * start the render-pass. Otherwise we can do a partial out-of-renderpass
    * clear.
    */
   struct zink_batch *batch = zink_batch_rp(ctx);

   VkClearAttachment attachments[1 + PIPE_MAX_COLOR_BUFS];
   int num_attachments = 0;

   if (buffers & PIPE_CLEAR_COLOR) {
      VkClearColorValue color;
      color.float32[0] = pcolor->f[0];
      color.float32[1] = pcolor->f[1];
      color.float32[2] = pcolor->f[2];
      color.float32[3] = pcolor->f[3];

      for (unsigned i = 0; i < fb->nr_cbufs; i++) {
         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
            continue;

         attachments[num_attachments].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
         attachments[num_attachments].colorAttachment = i;
         attachments[num_attachments].clearValue.color = color;
         ++num_attachments;
      }
   }

   if ((buffers & PIPE_CLEAR_DEPTHSTENCIL) && fb->zsbuf) {
      VkImageAspectFlags aspect = 0;
      if (buffers & PIPE_CLEAR_DEPTH)
         aspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
      if (buffers & PIPE_CLEAR_STENCIL)
         aspect |= VK_IMAGE_ASPECT_STENCIL_BIT;

      attachments[num_attachments].aspectMask = aspect;
      attachments[num_attachments].clearValue.depthStencil.depth = depth;
      attachments[num_attachments].clearValue.depthStencil.stencil = stencil;
      ++num_attachments;
   }

   VkClearRect cr;
   cr.rect.offset.x = 0;
   cr.rect.offset.y = 0;
   cr.rect.extent.width = fb->width;
   cr.rect.extent.height = fb->height;
   cr.baseArrayLayer = 0;
   cr.layerCount = util_framebuffer_get_num_layers(fb);
   vkCmdClearAttachments(batch->cmdbuf, num_attachments, attachments, 1, &cr);
}

VkShaderStageFlagBits
zink_shader_stage(enum pipe_shader_type type)
{
   VkShaderStageFlagBits stages[] = {
      [PIPE_SHADER_VERTEX] = VK_SHADER_STAGE_VERTEX_BIT,
      [PIPE_SHADER_FRAGMENT] = VK_SHADER_STAGE_FRAGMENT_BIT,
      [PIPE_SHADER_GEOMETRY] = VK_SHADER_STAGE_GEOMETRY_BIT,
      [PIPE_SHADER_TESS_CTRL] = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
      [PIPE_SHADER_TESS_EVAL] = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
      [PIPE_SHADER_COMPUTE] = VK_SHADER_STAGE_COMPUTE_BIT,
   };
   return stages[type];
}

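/* Carves a single descriptor set out of the batch's pool. Callers are
 * expected to check batch->descs_left beforehand and flush if the pool is
 * running low, as zink_draw_vbo does.
 */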
static VkDescriptorSet
allocate_descriptor_set(struct zink_screen *screen,
                        struct zink_batch *batch,
                        struct zink_gfx_program *prog)
{
   assert(batch->descs_left >= prog->num_descriptors);
   VkDescriptorSetAllocateInfo dsai;
   memset((void *)&dsai, 0, sizeof(dsai));
   dsai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
   dsai.pNext = NULL;
   dsai.descriptorPool = batch->descpool;
   dsai.descriptorSetCount = 1;
   dsai.pSetLayouts = &prog->dsl;

   VkDescriptorSet desc_set;
   if (vkAllocateDescriptorSets(screen->dev, &dsai, &desc_set) != VK_SUCCESS) {
      debug_printf("ZINK: failed to allocate descriptor set :/\n");
      return VK_NULL_HANDLE;
   }

   batch->descs_left -= prog->num_descriptors;
   return desc_set;
}

static void
zink_bind_vertex_buffers(struct zink_batch *batch, struct zink_context *ctx)
{
   VkBuffer buffers[PIPE_MAX_ATTRIBS];
   VkDeviceSize buffer_offsets[PIPE_MAX_ATTRIBS];
   const struct zink_vertex_elements_state *elems = ctx->element_state;
   for (unsigned i = 0; i < elems->hw_state.num_bindings; i++) {
      struct pipe_vertex_buffer *vb = ctx->buffers + ctx->element_state->binding_map[i];
      assert(vb && vb->buffer.resource);
      struct zink_resource *res = zink_resource(vb->buffer.resource);
      buffers[i] = res->buffer;
      buffer_offsets[i] = vb->buffer_offset;
      zink_batch_reference_resoure(batch, res);
   }

   if (elems->hw_state.num_bindings > 0)
      vkCmdBindVertexBuffers(batch->cmdbuf, 0,
                             elems->hw_state.num_bindings,
                             buffers, buffer_offsets);
}

static uint32_t
hash_gfx_program(const void *key)
{
   return _mesa_hash_data(key, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1));
}

static bool
equals_gfx_program(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1)) == 0;
}

static uint32_t
hash_render_pass_state(const void *key)
{
   return _mesa_hash_data(key, sizeof(struct zink_render_pass_state));
}

static bool
equals_render_pass_state(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_render_pass_state)) == 0;
}

static uint32_t
hash_framebuffer_state(const void *key)
{
   struct zink_framebuffer_state *s = (struct zink_framebuffer_state*)key;
   return _mesa_hash_data(key, sizeof(struct zink_framebuffer_state) + sizeof(s->attachments) * s->num_attachments);
}

static bool
equals_framebuffer_state(const void *a, const void *b)
{
   struct zink_framebuffer_state *s = (struct zink_framebuffer_state*)a;
   return memcmp(a, b, sizeof(struct zink_framebuffer_state) + sizeof(s->attachments) * s->num_attachments) == 0;
}

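/* Graphics programs are cached keyed on the array of bound shader stages,
 * so re-binding a previously seen combination of shaders avoids recreating
 * the program and its pipelines.
 */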
static struct zink_gfx_program *
get_gfx_program(struct zink_context *ctx)
{
   if (ctx->dirty_program) {
      struct hash_entry *entry = _mesa_hash_table_search(ctx->program_cache,
                                                         ctx->gfx_stages);
      if (!entry) {
         struct zink_gfx_program *prog;
         prog = zink_create_gfx_program(zink_screen(ctx->base.screen),
                                        ctx->gfx_stages);
         entry = _mesa_hash_table_insert(ctx->program_cache, prog->stages, prog);
         if (!entry)
            return NULL;
      }
      ctx->curr_program = entry->data;
      ctx->dirty_program = false;
   }

   assert(ctx->curr_program);
   return ctx->curr_program;
}

static bool
line_width_needed(enum pipe_prim_type reduced_prim,
                  VkPolygonMode polygon_mode)
{
   switch (reduced_prim) {
   case PIPE_PRIM_POINTS:
      return false;

   case PIPE_PRIM_LINES:
      return true;

   case PIPE_PRIM_TRIANGLES:
      return polygon_mode == VK_POLYGON_MODE_LINE;

   default:
      unreachable("unexpected reduced prim");
   }
}

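/* Main draw entry point: primitive types Vulkan can't draw directly are
 * lowered through u_primconvert; otherwise descriptors are gathered for
 * every bound stage, dynamic state (viewport, scissor, line-width, stencil
 * references, depth-bias, blend constants) is recorded, and the draw is
 * emitted.
 */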
static void
zink_draw_vbo(struct pipe_context *pctx,
              const struct pipe_draw_info *dinfo)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_rasterizer_state *rast_state = ctx->rast_state;

   if (dinfo->mode >= PIPE_PRIM_QUADS ||
       dinfo->mode == PIPE_PRIM_LINE_LOOP ||
       dinfo->index_size == 1) {
      if (!u_trim_pipe_prim(dinfo->mode, (unsigned *)&dinfo->count))
         return;

      util_primconvert_save_rasterizer_state(ctx->primconvert, &rast_state->base);
      util_primconvert_draw_vbo(ctx->primconvert, dinfo);
      return;
   }

   struct zink_gfx_program *gfx_program = get_gfx_program(ctx);
   if (!gfx_program)
      return;

   VkPipeline pipeline = zink_get_gfx_pipeline(screen, gfx_program,
                                               &ctx->gfx_pipeline_state,
                                               dinfo->mode);

   enum pipe_prim_type reduced_prim = u_reduced_prim(dinfo->mode);

   bool depth_bias = false;
   switch (reduced_prim) {
   case PIPE_PRIM_POINTS:
      depth_bias = rast_state->offset_point;
      break;

   case PIPE_PRIM_LINES:
      depth_bias = rast_state->offset_line;
      break;

   case PIPE_PRIM_TRIANGLES:
      depth_bias = rast_state->offset_tri;
      break;

   default:
      unreachable("unexpected reduced prim");
   }

   unsigned index_offset = 0;
   struct pipe_resource *index_buffer = NULL;
   if (dinfo->index_size > 0) {
      if (dinfo->has_user_indices) {
         if (!util_upload_index_buffer(pctx, dinfo, &index_buffer, &index_offset)) {
            debug_printf("util_upload_index_buffer() failed\n");
            return;
         }
      } else
         index_buffer = dinfo->index.resource;
   }

   VkWriteDescriptorSet wds[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS + PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
   VkDescriptorBufferInfo buffer_infos[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS];
   VkDescriptorImageInfo image_infos[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
   int num_wds = 0, num_buffer_info = 0, num_image_info = 0;

   struct zink_resource *transitions[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
   int num_transitions = 0;

   for (int i = 0; i < ARRAY_SIZE(ctx->gfx_stages); i++) {
      struct zink_shader *shader = ctx->gfx_stages[i];
      if (!shader)
         continue;

      for (int j = 0; j < shader->num_bindings; j++) {
         int index = shader->bindings[j].index;
         if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
            assert(ctx->ubos[i][index].buffer_size > 0);
            assert(ctx->ubos[i][index].buffer_size <= screen->props.limits.maxUniformBufferRange);
            assert(ctx->ubos[i][index].buffer);
            struct zink_resource *res = zink_resource(ctx->ubos[i][index].buffer);
            buffer_infos[num_buffer_info].buffer = res->buffer;
            buffer_infos[num_buffer_info].offset = ctx->ubos[i][index].buffer_offset;
            buffer_infos[num_buffer_info].range = ctx->ubos[i][index].buffer_size;
            wds[num_wds].pBufferInfo = buffer_infos + num_buffer_info;
            ++num_buffer_info;
         } else {
            struct pipe_sampler_view *psampler_view = ctx->image_views[i][index];
            assert(psampler_view);
            struct zink_sampler_view *sampler_view = zink_sampler_view(psampler_view);

            struct zink_resource *res = zink_resource(psampler_view->texture);
            VkImageLayout layout = res->layout;
            if (layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
                layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL &&
                layout != VK_IMAGE_LAYOUT_GENERAL) {
               transitions[num_transitions++] = res;
               layout = VK_IMAGE_LAYOUT_GENERAL;
            }
            image_infos[num_image_info].imageLayout = layout;
            image_infos[num_image_info].imageView = sampler_view->image_view;
            image_infos[num_image_info].sampler = ctx->samplers[i][index];
            wds[num_wds].pImageInfo = image_infos + num_image_info;
            ++num_image_info;
         }

         wds[num_wds].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
         wds[num_wds].pNext = NULL;
         wds[num_wds].dstBinding = shader->bindings[j].binding;
         wds[num_wds].dstArrayElement = 0;
         wds[num_wds].descriptorCount = 1;
         wds[num_wds].descriptorType = shader->bindings[j].type;
         ++num_wds;
      }
   }

   struct zink_batch *batch;
   if (num_transitions > 0) {
      batch = zink_batch_no_rp(ctx);

      for (int i = 0; i < num_transitions; ++i)
         zink_resource_barrier(batch->cmdbuf, transitions[i],
                               transitions[i]->aspect,
                               VK_IMAGE_LAYOUT_GENERAL);
   }

   batch = zink_batch_rp(ctx);

   if (batch->descs_left < gfx_program->num_descriptors) {
      flush_batch(ctx);
      batch = zink_batch_rp(ctx);
      assert(batch->descs_left >= gfx_program->num_descriptors);
   }

   VkDescriptorSet desc_set = allocate_descriptor_set(screen, batch,
                                                      gfx_program);
   assert(desc_set != VK_NULL_HANDLE);

   for (int i = 0; i < ARRAY_SIZE(ctx->gfx_stages); i++) {
      struct zink_shader *shader = ctx->gfx_stages[i];
      if (!shader)
         continue;

      for (int j = 0; j < shader->num_bindings; j++) {
         int index = shader->bindings[j].index;
         if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
            struct zink_resource *res = zink_resource(ctx->ubos[i][index].buffer);
            zink_batch_reference_resoure(batch, res);
         } else {
            struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->image_views[i][index]);
            zink_batch_reference_sampler_view(batch, sampler_view);
         }
      }
   }

   vkCmdSetViewport(batch->cmdbuf, 0, ctx->num_viewports, ctx->viewports);
   if (ctx->rast_state->base.scissor)
      vkCmdSetScissor(batch->cmdbuf, 0, ctx->num_viewports, ctx->scissors);
   else if (ctx->fb_state.width && ctx->fb_state.height) {
      VkRect2D fb_scissor = {};
      fb_scissor.extent.width = ctx->fb_state.width;
      fb_scissor.extent.height = ctx->fb_state.height;
      vkCmdSetScissor(batch->cmdbuf, 0, 1, &fb_scissor);
   }

   if (line_width_needed(reduced_prim, rast_state->hw_state.polygon_mode)) {
      if (screen->feats.wideLines || ctx->line_width == 1.0f)
         vkCmdSetLineWidth(batch->cmdbuf, ctx->line_width);
      else
         debug_printf("BUG: wide lines not supported, needs fallback!\n");
   }

   vkCmdSetStencilReference(batch->cmdbuf, VK_STENCIL_FACE_FRONT_BIT, ctx->stencil_ref.ref_value[0]);
   vkCmdSetStencilReference(batch->cmdbuf, VK_STENCIL_FACE_BACK_BIT, ctx->stencil_ref.ref_value[1]);

   if (depth_bias)
      vkCmdSetDepthBias(batch->cmdbuf, rast_state->offset_units, rast_state->offset_clamp, rast_state->offset_scale);
   else
      vkCmdSetDepthBias(batch->cmdbuf, 0.0f, 0.0f, 0.0f);

   if (ctx->gfx_pipeline_state.blend_state->need_blend_constants)
      vkCmdSetBlendConstants(batch->cmdbuf, ctx->blend_constants);

   if (num_wds > 0) {
      for (int i = 0; i < num_wds; ++i)
         wds[i].dstSet = desc_set;
      vkUpdateDescriptorSets(screen->dev, num_wds, wds, 0, NULL);
   }

   vkCmdBindPipeline(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
   vkCmdBindDescriptorSets(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS,
                           gfx_program->layout, 0, 1, &desc_set, 0, NULL);
   zink_bind_vertex_buffers(batch, ctx);

   if (dinfo->index_size > 0) {
      assert(dinfo->index_size != 1);
      VkIndexType index_type = dinfo->index_size == 2 ? VK_INDEX_TYPE_UINT16 : VK_INDEX_TYPE_UINT32;
      struct zink_resource *res = zink_resource(index_buffer);
      vkCmdBindIndexBuffer(batch->cmdbuf, res->buffer, index_offset, index_type);
      zink_batch_reference_resoure(batch, res);
      vkCmdDrawIndexed(batch->cmdbuf,
                       dinfo->count, dinfo->instance_count,
                       dinfo->start, dinfo->index_bias, dinfo->start_instance);
   } else
      vkCmdDraw(batch->cmdbuf, dinfo->count, dinfo->instance_count, dinfo->start, dinfo->start_instance);

   if (dinfo->index_size > 0 && dinfo->has_user_indices)
      pipe_resource_reference(&index_buffer, NULL);
}

static void
zink_flush(struct pipe_context *pctx,
           struct pipe_fence_handle **pfence,
           enum pipe_flush_flags flags)
{
   struct zink_context *ctx = zink_context(pctx);

   struct zink_batch *batch = zink_curr_batch(ctx);
   flush_batch(ctx);

   if (pfence)
      zink_fence_reference(zink_screen(pctx->screen),
                           (struct zink_fence **)pfence,
                           batch->fence);

   /* HACK:
    * For some strange reason, we need to finish before presenting, or else
    * we start rendering on top of the back-buffer for the next frame. This
    * seems like a bug in the DRI-driver to me, because we really should
    * be properly protected by fences here, and the back-buffer should
    * either be swapped with the front-buffer, or blitted from. But for
    * some strange reason, neither of these things happen.
    */
   if (flags & PIPE_FLUSH_END_OF_FRAME)
      pctx->screen->fence_finish(pctx->screen, pctx,
                                 (struct pipe_fence_handle *)batch->fence,
                                 PIPE_TIMEOUT_INFINITE);
}

static void
zink_flush_resource(struct pipe_context *pipe,
                    struct pipe_resource *resource)
{
}

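/* Two of the three copy cases are handled: image-to-image via
 * vkCmdCopyImage and buffer-to-buffer via vkCmdCopyBuffer; mixed
 * buffer/image copies are still TODO.
 */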
static void
zink_resource_copy_region(struct pipe_context *pctx,
                          struct pipe_resource *pdst,
                          unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz,
                          struct pipe_resource *psrc,
                          unsigned src_level, const struct pipe_box *src_box)
{
   struct zink_resource *dst = zink_resource(pdst);
   struct zink_resource *src = zink_resource(psrc);
   struct zink_context *ctx = zink_context(pctx);
   if (dst->base.target != PIPE_BUFFER && src->base.target != PIPE_BUFFER) {
      VkImageCopy region = {};

      region.srcSubresource.aspectMask = src->aspect;
      region.srcSubresource.mipLevel = src_level;
      region.srcSubresource.layerCount = 1;
      if (src->base.array_size > 1) {
         region.srcSubresource.baseArrayLayer = src_box->z;
         region.srcSubresource.layerCount = src_box->depth;
         region.extent.depth = 1;
      } else {
         region.srcOffset.z = src_box->z;
         region.srcSubresource.layerCount = 1;
         region.extent.depth = src_box->depth;
      }

      region.srcOffset.x = src_box->x;
      region.srcOffset.y = src_box->y;

      region.dstSubresource.aspectMask = dst->aspect;
      region.dstSubresource.mipLevel = dst_level;
      if (dst->base.array_size > 1) {
         region.dstSubresource.baseArrayLayer = dstz;
         region.dstSubresource.layerCount = src_box->depth;
      } else {
         region.dstOffset.z = dstz;
         region.dstSubresource.layerCount = 1;
      }

      region.dstOffset.x = dstx;
      region.dstOffset.y = dsty;
      region.extent.width = src_box->width;
      region.extent.height = src_box->height;

      struct zink_batch *batch = zink_batch_no_rp(ctx);
      zink_batch_reference_resoure(batch, src);
      zink_batch_reference_resoure(batch, dst);

      if (src->layout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
         zink_resource_barrier(batch->cmdbuf, src, src->aspect,
                               VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
      }

      if (dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
         zink_resource_barrier(batch->cmdbuf, dst, dst->aspect,
                               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
      }

      vkCmdCopyImage(batch->cmdbuf, src->image, src->layout,
                     dst->image, dst->layout,
                     1, &region);
   } else if (dst->base.target == PIPE_BUFFER &&
              src->base.target == PIPE_BUFFER) {
      VkBufferCopy region;
      region.srcOffset = src_box->x;
      region.dstOffset = dstx;
      region.size = src_box->width;

      struct zink_batch *batch = zink_batch_no_rp(ctx);
      zink_batch_reference_resoure(batch, src);
      zink_batch_reference_resoure(batch, dst);

      vkCmdCopyBuffer(batch->cmdbuf, src->buffer, dst->buffer, 1, &region);
   } else
      debug_printf("zink: TODO resource copy\n");
}

struct pipe_context *
zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
   struct zink_screen *screen = zink_screen(pscreen);
   struct zink_context *ctx = CALLOC_STRUCT(zink_context);
   if (!ctx)
      goto fail;

   ctx->base.screen = pscreen;
   ctx->base.priv = priv;

   ctx->base.destroy = zink_context_destroy;

   zink_context_state_init(&ctx->base);

   ctx->base.create_sampler_state = zink_create_sampler_state;
   ctx->base.bind_sampler_states = zink_bind_sampler_states;
   ctx->base.delete_sampler_state = zink_delete_sampler_state;

   ctx->base.create_sampler_view = zink_create_sampler_view;
   ctx->base.set_sampler_views = zink_set_sampler_views;
   ctx->base.sampler_view_destroy = zink_sampler_view_destroy;

   ctx->base.create_vs_state = zink_create_vs_state;
   ctx->base.bind_vs_state = zink_bind_vs_state;
   ctx->base.delete_vs_state = zink_delete_vs_state;

   ctx->base.create_fs_state = zink_create_fs_state;
   ctx->base.bind_fs_state = zink_bind_fs_state;
   ctx->base.delete_fs_state = zink_delete_fs_state;

   ctx->base.set_polygon_stipple = zink_set_polygon_stipple;
   ctx->base.set_vertex_buffers = zink_set_vertex_buffers;
   ctx->base.set_viewport_states = zink_set_viewport_states;
   ctx->base.set_scissor_states = zink_set_scissor_states;
   ctx->base.set_constant_buffer = zink_set_constant_buffer;
   ctx->base.set_framebuffer_state = zink_set_framebuffer_state;
   ctx->base.set_stencil_ref = zink_set_stencil_ref;
   ctx->base.set_clip_state = zink_set_clip_state;
   ctx->base.set_blend_color = zink_set_blend_color;

   ctx->base.set_sample_mask = zink_set_sample_mask;

   ctx->base.clear = zink_clear;
   ctx->base.draw_vbo = zink_draw_vbo;
   ctx->base.flush = zink_flush;

   ctx->base.resource_copy_region = zink_resource_copy_region;
   ctx->base.blit = zink_blit;

   ctx->base.flush_resource = zink_flush_resource;
   zink_context_surface_init(&ctx->base);
   zink_context_resource_init(&ctx->base);
   zink_context_query_init(&ctx->base);

   slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);

   ctx->base.stream_uploader = u_upload_create_default(&ctx->base);
   ctx->base.const_uploader = ctx->base.stream_uploader;

   int prim_hwsupport = 1 << PIPE_PRIM_POINTS |
                        1 << PIPE_PRIM_LINES |
                        1 << PIPE_PRIM_LINE_STRIP |
                        1 << PIPE_PRIM_TRIANGLES |
                        1 << PIPE_PRIM_TRIANGLE_STRIP |
                        1 << PIPE_PRIM_TRIANGLE_FAN;

   ctx->primconvert = util_primconvert_create(&ctx->base, prim_hwsupport);
   if (!ctx->primconvert)
      goto fail;

   ctx->blitter = util_blitter_create(&ctx->base);
   if (!ctx->blitter)
      goto fail;

   VkCommandPoolCreateInfo cpci = {};
   cpci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
   cpci.queueFamilyIndex = screen->gfx_queue;
   cpci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
   if (vkCreateCommandPool(screen->dev, &cpci, NULL, &ctx->cmdpool) != VK_SUCCESS)
      goto fail;

   VkCommandBufferAllocateInfo cbai = {};
   cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
   cbai.commandPool = ctx->cmdpool;
   cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
   cbai.commandBufferCount = 1;

   VkDescriptorPoolSize sizes[] = {
      {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, ZINK_BATCH_DESC_SIZE}
   };
   VkDescriptorPoolCreateInfo dpci = {};
   dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
   dpci.pPoolSizes = sizes;
   dpci.poolSizeCount = ARRAY_SIZE(sizes);
   dpci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
   dpci.maxSets = ZINK_BATCH_DESC_SIZE;

   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i) {
      if (vkAllocateCommandBuffers(screen->dev, &cbai, &ctx->batches[i].cmdbuf) != VK_SUCCESS)
         goto fail;

      ctx->batches[i].resources = _mesa_set_create(NULL, _mesa_hash_pointer,
                                                   _mesa_key_pointer_equal);
      ctx->batches[i].sampler_views = _mesa_set_create(NULL,
                                                       _mesa_hash_pointer,
                                                       _mesa_key_pointer_equal);

      if (!ctx->batches[i].resources || !ctx->batches[i].sampler_views)
         goto fail;

      util_dynarray_init(&ctx->batches[i].zombie_samplers, NULL);

      if (vkCreateDescriptorPool(screen->dev, &dpci, 0,
                                 &ctx->batches[i].descpool) != VK_SUCCESS)
         goto fail;
   }

   vkGetDeviceQueue(screen->dev, screen->gfx_queue, 0, &ctx->queue);

   ctx->program_cache = _mesa_hash_table_create(NULL,
                                                hash_gfx_program,
                                                equals_gfx_program);
   ctx->render_pass_cache = _mesa_hash_table_create(NULL,
                                                    hash_render_pass_state,
                                                    equals_render_pass_state);
   ctx->framebuffer_cache = _mesa_hash_table_create(NULL,
                                                    hash_framebuffer_state,
                                                    equals_framebuffer_state);

   if (!ctx->program_cache || !ctx->render_pass_cache ||
       !ctx->framebuffer_cache)
      goto fail;

   ctx->dirty_program = true;

   /* start the first batch */
   zink_start_batch(ctx, zink_curr_batch(ctx));

   return &ctx->base;

fail:
   if (ctx) {
      vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);
      FREE(ctx);
   }
   return NULL;
}