zink: remove insecure comment
src/gallium/drivers/zink/zink_context.c
/*
 * Copyright 2018 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "zink_context.h"

#include "zink_batch.h"
#include "zink_compiler.h"
#include "zink_fence.h"
#include "zink_framebuffer.h"
#include "zink_pipeline.h"
#include "zink_program.h"
#include "zink_render_pass.h"
#include "zink_resource.h"
#include "zink_screen.h"
#include "zink_state.h"
#include "zink_surface.h"

#include "indices/u_primconvert.h"
#include "util/u_blitter.h"
#include "util/u_debug.h"
#include "util/u_format.h"
#include "util/u_framebuffer.h"
#include "util/u_helpers.h"
#include "util/u_inlines.h"

#include "nir.h"

#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_upload_mgr.h"

static void
zink_context_destroy(struct pipe_context *pctx)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   if (vkQueueWaitIdle(ctx->queue) != VK_SUCCESS)
      debug_printf("vkQueueWaitIdle failed\n");

   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i)
      vkFreeCommandBuffers(screen->dev, ctx->cmdpool, 1, &ctx->batches[i].cmdbuf);
   vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);

   util_primconvert_destroy(ctx->primconvert);
   u_upload_destroy(pctx->stream_uploader);
   slab_destroy_child(&ctx->transfer_pool);
   util_blitter_destroy(ctx->blitter);
   FREE(ctx);
}

static VkFilter
filter(enum pipe_tex_filter filter)
{
   switch (filter) {
   case PIPE_TEX_FILTER_NEAREST: return VK_FILTER_NEAREST;
   case PIPE_TEX_FILTER_LINEAR: return VK_FILTER_LINEAR;
   }
   unreachable("unexpected filter");
}

static VkSamplerMipmapMode
sampler_mipmap_mode(enum pipe_tex_mipfilter filter)
{
   switch (filter) {
   case PIPE_TEX_MIPFILTER_NEAREST: return VK_SAMPLER_MIPMAP_MODE_NEAREST;
   case PIPE_TEX_MIPFILTER_LINEAR: return VK_SAMPLER_MIPMAP_MODE_LINEAR;
   case PIPE_TEX_MIPFILTER_NONE:
      unreachable("PIPE_TEX_MIPFILTER_NONE should be dealt with earlier");
   }
   unreachable("unexpected filter");
}

static VkSamplerAddressMode
sampler_address_mode(enum pipe_tex_wrap filter)
{
   switch (filter) {
   case PIPE_TEX_WRAP_REPEAT: return VK_SAMPLER_ADDRESS_MODE_REPEAT;
   case PIPE_TEX_WRAP_CLAMP: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
   case PIPE_TEX_WRAP_MIRROR_REPEAT: return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
   case PIPE_TEX_WRAP_MIRROR_CLAMP: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
   case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE; /* not technically correct, but kinda works */
   }
   unreachable("unexpected wrap");
}

static VkCompareOp
compare_op(enum pipe_compare_func op)
{
   switch (op) {
   case PIPE_FUNC_NEVER: return VK_COMPARE_OP_NEVER;
   case PIPE_FUNC_LESS: return VK_COMPARE_OP_LESS;
   case PIPE_FUNC_EQUAL: return VK_COMPARE_OP_EQUAL;
   case PIPE_FUNC_LEQUAL: return VK_COMPARE_OP_LESS_OR_EQUAL;
   case PIPE_FUNC_GREATER: return VK_COMPARE_OP_GREATER;
   case PIPE_FUNC_NOTEQUAL: return VK_COMPARE_OP_NOT_EQUAL;
   case PIPE_FUNC_GEQUAL: return VK_COMPARE_OP_GREATER_OR_EQUAL;
   case PIPE_FUNC_ALWAYS: return VK_COMPARE_OP_ALWAYS;
   }
   unreachable("unexpected compare");
}

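/* Translate a gallium sampler-state CSO into a VkSampler. Gallium expresses
 * "no mipmapping" as a mip-filter, while Vulkan always samples some mip
 * level; clamping both minLod and maxLod to zero restricts sampling to the
 * base level, which amounts to the same thing.
 */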
static void *
zink_create_sampler_state(struct pipe_context *pctx,
                          const struct pipe_sampler_state *state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);

   VkSamplerCreateInfo sci = {};
   sci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
   sci.magFilter = filter(state->mag_img_filter);
   sci.minFilter = filter(state->min_img_filter);

   if (state->min_mip_filter != PIPE_TEX_MIPFILTER_NONE) {
      sci.mipmapMode = sampler_mipmap_mode(state->min_mip_filter);
      sci.minLod = state->min_lod;
      sci.maxLod = state->max_lod;
   } else {
      sci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
      sci.minLod = 0;
      sci.maxLod = 0;
   }

   sci.addressModeU = sampler_address_mode(state->wrap_s);
   sci.addressModeV = sampler_address_mode(state->wrap_t);
   sci.addressModeW = sampler_address_mode(state->wrap_r);
   sci.mipLodBias = state->lod_bias;

   if (state->compare_mode == PIPE_TEX_COMPARE_NONE)
      sci.compareOp = VK_COMPARE_OP_NEVER;
   else {
      /* compareOp is ignored unless comparison is actually enabled */
      sci.compareEnable = VK_TRUE;
      sci.compareOp = compare_op(state->compare_func);
   }

   sci.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK; // TODO
   sci.unnormalizedCoordinates = !state->normalized_coords;

   if (state->max_anisotropy > 1) {
      sci.maxAnisotropy = state->max_anisotropy;
      sci.anisotropyEnable = VK_TRUE;
   }

   VkSampler sampler;
   VkResult err = vkCreateSampler(screen->dev, &sci, NULL, &sampler);
   if (err != VK_SUCCESS)
      return NULL;

   /* on 64-bit builds VkSampler is a pointer-sized non-dispatchable
    * handle, so it's smuggled around directly as the CSO pointer */
   return sampler;
}

static void
zink_bind_sampler_states(struct pipe_context *pctx,
                         enum pipe_shader_type shader,
                         unsigned start_slot,
                         unsigned num_samplers,
                         void **samplers)
{
   struct zink_context *ctx = zink_context(pctx);
   for (unsigned i = 0; i < num_samplers; ++i)
      ctx->samplers[shader][start_slot + i] = (VkSampler)samplers[i];
   ctx->num_samplers[shader] = start_slot + num_samplers;
}

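/* Samplers can still be referenced by command buffers in flight, so they
 * can't be destroyed right away. Instead they go on the current batch's
 * zombie-list, so that zink_batch.c can destroy them once the batch is
 * known to have finished on the GPU.
 */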
static void
zink_delete_sampler_state(struct pipe_context *pctx,
                          void *sampler_state)
{
   struct zink_batch *batch = zink_curr_batch(zink_context(pctx));
   util_dynarray_append(&batch->zombie_samplers,
                        VkSampler, sampler_state);
}


static VkImageViewType
image_view_type(enum pipe_texture_target target)
{
   switch (target) {
   case PIPE_TEXTURE_1D: return VK_IMAGE_VIEW_TYPE_1D;
   case PIPE_TEXTURE_1D_ARRAY: return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
   case PIPE_TEXTURE_2D: return VK_IMAGE_VIEW_TYPE_2D;
   case PIPE_TEXTURE_2D_ARRAY: return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
   case PIPE_TEXTURE_CUBE: return VK_IMAGE_VIEW_TYPE_CUBE;
   case PIPE_TEXTURE_CUBE_ARRAY: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
   case PIPE_TEXTURE_3D: return VK_IMAGE_VIEW_TYPE_3D;
   case PIPE_TEXTURE_RECT: return VK_IMAGE_VIEW_TYPE_2D; /* not sure */
   default:
      unreachable("unexpected target");
   }
}

static VkComponentSwizzle
component_mapping(enum pipe_swizzle swizzle)
{
   switch (swizzle) {
   case PIPE_SWIZZLE_X: return VK_COMPONENT_SWIZZLE_R;
   case PIPE_SWIZZLE_Y: return VK_COMPONENT_SWIZZLE_G;
   case PIPE_SWIZZLE_Z: return VK_COMPONENT_SWIZZLE_B;
   case PIPE_SWIZZLE_W: return VK_COMPONENT_SWIZZLE_A;
   case PIPE_SWIZZLE_0: return VK_COMPONENT_SWIZZLE_ZERO;
   case PIPE_SWIZZLE_1: return VK_COMPONENT_SWIZZLE_ONE;
   case PIPE_SWIZZLE_NONE: return VK_COMPONENT_SWIZZLE_IDENTITY; // ???
   default:
      unreachable("unexpected swizzle");
   }
}

static struct pipe_sampler_view *
zink_create_sampler_view(struct pipe_context *pctx, struct pipe_resource *pres,
                         const struct pipe_sampler_view *state)
{
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_resource *res = zink_resource(pres);
   struct zink_sampler_view *sampler_view = CALLOC_STRUCT(zink_sampler_view);

   sampler_view->base = *state;
   sampler_view->base.texture = NULL;
   pipe_resource_reference(&sampler_view->base.texture, pres);
   sampler_view->base.reference.count = 1;
   sampler_view->base.context = pctx;

   VkImageViewCreateInfo ivci = {};
   ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
   ivci.image = res->image;
   ivci.viewType = image_view_type(state->target);
   ivci.format = zink_get_format(state->format);
   ivci.components.r = component_mapping(state->swizzle_r);
   ivci.components.g = component_mapping(state->swizzle_g);
   ivci.components.b = component_mapping(state->swizzle_b);
   ivci.components.a = component_mapping(state->swizzle_a);
   ivci.subresourceRange.aspectMask = zink_aspect_from_format(state->format);
   ivci.subresourceRange.baseMipLevel = state->u.tex.first_level;
   ivci.subresourceRange.baseArrayLayer = state->u.tex.first_layer;
   ivci.subresourceRange.levelCount = state->u.tex.last_level - state->u.tex.first_level + 1;
   ivci.subresourceRange.layerCount = state->u.tex.last_layer - state->u.tex.first_layer + 1;

   VkResult err = vkCreateImageView(screen->dev, &ivci, NULL, &sampler_view->image_view);
   if (err != VK_SUCCESS) {
      FREE(sampler_view);
      return NULL;
   }

   return &sampler_view->base;
}

static void
zink_sampler_view_destroy(struct pipe_context *pctx,
                          struct pipe_sampler_view *pview)
{
   struct zink_sampler_view *view = zink_sampler_view(pview);
   vkDestroyImageView(zink_screen(pctx->screen)->dev, view->image_view, NULL);
   FREE(view);
}

static void *
zink_create_vs_state(struct pipe_context *pctx,
                     const struct pipe_shader_state *shader)
{
   struct nir_shader *nir;
   if (shader->type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
   else
      nir = (struct nir_shader *)shader->ir.nir;

   return zink_compile_nir(zink_screen(pctx->screen), nir);
}

static void
bind_stage(struct zink_context *ctx, enum pipe_shader_type stage,
           struct zink_shader *shader)
{
   assert(stage < PIPE_SHADER_COMPUTE);
   ctx->gfx_stages[stage] = shader;
   ctx->dirty |= ZINK_DIRTY_PROGRAM;
}

static void
zink_bind_vs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_VERTEX, cso);
}

static void
zink_delete_vs_state(struct pipe_context *pctx,
                     void *cso)
{
   zink_shader_free(zink_screen(pctx->screen), cso);
}

static void *
zink_create_fs_state(struct pipe_context *pctx,
                     const struct pipe_shader_state *shader)
{
   struct nir_shader *nir;
   if (shader->type != PIPE_SHADER_IR_NIR)
      nir = zink_tgsi_to_nir(pctx->screen, shader->tokens);
   else
      nir = (struct nir_shader *)shader->ir.nir;

   return zink_compile_nir(zink_screen(pctx->screen), nir);
}

static void
zink_bind_fs_state(struct pipe_context *pctx,
                   void *cso)
{
   bind_stage(zink_context(pctx), PIPE_SHADER_FRAGMENT, cso);
}

static void
zink_delete_fs_state(struct pipe_context *pctx,
                     void *cso)
{
   zink_shader_free(zink_screen(pctx->screen), cso);
}

static void
zink_set_polygon_stipple(struct pipe_context *pctx,
                         const struct pipe_poly_stipple *ps)
{
}

static void
zink_set_vertex_buffers(struct pipe_context *pctx,
                        unsigned start_slot,
                        unsigned num_buffers,
                        const struct pipe_vertex_buffer *buffers)
{
   struct zink_context *ctx = zink_context(pctx);

   if (buffers) {
      for (int i = 0; i < num_buffers; ++i) {
         const struct pipe_vertex_buffer *vb = buffers + i;
         ctx->gfx_pipeline_state.bindings[start_slot + i].stride = vb->stride;
      }
   }

   util_set_vertex_buffers_mask(ctx->buffers, &ctx->buffers_enabled_mask,
                                buffers, start_slot, num_buffers);
}

static void
zink_set_viewport_states(struct pipe_context *pctx,
                         unsigned start_slot,
                         unsigned num_viewports,
                         const struct pipe_viewport_state *state)
{
   struct zink_context *ctx = zink_context(pctx);

   for (unsigned i = 0; i < num_viewports; ++i) {
      VkViewport viewport = {
         state[i].translate[0] - state[i].scale[0],
         state[i].translate[1] - state[i].scale[1],
         state[i].scale[0] * 2,
         state[i].scale[1] * 2,
         state[i].translate[2] - state[i].scale[2],
         state[i].translate[2] + state[i].scale[2]
      };
      ctx->viewport_states[start_slot + i] = state[i];
      ctx->viewports[start_slot + i] = viewport;
   }
   ctx->num_viewports = start_slot + num_viewports;
}

static void
zink_set_scissor_states(struct pipe_context *pctx,
                        unsigned start_slot, unsigned num_scissors,
                        const struct pipe_scissor_state *states)
{
   struct zink_context *ctx = zink_context(pctx);

   for (unsigned i = 0; i < num_scissors; i++) {
      VkRect2D scissor;

      scissor.offset.x = states[i].minx;
      scissor.offset.y = states[i].miny;
      scissor.extent.width = states[i].maxx - states[i].minx;
      scissor.extent.height = states[i].maxy - states[i].miny;
      ctx->scissor_states[start_slot + i] = states[i];
      ctx->scissors[start_slot + i] = scissor;
   }
   ctx->num_scissors = start_slot + num_scissors;
}

static void
zink_set_constant_buffer(struct pipe_context *pctx,
                         enum pipe_shader_type shader, uint index,
                         const struct pipe_constant_buffer *cb)
{
   struct zink_context *ctx = zink_context(pctx);

   if (cb) {
      struct pipe_resource *buffer = cb->buffer;
      unsigned offset = cb->buffer_offset;
      if (cb->user_buffer)
         u_upload_data(ctx->base.const_uploader, 0, cb->buffer_size, 64,
                       cb->user_buffer, &offset, &buffer);

      pipe_resource_reference(&ctx->ubos[shader][index].buffer, buffer);
      ctx->ubos[shader][index].buffer_offset = offset;
      ctx->ubos[shader][index].buffer_size = cb->buffer_size;
      ctx->ubos[shader][index].user_buffer = NULL;

      if (cb->user_buffer)
         pipe_resource_reference(&buffer, NULL);
   } else {
      pipe_resource_reference(&ctx->ubos[shader][index].buffer, NULL);
      ctx->ubos[shader][index].buffer_offset = 0;
      ctx->ubos[shader][index].buffer_size = 0;
      ctx->ubos[shader][index].user_buffer = NULL;
   }
}

static void
zink_set_sampler_views(struct pipe_context *pctx,
                       enum pipe_shader_type shader_type,
                       unsigned start_slot,
                       unsigned num_views,
                       struct pipe_sampler_view **views)
{
   struct zink_context *ctx = zink_context(pctx);
   assert(views);
   for (unsigned i = 0; i < num_views; ++i) {
      pipe_sampler_view_reference(
         &ctx->image_views[shader_type][start_slot + i],
         views[i]);
   }
   ctx->num_image_views[shader_type] = start_slot + num_views;
}

static void
zink_set_stencil_ref(struct pipe_context *pctx,
                     const struct pipe_stencil_ref *ref)
{
   struct zink_context *ctx = zink_context(pctx);
   ctx->stencil_ref = *ref;
}

static void
zink_set_clip_state(struct pipe_context *pctx,
                    const struct pipe_clip_state *pcs)
{
}

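/* Render passes are looked up in (and inserted into) a hash-table cache,
 * keyed on the attachment formats and sample counts. That way draws that
 * only differ in which actual surfaces are bound can share the same
 * VkRenderPass.
 */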
static struct zink_render_pass *
get_render_pass(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   const struct pipe_framebuffer_state *fb = &ctx->fb_state;
   struct zink_render_pass_state state = {}; /* the whole struct gets hashed and memcmp'd, so zero it */

   for (int i = 0; i < fb->nr_cbufs; i++) {
      struct zink_resource *cbuf = zink_resource(fb->cbufs[i]->texture);
      state.rts[i].format = cbuf->format;
      state.rts[i].samples = cbuf->base.nr_samples > 0 ? cbuf->base.nr_samples : VK_SAMPLE_COUNT_1_BIT;
   }
   state.num_cbufs = fb->nr_cbufs;

   if (fb->zsbuf) {
      struct zink_resource *zsbuf = zink_resource(fb->zsbuf->texture);
      state.rts[fb->nr_cbufs].format = zsbuf->format;
      state.rts[fb->nr_cbufs].samples = zsbuf->base.nr_samples > 0 ? zsbuf->base.nr_samples : VK_SAMPLE_COUNT_1_BIT;
   }
   state.have_zsbuf = fb->zsbuf != NULL;

   struct hash_entry *entry = _mesa_hash_table_search(ctx->render_pass_cache,
                                                      &state);
   if (!entry) {
      struct zink_render_pass *rp;
      rp = zink_create_render_pass(screen, &state);
      /* FIXME: this stores a pointer to a stack variable as the key; the
       * entry needs a key that outlives this function to be found again.
       */
      entry = _mesa_hash_table_insert(ctx->render_pass_cache, &state, rp);
      if (!entry)
         return NULL;
   }

   return entry->data;
}

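/* Framebuffers are cached much like render passes above, except the key
 * also includes the actual surfaces and the framebuffer dimensions.
 */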
static struct zink_framebuffer *
get_framebuffer(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);

   struct zink_framebuffer_state state = {};
   state.rp = get_render_pass(ctx);
   for (int i = 0; i < ctx->fb_state.nr_cbufs; i++) {
      struct pipe_surface *psurf = ctx->fb_state.cbufs[i];
      state.attachments[i] = zink_surface(psurf);
   }

   state.num_attachments = ctx->fb_state.nr_cbufs;
   if (ctx->fb_state.zsbuf) {
      struct pipe_surface *psurf = ctx->fb_state.zsbuf;
      state.attachments[state.num_attachments++] = zink_surface(psurf);
   }

   state.width = ctx->fb_state.width;
   state.height = ctx->fb_state.height;
   state.layers = MAX2(ctx->fb_state.layers, 1);

   struct hash_entry *entry = _mesa_hash_table_search(ctx->framebuffer_cache,
                                                      &state);
   if (!entry) {
      struct zink_framebuffer *fb = zink_create_framebuffer(screen, &state);
      /* FIXME: same stack-variable key problem as in get_render_pass() */
      entry = _mesa_hash_table_insert(ctx->framebuffer_cache, &state, fb);
      if (!entry)
         return NULL;
   }

   return entry->data;
}

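/* Begin the current render pass on the current batch. Clears are not done
 * through render-pass load-ops here (clearValueCount stays zero); instead
 * zink_clear() records vkCmdClearAttachments inside the pass.
 */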
void
zink_begin_render_pass(struct zink_context *ctx, struct zink_batch *batch)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   assert(batch == zink_curr_batch(ctx));
   assert(ctx->gfx_pipeline_state.render_pass);

   VkRenderPassBeginInfo rpbi = {};
   rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
   rpbi.renderPass = ctx->gfx_pipeline_state.render_pass->render_pass;
   rpbi.renderArea.offset.x = 0;
   rpbi.renderArea.offset.y = 0;
   rpbi.renderArea.extent.width = ctx->fb_state.width;
   rpbi.renderArea.extent.height = ctx->fb_state.height;
   rpbi.clearValueCount = 0;
   rpbi.pClearValues = NULL;
   rpbi.framebuffer = ctx->framebuffer->fb;

   assert(ctx->gfx_pipeline_state.render_pass && ctx->framebuffer);
   assert(!batch->rp || batch->rp == ctx->gfx_pipeline_state.render_pass);
   assert(!batch->fb || batch->fb == ctx->framebuffer);

   zink_render_pass_reference(screen, &batch->rp, ctx->gfx_pipeline_state.render_pass);
   zink_framebuffer_reference(screen, &batch->fb, ctx->framebuffer);

   vkCmdBeginRenderPass(batch->cmdbuf, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
}

static void
flush_batch(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (batch->rp)
      vkCmdEndRenderPass(batch->cmdbuf);

   zink_end_batch(ctx, batch);

   ctx->curr_batch++;
   if (ctx->curr_batch == ARRAY_SIZE(ctx->batches))
      ctx->curr_batch = 0;

   zink_start_batch(ctx, zink_curr_batch(ctx));
}

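/* These two helpers encode the batch-state convention used throughout this
 * file: zink_batch_rp() returns the current batch with a render pass
 * started (for draws and clears), while zink_batch_no_rp() returns a batch
 * guaranteed to be outside a render pass (for copies, blits and layout
 * transitions), flushing the current batch first if needed.
 */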
struct zink_batch *
zink_batch_rp(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (!batch->rp) {
      zink_begin_render_pass(ctx, batch);
      assert(batch->rp);
   }
   return batch;
}

struct zink_batch *
zink_batch_no_rp(struct zink_context *ctx)
{
   struct zink_batch *batch = zink_curr_batch(ctx);
   if (batch->rp) {
      /* flush batch and get a new one */
      flush_batch(ctx);
      batch = zink_curr_batch(ctx);
      assert(!batch->rp);
   }
   return batch;
}

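/* Attachments are transitioned to VK_IMAGE_LAYOUT_GENERAL rather than the
 * optimal attachment layouts; that is valid for every usage, just
 * potentially slower on some hardware.
 */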
static void
zink_set_framebuffer_state(struct pipe_context *pctx,
                           const struct pipe_framebuffer_state *state)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);

   VkSampleCountFlagBits rast_samples = VK_SAMPLE_COUNT_1_BIT;
   for (int i = 0; i < state->nr_cbufs; i++)
      rast_samples = MAX2(rast_samples, state->cbufs[i]->texture->nr_samples);
   if (state->zsbuf && state->zsbuf->texture->nr_samples)
      rast_samples = MAX2(rast_samples, state->zsbuf->texture->nr_samples);

   util_copy_framebuffer_state(&ctx->fb_state, state);

   struct zink_framebuffer *fb = get_framebuffer(ctx);
   zink_framebuffer_reference(screen, &ctx->framebuffer, fb);
   zink_render_pass_reference(screen, &ctx->gfx_pipeline_state.render_pass, fb->rp);

   ctx->gfx_pipeline_state.rast_samples = rast_samples;
   ctx->gfx_pipeline_state.num_attachments = state->nr_cbufs;

   struct zink_batch *batch = zink_batch_no_rp(ctx);

   for (int i = 0; i < state->nr_cbufs; i++) {
      struct zink_resource *res = zink_resource(state->cbufs[i]->texture);
      if (res->layout != VK_IMAGE_LAYOUT_GENERAL &&
          res->layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_GENERAL);
   }

   if (state->zsbuf) {
      struct zink_resource *res = zink_resource(state->zsbuf->texture);
      if (res->layout != VK_IMAGE_LAYOUT_GENERAL &&
          res->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)
         zink_resource_barrier(batch->cmdbuf, res, res->aspect,
                               VK_IMAGE_LAYOUT_GENERAL);
   }
}

static void
zink_set_active_query_state(struct pipe_context *pctx, bool enable)
{
}

static void
zink_set_blend_color(struct pipe_context *pctx,
                     const struct pipe_blend_color *color)
{
   struct zink_context *ctx = zink_context(pctx);
   memcpy(ctx->blend_constants, color->color, sizeof(float) * 4);
}

static void
zink_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
   struct zink_context *ctx = zink_context(pctx);
   ctx->gfx_pipeline_state.sample_mask = sample_mask;
}

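/* Worst-case access mask for a given image layout, used to fill in the
 * src/dst access masks of the image-memory barrier below.
 */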
static VkAccessFlags
access_flags(VkImageLayout layout)
{
   switch (layout) {
   case VK_IMAGE_LAYOUT_UNDEFINED:
   case VK_IMAGE_LAYOUT_GENERAL:
      return 0;

   case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
   case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
      return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;

   case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
      return VK_ACCESS_SHADER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
      return VK_ACCESS_TRANSFER_READ_BIT;

   case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
      return VK_ACCESS_TRANSFER_WRITE_BIT;

   case VK_IMAGE_LAYOUT_PREINITIALIZED:
      return VK_ACCESS_HOST_WRITE_BIT;

   default:
      unreachable("unexpected layout");
   }
}

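/* Transition a whole image (all mip-levels and array-layers) to a new
 * layout, deriving the access masks from the old and new layouts, and
 * update the resource's tracked layout to match.
 */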
void
zink_resource_barrier(VkCommandBuffer cmdbuf, struct zink_resource *res,
                      VkImageAspectFlags aspect, VkImageLayout new_layout)
{
   VkImageSubresourceRange isr = {
      aspect,
      0, VK_REMAINING_MIP_LEVELS,
      0, VK_REMAINING_ARRAY_LAYERS
   };

   VkImageMemoryBarrier imb = {
      VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
      NULL,
      access_flags(res->layout),
      access_flags(new_layout),
      res->layout,
      new_layout,
      VK_QUEUE_FAMILY_IGNORED,
      VK_QUEUE_FAMILY_IGNORED,
      res->image,
      isr
   };
   vkCmdPipelineBarrier(
      cmdbuf,
      VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
      0,
      0, NULL,
      0, NULL,
      1, &imb
   );

   res->layout = new_layout;
}

static void
zink_clear(struct pipe_context *pctx,
           unsigned buffers,
           const union pipe_color_union *pcolor,
           double depth, unsigned stencil)
{
   struct zink_context *ctx = zink_context(pctx);
   struct pipe_framebuffer_state *fb = &ctx->fb_state;

   /* FIXME: this is very inefficient; if no renderpass has been started yet,
    * we should record the clear if it's full-screen, and apply it as we
    * start the render-pass. Otherwise we can do a partial out-of-renderpass
    * clear.
    */
   struct zink_batch *batch = zink_batch_rp(ctx);

   VkClearAttachment attachments[1 + PIPE_MAX_COLOR_BUFS];
   int num_attachments = 0;

   if (buffers & PIPE_CLEAR_COLOR) {
      VkClearColorValue color;
      color.float32[0] = pcolor->f[0];
      color.float32[1] = pcolor->f[1];
      color.float32[2] = pcolor->f[2];
      color.float32[3] = pcolor->f[3];

      for (unsigned i = 0; i < fb->nr_cbufs; i++) {
         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
            continue;

         attachments[num_attachments].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
         attachments[num_attachments].colorAttachment = i;
         attachments[num_attachments].clearValue.color = color;
         ++num_attachments;
      }
   }

   if (buffers & PIPE_CLEAR_DEPTHSTENCIL && fb->zsbuf) {
      VkImageAspectFlags aspect = 0;
      if (buffers & PIPE_CLEAR_DEPTH)
         aspect |= VK_IMAGE_ASPECT_DEPTH_BIT;
      if (buffers & PIPE_CLEAR_STENCIL)
         aspect |= VK_IMAGE_ASPECT_STENCIL_BIT;

      attachments[num_attachments].aspectMask = aspect;
      attachments[num_attachments].clearValue.depthStencil.depth = depth;
      attachments[num_attachments].clearValue.depthStencil.stencil = stencil;
      ++num_attachments;
   }

   unsigned num_layers = util_framebuffer_get_num_layers(fb);
   VkClearRect rects[PIPE_MAX_VIEWPORTS];
   uint32_t num_rects;
   if (ctx->num_scissors) {
      for (unsigned i = 0 ; i < ctx->num_scissors; ++i) {
         rects[i].rect = ctx->scissors[i];
         rects[i].rect.extent.width = MIN2(rects[i].rect.extent.width,
                                           fb->width);
         rects[i].rect.extent.height = MIN2(rects[i].rect.extent.height,
                                            fb->height);
         rects[i].baseArrayLayer = 0;
         rects[i].layerCount = num_layers;
      }
      num_rects = ctx->num_scissors;
   } else {
      rects[0].rect.offset.x = 0;
      rects[0].rect.offset.y = 0;
      rects[0].rect.extent.width = fb->width;
      rects[0].rect.extent.height = fb->height;
      rects[0].baseArrayLayer = 0;
      rects[0].layerCount = num_layers;
      num_rects = 1;
   }

   vkCmdClearAttachments(batch->cmdbuf,
                         num_attachments, attachments,
                         num_rects, rects);
}

VkShaderStageFlagBits
zink_shader_stage(enum pipe_shader_type type)
{
   VkShaderStageFlagBits stages[] = {
      [PIPE_SHADER_VERTEX] = VK_SHADER_STAGE_VERTEX_BIT,
      [PIPE_SHADER_FRAGMENT] = VK_SHADER_STAGE_FRAGMENT_BIT,
      [PIPE_SHADER_GEOMETRY] = VK_SHADER_STAGE_GEOMETRY_BIT,
      [PIPE_SHADER_TESS_CTRL] = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
      [PIPE_SHADER_TESS_EVAL] = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
      [PIPE_SHADER_COMPUTE] = VK_SHADER_STAGE_COMPUTE_BIT,
   };
   return stages[type];
}

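/* Descriptor sets are allocated out of the per-batch pool;
 * batch->descs_left tracks the remaining capacity, and callers are
 * expected to check it and flush before allocating (see zink_draw_vbo),
 * hence the assert here.
 */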
static VkDescriptorSet
allocate_descriptor_set(struct zink_screen *screen,
                        struct zink_batch *batch,
                        struct zink_gfx_program *prog)
{
   assert(batch->descs_left >= prog->num_descriptors);
   VkDescriptorSetAllocateInfo dsai;
   memset(&dsai, 0, sizeof(dsai));
   dsai.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
   dsai.pNext = NULL;
   dsai.descriptorPool = batch->descpool;
   dsai.descriptorSetCount = 1;
   dsai.pSetLayouts = &prog->dsl;

   VkDescriptorSet desc_set;
   if (vkAllocateDescriptorSets(screen->dev, &dsai, &desc_set) != VK_SUCCESS) {
      debug_printf("ZINK: failed to allocate descriptor set :/\n");
      return VK_NULL_HANDLE;
   }

   batch->descs_left -= prog->num_descriptors;
   return desc_set;
}

static void
zink_bind_vertex_buffers(struct zink_batch *batch, struct zink_context *ctx)
{
   VkBuffer buffers[PIPE_MAX_ATTRIBS];
   VkDeviceSize buffer_offsets[PIPE_MAX_ATTRIBS];
   const struct zink_vertex_elements_state *elems = ctx->element_state;
   for (unsigned i = 0; i < elems->hw_state.num_bindings; i++) {
      struct pipe_vertex_buffer *vb = ctx->buffers + ctx->element_state->binding_map[i];
      assert(vb && vb->buffer.resource);
      struct zink_resource *res = zink_resource(vb->buffer.resource);
      buffers[i] = res->buffer;
      buffer_offsets[i] = vb->buffer_offset;
      zink_batch_reference_resoure(batch, res);
   }

   if (elems->hw_state.num_bindings > 0)
      vkCmdBindVertexBuffers(batch->cmdbuf, 0,
                             elems->hw_state.num_bindings,
                             buffers, buffer_offsets);
}

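/* The program cache is keyed on the array of shader pointers in
 * ctx->gfx_stages; PIPE_SHADER_TYPES - 1 leaves compute out of the key,
 * since only graphics stages live in that array.
 */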
static uint32_t
hash_gfx_program(const void *key)
{
   return _mesa_hash_data(key, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1));
}

static bool
equals_gfx_program(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_shader *) * (PIPE_SHADER_TYPES - 1)) == 0;
}

static uint32_t
hash_render_pass_state(const void *key)
{
   return _mesa_hash_data(key, sizeof(struct zink_render_pass_state));
}

static bool
equals_render_pass_state(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_render_pass_state)) == 0;
}

/* The full framebuffer-state struct is hashed and compared; unused
 * attachment slots are zero-initialized in get_framebuffer(), so this is
 * well-defined without reading past the end of the key.
 */
static uint32_t
hash_framebuffer_state(const void *key)
{
   return _mesa_hash_data(key, sizeof(struct zink_framebuffer_state));
}

static bool
equals_framebuffer_state(const void *a, const void *b)
{
   return memcmp(a, b, sizeof(struct zink_framebuffer_state)) == 0;
}

static struct zink_gfx_program *
get_gfx_program(struct zink_context *ctx)
{
   if (ctx->dirty & ZINK_DIRTY_PROGRAM) {
      struct hash_entry *entry = _mesa_hash_table_search(ctx->program_cache,
                                                         ctx->gfx_stages);
      if (!entry) {
         struct zink_gfx_program *prog;
         prog = zink_create_gfx_program(zink_screen(ctx->base.screen),
                                        ctx->gfx_stages);
         entry = _mesa_hash_table_insert(ctx->program_cache, prog->stages, prog);
         if (!entry)
            return NULL;
      }
      ctx->curr_program = entry->data;
      ctx->dirty &= ~ZINK_DIRTY_PROGRAM;
   }

   assert(ctx->curr_program);
   return ctx->curr_program;
}

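/* The main draw entry-point. Quads and beyond, plus line-loops, are
 * lowered to supported primitives by u_primconvert; everything else is
 * drawn natively. A fresh descriptor set is allocated and written for
 * every draw, and all dynamic state is re-emitted each time.
 */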
static void
zink_draw_vbo(struct pipe_context *pctx,
              const struct pipe_draw_info *dinfo)
{
   struct zink_context *ctx = zink_context(pctx);
   struct zink_screen *screen = zink_screen(pctx->screen);
   struct zink_rasterizer_state *rast_state = ctx->rast_state;

   if (dinfo->mode >= PIPE_PRIM_QUADS ||
       dinfo->mode == PIPE_PRIM_LINE_LOOP) {
      if (!u_trim_pipe_prim(dinfo->mode, (unsigned *)&dinfo->count))
         return;

      util_primconvert_save_rasterizer_state(ctx->primconvert, &rast_state->base);
      util_primconvert_draw_vbo(ctx->primconvert, dinfo);
      return;
   }

   struct zink_gfx_program *gfx_program = get_gfx_program(ctx);
   if (!gfx_program)
      return;

   VkPipeline pipeline = zink_get_gfx_pipeline(screen, gfx_program,
                                               &ctx->gfx_pipeline_state,
                                               dinfo->mode);

   bool depth_bias = false;
   switch (u_reduced_prim(dinfo->mode)) {
   case PIPE_PRIM_POINTS:
      depth_bias = rast_state->offset_point;
      break;

   case PIPE_PRIM_LINES:
      depth_bias = rast_state->offset_line;
      break;

   case PIPE_PRIM_TRIANGLES:
      depth_bias = rast_state->offset_tri;
      break;

   default:
      unreachable("unexpected reduced prim");
   }

   unsigned index_offset = 0;
   struct pipe_resource *index_buffer = NULL;
   if (dinfo->index_size > 0) {
      if (dinfo->has_user_indices) {
         if (!util_upload_index_buffer(pctx, dinfo, &index_buffer, &index_offset)) {
            debug_printf("util_upload_index_buffer() failed\n");
            return;
         }
      } else
         index_buffer = dinfo->index.resource;
   }

   VkWriteDescriptorSet wds[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS + PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
   VkDescriptorBufferInfo buffer_infos[PIPE_SHADER_TYPES * PIPE_MAX_CONSTANT_BUFFERS];
   VkDescriptorImageInfo image_infos[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
   int num_wds = 0, num_buffer_info = 0, num_image_info = 0;

   struct zink_resource *transitions[PIPE_SHADER_TYPES * PIPE_MAX_SHADER_SAMPLER_VIEWS];
   int num_transitions = 0;

   for (int i = 0; i < ARRAY_SIZE(ctx->gfx_stages); i++) {
      struct zink_shader *shader = ctx->gfx_stages[i];
      if (!shader)
         continue;

      for (int j = 0; j < shader->num_bindings; j++) {
         int index = shader->bindings[j].index;
         if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
            assert(ctx->ubos[i][index].buffer_size > 0);
            assert(ctx->ubos[i][index].buffer);
            struct zink_resource *res = zink_resource(ctx->ubos[i][index].buffer);
            buffer_infos[num_buffer_info].buffer = res->buffer;
            buffer_infos[num_buffer_info].offset = ctx->ubos[i][index].buffer_offset;
            buffer_infos[num_buffer_info].range = VK_WHOLE_SIZE;
            wds[num_wds].pBufferInfo = buffer_infos + num_buffer_info;
            ++num_buffer_info;
         } else {
            struct pipe_sampler_view *psampler_view = ctx->image_views[i][index];
            assert(psampler_view);
            struct zink_sampler_view *sampler_view = zink_sampler_view(psampler_view);

            struct zink_resource *res = zink_resource(psampler_view->texture);
            VkImageLayout layout = res->layout;
            if (layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
                layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL &&
                layout != VK_IMAGE_LAYOUT_GENERAL) {
               transitions[num_transitions++] = res;
               layout = VK_IMAGE_LAYOUT_GENERAL;
            }
            image_infos[num_image_info].imageLayout = layout;
            image_infos[num_image_info].imageView = sampler_view->image_view;
            image_infos[num_image_info].sampler = ctx->samplers[i][index];
            wds[num_wds].pImageInfo = image_infos + num_image_info;
            ++num_image_info;
         }

         wds[num_wds].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
         wds[num_wds].pNext = NULL;
         wds[num_wds].dstBinding = shader->bindings[j].binding;
         wds[num_wds].dstArrayElement = 0;
         wds[num_wds].descriptorCount = 1;
         wds[num_wds].descriptorType = shader->bindings[j].type;
         ++num_wds;
      }
   }

   struct zink_batch *batch;
   if (num_transitions > 0) {
      batch = zink_batch_no_rp(ctx);

      for (int i = 0; i < num_transitions; ++i)
         zink_resource_barrier(batch->cmdbuf, transitions[i],
                               transitions[i]->aspect,
                               VK_IMAGE_LAYOUT_GENERAL);
   }

   batch = zink_batch_rp(ctx);

   if (batch->descs_left < gfx_program->num_descriptors) {
      flush_batch(ctx);
      batch = zink_batch_rp(ctx);
      assert(batch->descs_left >= gfx_program->num_descriptors);
   }

   VkDescriptorSet desc_set = allocate_descriptor_set(screen, batch,
                                                      gfx_program);
   assert(desc_set != VK_NULL_HANDLE);

   for (int i = 0; i < ARRAY_SIZE(ctx->gfx_stages); i++) {
      struct zink_shader *shader = ctx->gfx_stages[i];
      if (!shader)
         continue;

      for (int j = 0; j < shader->num_bindings; j++) {
         int index = shader->bindings[j].index;
         if (shader->bindings[j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
            struct zink_resource *res = zink_resource(ctx->ubos[i][index].buffer);
            zink_batch_reference_resoure(batch, res);
         } else {
            struct zink_sampler_view *sampler_view = zink_sampler_view(ctx->image_views[i][index]);
            zink_batch_reference_sampler_view(batch, sampler_view);
         }
      }
   }

   vkCmdSetViewport(batch->cmdbuf, 0, ctx->num_viewports, ctx->viewports);

   if (ctx->num_scissors)
      vkCmdSetScissor(batch->cmdbuf, 0, ctx->num_scissors, ctx->scissors);
   else if (ctx->fb_state.width && ctx->fb_state.height) {
      VkRect2D fb_scissor = {};
      fb_scissor.extent.width = ctx->fb_state.width;
      fb_scissor.extent.height = ctx->fb_state.height;
      vkCmdSetScissor(batch->cmdbuf, 0, 1, &fb_scissor);
   }

   vkCmdSetStencilReference(batch->cmdbuf, VK_STENCIL_FACE_FRONT_BIT, ctx->stencil_ref.ref_value[0]);
   vkCmdSetStencilReference(batch->cmdbuf, VK_STENCIL_FACE_BACK_BIT, ctx->stencil_ref.ref_value[1]);

   if (depth_bias)
      vkCmdSetDepthBias(batch->cmdbuf, rast_state->offset_units, rast_state->offset_clamp, rast_state->offset_scale);
   else
      vkCmdSetDepthBias(batch->cmdbuf, 0.0f, 0.0f, 0.0f);

   if (ctx->gfx_pipeline_state.blend_state->need_blend_constants)
      vkCmdSetBlendConstants(batch->cmdbuf, ctx->blend_constants);

   for (int i = 0; i < num_wds; ++i)
      wds[i].dstSet = desc_set;

   vkUpdateDescriptorSets(screen->dev, num_wds, wds, 0, NULL);

   vkCmdBindPipeline(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
   vkCmdBindDescriptorSets(batch->cmdbuf, VK_PIPELINE_BIND_POINT_GRAPHICS,
                           gfx_program->layout, 0, 1, &desc_set, 0, NULL);
   zink_bind_vertex_buffers(batch, ctx);

   if (dinfo->index_size > 0) {
      assert(dinfo->index_size != 1);
      VkIndexType index_type = dinfo->index_size == 2 ? VK_INDEX_TYPE_UINT16 : VK_INDEX_TYPE_UINT32;
      struct zink_resource *res = zink_resource(index_buffer);
      vkCmdBindIndexBuffer(batch->cmdbuf, res->buffer, index_offset, index_type);
      zink_batch_reference_resoure(batch, res);
      vkCmdDrawIndexed(batch->cmdbuf,
                       dinfo->count, dinfo->instance_count,
                       dinfo->start, dinfo->index_bias, dinfo->start_instance);
   } else
      vkCmdDraw(batch->cmdbuf, dinfo->count, dinfo->instance_count, dinfo->start, dinfo->start_instance);

   if (dinfo->index_size > 0 && dinfo->has_user_indices)
      pipe_resource_reference(&index_buffer, NULL);
}

static void
zink_flush(struct pipe_context *pctx,
           struct pipe_fence_handle **pfence,
           enum pipe_flush_flags flags)
{
   struct zink_context *ctx = zink_context(pctx);

   struct zink_batch *batch = zink_curr_batch(ctx);
   flush_batch(ctx);

   if (pfence)
      zink_fence_reference(zink_screen(pctx->screen),
                           (struct zink_fence **)pfence,
                           batch->fence);

   /* HACK:
    * For some strange reason, we need to finish before presenting, or else
    * we start rendering on top of the back-buffer for the next frame. This
    * seems like a bug in the DRI-driver to me, because we really should
    * be properly protected by fences here, and the back-buffer should
    * either be swapped with the front-buffer, or blitted from. But for
    * some strange reason, neither of these things happen.
    */
   if (flags & PIPE_FLUSH_END_OF_FRAME)
      pctx->screen->fence_finish(pctx->screen, pctx,
                                 (struct pipe_fence_handle *)batch->fence,
                                 PIPE_TIMEOUT_INFINITE);
}

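/* Blits take one of three paths: anything the Vulkan transfer commands
 * can't express directly (partial write-masks, scissored blits, blending)
 * goes through util_blitter as ordinary draws; multisample-to-
 * single-sample blits become vkCmdResolveImage; everything else becomes
 * vkCmdBlitImage.
 */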
static void
zink_blit(struct pipe_context *pctx,
          const struct pipe_blit_info *info)
{
   struct zink_context *ctx = zink_context(pctx);
   bool is_resolve = false;
   if (info->mask != PIPE_MASK_RGBA ||
       info->scissor_enable ||
       info->alpha_blend) {
      if (!util_blitter_is_blit_supported(ctx->blitter, info)) {
         debug_printf("blit unsupported %s -> %s\n",
                      util_format_short_name(info->src.resource->format),
                      util_format_short_name(info->dst.resource->format));
         return;
      }

      util_blitter_save_blend(ctx->blitter, ctx->gfx_pipeline_state.blend_state);
      util_blitter_save_depth_stencil_alpha(ctx->blitter, ctx->gfx_pipeline_state.depth_stencil_alpha_state);
      util_blitter_save_vertex_elements(ctx->blitter, ctx->element_state);
      util_blitter_save_stencil_ref(ctx->blitter, &ctx->stencil_ref);
      util_blitter_save_rasterizer(ctx->blitter, ctx->rast_state);
      util_blitter_save_fragment_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_FRAGMENT]);
      util_blitter_save_vertex_shader(ctx->blitter, ctx->gfx_stages[PIPE_SHADER_VERTEX]);
      util_blitter_save_framebuffer(ctx->blitter, &ctx->fb_state);
      util_blitter_save_viewport(ctx->blitter, ctx->viewport_states);
      util_blitter_save_scissor(ctx->blitter, ctx->scissor_states);
      util_blitter_save_fragment_sampler_states(ctx->blitter,
                                                ctx->num_samplers[PIPE_SHADER_FRAGMENT],
                                                (void **)ctx->samplers[PIPE_SHADER_FRAGMENT]);
      util_blitter_save_fragment_sampler_views(ctx->blitter,
                                               ctx->num_image_views[PIPE_SHADER_FRAGMENT],
                                               ctx->image_views[PIPE_SHADER_FRAGMENT]);
      util_blitter_save_fragment_constant_buffer_slot(ctx->blitter, ctx->ubos[PIPE_SHADER_FRAGMENT]);
      util_blitter_save_vertex_buffer_slot(ctx->blitter, ctx->buffers);
      util_blitter_save_sample_mask(ctx->blitter, ctx->gfx_pipeline_state.sample_mask);

      util_blitter_blit(ctx->blitter, info);
      return;
   }

   struct zink_resource *src = zink_resource(info->src.resource);
   struct zink_resource *dst = zink_resource(info->dst.resource);

   if (src->base.nr_samples > 1 && dst->base.nr_samples <= 1)
      is_resolve = true;

   struct zink_batch *batch = zink_batch_no_rp(ctx);

   zink_batch_reference_resoure(batch, src);
   zink_batch_reference_resoure(batch, dst);

   /* both vkCmdResolveImage and vkCmdBlitImage need the source in a
    * transfer-src-capable layout and the destination in a
    * transfer-dst-capable layout
    */
   if (src->layout != VK_IMAGE_LAYOUT_GENERAL &&
       src->layout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL)
      zink_resource_barrier(batch->cmdbuf, src, src->aspect,
                            VK_IMAGE_LAYOUT_GENERAL);

   if (dst->layout != VK_IMAGE_LAYOUT_GENERAL &&
       dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
      zink_resource_barrier(batch->cmdbuf, dst, dst->aspect,
                            VK_IMAGE_LAYOUT_GENERAL);

   if (is_resolve) {
      VkImageResolve region = {};

      region.srcSubresource.aspectMask = src->aspect;
      region.srcSubresource.mipLevel = info->src.level;
      region.srcSubresource.baseArrayLayer = 0; // no clue
      region.srcSubresource.layerCount = 1; // no clue
      region.srcOffset.x = info->src.box.x;
      region.srcOffset.y = info->src.box.y;
      region.srcOffset.z = info->src.box.z;

      region.dstSubresource.aspectMask = dst->aspect;
      region.dstSubresource.mipLevel = info->dst.level;
      region.dstSubresource.baseArrayLayer = 0; // no clue
      region.dstSubresource.layerCount = 1; // no clue
      region.dstOffset.x = info->dst.box.x;
      region.dstOffset.y = info->dst.box.y;
      region.dstOffset.z = info->dst.box.z;

      region.extent.width = info->dst.box.width;
      region.extent.height = info->dst.box.height;
      region.extent.depth = info->dst.box.depth;
      vkCmdResolveImage(batch->cmdbuf, src->image, src->layout,
                        dst->image, dst->layout,
                        1, &region);

   } else {
      VkImageBlit region = {};
      region.srcSubresource.aspectMask = src->aspect;
      region.srcSubresource.mipLevel = info->src.level;
      region.srcOffsets[0].x = info->src.box.x;
      region.srcOffsets[0].y = info->src.box.y;
      region.srcOffsets[1].x = info->src.box.x + info->src.box.width;
      region.srcOffsets[1].y = info->src.box.y + info->src.box.height;

      if (src->base.array_size > 1) {
         region.srcOffsets[0].z = 0;
         region.srcOffsets[1].z = 1;
         region.srcSubresource.baseArrayLayer = info->src.box.z;
         region.srcSubresource.layerCount = info->src.box.depth;
      } else {
         region.srcOffsets[0].z = info->src.box.z;
         region.srcOffsets[1].z = info->src.box.z + info->src.box.depth;
         region.srcSubresource.baseArrayLayer = 0;
         region.srcSubresource.layerCount = 1;
      }

      region.dstSubresource.aspectMask = dst->aspect;
      region.dstSubresource.mipLevel = info->dst.level;
      region.dstOffsets[0].x = info->dst.box.x;
      region.dstOffsets[0].y = info->dst.box.y;
      region.dstOffsets[1].x = info->dst.box.x + info->dst.box.width;
      region.dstOffsets[1].y = info->dst.box.y + info->dst.box.height;

      if (dst->base.array_size > 1) {
         region.dstOffsets[0].z = 0;
         region.dstOffsets[1].z = 1;
         region.dstSubresource.baseArrayLayer = info->dst.box.z;
         region.dstSubresource.layerCount = info->dst.box.depth;
      } else {
         region.dstOffsets[0].z = info->dst.box.z;
         region.dstOffsets[1].z = info->dst.box.z + info->dst.box.depth;
         region.dstSubresource.baseArrayLayer = 0;
         region.dstSubresource.layerCount = 1;
      }

      vkCmdBlitImage(batch->cmdbuf, src->image, src->layout,
                     dst->image, dst->layout,
                     1, &region,
                     filter(info->filter));
   }

   /* HACK: I have no idea why this is needed, but without it ioquake3
    * randomly keeps fading to black.
    */
   flush_batch(ctx);
}

static void
zink_flush_resource(struct pipe_context *pipe,
                    struct pipe_resource *resource)
{
}

static void
zink_resource_copy_region(struct pipe_context *pctx,
                          struct pipe_resource *pdst,
                          unsigned dst_level, unsigned dstx, unsigned dsty, unsigned dstz,
                          struct pipe_resource *psrc,
                          unsigned src_level, const struct pipe_box *src_box)
{
   struct zink_resource *dst = zink_resource(pdst);
   struct zink_resource *src = zink_resource(psrc);
   struct zink_context *ctx = zink_context(pctx);
   if (dst->base.target != PIPE_BUFFER && src->base.target != PIPE_BUFFER) {
      VkImageCopy region = {};

      region.srcSubresource.aspectMask = src->aspect;
      region.srcSubresource.mipLevel = src_level;
      region.srcSubresource.layerCount = 1;
      if (src->base.array_size > 1) {
         region.srcSubresource.baseArrayLayer = src_box->z;
         region.srcSubresource.layerCount = src_box->depth;
         region.extent.depth = 1;
      } else {
         region.srcOffset.z = src_box->z;
         region.srcSubresource.layerCount = 1;
         region.extent.depth = src_box->depth;
      }

      region.srcOffset.x = src_box->x;
      region.srcOffset.y = src_box->y;

      region.dstSubresource.aspectMask = dst->aspect;
      region.dstSubresource.mipLevel = dst_level;
      if (dst->base.array_size > 1) {
         region.dstSubresource.baseArrayLayer = dstz;
         region.dstSubresource.layerCount = src_box->depth;
      } else {
         region.dstOffset.z = dstz;
         region.dstSubresource.layerCount = 1;
      }

      region.dstOffset.x = dstx;
      region.dstOffset.y = dsty;
      region.extent.width = src_box->width;
      region.extent.height = src_box->height;

      struct zink_batch *batch = zink_batch_no_rp(ctx);
      zink_batch_reference_resoure(batch, src);
      zink_batch_reference_resoure(batch, dst);

      if (src->layout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL &&
          src->layout != VK_IMAGE_LAYOUT_GENERAL) {
         zink_resource_barrier(batch->cmdbuf, src, src->aspect,
                               VK_IMAGE_LAYOUT_GENERAL);
         src->layout = VK_IMAGE_LAYOUT_GENERAL;
      }

      if (dst->layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL &&
          dst->layout != VK_IMAGE_LAYOUT_GENERAL) {
         zink_resource_barrier(batch->cmdbuf, dst, dst->aspect,
                               VK_IMAGE_LAYOUT_GENERAL);
         dst->layout = VK_IMAGE_LAYOUT_GENERAL;
      }

      vkCmdCopyImage(batch->cmdbuf, src->image, src->layout,
                     dst->image, dst->layout,
                     1, &region);
   } else
      debug_printf("zink: TODO resource copy\n");
}

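/* Create the pipe_context. Most of this is wiring up the vtable; the
 * interesting part is the ring of batches, each with its own command
 * buffer, descriptor pool and referenced-object sets.
 */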
struct pipe_context *
zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
   struct zink_screen *screen = zink_screen(pscreen);
   struct zink_context *ctx = CALLOC_STRUCT(zink_context);
   if (!ctx)
      return NULL;

   ctx->base.screen = pscreen;
   ctx->base.priv = priv;

   ctx->base.destroy = zink_context_destroy;

   zink_context_state_init(&ctx->base);

   ctx->base.create_sampler_state = zink_create_sampler_state;
   ctx->base.bind_sampler_states = zink_bind_sampler_states;
   ctx->base.delete_sampler_state = zink_delete_sampler_state;

   ctx->base.create_sampler_view = zink_create_sampler_view;
   ctx->base.set_sampler_views = zink_set_sampler_views;
   ctx->base.sampler_view_destroy = zink_sampler_view_destroy;

   ctx->base.create_vs_state = zink_create_vs_state;
   ctx->base.bind_vs_state = zink_bind_vs_state;
   ctx->base.delete_vs_state = zink_delete_vs_state;

   ctx->base.create_fs_state = zink_create_fs_state;
   ctx->base.bind_fs_state = zink_bind_fs_state;
   ctx->base.delete_fs_state = zink_delete_fs_state;

   ctx->base.set_polygon_stipple = zink_set_polygon_stipple;
   ctx->base.set_vertex_buffers = zink_set_vertex_buffers;
   ctx->base.set_viewport_states = zink_set_viewport_states;
   ctx->base.set_scissor_states = zink_set_scissor_states;
   ctx->base.set_constant_buffer = zink_set_constant_buffer;
   ctx->base.set_framebuffer_state = zink_set_framebuffer_state;
   ctx->base.set_stencil_ref = zink_set_stencil_ref;
   ctx->base.set_clip_state = zink_set_clip_state;
   ctx->base.set_active_query_state = zink_set_active_query_state;
   ctx->base.set_blend_color = zink_set_blend_color;

   ctx->base.set_sample_mask = zink_set_sample_mask;

   ctx->base.clear = zink_clear;
   ctx->base.draw_vbo = zink_draw_vbo;
   ctx->base.flush = zink_flush;

   ctx->base.resource_copy_region = zink_resource_copy_region;
   ctx->base.blit = zink_blit;

   ctx->base.flush_resource = zink_flush_resource;
   zink_context_surface_init(&ctx->base);
   zink_context_resource_init(&ctx->base);
   zink_context_query_init(&ctx->base);

   slab_create_child(&ctx->transfer_pool, &screen->transfer_pool);

   ctx->base.stream_uploader = u_upload_create_default(&ctx->base);
   ctx->base.const_uploader = ctx->base.stream_uploader;

   int prim_hwsupport = 1 << PIPE_PRIM_POINTS |
                        1 << PIPE_PRIM_LINES |
                        1 << PIPE_PRIM_LINE_STRIP |
                        1 << PIPE_PRIM_TRIANGLES |
                        1 << PIPE_PRIM_TRIANGLE_STRIP |
                        1 << PIPE_PRIM_TRIANGLE_FAN;

   ctx->primconvert = util_primconvert_create(&ctx->base, prim_hwsupport);
   if (!ctx->primconvert)
      goto fail;

   ctx->blitter = util_blitter_create(&ctx->base);
   if (!ctx->blitter)
      goto fail;

   VkCommandPoolCreateInfo cpci = {};
   cpci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
   cpci.queueFamilyIndex = screen->gfx_queue;
   cpci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
   if (vkCreateCommandPool(screen->dev, &cpci, NULL, &ctx->cmdpool) != VK_SUCCESS)
      goto fail;

   VkCommandBufferAllocateInfo cbai = {};
   cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
   cbai.commandPool = ctx->cmdpool;
   cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
   cbai.commandBufferCount = 1;

   VkDescriptorPoolSize sizes[] = {
      {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, ZINK_BATCH_DESC_SIZE}
   };
   VkDescriptorPoolCreateInfo dpci = {};
   dpci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
   dpci.pPoolSizes = sizes;
   dpci.poolSizeCount = ARRAY_SIZE(sizes);
   dpci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
   dpci.maxSets = ZINK_BATCH_DESC_SIZE;

   for (int i = 0; i < ARRAY_SIZE(ctx->batches); ++i) {
      if (vkAllocateCommandBuffers(screen->dev, &cbai, &ctx->batches[i].cmdbuf) != VK_SUCCESS)
         goto fail;

      ctx->batches[i].resources = _mesa_set_create(NULL, _mesa_hash_pointer,
                                                   _mesa_key_pointer_equal);
      ctx->batches[i].sampler_views = _mesa_set_create(NULL,
                                                       _mesa_hash_pointer,
                                                       _mesa_key_pointer_equal);

      if (!ctx->batches[i].resources || !ctx->batches[i].sampler_views)
         goto fail;

      util_dynarray_init(&ctx->batches[i].zombie_samplers, NULL);

      if (vkCreateDescriptorPool(screen->dev, &dpci, 0,
                                 &ctx->batches[i].descpool) != VK_SUCCESS)
         goto fail;
   }

   vkGetDeviceQueue(screen->dev, screen->gfx_queue, 0, &ctx->queue);

   ctx->program_cache = _mesa_hash_table_create(NULL,
                                                hash_gfx_program,
                                                equals_gfx_program);
   ctx->render_pass_cache = _mesa_hash_table_create(NULL,
                                                    hash_render_pass_state,
                                                    equals_render_pass_state);
   ctx->framebuffer_cache = _mesa_hash_table_create(NULL,
                                                    hash_framebuffer_state,
                                                    equals_framebuffer_state);

   if (!ctx->program_cache || !ctx->render_pass_cache ||
       !ctx->framebuffer_cache)
      goto fail;

   ctx->dirty = ZINK_DIRTY_PROGRAM;

   /* start the first batch */
   zink_start_batch(ctx, zink_curr_batch(ctx));

   return &ctx->base;

fail:
   if (ctx) {
      vkDestroyCommandPool(screen->dev, ctx->cmdpool, NULL);
      FREE(ctx);
   }
   return NULL;
}