2 * Copyright 2018 Collabora Ltd.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 #include "zink_context.h"
26 #include "zink_batch.h"
27 #include "zink_compiler.h"
28 #include "zink_fence.h"
29 #include "zink_framebuffer.h"
30 #include "zink_helpers.h"
31 #include "zink_pipeline.h"
32 #include "zink_query.h"
33 #include "zink_render_pass.h"
34 #include "zink_resource.h"
35 #include "zink_screen.h"
36 #include "zink_state.h"
37 #include "zink_surface.h"
39 #include "indices/u_primconvert.h"
40 #include "util/u_blitter.h"
41 #include "util/u_debug.h"
42 #include "util/format/u_format.h"
43 #include "util/u_framebuffer.h"
44 #include "util/u_helpers.h"
45 #include "util/u_inlines.h"
49 #include "util/u_memory.h"
50 #include "util/u_upload_mgr.h"
53 zink_context_destroy(struct pipe_context
*pctx
)
55 struct zink_context
*ctx
= zink_context(pctx
);
56 struct zink_screen
*screen
= zink_screen(pctx
->screen
);
58 if (vkQueueWaitIdle(ctx
->queue
) != VK_SUCCESS
)
59 debug_printf("vkQueueWaitIdle failed\n");
61 for (unsigned i
= 0; i
< ARRAY_SIZE(ctx
->null_buffers
); i
++)
62 pipe_resource_reference(&ctx
->null_buffers
[i
], NULL
);
64 for (int i
= 0; i
< ARRAY_SIZE(ctx
->batches
); ++i
) {
65 vkDestroyDescriptorPool(screen
->dev
, ctx
->batches
[i
].descpool
, NULL
);
66 vkFreeCommandBuffers(screen
->dev
, ctx
->cmdpool
, 1, &ctx
->batches
[i
].cmdbuf
);
68 vkDestroyCommandPool(screen
->dev
, ctx
->cmdpool
, NULL
);
70 util_primconvert_destroy(ctx
->primconvert
);
71 u_upload_destroy(pctx
->stream_uploader
);
72 slab_destroy_child(&ctx
->transfer_pool
);
73 util_blitter_destroy(ctx
->blitter
);
77 static VkSamplerMipmapMode
78 sampler_mipmap_mode(enum pipe_tex_mipfilter filter
)
81 case PIPE_TEX_MIPFILTER_NEAREST
: return VK_SAMPLER_MIPMAP_MODE_NEAREST
;
82 case PIPE_TEX_MIPFILTER_LINEAR
: return VK_SAMPLER_MIPMAP_MODE_LINEAR
;
83 case PIPE_TEX_MIPFILTER_NONE
:
84 unreachable("PIPE_TEX_MIPFILTER_NONE should be dealt with earlier");
86 unreachable("unexpected filter");
89 static VkSamplerAddressMode
90 sampler_address_mode(enum pipe_tex_wrap filter
)
93 case PIPE_TEX_WRAP_REPEAT
: return VK_SAMPLER_ADDRESS_MODE_REPEAT
;
94 case PIPE_TEX_WRAP_CLAMP
: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE
; /* not technically correct, but kinda works */
95 case PIPE_TEX_WRAP_CLAMP_TO_EDGE
: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE
;
96 case PIPE_TEX_WRAP_CLAMP_TO_BORDER
: return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER
;
97 case PIPE_TEX_WRAP_MIRROR_REPEAT
: return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT
;
98 case PIPE_TEX_WRAP_MIRROR_CLAMP
: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE
; /* not technically correct, but kinda works */
99 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE
: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE
;
100 case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER
: return VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE
; /* not technically correct, but kinda works */
102 unreachable("unexpected wrap");
106 compare_op(enum pipe_compare_func op
)
109 case PIPE_FUNC_NEVER
: return VK_COMPARE_OP_NEVER
;
110 case PIPE_FUNC_LESS
: return VK_COMPARE_OP_LESS
;
111 case PIPE_FUNC_EQUAL
: return VK_COMPARE_OP_EQUAL
;
112 case PIPE_FUNC_LEQUAL
: return VK_COMPARE_OP_LESS_OR_EQUAL
;
113 case PIPE_FUNC_GREATER
: return VK_COMPARE_OP_GREATER
;
114 case PIPE_FUNC_NOTEQUAL
: return VK_COMPARE_OP_NOT_EQUAL
;
115 case PIPE_FUNC_GEQUAL
: return VK_COMPARE_OP_GREATER_OR_EQUAL
;
116 case PIPE_FUNC_ALWAYS
: return VK_COMPARE_OP_ALWAYS
;
118 unreachable("unexpected compare");
122 zink_create_sampler_state(struct pipe_context
*pctx
,
123 const struct pipe_sampler_state
*state
)
125 struct zink_screen
*screen
= zink_screen(pctx
->screen
);
127 VkSamplerCreateInfo sci
= {};
128 sci
.sType
= VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO
;
129 sci
.magFilter
= zink_filter(state
->mag_img_filter
);
130 sci
.minFilter
= zink_filter(state
->min_img_filter
);
132 if (state
->min_mip_filter
!= PIPE_TEX_MIPFILTER_NONE
) {
133 sci
.mipmapMode
= sampler_mipmap_mode(state
->min_mip_filter
);
134 sci
.minLod
= state
->min_lod
;
135 sci
.maxLod
= state
->max_lod
;
137 sci
.mipmapMode
= VK_SAMPLER_MIPMAP_MODE_NEAREST
;
142 sci
.addressModeU
= sampler_address_mode(state
->wrap_s
);
143 sci
.addressModeV
= sampler_address_mode(state
->wrap_t
);
144 sci
.addressModeW
= sampler_address_mode(state
->wrap_r
);
145 sci
.mipLodBias
= state
->lod_bias
;
147 if (state
->compare_mode
== PIPE_TEX_COMPARE_NONE
)
148 sci
.compareOp
= VK_COMPARE_OP_NEVER
;
150 sci
.compareOp
= compare_op(state
->compare_func
);
151 sci
.compareEnable
= VK_TRUE
;
154 sci
.borderColor
= VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK
; // TODO
155 sci
.unnormalizedCoordinates
= !state
->normalized_coords
;
157 if (state
->max_anisotropy
> 1) {
158 sci
.maxAnisotropy
= state
->max_anisotropy
;
159 sci
.anisotropyEnable
= VK_TRUE
;
162 VkSampler
*sampler
= CALLOC(1, sizeof(VkSampler
));
166 if (vkCreateSampler(screen
->dev
, &sci
, NULL
, sampler
) != VK_SUCCESS
) {
175 zink_bind_sampler_states(struct pipe_context
*pctx
,
176 enum pipe_shader_type shader
,
178 unsigned num_samplers
,
181 struct zink_context
*ctx
= zink_context(pctx
);
182 for (unsigned i
= 0; i
< num_samplers
; ++i
) {
183 VkSampler
*sampler
= samplers
[i
];
184 ctx
->sampler_states
[shader
][start_slot
+ i
] = sampler
;
185 ctx
->samplers
[shader
][start_slot
+ i
] = sampler
? *sampler
: VK_NULL_HANDLE
;
187 ctx
->num_samplers
[shader
] = start_slot
+ num_samplers
;
191 zink_delete_sampler_state(struct pipe_context
*pctx
,
194 struct zink_batch
*batch
= zink_curr_batch(zink_context(pctx
));
195 util_dynarray_append(&batch
->zombie_samplers
, VkSampler
,
196 *(VkSampler
*)sampler_state
);
201 static VkImageViewType
202 image_view_type(enum pipe_texture_target target
)
205 case PIPE_TEXTURE_1D
: return VK_IMAGE_VIEW_TYPE_1D
;
206 case PIPE_TEXTURE_1D_ARRAY
: return VK_IMAGE_VIEW_TYPE_1D_ARRAY
;
207 case PIPE_TEXTURE_2D
: return VK_IMAGE_VIEW_TYPE_2D
;
208 case PIPE_TEXTURE_2D_ARRAY
: return VK_IMAGE_VIEW_TYPE_2D_ARRAY
;
209 case PIPE_TEXTURE_CUBE
: return VK_IMAGE_VIEW_TYPE_CUBE
;
210 case PIPE_TEXTURE_CUBE_ARRAY
: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY
;
211 case PIPE_TEXTURE_3D
: return VK_IMAGE_VIEW_TYPE_3D
;
212 case PIPE_TEXTURE_RECT
: return VK_IMAGE_VIEW_TYPE_2D
;
214 unreachable("unexpected target");
218 static VkComponentSwizzle
219 component_mapping(enum pipe_swizzle swizzle
)
222 case PIPE_SWIZZLE_X
: return VK_COMPONENT_SWIZZLE_R
;
223 case PIPE_SWIZZLE_Y
: return VK_COMPONENT_SWIZZLE_G
;
224 case PIPE_SWIZZLE_Z
: return VK_COMPONENT_SWIZZLE_B
;
225 case PIPE_SWIZZLE_W
: return VK_COMPONENT_SWIZZLE_A
;
226 case PIPE_SWIZZLE_0
: return VK_COMPONENT_SWIZZLE_ZERO
;
227 case PIPE_SWIZZLE_1
: return VK_COMPONENT_SWIZZLE_ONE
;
228 case PIPE_SWIZZLE_NONE
: return VK_COMPONENT_SWIZZLE_IDENTITY
; // ???
230 unreachable("unexpected swizzle");
234 static VkImageAspectFlags
235 sampler_aspect_from_format(enum pipe_format fmt
)
237 if (util_format_is_depth_or_stencil(fmt
)) {
238 const struct util_format_description
*desc
= util_format_description(fmt
);
239 if (util_format_has_depth(desc
))
240 return VK_IMAGE_ASPECT_DEPTH_BIT
;
241 assert(util_format_has_stencil(desc
));
242 return VK_IMAGE_ASPECT_STENCIL_BIT
;
244 return VK_IMAGE_ASPECT_COLOR_BIT
;
247 static struct pipe_sampler_view
*
248 zink_create_sampler_view(struct pipe_context
*pctx
, struct pipe_resource
*pres
,
249 const struct pipe_sampler_view
*state
)
251 struct zink_screen
*screen
= zink_screen(pctx
->screen
);
252 struct zink_resource
*res
= zink_resource(pres
);
253 struct zink_sampler_view
*sampler_view
= CALLOC_STRUCT(zink_sampler_view
);
255 sampler_view
->base
= *state
;
256 sampler_view
->base
.texture
= NULL
;
257 pipe_resource_reference(&sampler_view
->base
.texture
, pres
);
258 sampler_view
->base
.reference
.count
= 1;
259 sampler_view
->base
.context
= pctx
;
261 VkImageViewCreateInfo ivci
= {};
262 ivci
.sType
= VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO
;
263 ivci
.image
= res
->image
;
264 ivci
.viewType
= image_view_type(state
->target
);
265 ivci
.format
= zink_get_format(screen
, state
->format
);
266 ivci
.components
.r
= component_mapping(state
->swizzle_r
);
267 ivci
.components
.g
= component_mapping(state
->swizzle_g
);
268 ivci
.components
.b
= component_mapping(state
->swizzle_b
);
269 ivci
.components
.a
= component_mapping(state
->swizzle_a
);
271 ivci
.subresourceRange
.aspectMask
= sampler_aspect_from_format(state
->format
);
272 ivci
.subresourceRange
.baseMipLevel
= state
->u
.tex
.first_level
;
273 ivci
.subresourceRange
.baseArrayLayer
= state
->u
.tex
.first_layer
;
274 ivci
.subresourceRange
.levelCount
= state
->u
.tex
.last_level
- state
->u
.tex
.first_level
+ 1;
275 ivci
.subresourceRange
.layerCount
= state
->u
.tex
.last_layer
- state
->u
.tex
.first_layer
+ 1;
277 VkResult err
= vkCreateImageView(screen
->dev
, &ivci
, NULL
, &sampler_view
->image_view
);
278 if (err
!= VK_SUCCESS
) {
283 return &sampler_view
->base
;
287 zink_sampler_view_destroy(struct pipe_context
*pctx
,
288 struct pipe_sampler_view
*pview
)
290 struct zink_sampler_view
*view
= zink_sampler_view(pview
);
291 vkDestroyImageView(zink_screen(pctx
->screen
)->dev
, view
->image_view
, NULL
);
296 zink_create_vs_state(struct pipe_context
*pctx
,
297 const struct pipe_shader_state
*shader
)
299 struct nir_shader
*nir
;
300 if (shader
->type
!= PIPE_SHADER_IR_NIR
)
301 nir
= zink_tgsi_to_nir(pctx
->screen
, shader
->tokens
);
303 nir
= (struct nir_shader
*)shader
->ir
.nir
;
305 return zink_compile_nir(zink_screen(pctx
->screen
), nir
, &shader
->stream_output
);
309 bind_stage(struct zink_context
*ctx
, enum pipe_shader_type stage
,
310 struct zink_shader
*shader
)
312 assert(stage
< PIPE_SHADER_COMPUTE
);
313 ctx
->gfx_stages
[stage
] = shader
;
314 ctx
->dirty_program
= true;
318 zink_bind_vs_state(struct pipe_context
*pctx
,
321 bind_stage(zink_context(pctx
), PIPE_SHADER_VERTEX
, cso
);
/* Destroy a vertex-shader CSO. */
static void
zink_delete_vs_state(struct pipe_context *pctx,
                     void *cso)
{
   zink_shader_free(zink_context(pctx), cso);
}
332 zink_create_fs_state(struct pipe_context
*pctx
,
333 const struct pipe_shader_state
*shader
)
335 struct nir_shader
*nir
;
336 if (shader
->type
!= PIPE_SHADER_IR_NIR
)
337 nir
= zink_tgsi_to_nir(pctx
->screen
, shader
->tokens
);
339 nir
= (struct nir_shader
*)shader
->ir
.nir
;
341 return zink_compile_nir(zink_screen(pctx
->screen
), nir
, NULL
);
345 zink_bind_fs_state(struct pipe_context
*pctx
,
348 bind_stage(zink_context(pctx
), PIPE_SHADER_FRAGMENT
, cso
);
/* Destroy a fragment-shader CSO. */
static void
zink_delete_fs_state(struct pipe_context *pctx,
                     void *cso)
{
   zink_shader_free(zink_context(pctx), cso);
}
/* Polygon stipple has no Vulkan equivalent; intentionally a no-op. */
static void
zink_set_polygon_stipple(struct pipe_context *pctx,
                         const struct pipe_poly_stipple *ps)
{
}
365 zink_set_vertex_buffers(struct pipe_context
*pctx
,
367 unsigned num_buffers
,
368 const struct pipe_vertex_buffer
*buffers
)
370 struct zink_context
*ctx
= zink_context(pctx
);
373 for (int i
= 0; i
< num_buffers
; ++i
) {
374 const struct pipe_vertex_buffer
*vb
= buffers
+ i
;
375 struct zink_resource
*res
= zink_resource(vb
->buffer
.resource
);
377 ctx
->gfx_pipeline_state
.bindings
[start_slot
+ i
].stride
= vb
->stride
;
378 if (res
&& res
->needs_xfb_barrier
) {
379 /* if we're binding a previously-used xfb buffer, we need cmd buffer synchronization to ensure
380 * that we use the right buffer data
382 pctx
->flush(pctx
, NULL
, 0);
383 res
->needs_xfb_barrier
= false;
388 util_set_vertex_buffers_mask(ctx
->buffers
, &ctx
->buffers_enabled_mask
,
389 buffers
, start_slot
, num_buffers
);
393 zink_set_viewport_states(struct pipe_context
*pctx
,
395 unsigned num_viewports
,
396 const struct pipe_viewport_state
*state
)
398 struct zink_context
*ctx
= zink_context(pctx
);
400 for (unsigned i
= 0; i
< num_viewports
; ++i
) {
401 VkViewport viewport
= {
402 state
[i
].translate
[0] - state
[i
].scale
[0],
403 state
[i
].translate
[1] - state
[i
].scale
[1],
404 state
[i
].scale
[0] * 2,
405 state
[i
].scale
[1] * 2,
406 state
[i
].translate
[2] - state
[i
].scale
[2],
407 state
[i
].translate
[2] + state
[i
].scale
[2]
409 ctx
->viewport_states
[start_slot
+ i
] = state
[i
];
410 ctx
->viewports
[start_slot
+ i
] = viewport
;
412 ctx
->num_viewports
= start_slot
+ num_viewports
;
416 zink_set_scissor_states(struct pipe_context
*pctx
,
417 unsigned start_slot
, unsigned num_scissors
,
418 const struct pipe_scissor_state
*states
)
420 struct zink_context
*ctx
= zink_context(pctx
);
422 for (unsigned i
= 0; i
< num_scissors
; i
++) {
425 scissor
.offset
.x
= states
[i
].minx
;
426 scissor
.offset
.y
= states
[i
].miny
;
427 scissor
.extent
.width
= states
[i
].maxx
- states
[i
].minx
;
428 scissor
.extent
.height
= states
[i
].maxy
- states
[i
].miny
;
429 ctx
->scissor_states
[start_slot
+ i
] = states
[i
];
430 ctx
->scissors
[start_slot
+ i
] = scissor
;
435 zink_set_constant_buffer(struct pipe_context
*pctx
,
436 enum pipe_shader_type shader
, uint index
,
437 const struct pipe_constant_buffer
*cb
)
439 struct zink_context
*ctx
= zink_context(pctx
);
442 struct pipe_resource
*buffer
= cb
->buffer
;
443 unsigned offset
= cb
->buffer_offset
;
444 if (cb
->user_buffer
) {
445 struct zink_screen
*screen
= zink_screen(pctx
->screen
);
446 u_upload_data(ctx
->base
.const_uploader
, 0, cb
->buffer_size
,
447 screen
->props
.limits
.minUniformBufferOffsetAlignment
,
448 cb
->user_buffer
, &offset
, &buffer
);
451 pipe_resource_reference(&ctx
->ubos
[shader
][index
].buffer
, buffer
);
452 ctx
->ubos
[shader
][index
].buffer_offset
= offset
;
453 ctx
->ubos
[shader
][index
].buffer_size
= cb
->buffer_size
;
454 ctx
->ubos
[shader
][index
].user_buffer
= NULL
;
457 pipe_resource_reference(&buffer
, NULL
);
459 pipe_resource_reference(&ctx
->ubos
[shader
][index
].buffer
, NULL
);
460 ctx
->ubos
[shader
][index
].buffer_offset
= 0;
461 ctx
->ubos
[shader
][index
].buffer_size
= 0;
462 ctx
->ubos
[shader
][index
].user_buffer
= NULL
;
467 zink_set_sampler_views(struct pipe_context
*pctx
,
468 enum pipe_shader_type shader_type
,
471 struct pipe_sampler_view
**views
)
473 struct zink_context
*ctx
= zink_context(pctx
);
475 for (unsigned i
= 0; i
< num_views
; ++i
) {
476 pipe_sampler_view_reference(
477 &ctx
->image_views
[shader_type
][start_slot
+ i
],
480 ctx
->num_image_views
[shader_type
] = start_slot
+ num_views
;
484 zink_set_stencil_ref(struct pipe_context
*pctx
,
485 const struct pipe_stencil_ref
*ref
)
487 struct zink_context
*ctx
= zink_context(pctx
);
488 ctx
->stencil_ref
= *ref
;
/* User clip planes are not implemented; intentionally a no-op. */
static void
zink_set_clip_state(struct pipe_context *pctx,
                    const struct pipe_clip_state *pcs)
{
}
497 static struct zink_render_pass
*
498 get_render_pass(struct zink_context
*ctx
)
500 struct zink_screen
*screen
= zink_screen(ctx
->base
.screen
);
501 const struct pipe_framebuffer_state
*fb
= &ctx
->fb_state
;
502 struct zink_render_pass_state state
= { 0 };
504 for (int i
= 0; i
< fb
->nr_cbufs
; i
++) {
505 struct pipe_surface
*surf
= fb
->cbufs
[i
];
507 state
.rts
[i
].format
= zink_get_format(screen
, surf
->format
);
508 state
.rts
[i
].samples
= surf
->nr_samples
> 0 ? surf
->nr_samples
:
509 VK_SAMPLE_COUNT_1_BIT
;
511 state
.rts
[i
].format
= VK_FORMAT_R8_UINT
;
512 state
.rts
[i
].samples
= MAX2(fb
->samples
, 1);
515 state
.num_cbufs
= fb
->nr_cbufs
;
518 struct zink_resource
*zsbuf
= zink_resource(fb
->zsbuf
->texture
);
519 state
.rts
[fb
->nr_cbufs
].format
= zsbuf
->format
;
520 state
.rts
[fb
->nr_cbufs
].samples
= zsbuf
->base
.nr_samples
> 0 ? zsbuf
->base
.nr_samples
: VK_SAMPLE_COUNT_1_BIT
;
522 state
.have_zsbuf
= fb
->zsbuf
!= NULL
;
524 struct hash_entry
*entry
= _mesa_hash_table_search(ctx
->render_pass_cache
,
527 struct zink_render_pass
*rp
;
528 rp
= zink_create_render_pass(screen
, &state
);
529 entry
= _mesa_hash_table_insert(ctx
->render_pass_cache
, &state
, rp
);
537 static struct zink_framebuffer
*
538 create_framebuffer(struct zink_context
*ctx
)
540 struct zink_screen
*screen
= zink_screen(ctx
->base
.screen
);
542 struct zink_framebuffer_state state
= {};
543 state
.rp
= get_render_pass(ctx
);
544 for (int i
= 0; i
< ctx
->fb_state
.nr_cbufs
; i
++) {
545 struct pipe_surface
*psurf
= ctx
->fb_state
.cbufs
[i
];
546 state
.attachments
[i
] = zink_surface(psurf
);
547 state
.has_null_attachments
|= !state
.attachments
[i
];
550 state
.num_attachments
= ctx
->fb_state
.nr_cbufs
;
551 if (ctx
->fb_state
.zsbuf
) {
552 struct pipe_surface
*psurf
= ctx
->fb_state
.zsbuf
;
553 state
.attachments
[state
.num_attachments
++] = zink_surface(psurf
);
556 state
.width
= ctx
->fb_state
.width
;
557 state
.height
= ctx
->fb_state
.height
;
558 state
.layers
= MAX2(ctx
->fb_state
.layers
, 1);
559 state
.samples
= ctx
->fb_state
.samples
;
561 return zink_create_framebuffer(ctx
, screen
, &state
);
565 framebuffer_state_buffer_barriers_setup(struct zink_context
*ctx
,
566 const struct pipe_framebuffer_state
*state
, struct zink_batch
*batch
)
568 for (int i
= 0; i
< state
->nr_cbufs
; i
++) {
569 struct pipe_surface
*surf
= state
->cbufs
[i
];
571 surf
= ctx
->framebuffer
->null_surface
;
572 struct zink_resource
*res
= zink_resource(surf
->texture
);
573 if (res
->layout
!= VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
)
574 zink_resource_barrier(batch
->cmdbuf
, res
, res
->aspect
,
575 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
);
579 struct zink_resource
*res
= zink_resource(state
->zsbuf
->texture
);
580 if (res
->layout
!= VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL
)
581 zink_resource_barrier(batch
->cmdbuf
, res
, res
->aspect
,
582 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL
);
587 zink_begin_render_pass(struct zink_context
*ctx
, struct zink_batch
*batch
)
589 struct zink_screen
*screen
= zink_screen(ctx
->base
.screen
);
590 assert(batch
== zink_curr_batch(ctx
));
591 assert(ctx
->gfx_pipeline_state
.render_pass
);
593 struct pipe_framebuffer_state
*fb_state
= &ctx
->fb_state
;
595 VkRenderPassBeginInfo rpbi
= {};
596 rpbi
.sType
= VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO
;
597 rpbi
.renderPass
= ctx
->gfx_pipeline_state
.render_pass
->render_pass
;
598 rpbi
.renderArea
.offset
.x
= 0;
599 rpbi
.renderArea
.offset
.y
= 0;
600 rpbi
.renderArea
.extent
.width
= fb_state
->width
;
601 rpbi
.renderArea
.extent
.height
= fb_state
->height
;
602 rpbi
.clearValueCount
= 0;
603 rpbi
.pClearValues
= NULL
;
604 rpbi
.framebuffer
= ctx
->framebuffer
->fb
;
606 assert(ctx
->gfx_pipeline_state
.render_pass
&& ctx
->framebuffer
);
607 assert(!batch
->rp
|| batch
->rp
== ctx
->gfx_pipeline_state
.render_pass
);
608 assert(!batch
->fb
|| batch
->fb
== ctx
->framebuffer
);
610 framebuffer_state_buffer_barriers_setup(ctx
, fb_state
, batch
);
612 zink_render_pass_reference(screen
, &batch
->rp
, ctx
->gfx_pipeline_state
.render_pass
);
613 zink_framebuffer_reference(screen
, &batch
->fb
, ctx
->framebuffer
);
615 vkCmdBeginRenderPass(batch
->cmdbuf
, &rpbi
, VK_SUBPASS_CONTENTS_INLINE
);
619 flush_batch(struct zink_context
*ctx
)
621 struct zink_batch
*batch
= zink_curr_batch(ctx
);
623 vkCmdEndRenderPass(batch
->cmdbuf
);
625 zink_end_batch(ctx
, batch
);
628 if (ctx
->curr_batch
== ARRAY_SIZE(ctx
->batches
))
631 zink_start_batch(ctx
, zink_curr_batch(ctx
));
635 zink_batch_rp(struct zink_context
*ctx
)
637 struct zink_batch
*batch
= zink_curr_batch(ctx
);
639 zink_begin_render_pass(ctx
, batch
);
646 zink_batch_no_rp(struct zink_context
*ctx
)
648 struct zink_batch
*batch
= zink_curr_batch(ctx
);
650 /* flush batch and get a new one */
652 batch
= zink_curr_batch(ctx
);
659 zink_set_framebuffer_state(struct pipe_context
*pctx
,
660 const struct pipe_framebuffer_state
*state
)
662 struct zink_context
*ctx
= zink_context(pctx
);
663 struct zink_screen
*screen
= zink_screen(pctx
->screen
);
665 util_copy_framebuffer_state(&ctx
->fb_state
, state
);
667 struct zink_framebuffer
*fb
= ctx
->framebuffer
;
668 /* explicitly unref previous fb to ensure it gets destroyed */
670 zink_framebuffer_reference(screen
, &fb
, NULL
);
671 fb
= create_framebuffer(ctx
);
672 zink_framebuffer_reference(screen
, &ctx
->framebuffer
, fb
);
673 zink_render_pass_reference(screen
, &ctx
->gfx_pipeline_state
.render_pass
, fb
->rp
);
675 ctx
->gfx_pipeline_state
.rast_samples
= MAX2(state
->samples
, 1);
676 ctx
->gfx_pipeline_state
.num_attachments
= state
->nr_cbufs
;
678 struct zink_batch
*batch
= zink_batch_no_rp(ctx
);
680 framebuffer_state_buffer_barriers_setup(ctx
, state
, batch
);
684 zink_set_blend_color(struct pipe_context
*pctx
,
685 const struct pipe_blend_color
*color
)
687 struct zink_context
*ctx
= zink_context(pctx
);
688 memcpy(ctx
->blend_constants
, color
->color
, sizeof(float) * 4);
692 zink_set_sample_mask(struct pipe_context
*pctx
, unsigned sample_mask
)
694 struct zink_context
*ctx
= zink_context(pctx
);
695 ctx
->gfx_pipeline_state
.sample_mask
= sample_mask
;
699 access_src_flags(VkImageLayout layout
)
702 case VK_IMAGE_LAYOUT_UNDEFINED
:
703 case VK_IMAGE_LAYOUT_GENERAL
:
706 case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
:
707 return VK_ACCESS_COLOR_ATTACHMENT_READ_BIT
;
708 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL
:
709 return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
;
711 case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
:
712 return VK_ACCESS_SHADER_READ_BIT
;
714 case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL
:
715 return VK_ACCESS_TRANSFER_READ_BIT
;
717 case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
:
718 return VK_ACCESS_TRANSFER_WRITE_BIT
;
720 case VK_IMAGE_LAYOUT_PREINITIALIZED
:
721 return VK_ACCESS_HOST_WRITE_BIT
;
724 unreachable("unexpected layout");
729 access_dst_flags(VkImageLayout layout
)
732 case VK_IMAGE_LAYOUT_UNDEFINED
:
733 case VK_IMAGE_LAYOUT_GENERAL
:
736 case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
:
737 return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
;
738 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL
:
739 return VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
;
741 case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL
:
742 return VK_ACCESS_TRANSFER_READ_BIT
;
744 case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
:
745 return VK_ACCESS_TRANSFER_WRITE_BIT
;
748 unreachable("unexpected layout");
752 static VkPipelineStageFlags
753 pipeline_dst_stage(VkImageLayout layout
)
756 case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
:
757 return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT
;
758 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL
:
759 return VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT
;
761 case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL
:
762 return VK_PIPELINE_STAGE_TRANSFER_BIT
;
763 case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
:
764 return VK_PIPELINE_STAGE_TRANSFER_BIT
;
767 return VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT
;
771 static VkPipelineStageFlags
772 pipeline_src_stage(VkImageLayout layout
)
775 case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
:
776 return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT
;
777 case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL
:
778 return VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
;
780 case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL
:
781 return VK_PIPELINE_STAGE_TRANSFER_BIT
;
782 case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
:
783 return VK_PIPELINE_STAGE_TRANSFER_BIT
;
786 return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
;
792 zink_resource_barrier(VkCommandBuffer cmdbuf
, struct zink_resource
*res
,
793 VkImageAspectFlags aspect
, VkImageLayout new_layout
)
795 VkImageSubresourceRange isr
= {
797 0, VK_REMAINING_MIP_LEVELS
,
798 0, VK_REMAINING_ARRAY_LAYERS
801 VkImageMemoryBarrier imb
= {
802 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER
,
804 access_src_flags(res
->layout
),
805 access_dst_flags(new_layout
),
808 VK_QUEUE_FAMILY_IGNORED
,
809 VK_QUEUE_FAMILY_IGNORED
,
813 vkCmdPipelineBarrier(
815 pipeline_src_stage(res
->layout
),
816 pipeline_dst_stage(new_layout
),
823 res
->layout
= new_layout
;
827 zink_clear(struct pipe_context
*pctx
,
829 const struct pipe_scissor_state
*scissor_state
,
830 const union pipe_color_union
*pcolor
,
831 double depth
, unsigned stencil
)
833 struct zink_context
*ctx
= zink_context(pctx
);
834 struct pipe_framebuffer_state
*fb
= &ctx
->fb_state
;
836 /* FIXME: this is very inefficient; if no renderpass has been started yet,
837 * we should record the clear if it's full-screen, and apply it as we
838 * start the render-pass. Otherwise we can do a partial out-of-renderpass
841 struct zink_batch
*batch
= zink_batch_rp(ctx
);
843 VkClearAttachment attachments
[1 + PIPE_MAX_COLOR_BUFS
];
844 int num_attachments
= 0;
846 if (buffers
& PIPE_CLEAR_COLOR
) {
847 VkClearColorValue color
;
848 color
.float32
[0] = pcolor
->f
[0];
849 color
.float32
[1] = pcolor
->f
[1];
850 color
.float32
[2] = pcolor
->f
[2];
851 color
.float32
[3] = pcolor
->f
[3];
853 for (unsigned i
= 0; i
< fb
->nr_cbufs
; i
++) {
854 if (!(buffers
& (PIPE_CLEAR_COLOR0
<< i
)) || !fb
->cbufs
[i
])
857 attachments
[num_attachments
].aspectMask
= VK_IMAGE_ASPECT_COLOR_BIT
;
858 attachments
[num_attachments
].colorAttachment
= i
;
859 attachments
[num_attachments
].clearValue
.color
= color
;
864 if (buffers
& PIPE_CLEAR_DEPTHSTENCIL
&& fb
->zsbuf
) {
865 VkImageAspectFlags aspect
= 0;
866 if (buffers
& PIPE_CLEAR_DEPTH
)
867 aspect
|= VK_IMAGE_ASPECT_DEPTH_BIT
;
868 if (buffers
& PIPE_CLEAR_STENCIL
)
869 aspect
|= VK_IMAGE_ASPECT_STENCIL_BIT
;
871 attachments
[num_attachments
].aspectMask
= aspect
;
872 attachments
[num_attachments
].clearValue
.depthStencil
.depth
= depth
;
873 attachments
[num_attachments
].clearValue
.depthStencil
.stencil
= stencil
;
878 cr
.rect
.offset
.x
= 0;
879 cr
.rect
.offset
.y
= 0;
880 cr
.rect
.extent
.width
= fb
->width
;
881 cr
.rect
.extent
.height
= fb
->height
;
882 cr
.baseArrayLayer
= 0;
883 cr
.layerCount
= util_framebuffer_get_num_layers(fb
);
884 vkCmdClearAttachments(batch
->cmdbuf
, num_attachments
, attachments
, 1, &cr
);
887 VkShaderStageFlagBits
888 zink_shader_stage(enum pipe_shader_type type
)
890 VkShaderStageFlagBits stages
[] = {
891 [PIPE_SHADER_VERTEX
] = VK_SHADER_STAGE_VERTEX_BIT
,
892 [PIPE_SHADER_FRAGMENT
] = VK_SHADER_STAGE_FRAGMENT_BIT
,
893 [PIPE_SHADER_GEOMETRY
] = VK_SHADER_STAGE_GEOMETRY_BIT
,
894 [PIPE_SHADER_TESS_CTRL
] = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT
,
895 [PIPE_SHADER_TESS_EVAL
] = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT
,
896 [PIPE_SHADER_COMPUTE
] = VK_SHADER_STAGE_COMPUTE_BIT
,
902 hash_gfx_program(const void *key
)
904 return _mesa_hash_data(key
, sizeof(struct zink_shader
*) * (PIPE_SHADER_TYPES
- 1));
908 equals_gfx_program(const void *a
, const void *b
)
910 return memcmp(a
, b
, sizeof(struct zink_shader
*) * (PIPE_SHADER_TYPES
- 1)) == 0;
914 hash_render_pass_state(const void *key
)
916 return _mesa_hash_data(key
, sizeof(struct zink_render_pass_state
));
920 equals_render_pass_state(const void *a
, const void *b
)
922 return memcmp(a
, b
, sizeof(struct zink_render_pass_state
)) == 0;
926 zink_flush(struct pipe_context
*pctx
,
927 struct pipe_fence_handle
**pfence
,
928 enum pipe_flush_flags flags
)
930 struct zink_context
*ctx
= zink_context(pctx
);
932 struct zink_batch
*batch
= zink_curr_batch(ctx
);
935 if (zink_screen(pctx
->screen
)->have_EXT_transform_feedback
&& ctx
->num_so_targets
)
936 ctx
->dirty_so_targets
= true;
939 zink_fence_reference(zink_screen(pctx
->screen
),
940 (struct zink_fence
**)pfence
,
944 * For some strange reason, we need to finish before presenting, or else
945 * we start rendering on top of the back-buffer for the next frame. This
946 * seems like a bug in the DRI-driver to me, because we really should
947 * be properly protected by fences here, and the back-buffer should
948 * either be swapped with the front-buffer, or blitted from. But for
949 * some strange reason, neither of these things happen.
951 if (flags
& PIPE_FLUSH_END_OF_FRAME
)
952 pctx
->screen
->fence_finish(pctx
->screen
, pctx
,
953 (struct pipe_fence_handle
*)batch
->fence
,
954 PIPE_TIMEOUT_INFINITE
);
/* Nothing to do for resource flushes here; intentionally a no-op. */
static void
zink_flush_resource(struct pipe_context *pipe,
                    struct pipe_resource *resource)
{
}
964 zink_resource_copy_region(struct pipe_context
*pctx
,
965 struct pipe_resource
*pdst
,
966 unsigned dst_level
, unsigned dstx
, unsigned dsty
, unsigned dstz
,
967 struct pipe_resource
*psrc
,
968 unsigned src_level
, const struct pipe_box
*src_box
)
970 struct zink_resource
*dst
= zink_resource(pdst
);
971 struct zink_resource
*src
= zink_resource(psrc
);
972 struct zink_context
*ctx
= zink_context(pctx
);
973 if (dst
->base
.target
!= PIPE_BUFFER
&& src
->base
.target
!= PIPE_BUFFER
) {
974 VkImageCopy region
= {};
976 region
.srcSubresource
.aspectMask
= src
->aspect
;
977 region
.srcSubresource
.mipLevel
= src_level
;
978 region
.srcSubresource
.layerCount
= 1;
979 if (src
->base
.array_size
> 1) {
980 region
.srcSubresource
.baseArrayLayer
= src_box
->z
;
981 region
.srcSubresource
.layerCount
= src_box
->depth
;
982 region
.extent
.depth
= 1;
984 region
.srcOffset
.z
= src_box
->z
;
985 region
.srcSubresource
.layerCount
= 1;
986 region
.extent
.depth
= src_box
->depth
;
989 region
.srcOffset
.x
= src_box
->x
;
990 region
.srcOffset
.y
= src_box
->y
;
992 region
.dstSubresource
.aspectMask
= dst
->aspect
;
993 region
.dstSubresource
.mipLevel
= dst_level
;
994 if (dst
->base
.array_size
> 1) {
995 region
.dstSubresource
.baseArrayLayer
= dstz
;
996 region
.dstSubresource
.layerCount
= src_box
->depth
;
998 region
.dstOffset
.z
= dstz
;
999 region
.dstSubresource
.layerCount
= 1;
1002 region
.dstOffset
.x
= dstx
;
1003 region
.dstOffset
.y
= dsty
;
1004 region
.extent
.width
= src_box
->width
;
1005 region
.extent
.height
= src_box
->height
;
1007 struct zink_batch
*batch
= zink_batch_no_rp(ctx
);
1008 zink_batch_reference_resoure(batch
, src
);
1009 zink_batch_reference_resoure(batch
, dst
);
1011 if (src
->layout
!= VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL
) {
1012 zink_resource_barrier(batch
->cmdbuf
, src
, src
->aspect
,
1013 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL
);
1016 if (dst
->layout
!= VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
) {
1017 zink_resource_barrier(batch
->cmdbuf
, dst
, dst
->aspect
,
1018 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
);
1021 vkCmdCopyImage(batch
->cmdbuf
, src
->image
, src
->layout
,
1022 dst
->image
, dst
->layout
,
1024 } else if (dst
->base
.target
== PIPE_BUFFER
&&
1025 src
->base
.target
== PIPE_BUFFER
) {
1026 VkBufferCopy region
;
1027 region
.srcOffset
= src_box
->x
;
1028 region
.dstOffset
= dstx
;
1029 region
.size
= src_box
->width
;
1031 struct zink_batch
*batch
= zink_batch_no_rp(ctx
);
1032 zink_batch_reference_resoure(batch
, src
);
1033 zink_batch_reference_resoure(batch
, dst
);
1035 vkCmdCopyBuffer(batch
->cmdbuf
, src
->buffer
, dst
->buffer
, 1, ®ion
);
1037 debug_printf("zink: TODO resource copy\n");
1040 static struct pipe_stream_output_target
*
1041 zink_create_stream_output_target(struct pipe_context
*pctx
,
1042 struct pipe_resource
*pres
,
1043 unsigned buffer_offset
,
1044 unsigned buffer_size
)
1046 struct zink_so_target
*t
;
1047 t
= CALLOC_STRUCT(zink_so_target
);
1051 t
->base
.reference
.count
= 1;
1052 t
->base
.context
= pctx
;
1053 pipe_resource_reference(&t
->base
.buffer
, pres
);
1054 t
->base
.buffer_offset
= buffer_offset
;
1055 t
->base
.buffer_size
= buffer_size
;
1057 /* using PIPE_BIND_CUSTOM here lets us create a custom pipe buffer resource,
1058 * which allows us to differentiate and use VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT
1059 * as we must for this case
1061 t
->counter_buffer
= pipe_buffer_create(pctx
->screen
, PIPE_BIND_STREAM_OUTPUT
| PIPE_BIND_CUSTOM
, PIPE_USAGE_DEFAULT
, 4);
1062 if (!t
->counter_buffer
) {
1071 zink_stream_output_target_destroy(struct pipe_context
*pctx
,
1072 struct pipe_stream_output_target
*psot
)
1074 struct zink_so_target
*t
= (struct zink_so_target
*)psot
;
1075 pipe_resource_reference(&t
->counter_buffer
, NULL
);
1076 pipe_resource_reference(&t
->base
.buffer
, NULL
);
1081 zink_set_stream_output_targets(struct pipe_context
*pctx
,
1082 unsigned num_targets
,
1083 struct pipe_stream_output_target
**targets
,
1084 const unsigned *offsets
)
1086 struct zink_context
*ctx
= zink_context(pctx
);
1088 if (num_targets
== 0) {
1089 for (unsigned i
= 0; i
< ctx
->num_so_targets
; i
++)
1090 pipe_so_target_reference(&ctx
->so_targets
[i
], NULL
);
1091 ctx
->num_so_targets
= 0;
1093 for (unsigned i
= 0; i
< num_targets
; i
++)
1094 pipe_so_target_reference(&ctx
->so_targets
[i
], targets
[i
]);
1095 for (unsigned i
= num_targets
; i
< ctx
->num_so_targets
; i
++)
1096 pipe_so_target_reference(&ctx
->so_targets
[i
], NULL
);
1097 ctx
->num_so_targets
= num_targets
;
1099 /* emit memory barrier on next draw for synchronization */
1100 if (offsets
[0] == (unsigned)-1)
1101 ctx
->xfb_barrier
= true;
1102 /* TODO: possibly avoid rebinding on resume if resuming from same buffers? */
1103 ctx
->dirty_so_targets
= true;
1107 struct pipe_context
*
1108 zink_context_create(struct pipe_screen
*pscreen
, void *priv
, unsigned flags
)
1110 struct zink_screen
*screen
= zink_screen(pscreen
);
1111 struct zink_context
*ctx
= CALLOC_STRUCT(zink_context
);
1115 ctx
->base
.screen
= pscreen
;
1116 ctx
->base
.priv
= priv
;
1118 ctx
->base
.destroy
= zink_context_destroy
;
1120 zink_context_state_init(&ctx
->base
);
1122 ctx
->base
.create_sampler_state
= zink_create_sampler_state
;
1123 ctx
->base
.bind_sampler_states
= zink_bind_sampler_states
;
1124 ctx
->base
.delete_sampler_state
= zink_delete_sampler_state
;
1126 ctx
->base
.create_sampler_view
= zink_create_sampler_view
;
1127 ctx
->base
.set_sampler_views
= zink_set_sampler_views
;
1128 ctx
->base
.sampler_view_destroy
= zink_sampler_view_destroy
;
1130 ctx
->base
.create_vs_state
= zink_create_vs_state
;
1131 ctx
->base
.bind_vs_state
= zink_bind_vs_state
;
1132 ctx
->base
.delete_vs_state
= zink_delete_vs_state
;
1134 ctx
->base
.create_fs_state
= zink_create_fs_state
;
1135 ctx
->base
.bind_fs_state
= zink_bind_fs_state
;
1136 ctx
->base
.delete_fs_state
= zink_delete_fs_state
;
1138 ctx
->base
.set_polygon_stipple
= zink_set_polygon_stipple
;
1139 ctx
->base
.set_vertex_buffers
= zink_set_vertex_buffers
;
1140 ctx
->base
.set_viewport_states
= zink_set_viewport_states
;
1141 ctx
->base
.set_scissor_states
= zink_set_scissor_states
;
1142 ctx
->base
.set_constant_buffer
= zink_set_constant_buffer
;
1143 ctx
->base
.set_framebuffer_state
= zink_set_framebuffer_state
;
1144 ctx
->base
.set_stencil_ref
= zink_set_stencil_ref
;
1145 ctx
->base
.set_clip_state
= zink_set_clip_state
;
1146 ctx
->base
.set_blend_color
= zink_set_blend_color
;
1148 ctx
->base
.set_sample_mask
= zink_set_sample_mask
;
1150 ctx
->base
.clear
= zink_clear
;
1151 ctx
->base
.draw_vbo
= zink_draw_vbo
;
1152 ctx
->base
.flush
= zink_flush
;
1154 ctx
->base
.resource_copy_region
= zink_resource_copy_region
;
1155 ctx
->base
.blit
= zink_blit
;
1156 ctx
->base
.create_stream_output_target
= zink_create_stream_output_target
;
1157 ctx
->base
.stream_output_target_destroy
= zink_stream_output_target_destroy
;
1159 ctx
->base
.set_stream_output_targets
= zink_set_stream_output_targets
;
1160 ctx
->base
.flush_resource
= zink_flush_resource
;
1161 zink_context_surface_init(&ctx
->base
);
1162 zink_context_resource_init(&ctx
->base
);
1163 zink_context_query_init(&ctx
->base
);
1165 slab_create_child(&ctx
->transfer_pool
, &screen
->transfer_pool
);
1167 ctx
->base
.stream_uploader
= u_upload_create_default(&ctx
->base
);
1168 ctx
->base
.const_uploader
= ctx
->base
.stream_uploader
;
1170 int prim_hwsupport
= 1 << PIPE_PRIM_POINTS
|
1171 1 << PIPE_PRIM_LINES
|
1172 1 << PIPE_PRIM_LINE_STRIP
|
1173 1 << PIPE_PRIM_TRIANGLES
|
1174 1 << PIPE_PRIM_TRIANGLE_STRIP
|
1175 1 << PIPE_PRIM_TRIANGLE_FAN
;
1177 ctx
->primconvert
= util_primconvert_create(&ctx
->base
, prim_hwsupport
);
1178 if (!ctx
->primconvert
)
1181 ctx
->blitter
= util_blitter_create(&ctx
->base
);
1185 VkCommandPoolCreateInfo cpci
= {};
1186 cpci
.sType
= VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO
;
1187 cpci
.queueFamilyIndex
= screen
->gfx_queue
;
1188 cpci
.flags
= VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT
;
1189 if (vkCreateCommandPool(screen
->dev
, &cpci
, NULL
, &ctx
->cmdpool
) != VK_SUCCESS
)
1192 VkCommandBufferAllocateInfo cbai
= {};
1193 cbai
.sType
= VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO
;
1194 cbai
.commandPool
= ctx
->cmdpool
;
1195 cbai
.level
= VK_COMMAND_BUFFER_LEVEL_PRIMARY
;
1196 cbai
.commandBufferCount
= 1;
1198 VkDescriptorPoolSize sizes
[] = {
1199 {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
, ZINK_BATCH_DESC_SIZE
},
1200 {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
, ZINK_BATCH_DESC_SIZE
}
1202 VkDescriptorPoolCreateInfo dpci
= {};
1203 dpci
.sType
= VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO
;
1204 dpci
.pPoolSizes
= sizes
;
1205 dpci
.poolSizeCount
= ARRAY_SIZE(sizes
);
1206 dpci
.flags
= VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT
;
1207 dpci
.maxSets
= ZINK_BATCH_DESC_SIZE
;
1209 for (int i
= 0; i
< ARRAY_SIZE(ctx
->batches
); ++i
) {
1210 if (vkAllocateCommandBuffers(screen
->dev
, &cbai
, &ctx
->batches
[i
].cmdbuf
) != VK_SUCCESS
)
1213 ctx
->batches
[i
].resources
= _mesa_set_create(NULL
, _mesa_hash_pointer
,
1214 _mesa_key_pointer_equal
);
1215 ctx
->batches
[i
].sampler_views
= _mesa_set_create(NULL
,
1217 _mesa_key_pointer_equal
);
1219 if (!ctx
->batches
[i
].resources
|| !ctx
->batches
[i
].sampler_views
)
1222 util_dynarray_init(&ctx
->batches
[i
].zombie_samplers
, NULL
);
1224 if (vkCreateDescriptorPool(screen
->dev
, &dpci
, 0,
1225 &ctx
->batches
[i
].descpool
) != VK_SUCCESS
)
1229 vkGetDeviceQueue(screen
->dev
, screen
->gfx_queue
, 0, &ctx
->queue
);
1231 ctx
->program_cache
= _mesa_hash_table_create(NULL
,
1233 equals_gfx_program
);
1234 ctx
->render_pass_cache
= _mesa_hash_table_create(NULL
,
1235 hash_render_pass_state
,
1236 equals_render_pass_state
);
1237 if (!ctx
->program_cache
|| !ctx
->render_pass_cache
)
1240 const uint8_t data
[] = { 0 };
1241 ctx
->dummy_buffer
= pipe_buffer_create_with_data(&ctx
->base
,
1242 PIPE_BIND_VERTEX_BUFFER
, PIPE_USAGE_IMMUTABLE
, sizeof(data
), data
);
1243 if (!ctx
->dummy_buffer
)
1246 ctx
->dirty_program
= true;
1248 /* start the first batch */
1249 zink_start_batch(ctx
, zink_curr_batch(ctx
));
1255 vkDestroyCommandPool(screen
->dev
, ctx
->cmdpool
, NULL
);