/*
 * Copyright © 2019 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/* use a gallium context to execute a command buffer */
26 #include "val_private.h"
28 #include "pipe/p_context.h"
29 #include "pipe/p_state.h"
32 #include "pipe/p_shader_tokens.h"
33 #include "tgsi/tgsi_text.h"
34 #include "tgsi/tgsi_parse.h"
36 #include "util/format/u_format.h"
37 #include "util/u_surface.h"
38 #include "util/u_sampler.h"
39 #include "util/u_box.h"
40 #include "util/u_inlines.h"
41 #include "util/format/u_format_zs.h"
43 struct rendering_state
{
44 struct pipe_context
*pctx
;
49 bool stencil_ref_dirty
;
50 bool clip_state_dirty
;
51 bool blend_color_dirty
;
54 bool constbuf_dirty
[PIPE_SHADER_TYPES
];
55 bool pcbuf_dirty
[PIPE_SHADER_TYPES
];
59 bool sample_mask_dirty
;
60 bool min_samples_dirty
;
61 struct pipe_draw_indirect_info indirect_info
;
62 struct pipe_draw_info info
;
64 struct pipe_grid_info dispatch_info
;
65 struct pipe_framebuffer_state framebuffer
;
67 struct pipe_blend_state blend_state
;
69 struct pipe_rasterizer_state rs_state
;
71 struct pipe_depth_stencil_alpha_state dsa_state
;
74 struct pipe_blend_color blend_color
;
75 struct pipe_stencil_ref stencil_ref
;
76 struct pipe_clip_state clip_state
;
79 struct pipe_scissor_state scissors
[16];
82 struct pipe_viewport_state viewports
[16];
85 unsigned index_offset
;
86 struct pipe_resource
*index_buffer
;
87 struct pipe_constant_buffer pc_buffer
[PIPE_SHADER_TYPES
];
88 struct pipe_constant_buffer const_buffer
[PIPE_SHADER_TYPES
][16];
89 int num_const_bufs
[PIPE_SHADER_TYPES
];
92 struct pipe_vertex_buffer vb
[PIPE_MAX_ATTRIBS
];
94 struct pipe_vertex_element ve
[PIPE_MAX_ATTRIBS
];
96 struct pipe_sampler_view
*sv
[PIPE_SHADER_TYPES
][PIPE_MAX_SAMPLERS
];
97 int num_sampler_views
[PIPE_SHADER_TYPES
];
98 struct pipe_sampler_state ss
[PIPE_SHADER_TYPES
][PIPE_MAX_SAMPLERS
];
99 int num_sampler_states
[PIPE_SHADER_TYPES
];
100 bool sv_dirty
[PIPE_SHADER_TYPES
];
101 bool ss_dirty
[PIPE_SHADER_TYPES
];
103 struct pipe_image_view iv
[PIPE_SHADER_TYPES
][PIPE_MAX_SHADER_IMAGES
];
104 int num_shader_images
[PIPE_SHADER_TYPES
];
105 struct pipe_shader_buffer sb
[PIPE_SHADER_TYPES
][PIPE_MAX_SHADER_BUFFERS
];
106 int num_shader_buffers
[PIPE_SHADER_TYPES
];
107 bool iv_dirty
[PIPE_SHADER_TYPES
];
108 bool sb_dirty
[PIPE_SHADER_TYPES
];
109 void *ss_cso
[PIPE_SHADER_TYPES
][PIPE_MAX_SAMPLERS
];
112 uint8_t push_constants
[128 * 4];
114 struct val_render_pass
*pass
;
116 struct val_framebuffer
*vk_framebuffer
;
117 VkRect2D render_area
;
119 uint32_t sample_mask
;
120 unsigned min_samples
;
122 struct val_attachment_state
*attachments
;
125 static void emit_compute_state(struct rendering_state
*state
)
127 if (state
->iv_dirty
[PIPE_SHADER_COMPUTE
]) {
128 state
->pctx
->set_shader_images(state
->pctx
, PIPE_SHADER_COMPUTE
,
129 0, state
->num_shader_images
[PIPE_SHADER_COMPUTE
],
130 state
->iv
[PIPE_SHADER_COMPUTE
]);
131 state
->iv_dirty
[PIPE_SHADER_COMPUTE
] = false;
134 if (state
->pcbuf_dirty
[PIPE_SHADER_COMPUTE
]) {
135 state
->pctx
->set_constant_buffer(state
->pctx
, PIPE_SHADER_COMPUTE
,
136 0, &state
->pc_buffer
[PIPE_SHADER_COMPUTE
]);
137 state
->pcbuf_dirty
[PIPE_SHADER_COMPUTE
] = false;
140 if (state
->constbuf_dirty
[PIPE_SHADER_COMPUTE
]) {
141 for (unsigned i
= 0; i
< state
->num_const_bufs
[PIPE_SHADER_COMPUTE
]; i
++)
142 state
->pctx
->set_constant_buffer(state
->pctx
, PIPE_SHADER_COMPUTE
,
143 i
+ 1, &state
->const_buffer
[PIPE_SHADER_COMPUTE
][i
]);
144 state
->constbuf_dirty
[PIPE_SHADER_COMPUTE
] = false;
147 if (state
->sb_dirty
[PIPE_SHADER_COMPUTE
]) {
148 state
->pctx
->set_shader_buffers(state
->pctx
, PIPE_SHADER_COMPUTE
,
149 0, state
->num_shader_buffers
[PIPE_SHADER_COMPUTE
],
150 state
->sb
[PIPE_SHADER_COMPUTE
], 0);
151 state
->sb_dirty
[PIPE_SHADER_COMPUTE
] = false;
154 if (state
->sv_dirty
[PIPE_SHADER_COMPUTE
]) {
155 state
->pctx
->set_sampler_views(state
->pctx
, PIPE_SHADER_COMPUTE
, 0, state
->num_sampler_views
[PIPE_SHADER_COMPUTE
],
156 state
->sv
[PIPE_SHADER_COMPUTE
]);
157 state
->sv_dirty
[PIPE_SHADER_COMPUTE
] = false;
160 if (state
->ss_dirty
[PIPE_SHADER_COMPUTE
]) {
161 for (unsigned i
= 0; i
< state
->num_sampler_states
[PIPE_SHADER_COMPUTE
]; i
++) {
162 if (state
->ss_cso
[PIPE_SHADER_COMPUTE
][i
])
163 state
->pctx
->delete_sampler_state(state
->pctx
, state
->ss_cso
[PIPE_SHADER_COMPUTE
][i
]);
164 state
->ss_cso
[PIPE_SHADER_COMPUTE
][i
] = state
->pctx
->create_sampler_state(state
->pctx
, &state
->ss
[PIPE_SHADER_COMPUTE
][i
]);
166 state
->pctx
->bind_sampler_states(state
->pctx
, PIPE_SHADER_COMPUTE
, 0, state
->num_sampler_states
[PIPE_SHADER_COMPUTE
], state
->ss_cso
[PIPE_SHADER_COMPUTE
]);
167 state
->ss_dirty
[PIPE_SHADER_COMPUTE
] = false;
171 static void emit_state(struct rendering_state
*state
)
174 if (state
->blend_dirty
) {
175 if (state
->blend_handle
) {
176 state
->pctx
->bind_blend_state(state
->pctx
, NULL
);
177 state
->pctx
->delete_blend_state(state
->pctx
, state
->blend_handle
);
179 state
->blend_handle
= state
->pctx
->create_blend_state(state
->pctx
,
180 &state
->blend_state
);
181 state
->pctx
->bind_blend_state(state
->pctx
, state
->blend_handle
);
183 state
->blend_dirty
= false;
186 if (state
->rs_dirty
) {
187 if (state
->rast_handle
) {
188 state
->pctx
->bind_rasterizer_state(state
->pctx
, NULL
);
189 state
->pctx
->delete_rasterizer_state(state
->pctx
, state
->rast_handle
);
191 state
->rast_handle
= state
->pctx
->create_rasterizer_state(state
->pctx
,
193 state
->pctx
->bind_rasterizer_state(state
->pctx
, state
->rast_handle
);
194 state
->rs_dirty
= false;
197 if (state
->dsa_dirty
) {
198 if (state
->dsa_handle
) {
199 state
->pctx
->bind_depth_stencil_alpha_state(state
->pctx
, NULL
);
200 state
->pctx
->delete_depth_stencil_alpha_state(state
->pctx
, state
->dsa_handle
);
202 state
->dsa_handle
= state
->pctx
->create_depth_stencil_alpha_state(state
->pctx
,
204 state
->pctx
->bind_depth_stencil_alpha_state(state
->pctx
, state
->dsa_handle
);
206 state
->dsa_dirty
= false;
209 if (state
->sample_mask_dirty
) {
210 state
->pctx
->set_sample_mask(state
->pctx
, state
->sample_mask
);
211 state
->sample_mask_dirty
= false;
214 if (state
->min_samples_dirty
) {
215 state
->pctx
->set_min_samples(state
->pctx
, state
->min_samples
);
216 state
->min_samples_dirty
= false;
219 if (state
->blend_color_dirty
) {
220 state
->pctx
->set_blend_color(state
->pctx
, &state
->blend_color
);
221 state
->blend_color_dirty
= false;
224 if (state
->stencil_ref_dirty
) {
225 state
->pctx
->set_stencil_ref(state
->pctx
, &state
->stencil_ref
);
226 state
->stencil_ref_dirty
= false;
229 if (state
->vb_dirty
) {
230 state
->pctx
->set_vertex_buffers(state
->pctx
, state
->start_vb
,
231 state
->num_vb
, state
->vb
);
232 state
->vb_dirty
= false;
235 if (state
->ve_dirty
) {
237 if (state
->velems_cso
)
238 ve
= state
->velems_cso
;
240 state
->velems_cso
= state
->pctx
->create_vertex_elements_state(state
->pctx
, state
->num_ve
,
242 state
->pctx
->bind_vertex_elements_state(state
->pctx
, state
->velems_cso
);
245 state
->pctx
->delete_vertex_elements_state(state
->pctx
, ve
);
248 for (sh
= 0; sh
< PIPE_SHADER_TYPES
; sh
++) {
249 if (state
->constbuf_dirty
[sh
]) {
250 for (unsigned idx
= 0; idx
< state
->num_const_bufs
[sh
]; idx
++)
251 state
->pctx
->set_constant_buffer(state
->pctx
, sh
,
252 idx
+ 1, &state
->const_buffer
[sh
][idx
]);
254 state
->constbuf_dirty
[sh
] = false;
257 for (sh
= 0; sh
< PIPE_SHADER_TYPES
; sh
++) {
258 if (state
->pcbuf_dirty
[sh
]) {
259 state
->pctx
->set_constant_buffer(state
->pctx
, sh
,
260 0, &state
->pc_buffer
[sh
]);
264 for (sh
= 0; sh
< PIPE_SHADER_TYPES
; sh
++) {
265 if (state
->sb_dirty
[sh
]) {
266 state
->pctx
->set_shader_buffers(state
->pctx
, sh
,
267 0, state
->num_shader_buffers
[sh
],
272 for (sh
= 0; sh
< PIPE_SHADER_TYPES
; sh
++) {
273 if (state
->iv_dirty
[sh
]) {
274 state
->pctx
->set_shader_images(state
->pctx
, sh
,
275 0, state
->num_shader_images
[sh
],
280 for (sh
= 0; sh
< PIPE_SHADER_TYPES
; sh
++) {
282 if (!state
->sv_dirty
[sh
])
285 state
->pctx
->set_sampler_views(state
->pctx
, sh
, 0, state
->num_sampler_views
[sh
],
287 state
->sv_dirty
[sh
] = false;
290 for (sh
= 0; sh
< PIPE_SHADER_TYPES
; sh
++) {
292 if (!state
->ss_dirty
[sh
])
295 for (i
= 0; i
< state
->num_sampler_states
[sh
]; i
++) {
296 if (state
->ss_cso
[sh
][i
])
297 state
->pctx
->delete_sampler_state(state
->pctx
, state
->ss_cso
[sh
][i
]);
298 state
->ss_cso
[sh
][i
] = state
->pctx
->create_sampler_state(state
->pctx
, &state
->ss
[sh
][i
]);
301 state
->pctx
->bind_sampler_states(state
->pctx
, sh
, 0, state
->num_sampler_states
[sh
], state
->ss_cso
[sh
]);
304 if (state
->vp_dirty
) {
305 state
->pctx
->set_viewport_states(state
->pctx
, 0, state
->num_viewports
, state
->viewports
);
306 state
->vp_dirty
= false;
309 if (state
->scissor_dirty
) {
310 state
->pctx
->set_scissor_states(state
->pctx
, 0, state
->num_scissors
, state
->scissors
);
311 state
->scissor_dirty
= false;
315 static void handle_compute_pipeline(struct val_cmd_buffer_entry
*cmd
,
316 struct rendering_state
*state
)
318 struct val_pipeline
*pipeline
= cmd
->u
.pipeline
.pipeline
;
320 state
->dispatch_info
.block
[0] = pipeline
->pipeline_nir
[MESA_SHADER_COMPUTE
]->info
.cs
.local_size
[0];
321 state
->dispatch_info
.block
[1] = pipeline
->pipeline_nir
[MESA_SHADER_COMPUTE
]->info
.cs
.local_size
[1];
322 state
->dispatch_info
.block
[2] = pipeline
->pipeline_nir
[MESA_SHADER_COMPUTE
]->info
.cs
.local_size
[2];
323 state
->pctx
->bind_compute_state(state
->pctx
, pipeline
->shader_cso
[PIPE_SHADER_COMPUTE
]);
327 get_viewport_xform(const VkViewport
*viewport
,
328 float scale
[3], float translate
[3])
330 float x
= viewport
->x
;
331 float y
= viewport
->y
;
332 float half_width
= 0.5f
* viewport
->width
;
333 float half_height
= 0.5f
* viewport
->height
;
334 double n
= viewport
->minDepth
;
335 double f
= viewport
->maxDepth
;
337 scale
[0] = half_width
;
338 translate
[0] = half_width
+ x
;
339 scale
[1] = half_height
;
340 translate
[1] = half_height
+ y
;
346 static void handle_graphics_pipeline(struct val_cmd_buffer_entry
*cmd
,
347 struct rendering_state
*state
)
349 struct val_pipeline
*pipeline
= cmd
->u
.pipeline
.pipeline
;
350 bool dynamic_states
[VK_DYNAMIC_STATE_STENCIL_REFERENCE
+1];
351 unsigned fb_samples
= 0;
353 memset(dynamic_states
, 0, sizeof(dynamic_states
));
354 if (pipeline
->graphics_create_info
.pDynamicState
)
356 const VkPipelineDynamicStateCreateInfo
*dyn
= pipeline
->graphics_create_info
.pDynamicState
;
358 for (i
= 0; i
< dyn
->dynamicStateCount
; i
++) {
359 if (dyn
->pDynamicStates
[i
] > VK_DYNAMIC_STATE_STENCIL_REFERENCE
)
361 dynamic_states
[dyn
->pDynamicStates
[i
]] = true;
365 bool has_stage
[PIPE_SHADER_TYPES
] = { false };
367 state
->pctx
->bind_gs_state(state
->pctx
, NULL
);
368 if (state
->pctx
->bind_tcs_state
)
369 state
->pctx
->bind_tcs_state(state
->pctx
, NULL
);
370 if (state
->pctx
->bind_tes_state
)
371 state
->pctx
->bind_tes_state(state
->pctx
, NULL
);
374 for (i
= 0; i
< pipeline
->graphics_create_info
.stageCount
; i
++) {
375 const VkPipelineShaderStageCreateInfo
*sh
= &pipeline
->graphics_create_info
.pStages
[i
];
377 case VK_SHADER_STAGE_FRAGMENT_BIT
:
378 state
->pctx
->bind_fs_state(state
->pctx
, pipeline
->shader_cso
[PIPE_SHADER_FRAGMENT
]);
379 has_stage
[PIPE_SHADER_FRAGMENT
] = true;
381 case VK_SHADER_STAGE_VERTEX_BIT
:
382 state
->pctx
->bind_vs_state(state
->pctx
, pipeline
->shader_cso
[PIPE_SHADER_VERTEX
]);
383 has_stage
[PIPE_SHADER_VERTEX
] = true;
385 case VK_SHADER_STAGE_GEOMETRY_BIT
:
386 state
->pctx
->bind_gs_state(state
->pctx
, pipeline
->shader_cso
[PIPE_SHADER_GEOMETRY
]);
387 has_stage
[PIPE_SHADER_GEOMETRY
] = true;
389 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT
:
390 state
->pctx
->bind_tcs_state(state
->pctx
, pipeline
->shader_cso
[PIPE_SHADER_TESS_CTRL
]);
391 has_stage
[PIPE_SHADER_TESS_CTRL
] = true;
393 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT
:
394 state
->pctx
->bind_tes_state(state
->pctx
, pipeline
->shader_cso
[PIPE_SHADER_TESS_EVAL
]);
395 has_stage
[PIPE_SHADER_TESS_EVAL
] = true;
404 /* there should always be a dummy fs. */
405 if (!has_stage
[PIPE_SHADER_FRAGMENT
])
406 state
->pctx
->bind_fs_state(state
->pctx
, pipeline
->shader_cso
[PIPE_SHADER_FRAGMENT
]);
407 if (state
->pctx
->bind_gs_state
&& !has_stage
[PIPE_SHADER_GEOMETRY
])
408 state
->pctx
->bind_gs_state(state
->pctx
, NULL
);
409 if (state
->pctx
->bind_tcs_state
&& !has_stage
[PIPE_SHADER_TESS_CTRL
])
410 state
->pctx
->bind_tcs_state(state
->pctx
, NULL
);
411 if (state
->pctx
->bind_tes_state
&& !has_stage
[PIPE_SHADER_TESS_EVAL
])
412 state
->pctx
->bind_tes_state(state
->pctx
, NULL
);
414 /* rasterization state */
415 if (pipeline
->graphics_create_info
.pRasterizationState
) {
416 const VkPipelineRasterizationStateCreateInfo
*rsc
= pipeline
->graphics_create_info
.pRasterizationState
;
417 state
->rs_state
.depth_clip_near
= state
->rs_state
.depth_clip_far
= !rsc
->depthClampEnable
;
418 state
->rs_state
.rasterizer_discard
= rsc
->rasterizerDiscardEnable
;
419 state
->rs_state
.front_ccw
= (rsc
->frontFace
== VK_FRONT_FACE_COUNTER_CLOCKWISE
);
420 state
->rs_state
.cull_face
= vk_cull_to_pipe(rsc
->cullMode
);
421 state
->rs_state
.fill_front
= vk_polygon_mode_to_pipe(rsc
->polygonMode
);
422 state
->rs_state
.fill_back
= vk_polygon_mode_to_pipe(rsc
->polygonMode
);
423 state
->rs_state
.point_size_per_vertex
= true;
424 state
->rs_state
.flatshade_first
= true;
425 state
->rs_state
.point_quad_rasterization
= true;
426 state
->rs_state
.clip_halfz
= true;
427 state
->rs_state
.half_pixel_center
= true;
428 state
->rs_state
.scissor
= true;
430 if (!dynamic_states
[VK_DYNAMIC_STATE_LINE_WIDTH
])
431 state
->rs_state
.line_width
= rsc
->lineWidth
;
433 if (!dynamic_states
[VK_DYNAMIC_STATE_DEPTH_BIAS
]) {
434 state
->rs_state
.offset_units
= rsc
->depthBiasConstantFactor
;
435 state
->rs_state
.offset_scale
= rsc
->depthBiasSlopeFactor
;
436 state
->rs_state
.offset_clamp
= rsc
->depthBiasClamp
;
438 state
->rs_dirty
= true;
441 if (pipeline
->graphics_create_info
.pMultisampleState
) {
442 const VkPipelineMultisampleStateCreateInfo
*ms
= pipeline
->graphics_create_info
.pMultisampleState
;
443 state
->rs_state
.multisample
= ms
->rasterizationSamples
> 1;
444 state
->sample_mask
= ms
->pSampleMask
? ms
->pSampleMask
[0] : 0xffffffff;
445 state
->blend_state
.alpha_to_coverage
= ms
->alphaToCoverageEnable
;
446 state
->blend_state
.alpha_to_one
= ms
->alphaToOneEnable
;
447 state
->blend_dirty
= true;
448 state
->rs_dirty
= true;
449 state
->min_samples
= 1;
450 state
->sample_mask_dirty
= true;
451 fb_samples
= ms
->rasterizationSamples
;
452 if (ms
->sampleShadingEnable
) {
453 state
->min_samples
= ceil(ms
->rasterizationSamples
* ms
->minSampleShading
);
454 if (state
->min_samples
> 1)
455 state
->min_samples
= ms
->rasterizationSamples
;
456 if (state
->min_samples
< 1)
457 state
->min_samples
= 1;
459 if (pipeline
->force_min_sample
)
460 state
->min_samples
= ms
->rasterizationSamples
;
461 state
->min_samples_dirty
= true;
463 state
->rs_state
.multisample
= false;
464 state
->blend_state
.alpha_to_coverage
= false;
465 state
->blend_state
.alpha_to_one
= false;
466 state
->rs_dirty
= true;
469 if (pipeline
->graphics_create_info
.pDepthStencilState
) {
470 const VkPipelineDepthStencilStateCreateInfo
*dsa
= pipeline
->graphics_create_info
.pDepthStencilState
;
472 state
->dsa_state
.depth
.enabled
= dsa
->depthTestEnable
;
473 state
->dsa_state
.depth
.writemask
= dsa
->depthWriteEnable
;
474 state
->dsa_state
.depth
.func
= dsa
->depthCompareOp
;
475 state
->dsa_state
.depth
.bounds_test
= dsa
->depthBoundsTestEnable
;
477 if (!dynamic_states
[VK_DYNAMIC_STATE_DEPTH_BOUNDS
]) {
478 state
->dsa_state
.depth
.bounds_min
= dsa
->minDepthBounds
;
479 state
->dsa_state
.depth
.bounds_max
= dsa
->maxDepthBounds
;
482 state
->dsa_state
.stencil
[0].enabled
= dsa
->stencilTestEnable
;
483 state
->dsa_state
.stencil
[0].func
= dsa
->front
.compareOp
;
484 state
->dsa_state
.stencil
[0].fail_op
= vk_conv_stencil_op(dsa
->front
.failOp
);
485 state
->dsa_state
.stencil
[0].zpass_op
= vk_conv_stencil_op(dsa
->front
.passOp
);
486 state
->dsa_state
.stencil
[0].zfail_op
= vk_conv_stencil_op(dsa
->front
.depthFailOp
);
488 state
->dsa_state
.stencil
[1].enabled
= dsa
->stencilTestEnable
;
489 state
->dsa_state
.stencil
[1].func
= dsa
->back
.compareOp
;
490 state
->dsa_state
.stencil
[1].fail_op
= vk_conv_stencil_op(dsa
->back
.failOp
);
491 state
->dsa_state
.stencil
[1].zpass_op
= vk_conv_stencil_op(dsa
->back
.passOp
);
492 state
->dsa_state
.stencil
[1].zfail_op
= vk_conv_stencil_op(dsa
->back
.depthFailOp
);
494 if (!dynamic_states
[VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK
]) {
495 state
->dsa_state
.stencil
[0].valuemask
= dsa
->front
.compareMask
;
496 state
->dsa_state
.stencil
[1].valuemask
= dsa
->back
.compareMask
;
499 if (!dynamic_states
[VK_DYNAMIC_STATE_STENCIL_WRITE_MASK
]) {
500 state
->dsa_state
.stencil
[0].writemask
= dsa
->front
.writeMask
;
501 state
->dsa_state
.stencil
[1].writemask
= dsa
->back
.writeMask
;
504 if (dsa
->stencilTestEnable
) {
505 if (!dynamic_states
[VK_DYNAMIC_STATE_STENCIL_REFERENCE
]) {
506 state
->stencil_ref
.ref_value
[0] = dsa
->front
.reference
;
507 state
->stencil_ref
.ref_value
[1] = dsa
->back
.reference
;
508 state
->stencil_ref_dirty
= true;
512 state
->dsa_dirty
= true;
515 if (pipeline
->graphics_create_info
.pColorBlendState
) {
516 const VkPipelineColorBlendStateCreateInfo
*cb
= pipeline
->graphics_create_info
.pColorBlendState
;
518 if (cb
->attachmentCount
> 1)
519 state
->blend_state
.independent_blend_enable
= true;
520 for (i
= 0; i
< cb
->attachmentCount
; i
++) {
521 state
->blend_state
.rt
[i
].colormask
= cb
->pAttachments
[i
].colorWriteMask
;
522 state
->blend_state
.rt
[i
].blend_enable
= cb
->pAttachments
[i
].blendEnable
;
523 state
->blend_state
.rt
[i
].rgb_func
= vk_conv_blend_func(cb
->pAttachments
[i
].colorBlendOp
);
524 state
->blend_state
.rt
[i
].rgb_src_factor
= vk_conv_blend_factor(cb
->pAttachments
[i
].srcColorBlendFactor
);
525 state
->blend_state
.rt
[i
].rgb_dst_factor
= vk_conv_blend_factor(cb
->pAttachments
[i
].dstColorBlendFactor
);
526 state
->blend_state
.rt
[i
].alpha_func
= vk_conv_blend_func(cb
->pAttachments
[i
].alphaBlendOp
);
527 state
->blend_state
.rt
[i
].alpha_src_factor
= vk_conv_blend_factor(cb
->pAttachments
[i
].srcAlphaBlendFactor
);
528 state
->blend_state
.rt
[i
].alpha_dst_factor
= vk_conv_blend_factor(cb
->pAttachments
[i
].dstAlphaBlendFactor
);
530 /* At least llvmpipe applies the blend factor prior to the blend function,
531 * regardless of what function is used. (like i965 hardware).
532 * It means for MIN/MAX the blend factor has to be stomped to ONE.
534 if (cb
->pAttachments
[i
].colorBlendOp
== VK_BLEND_OP_MIN
||
535 cb
->pAttachments
[i
].colorBlendOp
== VK_BLEND_OP_MAX
) {
536 state
->blend_state
.rt
[i
].rgb_src_factor
= PIPE_BLENDFACTOR_ONE
;
537 state
->blend_state
.rt
[i
].rgb_dst_factor
= PIPE_BLENDFACTOR_ONE
;
540 if (cb
->pAttachments
[i
].alphaBlendOp
== VK_BLEND_OP_MIN
||
541 cb
->pAttachments
[i
].alphaBlendOp
== VK_BLEND_OP_MAX
) {
542 state
->blend_state
.rt
[i
].alpha_src_factor
= PIPE_BLENDFACTOR_ONE
;
543 state
->blend_state
.rt
[i
].alpha_dst_factor
= PIPE_BLENDFACTOR_ONE
;
546 state
->blend_dirty
= true;
547 if (!dynamic_states
[VK_DYNAMIC_STATE_BLEND_CONSTANTS
]) {
548 memcpy(state
->blend_color
.color
, cb
->blendConstants
, 4 * sizeof(float));
549 state
->blend_color_dirty
= true;
554 const VkPipelineVertexInputStateCreateInfo
*vi
= pipeline
->graphics_create_info
.pVertexInputState
;
557 for (i
= 0; i
< vi
->vertexBindingDescriptionCount
; i
++) {
558 state
->vb
[i
].stride
= vi
->pVertexBindingDescriptions
[i
].stride
;
561 int max_location
= -1;
562 for (i
= 0; i
< vi
->vertexAttributeDescriptionCount
; i
++) {
563 unsigned location
= vi
->pVertexAttributeDescriptions
[i
].location
;
564 state
->ve
[location
].src_offset
= vi
->pVertexAttributeDescriptions
[i
].offset
;
565 state
->ve
[location
].vertex_buffer_index
= vi
->pVertexAttributeDescriptions
[i
].binding
;
566 state
->ve
[location
].src_format
= vk_format_to_pipe(vi
->pVertexAttributeDescriptions
[i
].format
);
567 state
->ve
[location
].instance_divisor
= vi
->pVertexBindingDescriptions
[vi
->pVertexAttributeDescriptions
[i
].binding
].inputRate
;
569 if ((int)location
> max_location
)
570 max_location
= location
;
572 state
->num_ve
= max_location
+ 1;
573 state
->vb_dirty
= true;
574 state
->ve_dirty
= true;
578 const VkPipelineInputAssemblyStateCreateInfo
*ia
= pipeline
->graphics_create_info
.pInputAssemblyState
;
580 state
->info
.mode
= vk_conv_topology(ia
->topology
);
581 state
->info
.primitive_restart
= ia
->primitiveRestartEnable
;
584 if (pipeline
->graphics_create_info
.pTessellationState
) {
585 const VkPipelineTessellationStateCreateInfo
*ts
= pipeline
->graphics_create_info
.pTessellationState
;
586 state
->info
.vertices_per_patch
= ts
->patchControlPoints
;
588 state
->info
.vertices_per_patch
= 0;
590 if (pipeline
->graphics_create_info
.pViewportState
) {
591 const VkPipelineViewportStateCreateInfo
*vpi
= pipeline
->graphics_create_info
.pViewportState
;
594 state
->num_viewports
= vpi
->viewportCount
;
595 state
->num_scissors
= vpi
->scissorCount
;
596 state
->vp_dirty
= true;
597 if (!dynamic_states
[VK_DYNAMIC_STATE_VIEWPORT
]) {
598 for (i
= 0; i
< vpi
->viewportCount
; i
++)
599 get_viewport_xform(&vpi
->pViewports
[i
], state
->viewports
[i
].scale
, state
->viewports
[i
].translate
);
600 state
->vp_dirty
= true;
602 if (!dynamic_states
[VK_DYNAMIC_STATE_SCISSOR
]) {
603 for (i
= 0; i
< vpi
->scissorCount
; i
++) {
604 const VkRect2D
*ss
= &vpi
->pScissors
[i
];
605 state
->scissors
[i
].minx
= ss
->offset
.x
;
606 state
->scissors
[i
].miny
= ss
->offset
.y
;
607 state
->scissors
[i
].maxx
= ss
->offset
.x
+ ss
->extent
.width
;
608 state
->scissors
[i
].maxy
= ss
->offset
.y
+ ss
->extent
.height
;
609 state
->scissor_dirty
= true;
615 if (fb_samples
!= state
->framebuffer
.samples
) {
616 state
->framebuffer
.samples
= fb_samples
;
617 state
->pctx
->set_framebuffer_state(state
->pctx
, &state
->framebuffer
);
621 static void handle_pipeline(struct val_cmd_buffer_entry
*cmd
,
622 struct rendering_state
*state
)
624 struct val_pipeline
*pipeline
= cmd
->u
.pipeline
.pipeline
;
625 if (pipeline
->is_compute_pipeline
)
626 handle_compute_pipeline(cmd
, state
);
628 handle_graphics_pipeline(cmd
, state
);
631 static void handle_vertex_buffers(struct val_cmd_buffer_entry
*cmd
,
632 struct rendering_state
*state
)
635 struct val_cmd_bind_vertex_buffers
*vcb
= &cmd
->u
.vertex_buffers
;
636 for (i
= 0; i
< vcb
->binding_count
; i
++) {
637 int idx
= i
+ vcb
->first
;
639 state
->vb
[idx
].buffer_offset
= vcb
->offsets
[i
];
640 state
->vb
[idx
].buffer
.resource
= vcb
->buffers
[i
]->bo
;
642 if (vcb
->first
< state
->start_vb
)
643 state
->start_vb
= vcb
->first
;
644 if (vcb
->first
+ vcb
->binding_count
>= state
->num_vb
)
645 state
->num_vb
= vcb
->first
+ vcb
->binding_count
;
646 state
->vb_dirty
= true;
651 uint16_t const_buffer_count
;
652 uint16_t shader_buffer_count
;
653 uint16_t sampler_count
;
654 uint16_t sampler_view_count
;
655 uint16_t image_count
;
656 } stage
[MESA_SHADER_STAGES
];
659 const uint32_t *dynamic_offsets
;
660 uint32_t dynamic_offset_count
;
663 static void fill_sampler(struct pipe_sampler_state
*ss
,
664 struct val_sampler
*samp
)
666 ss
->wrap_s
= vk_conv_wrap_mode(samp
->create_info
.addressModeU
);
667 ss
->wrap_t
= vk_conv_wrap_mode(samp
->create_info
.addressModeV
);
668 ss
->wrap_r
= vk_conv_wrap_mode(samp
->create_info
.addressModeW
);
669 ss
->min_img_filter
= samp
->create_info
.minFilter
== VK_FILTER_LINEAR
? PIPE_TEX_FILTER_LINEAR
: PIPE_TEX_FILTER_NEAREST
;
670 ss
->min_mip_filter
= samp
->create_info
.mipmapMode
== VK_SAMPLER_MIPMAP_MODE_LINEAR
? PIPE_TEX_MIPFILTER_LINEAR
: PIPE_TEX_MIPFILTER_NEAREST
;
671 ss
->mag_img_filter
= samp
->create_info
.magFilter
== VK_FILTER_LINEAR
? PIPE_TEX_FILTER_LINEAR
: PIPE_TEX_FILTER_NEAREST
;
672 ss
->min_lod
= samp
->create_info
.minLod
;
673 ss
->max_lod
= samp
->create_info
.maxLod
;
674 ss
->lod_bias
= samp
->create_info
.mipLodBias
;
675 ss
->max_anisotropy
= samp
->create_info
.maxAnisotropy
;
676 ss
->normalized_coords
= !samp
->create_info
.unnormalizedCoordinates
;
677 ss
->compare_mode
= samp
->create_info
.compareEnable
? PIPE_TEX_COMPARE_R_TO_TEXTURE
: PIPE_TEX_COMPARE_NONE
;
678 ss
->compare_func
= samp
->create_info
.compareOp
;
679 ss
->seamless_cube_map
= true;
681 switch (samp
->create_info
.borderColor
) {
682 case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK
:
683 case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK
:
685 memset(ss
->border_color
.f
, 0, 4 * sizeof(float));
687 case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK
:
688 ss
->border_color
.f
[0] = ss
->border_color
.f
[1] = ss
->border_color
.f
[2] = 0.0f
;
689 ss
->border_color
.f
[3] = 1.0f
;
691 case VK_BORDER_COLOR_INT_OPAQUE_BLACK
:
692 ss
->border_color
.i
[0] = ss
->border_color
.i
[1] = ss
->border_color
.i
[2] = 0;
693 ss
->border_color
.i
[3] = 1;
695 case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE
:
696 ss
->border_color
.f
[0] = ss
->border_color
.f
[1] = ss
->border_color
.f
[2] = 1.0f
;
697 ss
->border_color
.f
[3] = 1.0f
;
699 case VK_BORDER_COLOR_INT_OPAQUE_WHITE
:
700 ss
->border_color
.i
[0] = ss
->border_color
.i
[1] = ss
->border_color
.i
[2] = 1;
701 ss
->border_color
.i
[3] = 1;
706 static void fill_sampler_stage(struct rendering_state
*state
,
707 struct dyn_info
*dyn_info
,
708 gl_shader_stage stage
,
709 enum pipe_shader_type p_stage
,
711 const struct val_descriptor
*descriptor
,
712 const struct val_descriptor_set_binding_layout
*binding
)
714 int ss_idx
= binding
->stage
[stage
].sampler_index
;
718 ss_idx
+= dyn_info
->stage
[stage
].sampler_count
;
719 fill_sampler(&state
->ss
[p_stage
][ss_idx
], descriptor
->sampler
);
720 if (state
->num_sampler_states
[p_stage
] <= ss_idx
)
721 state
->num_sampler_states
[p_stage
] = ss_idx
+ 1;
722 state
->ss_dirty
[p_stage
] = true;
725 static void fill_sampler_view_stage(struct rendering_state
*state
,
726 struct dyn_info
*dyn_info
,
727 gl_shader_stage stage
,
728 enum pipe_shader_type p_stage
,
730 const struct val_descriptor
*descriptor
,
731 const struct val_descriptor_set_binding_layout
*binding
)
733 int sv_idx
= binding
->stage
[stage
].sampler_view_index
;
737 sv_idx
+= dyn_info
->stage
[stage
].sampler_view_count
;
738 struct val_image_view
*iv
= descriptor
->image_view
;
739 struct pipe_sampler_view templ
;
741 enum pipe_format pformat
;
742 if (iv
->subresourceRange
.aspectMask
== VK_IMAGE_ASPECT_DEPTH_BIT
)
743 pformat
= vk_format_to_pipe(iv
->format
);
744 else if (iv
->subresourceRange
.aspectMask
== VK_IMAGE_ASPECT_STENCIL_BIT
)
745 pformat
= util_format_stencil_only(vk_format_to_pipe(iv
->format
));
747 pformat
= vk_format_to_pipe(iv
->format
);
748 u_sampler_view_default_template(&templ
,
751 if (iv
->view_type
== VK_IMAGE_VIEW_TYPE_1D
)
752 templ
.target
= PIPE_TEXTURE_1D
;
753 if (iv
->view_type
== VK_IMAGE_VIEW_TYPE_2D
)
754 templ
.target
= PIPE_TEXTURE_2D
;
755 if (iv
->view_type
== VK_IMAGE_VIEW_TYPE_CUBE
)
756 templ
.target
= PIPE_TEXTURE_CUBE
;
757 templ
.u
.tex
.first_layer
= iv
->subresourceRange
.baseArrayLayer
;
758 templ
.u
.tex
.last_layer
= iv
->subresourceRange
.baseArrayLayer
+ val_get_layerCount(iv
->image
, &iv
->subresourceRange
) - 1;
759 templ
.u
.tex
.first_level
= iv
->subresourceRange
.baseMipLevel
;
760 templ
.u
.tex
.last_level
= iv
->subresourceRange
.baseMipLevel
+ val_get_levelCount(iv
->image
, &iv
->subresourceRange
) - 1;
761 if (iv
->components
.r
!= VK_COMPONENT_SWIZZLE_IDENTITY
)
762 templ
.swizzle_r
= vk_conv_swizzle(iv
->components
.r
);
763 if (iv
->components
.g
!= VK_COMPONENT_SWIZZLE_IDENTITY
)
764 templ
.swizzle_g
= vk_conv_swizzle(iv
->components
.g
);
765 if (iv
->components
.b
!= VK_COMPONENT_SWIZZLE_IDENTITY
)
766 templ
.swizzle_b
= vk_conv_swizzle(iv
->components
.b
);
767 if (iv
->components
.a
!= VK_COMPONENT_SWIZZLE_IDENTITY
)
768 templ
.swizzle_a
= vk_conv_swizzle(iv
->components
.a
);
770 if (util_format_is_depth_or_stencil(templ
.format
)) {
771 templ
.swizzle_r
= PIPE_SWIZZLE_X
;
772 templ
.swizzle_g
= PIPE_SWIZZLE_0
;
773 templ
.swizzle_b
= PIPE_SWIZZLE_0
;
776 if (state
->sv
[p_stage
][sv_idx
])
777 pipe_sampler_view_reference(&state
->sv
[p_stage
][sv_idx
], NULL
);
778 state
->sv
[p_stage
][sv_idx
] = state
->pctx
->create_sampler_view(state
->pctx
, iv
->image
->bo
, &templ
);
779 if (state
->num_sampler_views
[p_stage
] <= sv_idx
)
780 state
->num_sampler_views
[p_stage
] = sv_idx
+ 1;
781 state
->sv_dirty
[p_stage
] = true;
784 static void fill_sampler_buffer_view_stage(struct rendering_state
*state
,
785 struct dyn_info
*dyn_info
,
786 gl_shader_stage stage
,
787 enum pipe_shader_type p_stage
,
789 const struct val_descriptor
*descriptor
,
790 const struct val_descriptor_set_binding_layout
*binding
)
792 int sv_idx
= binding
->stage
[stage
].sampler_view_index
;
796 sv_idx
+= dyn_info
->stage
[stage
].sampler_view_count
;
797 struct val_buffer_view
*bv
= descriptor
->buffer_view
;
798 struct pipe_sampler_view templ
;
799 memset(&templ
, 0, sizeof(templ
));
800 templ
.target
= PIPE_BUFFER
;
801 templ
.swizzle_r
= PIPE_SWIZZLE_X
;
802 templ
.swizzle_g
= PIPE_SWIZZLE_Y
;
803 templ
.swizzle_b
= PIPE_SWIZZLE_Z
;
804 templ
.swizzle_a
= PIPE_SWIZZLE_W
;
805 templ
.format
= bv
->pformat
;
806 templ
.u
.buf
.offset
= bv
->offset
+ bv
->buffer
->offset
;
807 templ
.u
.buf
.size
= bv
->range
== VK_WHOLE_SIZE
? (bv
->buffer
->size
- bv
->offset
) : bv
->range
;
808 templ
.texture
= bv
->buffer
->bo
;
809 templ
.context
= state
->pctx
;
811 if (state
->sv
[p_stage
][sv_idx
])
812 pipe_sampler_view_reference(&state
->sv
[p_stage
][sv_idx
], NULL
);
813 state
->sv
[p_stage
][sv_idx
] = state
->pctx
->create_sampler_view(state
->pctx
, bv
->buffer
->bo
, &templ
);
814 if (state
->num_sampler_views
[p_stage
] <= sv_idx
)
815 state
->num_sampler_views
[p_stage
] = sv_idx
+ 1;
816 state
->sv_dirty
[p_stage
] = true;
819 static void fill_image_view_stage(struct rendering_state
*state
,
820 struct dyn_info
*dyn_info
,
821 gl_shader_stage stage
,
822 enum pipe_shader_type p_stage
,
824 const struct val_descriptor
*descriptor
,
825 const struct val_descriptor_set_binding_layout
*binding
)
827 struct val_image_view
*iv
= descriptor
->image_view
;
828 int idx
= binding
->stage
[stage
].image_index
;
832 idx
+= dyn_info
->stage
[stage
].image_count
;
833 state
->iv
[p_stage
][idx
].resource
= iv
->image
->bo
;
834 if (iv
->subresourceRange
.aspectMask
== VK_IMAGE_ASPECT_DEPTH_BIT
)
835 state
->iv
[p_stage
][idx
].format
= vk_format_to_pipe(iv
->format
);
836 else if (iv
->subresourceRange
.aspectMask
== VK_IMAGE_ASPECT_STENCIL_BIT
)
837 state
->iv
[p_stage
][idx
].format
= util_format_stencil_only(vk_format_to_pipe(iv
->format
));
839 state
->iv
[p_stage
][idx
].format
= vk_format_to_pipe(iv
->format
);
840 state
->iv
[p_stage
][idx
].u
.tex
.first_layer
= iv
->subresourceRange
.baseArrayLayer
;
841 state
->iv
[p_stage
][idx
].u
.tex
.last_layer
= iv
->subresourceRange
.baseArrayLayer
+ val_get_layerCount(iv
->image
, &iv
->subresourceRange
) - 1;
842 state
->iv
[p_stage
][idx
].u
.tex
.level
= iv
->subresourceRange
.baseMipLevel
;
843 if (state
->num_shader_images
[p_stage
] <= idx
)
844 state
->num_shader_images
[p_stage
] = idx
+ 1;
845 state
->iv_dirty
[p_stage
] = true;
848 static void fill_image_buffer_view_stage(struct rendering_state
*state
,
849 struct dyn_info
*dyn_info
,
850 gl_shader_stage stage
,
851 enum pipe_shader_type p_stage
,
853 const struct val_descriptor
*descriptor
,
854 const struct val_descriptor_set_binding_layout
*binding
)
856 struct val_buffer_view
*bv
= descriptor
->buffer_view
;
857 int idx
= binding
->stage
[stage
].image_index
;
861 idx
+= dyn_info
->stage
[stage
].image_count
;
862 state
->iv
[p_stage
][idx
].resource
= bv
->buffer
->bo
;
863 state
->iv
[p_stage
][idx
].format
= bv
->pformat
;
864 state
->iv
[p_stage
][idx
].u
.buf
.offset
= bv
->offset
+ bv
->buffer
->offset
;
865 state
->iv
[p_stage
][idx
].u
.buf
.size
= bv
->range
== VK_WHOLE_SIZE
? (bv
->buffer
->size
- bv
->offset
): bv
->range
;
866 if (state
->num_shader_images
[p_stage
] <= idx
)
867 state
->num_shader_images
[p_stage
] = idx
+ 1;
868 state
->iv_dirty
[p_stage
] = true;
871 static void handle_descriptor(struct rendering_state
*state
,
872 struct dyn_info
*dyn_info
,
873 const struct val_descriptor_set_binding_layout
*binding
,
874 gl_shader_stage stage
,
875 enum pipe_shader_type p_stage
,
877 const struct val_descriptor
*descriptor
)
879 bool is_dynamic
= descriptor
->type
== VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
||
880 descriptor
->type
== VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
;
882 switch (descriptor
->type
) {
883 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT
:
884 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE
: {
885 fill_image_view_stage(state
, dyn_info
, stage
, p_stage
, array_idx
, descriptor
, binding
);
888 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER
:
889 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC
: {
890 int idx
= binding
->stage
[stage
].const_buffer_index
;
894 idx
+= dyn_info
->stage
[stage
].const_buffer_count
;
895 state
->const_buffer
[p_stage
][idx
].buffer
= descriptor
->buf
.buffer
->bo
;
896 state
->const_buffer
[p_stage
][idx
].buffer_offset
= descriptor
->buf
.offset
+ descriptor
->buf
.buffer
->offset
;
898 uint32_t offset
= dyn_info
->dynamic_offsets
[dyn_info
->dyn_index
+ binding
->dynamic_index
+ array_idx
];
899 state
->const_buffer
[p_stage
][idx
].buffer_offset
+= offset
;
901 if (descriptor
->buf
.range
== VK_WHOLE_SIZE
)
902 state
->const_buffer
[p_stage
][idx
].buffer_size
= descriptor
->buf
.buffer
->bo
->width0
- state
->const_buffer
[p_stage
][idx
].buffer_offset
;
904 state
->const_buffer
[p_stage
][idx
].buffer_size
= descriptor
->buf
.range
;
905 if (state
->num_const_bufs
[p_stage
] <= idx
)
906 state
->num_const_bufs
[p_stage
] = idx
+ 1;
907 state
->constbuf_dirty
[p_stage
] = true;
910 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
:
911 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC
: {
912 int idx
= binding
->stage
[stage
].shader_buffer_index
;
916 idx
+= dyn_info
->stage
[stage
].shader_buffer_count
;
917 state
->sb
[p_stage
][idx
].buffer
= descriptor
->buf
.buffer
->bo
;
918 state
->sb
[p_stage
][idx
].buffer_offset
= descriptor
->buf
.offset
+ descriptor
->buf
.buffer
->offset
;
920 uint32_t offset
= dyn_info
->dynamic_offsets
[dyn_info
->dyn_index
+ binding
->dynamic_index
+ array_idx
];
921 state
->sb
[p_stage
][idx
].buffer_offset
+= offset
;
923 if (descriptor
->buf
.range
== VK_WHOLE_SIZE
)
924 state
->sb
[p_stage
][idx
].buffer_size
= descriptor
->buf
.buffer
->bo
->width0
- state
->sb
[p_stage
][idx
].buffer_offset
;
926 state
->sb
[p_stage
][idx
].buffer_size
= descriptor
->buf
.range
;
927 if (state
->num_shader_buffers
[p_stage
] <= idx
)
928 state
->num_shader_buffers
[p_stage
] = idx
+ 1;
929 state
->sb_dirty
[p_stage
] = true;
932 case VK_DESCRIPTOR_TYPE_SAMPLER
:
933 if (!descriptor
->sampler
)
935 fill_sampler_stage(state
, dyn_info
, stage
, p_stage
, array_idx
, descriptor
, binding
);
937 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE
:
938 fill_sampler_view_stage(state
, dyn_info
, stage
, p_stage
, array_idx
, descriptor
, binding
);
940 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
:
941 fill_sampler_stage(state
, dyn_info
, stage
, p_stage
, array_idx
, descriptor
, binding
);
942 fill_sampler_view_stage(state
, dyn_info
, stage
, p_stage
, array_idx
, descriptor
, binding
);
944 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER
:
945 fill_sampler_buffer_view_stage(state
, dyn_info
, stage
, p_stage
, array_idx
, descriptor
, binding
);
947 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
:
948 fill_image_buffer_view_stage(state
, dyn_info
, stage
, p_stage
, array_idx
, descriptor
, binding
);
951 fprintf(stderr
, "Unhandled descriptor set %d\n", descriptor
->type
);
956 static void handle_set_stage(struct rendering_state
*state
,
957 struct dyn_info
*dyn_info
,
958 const struct val_descriptor_set
*set
,
959 gl_shader_stage stage
,
960 enum pipe_shader_type p_stage
)
963 for (j
= 0; j
< set
->layout
->binding_count
; j
++) {
964 const struct val_descriptor_set_binding_layout
*binding
;
965 const struct val_descriptor
*descriptor
;
966 binding
= &set
->layout
->binding
[j
];
968 if (binding
->valid
) {
969 for (int i
= 0; i
< binding
->array_size
; i
++) {
970 descriptor
= &set
->descriptors
[binding
->descriptor_index
+ i
];
971 handle_descriptor(state
, dyn_info
, binding
, stage
, p_stage
, i
, descriptor
);
977 static void increment_dyn_info(struct dyn_info
*dyn_info
,
978 struct val_descriptor_set_layout
*layout
, bool inc_dyn
)
980 for (gl_shader_stage stage
= MESA_SHADER_VERTEX
; stage
< MESA_SHADER_STAGES
; stage
++) {
981 dyn_info
->stage
[stage
].const_buffer_count
+= layout
->stage
[stage
].const_buffer_count
;
982 dyn_info
->stage
[stage
].shader_buffer_count
+= layout
->stage
[stage
].shader_buffer_count
;
983 dyn_info
->stage
[stage
].sampler_count
+= layout
->stage
[stage
].sampler_count
;
984 dyn_info
->stage
[stage
].sampler_view_count
+= layout
->stage
[stage
].sampler_view_count
;
985 dyn_info
->stage
[stage
].image_count
+= layout
->stage
[stage
].image_count
;
988 dyn_info
->dyn_index
+= layout
->dynamic_offset_count
;
991 static void handle_compute_descriptor_sets(struct val_cmd_buffer_entry
*cmd
,
992 struct dyn_info
*dyn_info
,
993 struct rendering_state
*state
)
995 struct val_cmd_bind_descriptor_sets
*bds
= &cmd
->u
.descriptor_sets
;
998 for (i
= 0; i
< bds
->first
; i
++) {
999 increment_dyn_info(dyn_info
, bds
->layout
->set
[i
].layout
, false);
1001 for (i
= 0; i
< bds
->count
; i
++) {
1002 const struct val_descriptor_set
*set
= bds
->sets
[i
];
1004 if (set
->layout
->shader_stages
& VK_SHADER_STAGE_COMPUTE_BIT
)
1005 handle_set_stage(state
, dyn_info
, set
, MESA_SHADER_COMPUTE
, PIPE_SHADER_COMPUTE
);
1006 increment_dyn_info(dyn_info
, bds
->layout
->set
[bds
->first
+ i
].layout
, true);
1010 static void handle_descriptor_sets(struct val_cmd_buffer_entry
*cmd
,
1011 struct rendering_state
*state
)
1013 struct val_cmd_bind_descriptor_sets
*bds
= &cmd
->u
.descriptor_sets
;
1015 struct dyn_info dyn_info
;
1017 dyn_info
.dyn_index
= 0;
1018 dyn_info
.dynamic_offsets
= bds
->dynamic_offsets
;
1019 dyn_info
.dynamic_offset_count
= bds
->dynamic_offset_count
;
1021 memset(dyn_info
.stage
, 0, sizeof(dyn_info
.stage
));
1022 if (bds
->bind_point
== VK_PIPELINE_BIND_POINT_COMPUTE
) {
1023 handle_compute_descriptor_sets(cmd
, &dyn_info
, state
);
1027 for (i
= 0; i
< bds
->first
; i
++) {
1028 increment_dyn_info(&dyn_info
, bds
->layout
->set
[i
].layout
, false);
1031 for (i
= 0; i
< bds
->count
; i
++) {
1032 const struct val_descriptor_set
*set
= bds
->sets
[i
];
1034 if (set
->layout
->shader_stages
& VK_SHADER_STAGE_VERTEX_BIT
)
1035 handle_set_stage(state
, &dyn_info
, set
, MESA_SHADER_VERTEX
, PIPE_SHADER_VERTEX
);
1037 if (set
->layout
->shader_stages
& VK_SHADER_STAGE_FRAGMENT_BIT
)
1038 handle_set_stage(state
, &dyn_info
, set
, MESA_SHADER_FRAGMENT
, PIPE_SHADER_FRAGMENT
);
1040 if (set
->layout
->shader_stages
& VK_SHADER_STAGE_GEOMETRY_BIT
)
1041 handle_set_stage(state
, &dyn_info
, set
, MESA_SHADER_GEOMETRY
, PIPE_SHADER_GEOMETRY
);
1043 if (set
->layout
->shader_stages
& VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT
)
1044 handle_set_stage(state
, &dyn_info
, set
, MESA_SHADER_TESS_CTRL
, PIPE_SHADER_TESS_CTRL
);
1046 if (set
->layout
->shader_stages
& VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT
)
1047 handle_set_stage(state
, &dyn_info
, set
, MESA_SHADER_TESS_EVAL
, PIPE_SHADER_TESS_EVAL
);
1048 increment_dyn_info(&dyn_info
, bds
->layout
->set
[bds
->first
+ i
].layout
, true);
1052 static void add_img_view_surface(struct rendering_state
*state
,
1053 struct val_image_view
*imgv
, VkFormat format
, int width
, int height
)
1055 if (!imgv
->surface
) {
1056 struct pipe_surface
template;
1058 memset(&template, 0, sizeof(struct pipe_surface
));
1060 template.format
= vk_format_to_pipe(format
);
1061 template.width
= width
;
1062 template.height
= height
;
1063 template.u
.tex
.first_layer
= imgv
->subresourceRange
.baseArrayLayer
;
1064 template.u
.tex
.last_layer
= imgv
->subresourceRange
.baseArrayLayer
+ val_get_layerCount(imgv
->image
, &imgv
->subresourceRange
) - 1;
1065 template.u
.tex
.level
= imgv
->subresourceRange
.baseMipLevel
;
1067 if (template.format
== PIPE_FORMAT_NONE
)
1069 imgv
->surface
= state
->pctx
->create_surface(state
->pctx
,
1070 imgv
->image
->bo
, &template);
1075 attachment_needs_clear(struct rendering_state
*state
,
1078 return (a
!= VK_ATTACHMENT_UNUSED
&&
1079 state
->attachments
[a
].pending_clear_aspects
);
1083 subpass_needs_clear(struct rendering_state
*state
)
1086 struct val_subpass
*subpass
= &state
->pass
->subpasses
[state
->subpass
];
1087 for (uint32_t i
= 0; i
< subpass
->color_count
; i
++) {
1088 a
= subpass
->color_attachments
[i
].attachment
;
1089 if (attachment_needs_clear(state
, a
))
1092 if (subpass
->depth_stencil_attachment
) {
1093 a
= subpass
->depth_stencil_attachment
->attachment
;
1094 if (attachment_needs_clear(state
, a
))
1100 static void render_subpass_clear(struct rendering_state
*state
)
1102 struct val_subpass
*subpass
= &state
->pass
->subpasses
[state
->subpass
];
1104 if (!subpass_needs_clear(state
))
1107 for (unsigned i
= 0; i
< subpass
->color_count
; i
++) {
1108 uint32_t a
= subpass
->color_attachments
[i
].attachment
;
1110 if (!attachment_needs_clear(state
, a
))
1113 struct val_render_pass_attachment
*att
= &state
->pass
->attachments
[a
];
1114 struct val_image_view
*imgv
= state
->vk_framebuffer
->attachments
[a
];
1116 add_img_view_surface(state
, imgv
, att
->format
, state
->framebuffer
.width
, state
->framebuffer
.height
);
1118 union pipe_color_union color_clear_val
= { 0 };
1119 const VkClearValue value
= state
->attachments
[a
].clear_value
;
1120 color_clear_val
.ui
[0] = value
.color
.uint32
[0];
1121 color_clear_val
.ui
[1] = value
.color
.uint32
[1];
1122 color_clear_val
.ui
[2] = value
.color
.uint32
[2];
1123 color_clear_val
.ui
[3] = value
.color
.uint32
[3];
1124 state
->pctx
->clear_render_target(state
->pctx
,
1127 state
->render_area
.offset
.x
, state
->render_area
.offset
.y
,
1128 state
->render_area
.extent
.width
, state
->render_area
.extent
.height
,
1131 state
->attachments
[a
].pending_clear_aspects
= 0;
1134 if (subpass
->depth_stencil_attachment
) {
1135 uint32_t ds
= subpass
->depth_stencil_attachment
->attachment
;
1137 if (!attachment_needs_clear(state
, ds
))
1140 struct val_render_pass_attachment
*att
= &state
->pass
->attachments
[ds
];
1141 struct val_image_view
*imgv
= state
->vk_framebuffer
->attachments
[ds
];
1143 add_img_view_surface(state
, imgv
, att
->format
, state
->framebuffer
.width
, state
->framebuffer
.height
);
1145 if (util_format_is_depth_or_stencil(imgv
->surface
->format
)) {
1146 const struct util_format_description
*desc
= util_format_description(imgv
->surface
->format
);
1147 double dclear_val
= 0;
1148 uint32_t sclear_val
= 0;
1149 uint32_t ds_clear_flags
= 0;
1151 if (util_format_has_stencil(desc
) && att
->stencil_load_op
== VK_ATTACHMENT_LOAD_OP_CLEAR
) {
1152 ds_clear_flags
|= PIPE_CLEAR_STENCIL
;
1153 sclear_val
= state
->attachments
[ds
].clear_value
.depthStencil
.stencil
;
1155 if (util_format_has_depth(desc
) && att
->load_op
== VK_ATTACHMENT_LOAD_OP_CLEAR
) {
1156 ds_clear_flags
|= PIPE_CLEAR_DEPTH
;
1157 dclear_val
= state
->attachments
[ds
].clear_value
.depthStencil
.depth
;
1161 state
->pctx
->clear_depth_stencil(state
->pctx
,
1164 dclear_val
, sclear_val
,
1165 state
->render_area
.offset
.x
, state
->render_area
.offset
.y
,
1166 state
->render_area
.extent
.width
, state
->render_area
.extent
.height
,
1168 state
->attachments
[ds
].pending_clear_aspects
= 0;
1174 static void render_pass_resolve(struct rendering_state
*state
)
1176 struct val_subpass
*subpass
= &state
->pass
->subpasses
[state
->subpass
];
1177 if (!subpass
->has_color_resolve
)
1179 for (uint32_t i
= 0; i
< subpass
->color_count
; i
++) {
1180 struct val_subpass_attachment src_att
= subpass
->color_attachments
[i
];
1181 struct val_subpass_attachment dst_att
= subpass
->resolve_attachments
[i
];
1183 if (dst_att
.attachment
== VK_ATTACHMENT_UNUSED
)
1186 struct val_image_view
*src_imgv
= state
->vk_framebuffer
->attachments
[src_att
.attachment
];
1187 struct val_image_view
*dst_imgv
= state
->vk_framebuffer
->attachments
[dst_att
.attachment
];
1189 struct pipe_blit_info info
;
1190 memset(&info
, 0, sizeof(info
));
1192 info
.src
.resource
= src_imgv
->image
->bo
;
1193 info
.dst
.resource
= dst_imgv
->image
->bo
;
1194 info
.src
.format
= src_imgv
->pformat
;
1195 info
.dst
.format
= dst_imgv
->pformat
;
1196 info
.filter
= PIPE_TEX_FILTER_NEAREST
;
1197 info
.mask
= PIPE_MASK_RGBA
;
1198 info
.src
.box
.x
= state
->render_area
.offset
.x
;
1199 info
.src
.box
.y
= state
->render_area
.offset
.y
;
1200 info
.src
.box
.width
= state
->render_area
.extent
.width
;
1201 info
.src
.box
.height
= state
->render_area
.extent
.height
;
1202 info
.src
.box
.depth
= state
->vk_framebuffer
->layers
;
1204 info
.dst
.box
= info
.src
.box
;
1206 state
->pctx
->blit(state
->pctx
, &info
);
1210 static void begin_render_subpass(struct rendering_state
*state
,
1213 state
->subpass
= subpass_idx
;
1215 render_subpass_clear(state
);
1217 state
->framebuffer
.nr_cbufs
= 0;
1219 struct val_subpass
*subpass
= &state
->pass
->subpasses
[subpass_idx
];
1220 for (unsigned i
= 0; i
< subpass
->color_count
; i
++) {
1221 struct val_subpass_attachment
*color_att
= &subpass
->color_attachments
[i
];
1222 if (color_att
->attachment
!= VK_ATTACHMENT_UNUSED
) {
1223 struct val_image_view
*imgv
= state
->vk_framebuffer
->attachments
[color_att
->attachment
];
1225 add_img_view_surface(state
, imgv
, state
->pass
->attachments
[color_att
->attachment
].format
, state
->framebuffer
.width
, state
->framebuffer
.height
);
1226 state
->framebuffer
.cbufs
[state
->framebuffer
.nr_cbufs
] = imgv
->surface
;
1228 state
->framebuffer
.cbufs
[state
->framebuffer
.nr_cbufs
] = NULL
;
1229 state
->framebuffer
.nr_cbufs
++;
1232 if (subpass
->depth_stencil_attachment
) {
1233 struct val_subpass_attachment
*ds_att
= subpass
->depth_stencil_attachment
;
1235 if (ds_att
->attachment
!= VK_ATTACHMENT_UNUSED
) {
1236 struct val_image_view
*imgv
= state
->vk_framebuffer
->attachments
[ds_att
->attachment
];
1237 add_img_view_surface(state
, imgv
, state
->pass
->attachments
[ds_att
->attachment
].format
, state
->framebuffer
.width
, state
->framebuffer
.height
);
1238 state
->framebuffer
.zsbuf
= imgv
->surface
;
1242 state
->pctx
->set_framebuffer_state(state
->pctx
,
1243 &state
->framebuffer
);
1246 static void handle_begin_render_pass(struct val_cmd_buffer_entry
*cmd
,
1247 struct rendering_state
*state
)
1249 state
->pass
= cmd
->u
.begin_render_pass
.render_pass
;
1250 state
->vk_framebuffer
= cmd
->u
.begin_render_pass
.framebuffer
;
1251 state
->render_area
= cmd
->u
.begin_render_pass
.render_area
;
1253 state
->attachments
= cmd
->u
.begin_render_pass
.attachments
;
1255 state
->framebuffer
.width
= state
->vk_framebuffer
->width
;
1256 state
->framebuffer
.height
= state
->vk_framebuffer
->height
;
1257 state
->framebuffer
.layers
= state
->vk_framebuffer
->layers
;
1259 begin_render_subpass(state
, 0);
1262 static void handle_end_render_pass(struct val_cmd_buffer_entry
*cmd
,
1263 struct rendering_state
*state
)
1265 state
->pctx
->flush(state
->pctx
, NULL
, 0);
1267 render_pass_resolve(state
);
1269 state
->attachments
= NULL
;
1274 static void handle_next_subpass(struct val_cmd_buffer_entry
*cmd
,
1275 struct rendering_state
*state
)
1277 state
->pctx
->flush(state
->pctx
, NULL
, 0);
1278 render_pass_resolve(state
);
1280 begin_render_subpass(state
, state
->subpass
);
1283 static void handle_draw(struct val_cmd_buffer_entry
*cmd
,
1284 struct rendering_state
*state
)
1286 state
->info
.index_size
= 0;
1287 state
->info
.indirect
= NULL
;
1288 state
->info
.index
.resource
= NULL
;
1289 state
->info
.start
= cmd
->u
.draw
.first_vertex
;
1290 state
->info
.count
= cmd
->u
.draw
.vertex_count
;
1291 state
->info
.start_instance
= cmd
->u
.draw
.first_instance
;
1292 state
->info
.instance_count
= cmd
->u
.draw
.instance_count
;
1293 state
->pctx
->draw_vbo(state
->pctx
, &state
->info
);
1296 static void handle_set_viewport(struct val_cmd_buffer_entry
*cmd
,
1297 struct rendering_state
*state
)
1301 for (i
= 0; i
< cmd
->u
.set_viewport
.viewport_count
; i
++) {
1302 int idx
= i
+ cmd
->u
.set_viewport
.first_viewport
;
1303 const VkViewport
*vp
= &cmd
->u
.set_viewport
.viewports
[i
];
1304 get_viewport_xform(vp
, state
->viewports
[idx
].scale
, state
->viewports
[idx
].translate
);
1306 state
->vp_dirty
= true;
1309 static void handle_set_scissor(struct val_cmd_buffer_entry
*cmd
,
1310 struct rendering_state
*state
)
1314 for (i
= 0; i
< cmd
->u
.set_scissor
.scissor_count
; i
++) {
1315 int idx
= i
+ cmd
->u
.set_scissor
.first_scissor
;
1316 const VkRect2D
*ss
= &cmd
->u
.set_scissor
.scissors
[i
];
1317 state
->scissors
[idx
].minx
= ss
->offset
.x
;
1318 state
->scissors
[idx
].miny
= ss
->offset
.y
;
1319 state
->scissors
[idx
].maxx
= ss
->offset
.x
+ ss
->extent
.width
;
1320 state
->scissors
[idx
].maxy
= ss
->offset
.y
+ ss
->extent
.height
;
1322 state
->scissor_dirty
= true;
1325 static void handle_set_line_width(struct val_cmd_buffer_entry
*cmd
,
1326 struct rendering_state
*state
)
1328 state
->rs_state
.line_width
= cmd
->u
.set_line_width
.line_width
;
1329 state
->rs_dirty
= true;
1332 static void handle_set_depth_bias(struct val_cmd_buffer_entry
*cmd
,
1333 struct rendering_state
*state
)
1335 state
->rs_state
.offset_units
= cmd
->u
.set_depth_bias
.constant_factor
;
1336 state
->rs_state
.offset_scale
= cmd
->u
.set_depth_bias
.slope_factor
;
1337 state
->rs_state
.offset_clamp
= cmd
->u
.set_depth_bias
.clamp
;
1338 state
->rs_dirty
= true;
1341 static void handle_set_blend_constants(struct val_cmd_buffer_entry
*cmd
,
1342 struct rendering_state
*state
)
1344 memcpy(state
->blend_color
.color
, cmd
->u
.set_blend_constants
.blend_constants
, 4 * sizeof(float));
1345 state
->blend_color_dirty
= true;
1348 static void handle_set_depth_bounds(struct val_cmd_buffer_entry
*cmd
,
1349 struct rendering_state
*state
)
1351 state
->dsa_state
.depth
.bounds_min
= cmd
->u
.set_depth_bounds
.min_depth
;
1352 state
->dsa_state
.depth
.bounds_max
= cmd
->u
.set_depth_bounds
.max_depth
;
1353 state
->dsa_dirty
= true;
1356 static void handle_set_stencil_compare_mask(struct val_cmd_buffer_entry
*cmd
,
1357 struct rendering_state
*state
)
1359 if (cmd
->u
.stencil_vals
.face_mask
& VK_STENCIL_FACE_FRONT_BIT
)
1360 state
->dsa_state
.stencil
[0].valuemask
= cmd
->u
.stencil_vals
.value
;
1361 if (cmd
->u
.stencil_vals
.face_mask
& VK_STENCIL_FACE_BACK_BIT
)
1362 state
->dsa_state
.stencil
[1].valuemask
= cmd
->u
.stencil_vals
.value
;
1363 state
->dsa_dirty
= true;
1366 static void handle_set_stencil_write_mask(struct val_cmd_buffer_entry
*cmd
,
1367 struct rendering_state
*state
)
1369 if (cmd
->u
.stencil_vals
.face_mask
& VK_STENCIL_FACE_FRONT_BIT
)
1370 state
->dsa_state
.stencil
[0].writemask
= cmd
->u
.stencil_vals
.value
;
1371 if (cmd
->u
.stencil_vals
.face_mask
& VK_STENCIL_FACE_BACK_BIT
)
1372 state
->dsa_state
.stencil
[1].writemask
= cmd
->u
.stencil_vals
.value
;
1373 state
->dsa_dirty
= true;
1376 static void handle_set_stencil_reference(struct val_cmd_buffer_entry
*cmd
,
1377 struct rendering_state
*state
)
1379 if (cmd
->u
.stencil_vals
.face_mask
& VK_STENCIL_FACE_FRONT_BIT
)
1380 state
->stencil_ref
.ref_value
[0] = cmd
->u
.stencil_vals
.value
;
1381 if (cmd
->u
.stencil_vals
.face_mask
& VK_STENCIL_FACE_BACK_BIT
)
1382 state
->stencil_ref
.ref_value
[1] = cmd
->u
.stencil_vals
.value
;
1383 state
->stencil_ref_dirty
= true;
1387 copy_depth_rect(ubyte
* dst
,
1388 enum pipe_format dst_format
,
1389 unsigned dst_stride
,
1395 enum pipe_format src_format
,
1400 int src_stride_pos
= src_stride
< 0 ? -src_stride
: src_stride
;
1401 int src_blocksize
= util_format_get_blocksize(src_format
);
1402 int src_blockwidth
= util_format_get_blockwidth(src_format
);
1403 int src_blockheight
= util_format_get_blockheight(src_format
);
1404 int dst_blocksize
= util_format_get_blocksize(dst_format
);
1405 int dst_blockwidth
= util_format_get_blockwidth(dst_format
);
1406 int dst_blockheight
= util_format_get_blockheight(dst_format
);
1408 assert(src_blocksize
> 0);
1409 assert(src_blockwidth
> 0);
1410 assert(src_blockheight
> 0);
1412 dst_x
/= dst_blockwidth
;
1413 dst_y
/= dst_blockheight
;
1414 width
= (width
+ src_blockwidth
- 1)/src_blockwidth
;
1415 height
= (height
+ src_blockheight
- 1)/src_blockheight
;
1416 src_x
/= src_blockwidth
;
1417 src_y
/= src_blockheight
;
1419 dst
+= dst_x
* dst_blocksize
;
1420 src
+= src_x
* src_blocksize
;
1421 dst
+= dst_y
* dst_stride
;
1422 src
+= src_y
* src_stride_pos
;
1424 if (dst_format
== PIPE_FORMAT_S8_UINT
) {
1425 if (src_format
== PIPE_FORMAT_Z32_FLOAT_S8X24_UINT
) {
1426 util_format_z32_float_s8x24_uint_unpack_s_8uint(dst
, dst_stride
,
1429 } else if (src_format
== PIPE_FORMAT_Z24_UNORM_S8_UINT
) {
1430 util_format_z24_unorm_s8_uint_unpack_s_8uint(dst
, dst_stride
,
1435 } else if (dst_format
== PIPE_FORMAT_Z24X8_UNORM
) {
1436 util_format_z24_unorm_s8_uint_unpack_z24(dst
, dst_stride
,
1439 } else if (dst_format
== PIPE_FORMAT_Z32_FLOAT
) {
1440 if (src_format
== PIPE_FORMAT_Z32_FLOAT_S8X24_UINT
) {
1441 util_format_z32_float_s8x24_uint_unpack_z_float((float *)dst
, dst_stride
,
1445 } else if (dst_format
== PIPE_FORMAT_Z32_FLOAT_S8X24_UINT
) {
1446 if (src_format
== PIPE_FORMAT_Z32_FLOAT
)
1447 util_format_z32_float_s8x24_uint_pack_z_float(dst
, dst_stride
,
1448 (float *)src
, src_stride
,
1450 else if (src_format
== PIPE_FORMAT_S8_UINT
)
1451 util_format_z32_float_s8x24_uint_pack_s_8uint(dst
, dst_stride
,
1454 } else if (dst_format
== PIPE_FORMAT_Z24_UNORM_S8_UINT
) {
1455 if (src_format
== PIPE_FORMAT_S8_UINT
)
1456 util_format_z24_unorm_s8_uint_pack_s_8uint(dst
, dst_stride
,
1459 if (src_format
== PIPE_FORMAT_Z24X8_UNORM
)
1460 util_format_z24_unorm_s8_uint_pack_z24(dst
, dst_stride
,
1467 copy_depth_box(ubyte
*dst
,
1468 enum pipe_format dst_format
,
1469 unsigned dst_stride
, unsigned dst_slice_stride
,
1470 unsigned dst_x
, unsigned dst_y
, unsigned dst_z
,
1471 unsigned width
, unsigned height
, unsigned depth
,
1473 enum pipe_format src_format
,
1474 int src_stride
, unsigned src_slice_stride
,
1475 unsigned src_x
, unsigned src_y
, unsigned src_z
)
1478 dst
+= dst_z
* dst_slice_stride
;
1479 src
+= src_z
* src_slice_stride
;
1480 for (z
= 0; z
< depth
; ++z
) {
1481 copy_depth_rect(dst
,
1491 dst
+= dst_slice_stride
;
1492 src
+= src_slice_stride
;
1496 static void handle_copy_image_to_buffer(struct val_cmd_buffer_entry
*cmd
,
1497 struct rendering_state
*state
)
1500 struct val_cmd_copy_image_to_buffer
*copycmd
= &cmd
->u
.img_to_buffer
;
1501 struct pipe_box box
, dbox
;
1502 struct pipe_transfer
*src_t
, *dst_t
;
1503 ubyte
*src_data
, *dst_data
;
1505 state
->pctx
->flush(state
->pctx
, NULL
, 0);
1507 for (i
= 0; i
< copycmd
->region_count
; i
++) {
1509 box
.x
= copycmd
->regions
[i
].imageOffset
.x
;
1510 box
.y
= copycmd
->regions
[i
].imageOffset
.y
;
1511 box
.z
= copycmd
->src
->type
== VK_IMAGE_TYPE_3D
? copycmd
->regions
[i
].imageOffset
.z
: copycmd
->regions
[i
].imageSubresource
.baseArrayLayer
;
1512 box
.width
= copycmd
->regions
[i
].imageExtent
.width
;
1513 box
.height
= copycmd
->regions
[i
].imageExtent
.height
;
1514 box
.depth
= copycmd
->src
->type
== VK_IMAGE_TYPE_3D
? copycmd
->regions
[i
].imageExtent
.depth
: copycmd
->regions
[i
].imageSubresource
.layerCount
;
1516 src_data
= state
->pctx
->transfer_map(state
->pctx
,
1518 copycmd
->regions
[i
].imageSubresource
.mipLevel
,
1523 dbox
.x
= copycmd
->regions
[i
].bufferOffset
;
1526 dbox
.width
= copycmd
->dst
->bo
->width0
;
1529 dst_data
= state
->pctx
->transfer_map(state
->pctx
,
1532 PIPE_TRANSFER_WRITE
,
1536 enum pipe_format src_format
= copycmd
->src
->bo
->format
;
1537 enum pipe_format dst_format
= src_format
;
1538 if (util_format_is_depth_or_stencil(src_format
)) {
1539 if (copycmd
->regions
[i
].imageSubresource
.aspectMask
== VK_IMAGE_ASPECT_DEPTH_BIT
) {
1540 dst_format
= util_format_get_depth_only(src_format
);
1541 } else if (copycmd
->regions
[i
].imageSubresource
.aspectMask
== VK_IMAGE_ASPECT_STENCIL_BIT
) {
1542 dst_format
= PIPE_FORMAT_S8_UINT
;
1546 unsigned buffer_row_len
= util_format_get_stride(dst_format
, copycmd
->regions
[i
].bufferRowLength
);
1547 if (buffer_row_len
== 0)
1548 buffer_row_len
= util_format_get_stride(dst_format
, copycmd
->regions
[i
].imageExtent
.width
);
1549 unsigned buffer_image_height
= copycmd
->regions
[i
].bufferImageHeight
;
1550 if (buffer_image_height
== 0)
1551 buffer_image_height
= copycmd
->regions
[i
].imageExtent
.height
;
1553 if (src_format
!= dst_format
) {
1554 copy_depth_box(dst_data
, dst_format
,
1555 buffer_row_len
, buffer_row_len
* buffer_image_height
,
1557 copycmd
->regions
[i
].imageExtent
.width
,
1558 copycmd
->regions
[i
].imageExtent
.height
,
1560 src_data
, src_format
, src_t
->stride
, src_t
->layer_stride
, 0, 0, 0);
1562 util_copy_box((ubyte
*)dst_data
, src_format
,
1563 buffer_row_len
, buffer_row_len
* buffer_image_height
,
1565 copycmd
->regions
[i
].imageExtent
.width
,
1566 copycmd
->regions
[i
].imageExtent
.height
,
1568 src_data
, src_t
->stride
, src_t
->layer_stride
, 0, 0, 0);
1570 state
->pctx
->transfer_unmap(state
->pctx
, src_t
);
1571 state
->pctx
->transfer_unmap(state
->pctx
, dst_t
);
1575 static void handle_copy_buffer_to_image(struct val_cmd_buffer_entry
*cmd
,
1576 struct rendering_state
*state
)
1579 struct val_cmd_copy_buffer_to_image
*copycmd
= &cmd
->u
.buffer_to_img
;
1580 struct pipe_box box
, sbox
;
1581 struct pipe_transfer
*src_t
, *dst_t
;
1582 void *src_data
, *dst_data
;
1584 state
->pctx
->flush(state
->pctx
, NULL
, 0);
1586 for (i
= 0; i
< copycmd
->region_count
; i
++) {
1588 sbox
.x
= copycmd
->regions
[i
].bufferOffset
;
1591 sbox
.width
= copycmd
->src
->bo
->width0
;
1594 src_data
= state
->pctx
->transfer_map(state
->pctx
,
1602 box
.x
= copycmd
->regions
[i
].imageOffset
.x
;
1603 box
.y
= copycmd
->regions
[i
].imageOffset
.y
;
1604 box
.z
= copycmd
->dst
->type
== VK_IMAGE_TYPE_3D
? copycmd
->regions
[i
].imageOffset
.z
: copycmd
->regions
[i
].imageSubresource
.baseArrayLayer
;
1605 box
.width
= copycmd
->regions
[i
].imageExtent
.width
;
1606 box
.height
= copycmd
->regions
[i
].imageExtent
.height
;
1607 box
.depth
= copycmd
->dst
->type
== VK_IMAGE_TYPE_3D
? copycmd
->regions
[i
].imageExtent
.depth
: copycmd
->regions
[i
].imageSubresource
.layerCount
;
1609 dst_data
= state
->pctx
->transfer_map(state
->pctx
,
1611 copycmd
->regions
[i
].imageSubresource
.mipLevel
,
1612 PIPE_TRANSFER_WRITE
,
1616 enum pipe_format dst_format
= copycmd
->dst
->bo
->format
;
1617 enum pipe_format src_format
= dst_format
;
1618 if (util_format_is_depth_or_stencil(dst_format
)) {
1619 if (copycmd
->regions
[i
].imageSubresource
.aspectMask
== VK_IMAGE_ASPECT_DEPTH_BIT
) {
1620 src_format
= util_format_get_depth_only(copycmd
->dst
->bo
->format
);
1621 } else if (copycmd
->regions
[i
].imageSubresource
.aspectMask
== VK_IMAGE_ASPECT_STENCIL_BIT
) {
1622 src_format
= PIPE_FORMAT_S8_UINT
;
1626 unsigned buffer_row_len
= util_format_get_stride(src_format
, copycmd
->regions
[i
].bufferRowLength
);
1627 if (buffer_row_len
== 0)
1628 buffer_row_len
= util_format_get_stride(src_format
, copycmd
->regions
[i
].imageExtent
.width
);
1629 unsigned buffer_image_height
= copycmd
->regions
[i
].bufferImageHeight
;
1630 if (buffer_image_height
== 0)
1631 buffer_image_height
= copycmd
->regions
[i
].imageExtent
.height
;
1633 if (src_format
!= dst_format
) {
1634 copy_depth_box(dst_data
, dst_format
,
1635 dst_t
->stride
, dst_t
->layer_stride
,
1637 copycmd
->regions
[i
].imageExtent
.width
,
1638 copycmd
->regions
[i
].imageExtent
.height
,
1640 src_data
, src_format
,
1641 buffer_row_len
, buffer_row_len
* buffer_image_height
, 0, 0, 0);
1643 util_copy_box(dst_data
, dst_format
,
1644 dst_t
->stride
, dst_t
->layer_stride
,
1646 copycmd
->regions
[i
].imageExtent
.width
,
1647 copycmd
->regions
[i
].imageExtent
.height
,
1650 buffer_row_len
, buffer_row_len
* buffer_image_height
, 0, 0, 0);
1652 state
->pctx
->transfer_unmap(state
->pctx
, src_t
);
1653 state
->pctx
->transfer_unmap(state
->pctx
, dst_t
);
1657 static void handle_copy_image(struct val_cmd_buffer_entry
*cmd
,
1658 struct rendering_state
*state
)
1661 struct val_cmd_copy_image
*copycmd
= &cmd
->u
.copy_image
;
1663 state
->pctx
->flush(state
->pctx
, NULL
, 0);
1665 for (i
= 0; i
< copycmd
->region_count
; i
++) {
1666 struct pipe_box src_box
;
1667 src_box
.x
= copycmd
->regions
[i
].srcOffset
.x
;
1668 src_box
.y
= copycmd
->regions
[i
].srcOffset
.y
;
1669 src_box
.z
= copycmd
->regions
[i
].srcOffset
.z
+ copycmd
->regions
[i
].srcSubresource
.baseArrayLayer
;
1670 src_box
.width
= copycmd
->regions
[i
].extent
.width
;
1671 src_box
.height
= copycmd
->regions
[i
].extent
.height
;
1672 src_box
.depth
= copycmd
->regions
[i
].extent
.depth
;
1674 state
->pctx
->resource_copy_region(state
->pctx
, copycmd
->dst
->bo
,
1675 copycmd
->regions
[i
].dstSubresource
.mipLevel
,
1676 copycmd
->regions
[i
].dstOffset
.x
,
1677 copycmd
->regions
[i
].dstOffset
.y
,
1678 copycmd
->regions
[i
].dstOffset
.z
+ copycmd
->regions
[i
].dstSubresource
.baseArrayLayer
,
1680 copycmd
->regions
[i
].srcSubresource
.mipLevel
,
1685 static void handle_copy_buffer(struct val_cmd_buffer_entry
*cmd
,
1686 struct rendering_state
*state
)
1689 struct val_cmd_copy_buffer
*copycmd
= &cmd
->u
.copy_buffer
;
1691 for (i
= 0; i
< copycmd
->region_count
; i
++) {
1692 struct pipe_box box
= { 0 };
1693 u_box_1d(copycmd
->regions
[i
].srcOffset
, copycmd
->regions
[i
].size
, &box
);
1694 state
->pctx
->resource_copy_region(state
->pctx
, copycmd
->dst
->bo
, 0,
1695 copycmd
->regions
[i
].dstOffset
, 0, 0,
1696 copycmd
->src
->bo
, 0, &box
);
1700 static void handle_blit_image(struct val_cmd_buffer_entry
*cmd
,
1701 struct rendering_state
*state
)
1704 struct val_cmd_blit_image
*blitcmd
= &cmd
->u
.blit_image
;
1705 struct pipe_blit_info info
;
1707 memset(&info
, 0, sizeof(info
));
1709 state
->pctx
->flush(state
->pctx
, NULL
, 0);
1710 info
.src
.resource
= blitcmd
->src
->bo
;
1711 info
.dst
.resource
= blitcmd
->dst
->bo
;
1712 info
.src
.format
= blitcmd
->src
->bo
->format
;
1713 info
.dst
.format
= blitcmd
->dst
->bo
->format
;
1714 info
.mask
= util_format_is_depth_or_stencil(info
.src
.format
) ? PIPE_MASK_ZS
: PIPE_MASK_RGBA
;
1715 info
.filter
= blitcmd
->filter
== VK_FILTER_NEAREST
? PIPE_TEX_FILTER_NEAREST
: PIPE_TEX_FILTER_LINEAR
;
1716 for (i
= 0; i
< blitcmd
->region_count
; i
++) {
1717 int srcX0
, srcX1
, srcY0
, srcY1
;
1718 unsigned dstX0
, dstX1
, dstY0
, dstY1
;
1720 srcX0
= blitcmd
->regions
[i
].srcOffsets
[0].x
;
1721 srcX1
= blitcmd
->regions
[i
].srcOffsets
[1].x
;
1722 srcY0
= blitcmd
->regions
[i
].srcOffsets
[0].y
;
1723 srcY1
= blitcmd
->regions
[i
].srcOffsets
[1].y
;
1725 dstX0
= blitcmd
->regions
[i
].dstOffsets
[0].x
;
1726 dstX1
= blitcmd
->regions
[i
].dstOffsets
[1].x
;
1727 dstY0
= blitcmd
->regions
[i
].dstOffsets
[0].y
;
1728 dstY1
= blitcmd
->regions
[i
].dstOffsets
[1].y
;
1730 if (dstX0
< dstX1
) {
1731 info
.dst
.box
.x
= dstX0
;
1732 info
.src
.box
.x
= srcX0
;
1733 info
.dst
.box
.width
= dstX1
- dstX0
;
1734 info
.src
.box
.width
= srcX1
- srcX0
;
1736 info
.dst
.box
.x
= dstX1
;
1737 info
.src
.box
.x
= srcX1
;
1738 info
.dst
.box
.width
= dstX0
- dstX1
;
1739 info
.src
.box
.width
= srcX0
- srcX1
;
1742 if (dstY0
< dstY1
) {
1743 info
.dst
.box
.y
= dstY0
;
1744 info
.src
.box
.y
= srcY0
;
1745 info
.dst
.box
.height
= dstY1
- dstY0
;
1746 info
.src
.box
.height
= srcY1
- srcY0
;
1748 info
.dst
.box
.y
= dstY1
;
1749 info
.src
.box
.y
= srcY1
;
1750 info
.dst
.box
.height
= dstY0
- dstY1
;
1751 info
.src
.box
.height
= srcY0
- srcY1
;
1753 info
.src
.level
= blitcmd
->regions
[i
].srcSubresource
.mipLevel
;
1754 info
.src
.box
.z
= blitcmd
->regions
[i
].srcOffsets
[0].z
+ blitcmd
->regions
[i
].srcSubresource
.baseArrayLayer
;
1755 if (blitcmd
->src
->bo
->target
== PIPE_TEXTURE_3D
)
1756 info
.src
.box
.depth
= blitcmd
->regions
[i
].srcOffsets
[1].z
- blitcmd
->regions
[i
].srcOffsets
[0].z
;
1758 info
.src
.box
.depth
= blitcmd
->regions
[i
].srcSubresource
.layerCount
;
1760 info
.dst
.level
= blitcmd
->regions
[i
].dstSubresource
.mipLevel
;
1761 info
.dst
.box
.z
= blitcmd
->regions
[i
].dstOffsets
[0].z
+ blitcmd
->regions
[i
].dstSubresource
.baseArrayLayer
;
1762 if (blitcmd
->dst
->bo
->target
== PIPE_TEXTURE_3D
)
1763 info
.dst
.box
.depth
= blitcmd
->regions
[i
].dstOffsets
[1].z
- blitcmd
->regions
[i
].dstOffsets
[0].z
;
1765 info
.dst
.box
.depth
= blitcmd
->regions
[i
].dstSubresource
.layerCount
;
1766 state
->pctx
->blit(state
->pctx
, &info
);
1770 static void handle_fill_buffer(struct val_cmd_buffer_entry
*cmd
,
1771 struct rendering_state
*state
)
1773 struct val_cmd_fill_buffer
*fillcmd
= &cmd
->u
.fill_buffer
;
1775 struct pipe_transfer
*dst_t
;
1776 struct pipe_box box
;
1777 uint32_t size
= fillcmd
->fill_size
;
1779 if (fillcmd
->fill_size
== VK_WHOLE_SIZE
)
1780 size
= fillcmd
->buffer
->bo
->width0
- fillcmd
->offset
;
1782 u_box_1d(fillcmd
->offset
, size
, &box
);
1783 dst
= state
->pctx
->transfer_map(state
->pctx
,
1784 fillcmd
->buffer
->bo
,
1786 PIPE_TRANSFER_WRITE
,
1790 for (unsigned i
= 0; i
< size
/ 4; i
++)
1791 dst
[i
] = fillcmd
->data
;
1792 state
->pctx
->transfer_unmap(state
->pctx
, dst_t
);
1795 static void handle_update_buffer(struct val_cmd_buffer_entry
*cmd
,
1796 struct rendering_state
*state
)
1798 struct val_cmd_update_buffer
*updcmd
= &cmd
->u
.update_buffer
;
1800 struct pipe_transfer
*dst_t
;
1801 struct pipe_box box
;
1803 u_box_1d(updcmd
->offset
, updcmd
->data_size
, &box
);
1804 dst
= state
->pctx
->transfer_map(state
->pctx
,
1807 PIPE_TRANSFER_WRITE
,
1811 memcpy(dst
, updcmd
->data
, updcmd
->data_size
);
1812 state
->pctx
->transfer_unmap(state
->pctx
, dst_t
);
1815 static void handle_draw_indexed(struct val_cmd_buffer_entry
*cmd
,
1816 struct rendering_state
*state
)
1818 state
->info
.indirect
= NULL
;
1819 state
->info
.min_index
= 0;
1820 state
->info
.max_index
= ~0;
1821 state
->info
.index_size
= state
->index_size
;
1822 state
->info
.index
.resource
= state
->index_buffer
;
1823 state
->info
.start
= (state
->index_offset
/ state
->index_size
) + cmd
->u
.draw_indexed
.first_index
;
1824 state
->info
.count
= cmd
->u
.draw_indexed
.index_count
;
1825 state
->info
.start_instance
= cmd
->u
.draw_indexed
.first_instance
;
1826 state
->info
.instance_count
= cmd
->u
.draw_indexed
.instance_count
;
1827 state
->info
.index_bias
= cmd
->u
.draw_indexed
.vertex_offset
;
1829 if (state
->info
.primitive_restart
) {
1830 if (state
->info
.index_size
== 4)
1831 state
->info
.restart_index
= 0xffffffff;
1833 state
->info
.restart_index
= 0xffff;
1836 state
->pctx
->draw_vbo(state
->pctx
, &state
->info
);
1839 static void handle_draw_indirect(struct val_cmd_buffer_entry
*cmd
,
1840 struct rendering_state
*state
, bool indexed
)
1843 state
->info
.index_size
= state
->index_size
;
1844 state
->info
.index
.resource
= state
->index_buffer
;
1845 state
->info
.max_index
= ~0;
1847 state
->info
.index_size
= 0;
1848 state
->indirect_info
.offset
= cmd
->u
.draw_indirect
.offset
;
1849 state
->indirect_info
.stride
= cmd
->u
.draw_indirect
.stride
;
1850 state
->indirect_info
.draw_count
= cmd
->u
.draw_indirect
.draw_count
;
1851 state
->indirect_info
.buffer
= cmd
->u
.draw_indirect
.buffer
->bo
;
1852 state
->info
.indirect
= &state
->indirect_info
;
1853 state
->pctx
->draw_vbo(state
->pctx
, &state
->info
);
1856 static void handle_index_buffer(struct val_cmd_buffer_entry
*cmd
,
1857 struct rendering_state
*state
)
1859 struct val_cmd_bind_index_buffer
*ib
= &cmd
->u
.index_buffer
;
1860 switch (ib
->index_type
) {
1861 case VK_INDEX_TYPE_UINT16
:
1862 state
->index_size
= 2;
1864 case VK_INDEX_TYPE_UINT32
:
1865 state
->index_size
= 4;
1870 state
->index_offset
= ib
->offset
;
1872 state
->index_buffer
= ib
->buffer
->bo
;
1874 state
->index_buffer
= NULL
;
1876 state
->ib_dirty
= true;
1879 static void handle_dispatch(struct val_cmd_buffer_entry
*cmd
,
1880 struct rendering_state
*state
)
1882 state
->dispatch_info
.grid
[0] = cmd
->u
.dispatch
.x
;
1883 state
->dispatch_info
.grid
[1] = cmd
->u
.dispatch
.y
;
1884 state
->dispatch_info
.grid
[2] = cmd
->u
.dispatch
.z
;
1885 state
->dispatch_info
.indirect
= NULL
;
1886 state
->pctx
->launch_grid(state
->pctx
, &state
->dispatch_info
);
1889 static void handle_dispatch_indirect(struct val_cmd_buffer_entry
*cmd
,
1890 struct rendering_state
*state
)
1892 state
->dispatch_info
.indirect
= cmd
->u
.dispatch_indirect
.buffer
->bo
;
1893 state
->dispatch_info
.indirect_offset
= cmd
->u
.dispatch_indirect
.offset
;
1894 state
->pctx
->launch_grid(state
->pctx
, &state
->dispatch_info
);
1897 static void handle_push_constants(struct val_cmd_buffer_entry
*cmd
,
1898 struct rendering_state
*state
)
1900 memcpy(state
->push_constants
+ cmd
->u
.push_constants
.offset
, cmd
->u
.push_constants
.val
, cmd
->u
.push_constants
.size
);
1902 state
->pc_buffer
[PIPE_SHADER_VERTEX
].buffer_size
= 128 * 4;
1903 state
->pc_buffer
[PIPE_SHADER_VERTEX
].buffer_offset
= 0;
1904 state
->pc_buffer
[PIPE_SHADER_VERTEX
].user_buffer
= state
->push_constants
;
1905 state
->pcbuf_dirty
[PIPE_SHADER_VERTEX
] = true;
1906 state
->pc_buffer
[PIPE_SHADER_FRAGMENT
].buffer_size
= 128 * 4;
1907 state
->pc_buffer
[PIPE_SHADER_FRAGMENT
].buffer_offset
= 0;
1908 state
->pc_buffer
[PIPE_SHADER_FRAGMENT
].user_buffer
= state
->push_constants
;
1909 state
->pcbuf_dirty
[PIPE_SHADER_FRAGMENT
] = true;
1910 state
->pc_buffer
[PIPE_SHADER_GEOMETRY
].buffer_size
= 128 * 4;
1911 state
->pc_buffer
[PIPE_SHADER_GEOMETRY
].buffer_offset
= 0;
1912 state
->pc_buffer
[PIPE_SHADER_GEOMETRY
].user_buffer
= state
->push_constants
;
1913 state
->pcbuf_dirty
[PIPE_SHADER_GEOMETRY
] = true;
1914 state
->pc_buffer
[PIPE_SHADER_TESS_CTRL
].buffer_size
= 128 * 4;
1915 state
->pc_buffer
[PIPE_SHADER_TESS_CTRL
].buffer_offset
= 0;
1916 state
->pc_buffer
[PIPE_SHADER_TESS_CTRL
].user_buffer
= state
->push_constants
;
1917 state
->pcbuf_dirty
[PIPE_SHADER_TESS_CTRL
] = true;
1918 state
->pc_buffer
[PIPE_SHADER_TESS_EVAL
].buffer_size
= 128 * 4;
1919 state
->pc_buffer
[PIPE_SHADER_TESS_EVAL
].buffer_offset
= 0;
1920 state
->pc_buffer
[PIPE_SHADER_TESS_EVAL
].user_buffer
= state
->push_constants
;
1921 state
->pcbuf_dirty
[PIPE_SHADER_TESS_EVAL
] = true;
1922 state
->pc_buffer
[PIPE_SHADER_COMPUTE
].buffer_size
= 128 * 4;
1923 state
->pc_buffer
[PIPE_SHADER_COMPUTE
].buffer_offset
= 0;
1924 state
->pc_buffer
[PIPE_SHADER_COMPUTE
].user_buffer
= state
->push_constants
;
1925 state
->pcbuf_dirty
[PIPE_SHADER_COMPUTE
] = true;
1928 static void val_execute_cmd_buffer(struct val_cmd_buffer
*cmd_buffer
,
1929 struct rendering_state
*state
);
1931 static void handle_execute_commands(struct val_cmd_buffer_entry
*cmd
,
1932 struct rendering_state
*state
)
1934 for (unsigned i
= 0; i
< cmd
->u
.execute_commands
.command_buffer_count
; i
++) {
1935 struct val_cmd_buffer
*secondary_buf
= cmd
->u
.execute_commands
.cmd_buffers
[i
];
1936 val_execute_cmd_buffer(secondary_buf
, state
);
1940 static void handle_event_set(struct val_cmd_buffer_entry
*cmd
,
1941 struct rendering_state
*state
)
1943 struct val_event
*event
= cmd
->u
.event_set
.event
;
1945 if (cmd
->u
.event_set
.flush
)
1946 state
->pctx
->flush(state
->pctx
, NULL
, 0);
1947 event
->event_storage
= (cmd
->u
.event_set
.value
== true) ? 1 : 0;
1950 static void handle_wait_events(struct val_cmd_buffer_entry
*cmd
,
1951 struct rendering_state
*state
)
1953 for (unsigned i
= 0; i
< cmd
->u
.wait_events
.event_count
; i
++) {
1954 struct val_event
*event
= cmd
->u
.wait_events
.events
[i
];
1956 while (event
->event_storage
!= true);
1960 static void handle_pipeline_barrier(struct val_cmd_buffer_entry
*cmd
,
1961 struct rendering_state
*state
)
1963 /* why hello nail, I'm a hammer. - TODO */
1964 state
->pctx
->flush(state
->pctx
, NULL
, 0);
1967 static void handle_begin_query(struct val_cmd_buffer_entry
*cmd
,
1968 struct rendering_state
*state
)
1970 struct val_cmd_query_cmd
*qcmd
= &cmd
->u
.query
;
1971 struct val_query_pool
*pool
= qcmd
->pool
;
1973 if (!pool
->queries
[qcmd
->query
]) {
1974 enum pipe_query_type qtype
= pool
->base_type
;
1975 if (qtype
== PIPE_QUERY_OCCLUSION_COUNTER
&& !qcmd
->precise
)
1976 qtype
= PIPE_QUERY_OCCLUSION_PREDICATE
;
1977 pool
->queries
[qcmd
->query
] = state
->pctx
->create_query(state
->pctx
,
1978 qtype
, qcmd
->index
);
1981 state
->pctx
->begin_query(state
->pctx
, pool
->queries
[qcmd
->query
]);
1984 static void handle_end_query(struct val_cmd_buffer_entry
*cmd
,
1985 struct rendering_state
*state
)
1987 struct val_cmd_query_cmd
*qcmd
= &cmd
->u
.query
;
1988 struct val_query_pool
*pool
= qcmd
->pool
;
1989 assert(pool
->queries
[qcmd
->query
]);
1991 state
->pctx
->end_query(state
->pctx
, pool
->queries
[qcmd
->query
]);
1994 static void handle_reset_query_pool(struct val_cmd_buffer_entry
*cmd
,
1995 struct rendering_state
*state
)
1997 struct val_cmd_query_cmd
*qcmd
= &cmd
->u
.query
;
1998 struct val_query_pool
*pool
= qcmd
->pool
;
1999 for (unsigned i
= qcmd
->query
; i
< qcmd
->query
+ qcmd
->index
; i
++) {
2000 if (pool
->queries
[i
]) {
2001 state
->pctx
->destroy_query(state
->pctx
, pool
->queries
[i
]);
2002 pool
->queries
[i
] = NULL
;
2007 static void handle_write_timestamp(struct val_cmd_buffer_entry
*cmd
,
2008 struct rendering_state
*state
)
2010 struct val_cmd_query_cmd
*qcmd
= &cmd
->u
.query
;
2011 struct val_query_pool
*pool
= qcmd
->pool
;
2012 if (!pool
->queries
[qcmd
->query
]) {
2013 pool
->queries
[qcmd
->query
] = state
->pctx
->create_query(state
->pctx
,
2014 PIPE_QUERY_TIMESTAMP
, 0);
2018 state
->pctx
->flush(state
->pctx
, NULL
, 0);
2019 state
->pctx
->end_query(state
->pctx
, pool
->queries
[qcmd
->query
]);
2023 static void handle_copy_query_pool_results(struct val_cmd_buffer_entry
*cmd
,
2024 struct rendering_state
*state
)
2026 struct val_cmd_copy_query_pool_results
*copycmd
= &cmd
->u
.copy_query_pool_results
;
2027 struct val_query_pool
*pool
= copycmd
->pool
;
2029 for (unsigned i
= copycmd
->first_query
; i
< copycmd
->first_query
+ copycmd
->query_count
; i
++) {
2030 unsigned offset
= copycmd
->dst
->offset
+ (copycmd
->stride
* (i
- copycmd
->first_query
));
2031 if (pool
->queries
[i
]) {
2032 if (copycmd
->flags
& VK_QUERY_RESULT_WITH_AVAILABILITY_BIT
)
2033 state
->pctx
->get_query_result_resource(state
->pctx
,
2035 copycmd
->flags
& VK_QUERY_RESULT_WAIT_BIT
,
2036 copycmd
->flags
& VK_QUERY_RESULT_64_BIT
? PIPE_QUERY_TYPE_U64
: PIPE_QUERY_TYPE_U32
,
2039 offset
+ (copycmd
->flags
& VK_QUERY_RESULT_64_BIT
? 8 : 4));
2040 state
->pctx
->get_query_result_resource(state
->pctx
,
2042 copycmd
->flags
& VK_QUERY_RESULT_WAIT_BIT
,
2043 copycmd
->flags
& VK_QUERY_RESULT_64_BIT
? PIPE_QUERY_TYPE_U64
: PIPE_QUERY_TYPE_U32
,
2048 /* if no queries emitted yet, just reset the buffer to 0 so avail is reported correctly */
2049 if (copycmd
->flags
& VK_QUERY_RESULT_WITH_AVAILABILITY_BIT
) {
2050 struct pipe_transfer
*src_t
;
2053 struct pipe_box box
= {};
2054 box
.width
= copycmd
->stride
* copycmd
->query_count
;
2057 map
= state
->pctx
->transfer_map(state
->pctx
,
2058 copycmd
->dst
->bo
, 0, PIPE_TRANSFER_READ
, &box
,
2061 memset(map
, 0, box
.width
);
2062 state
->pctx
->transfer_unmap(state
->pctx
, src_t
);
2068 static void pack_clear_color(enum pipe_format pformat
, VkClearColorValue
*in_val
, uint32_t col_val
[4])
2070 const struct util_format_description
*desc
= util_format_description(pformat
);
2071 col_val
[0] = col_val
[1] = col_val
[2] = col_val
[3] = 0;
2072 for (unsigned c
= 0; c
< 4; c
++) {
2073 if (desc
->swizzle
[c
] >= 4)
2075 const struct util_format_channel_description
*channel
= &desc
->channel
[desc
->swizzle
[c
]];
2076 if (channel
->size
== 32) {
2077 col_val
[c
] = in_val
->uint32
[c
];
2080 if (channel
->pure_integer
) {
2081 uint64_t v
= in_val
->uint32
[c
] & ((1u << channel
->size
) - 1);
2082 switch (channel
->size
) {
2086 col_val
[0] |= (v
<< channel
->shift
);
2089 col_val
[c
/ 2] |= (v
<< (16 * (c
% 2)));
2093 util_pack_color(in_val
->float32
, pformat
, (union util_color
*)col_val
);
2099 static void handle_clear_color_image(struct val_cmd_buffer_entry
*cmd
,
2100 struct rendering_state
*state
)
2102 struct val_image
*image
= cmd
->u
.clear_color_image
.image
;
2103 uint32_t col_val
[4];
2104 pack_clear_color(image
->bo
->format
, &cmd
->u
.clear_color_image
.clear_val
, col_val
);
2105 for (unsigned i
= 0; i
< cmd
->u
.clear_color_image
.range_count
; i
++) {
2106 VkImageSubresourceRange
*range
= &cmd
->u
.clear_color_image
.ranges
[i
];
2107 struct pipe_box box
;
2112 uint32_t level_count
= val_get_levelCount(image
, range
);
2113 for (unsigned j
= range
->baseMipLevel
; j
< range
->baseMipLevel
+ level_count
; j
++) {
2114 box
.width
= u_minify(image
->bo
->width0
, j
);
2115 box
.height
= u_minify(image
->bo
->height0
, j
);
2117 if (image
->bo
->target
== PIPE_TEXTURE_3D
)
2118 box
.depth
= u_minify(image
->bo
->depth0
, j
);
2119 else if (image
->bo
->target
== PIPE_TEXTURE_1D_ARRAY
) {
2120 box
.y
= range
->baseArrayLayer
;
2121 box
.height
= val_get_layerCount(image
, range
);
2124 box
.z
= range
->baseArrayLayer
;
2125 box
.depth
= val_get_layerCount(image
, range
);
2128 state
->pctx
->clear_texture(state
->pctx
, image
->bo
,
2129 j
, &box
, (void *)col_val
);
2134 static void handle_clear_ds_image(struct val_cmd_buffer_entry
*cmd
,
2135 struct rendering_state
*state
)
2137 struct val_image
*image
= cmd
->u
.clear_ds_image
.image
;
2139 col_val
= util_pack64_z_stencil(image
->bo
->format
, cmd
->u
.clear_ds_image
.clear_val
.depth
, cmd
->u
.clear_ds_image
.clear_val
.stencil
);
2140 for (unsigned i
= 0; i
< cmd
->u
.clear_ds_image
.range_count
; i
++) {
2141 VkImageSubresourceRange
*range
= &cmd
->u
.clear_ds_image
.ranges
[i
];
2142 struct pipe_box box
;
2147 uint32_t level_count
= val_get_levelCount(image
, range
);
2148 for (unsigned j
= range
->baseMipLevel
; j
< range
->baseMipLevel
+ level_count
; j
++) {
2149 box
.width
= u_minify(image
->bo
->width0
, j
);
2150 box
.height
= u_minify(image
->bo
->height0
, j
);
2152 if (image
->bo
->target
== PIPE_TEXTURE_3D
)
2153 box
.depth
= u_minify(image
->bo
->depth0
, j
);
2154 else if (image
->bo
->target
== PIPE_TEXTURE_1D_ARRAY
) {
2155 box
.y
= range
->baseArrayLayer
;
2156 box
.height
= val_get_layerCount(image
, range
);
2159 box
.z
= range
->baseArrayLayer
;
2160 box
.depth
= val_get_layerCount(image
, range
);
2163 state
->pctx
->clear_texture(state
->pctx
, image
->bo
,
2164 j
, &box
, (void *)&col_val
);
2169 static void handle_clear_attachments(struct val_cmd_buffer_entry
*cmd
,
2170 struct rendering_state
*state
)
2172 for (uint32_t a
= 0; a
< cmd
->u
.clear_attachments
.attachment_count
; a
++) {
2173 VkClearAttachment
*att
= &cmd
->u
.clear_attachments
.attachments
[a
];
2174 struct val_subpass
*subpass
= &state
->pass
->subpasses
[state
->subpass
];
2175 struct val_image_view
*imgv
;
2177 if (att
->aspectMask
== VK_IMAGE_ASPECT_COLOR_BIT
) {
2178 struct val_subpass_attachment
*color_att
= &subpass
->color_attachments
[att
->colorAttachment
];
2179 if (!color_att
|| color_att
->attachment
== VK_ATTACHMENT_UNUSED
)
2181 imgv
= state
->vk_framebuffer
->attachments
[color_att
->attachment
];
2183 struct val_subpass_attachment
*ds_att
= subpass
->depth_stencil_attachment
;
2184 if (!ds_att
|| ds_att
->attachment
== VK_ATTACHMENT_UNUSED
)
2186 imgv
= state
->vk_framebuffer
->attachments
[ds_att
->attachment
];
2188 uint32_t col_val
[4];
2189 if (util_format_is_depth_or_stencil(imgv
->pformat
)) {
2190 int64_t val
= util_pack64_z_stencil(imgv
->pformat
, att
->clearValue
.depthStencil
.depth
, att
->clearValue
.depthStencil
.stencil
);
2191 memcpy(col_val
, &val
, 8);
2193 pack_clear_color(imgv
->pformat
, &att
->clearValue
.color
, col_val
);
2194 for (uint32_t r
= 0; r
< cmd
->u
.clear_attachments
.rect_count
; r
++) {
2195 struct pipe_box box
;
2196 VkClearRect
*rect
= &cmd
->u
.clear_attachments
.rects
[r
];
2197 box
.x
= rect
->rect
.offset
.x
;
2198 box
.y
= rect
->rect
.offset
.y
;
2199 box
.z
= imgv
->subresourceRange
.baseArrayLayer
+ rect
->baseArrayLayer
;
2200 box
.width
= rect
->rect
.extent
.width
;
2201 box
.height
= rect
->rect
.extent
.height
;
2202 box
.depth
= rect
->layerCount
;
2204 state
->pctx
->clear_texture(state
->pctx
, imgv
->image
->bo
,
2205 imgv
->subresourceRange
.baseMipLevel
,
2211 static void handle_resolve_image(struct val_cmd_buffer_entry
*cmd
,
2212 struct rendering_state
*state
)
2215 struct val_cmd_resolve_image
*resolvecmd
= &cmd
->u
.resolve_image
;
2216 struct pipe_blit_info info
;
2218 memset(&info
, 0, sizeof(info
));
2220 state
->pctx
->flush(state
->pctx
, NULL
, 0);
2221 info
.src
.resource
= resolvecmd
->src
->bo
;
2222 info
.dst
.resource
= resolvecmd
->dst
->bo
;
2223 info
.src
.format
= resolvecmd
->src
->bo
->format
;
2224 info
.dst
.format
= resolvecmd
->dst
->bo
->format
;
2225 info
.mask
= util_format_is_depth_or_stencil(info
.src
.format
) ? PIPE_MASK_ZS
: PIPE_MASK_RGBA
;
2226 info
.filter
= PIPE_TEX_FILTER_NEAREST
;
2227 for (i
= 0; i
< resolvecmd
->region_count
; i
++) {
2229 unsigned dstX0
, dstY0
;
2231 srcX0
= resolvecmd
->regions
[i
].srcOffset
.x
;
2232 srcY0
= resolvecmd
->regions
[i
].srcOffset
.y
;
2234 dstX0
= resolvecmd
->regions
[i
].dstOffset
.x
;
2235 dstY0
= resolvecmd
->regions
[i
].dstOffset
.y
;
2237 info
.dst
.box
.x
= dstX0
;
2238 info
.dst
.box
.y
= dstY0
;
2239 info
.src
.box
.x
= srcX0
;
2240 info
.src
.box
.y
= srcY0
;
2242 info
.dst
.box
.width
= resolvecmd
->regions
[i
].extent
.width
;
2243 info
.src
.box
.width
= resolvecmd
->regions
[i
].extent
.width
;
2244 info
.dst
.box
.height
= resolvecmd
->regions
[i
].extent
.height
;
2245 info
.src
.box
.height
= resolvecmd
->regions
[i
].extent
.height
;
2247 info
.dst
.box
.depth
= resolvecmd
->regions
[i
].dstSubresource
.layerCount
;
2248 info
.src
.box
.depth
= resolvecmd
->regions
[i
].srcSubresource
.layerCount
;
2250 info
.src
.level
= resolvecmd
->regions
[i
].srcSubresource
.mipLevel
;
2251 info
.src
.box
.z
= resolvecmd
->regions
[i
].srcOffset
.z
+ resolvecmd
->regions
[i
].srcSubresource
.baseArrayLayer
;
2253 info
.dst
.level
= resolvecmd
->regions
[i
].dstSubresource
.mipLevel
;
2254 info
.dst
.box
.z
= resolvecmd
->regions
[i
].dstOffset
.z
+ resolvecmd
->regions
[i
].dstSubresource
.baseArrayLayer
;
2256 state
->pctx
->blit(state
->pctx
, &info
);
2260 static void val_execute_cmd_buffer(struct val_cmd_buffer
*cmd_buffer
,
2261 struct rendering_state
*state
)
2263 struct val_cmd_buffer_entry
*cmd
;
2265 LIST_FOR_EACH_ENTRY(cmd
, &cmd_buffer
->cmds
, cmd_link
) {
2266 switch (cmd
->cmd_type
) {
2267 case VAL_CMD_BIND_PIPELINE
:
2268 handle_pipeline(cmd
, state
);
2270 case VAL_CMD_SET_VIEWPORT
:
2271 handle_set_viewport(cmd
, state
);
2273 case VAL_CMD_SET_SCISSOR
:
2274 handle_set_scissor(cmd
, state
);
2276 case VAL_CMD_SET_LINE_WIDTH
:
2277 handle_set_line_width(cmd
, state
);
2279 case VAL_CMD_SET_DEPTH_BIAS
:
2280 handle_set_depth_bias(cmd
, state
);
2282 case VAL_CMD_SET_BLEND_CONSTANTS
:
2283 handle_set_blend_constants(cmd
, state
);
2285 case VAL_CMD_SET_DEPTH_BOUNDS
:
2286 handle_set_depth_bounds(cmd
, state
);
2288 case VAL_CMD_SET_STENCIL_COMPARE_MASK
:
2289 handle_set_stencil_compare_mask(cmd
, state
);
2291 case VAL_CMD_SET_STENCIL_WRITE_MASK
:
2292 handle_set_stencil_write_mask(cmd
, state
);
2294 case VAL_CMD_SET_STENCIL_REFERENCE
:
2295 handle_set_stencil_reference(cmd
, state
);
2297 case VAL_CMD_BIND_DESCRIPTOR_SETS
:
2298 handle_descriptor_sets(cmd
, state
);
2300 case VAL_CMD_BIND_INDEX_BUFFER
:
2301 handle_index_buffer(cmd
, state
);
2303 case VAL_CMD_BIND_VERTEX_BUFFERS
:
2304 handle_vertex_buffers(cmd
, state
);
2308 handle_draw(cmd
, state
);
2310 case VAL_CMD_DRAW_INDEXED
:
2312 handle_draw_indexed(cmd
, state
);
2314 case VAL_CMD_DRAW_INDIRECT
:
2316 handle_draw_indirect(cmd
, state
, false);
2318 case VAL_CMD_DRAW_INDEXED_INDIRECT
:
2320 handle_draw_indirect(cmd
, state
, true);
2322 case VAL_CMD_DISPATCH
:
2323 emit_compute_state(state
);
2324 handle_dispatch(cmd
, state
);
2326 case VAL_CMD_DISPATCH_INDIRECT
:
2327 emit_compute_state(state
);
2328 handle_dispatch_indirect(cmd
, state
);
2330 case VAL_CMD_COPY_BUFFER
:
2331 handle_copy_buffer(cmd
, state
);
2333 case VAL_CMD_COPY_IMAGE
:
2334 handle_copy_image(cmd
, state
);
2336 case VAL_CMD_BLIT_IMAGE
:
2337 handle_blit_image(cmd
, state
);
2339 case VAL_CMD_COPY_BUFFER_TO_IMAGE
:
2340 handle_copy_buffer_to_image(cmd
, state
);
2342 case VAL_CMD_COPY_IMAGE_TO_BUFFER
:
2343 handle_copy_image_to_buffer(cmd
, state
);
2345 case VAL_CMD_UPDATE_BUFFER
:
2346 handle_update_buffer(cmd
, state
);
2348 case VAL_CMD_FILL_BUFFER
:
2349 handle_fill_buffer(cmd
, state
);
2351 case VAL_CMD_CLEAR_COLOR_IMAGE
:
2352 handle_clear_color_image(cmd
, state
);
2354 case VAL_CMD_CLEAR_DEPTH_STENCIL_IMAGE
:
2355 handle_clear_ds_image(cmd
, state
);
2357 case VAL_CMD_CLEAR_ATTACHMENTS
:
2358 handle_clear_attachments(cmd
, state
);
2360 case VAL_CMD_RESOLVE_IMAGE
:
2361 handle_resolve_image(cmd
, state
);
2363 case VAL_CMD_SET_EVENT
:
2364 case VAL_CMD_RESET_EVENT
:
2365 handle_event_set(cmd
, state
);
2367 case VAL_CMD_WAIT_EVENTS
:
2368 handle_wait_events(cmd
, state
);
2370 case VAL_CMD_PIPELINE_BARRIER
:
2371 handle_pipeline_barrier(cmd
, state
);
2373 case VAL_CMD_BEGIN_QUERY
:
2374 handle_begin_query(cmd
, state
);
2376 case VAL_CMD_END_QUERY
:
2377 handle_end_query(cmd
, state
);
2379 case VAL_CMD_RESET_QUERY_POOL
:
2380 handle_reset_query_pool(cmd
, state
);
2382 case VAL_CMD_WRITE_TIMESTAMP
:
2383 handle_write_timestamp(cmd
, state
);
2385 case VAL_CMD_COPY_QUERY_POOL_RESULTS
:
2386 handle_copy_query_pool_results(cmd
, state
);
2388 case VAL_CMD_PUSH_CONSTANTS
:
2389 handle_push_constants(cmd
, state
);
2391 case VAL_CMD_BEGIN_RENDER_PASS
:
2392 handle_begin_render_pass(cmd
, state
);
2394 case VAL_CMD_NEXT_SUBPASS
:
2395 handle_next_subpass(cmd
, state
);
2397 case VAL_CMD_END_RENDER_PASS
:
2398 handle_end_render_pass(cmd
, state
);
2400 case VAL_CMD_EXECUTE_COMMANDS
:
2401 handle_execute_commands(cmd
, state
);
2407 VkResult
val_execute_cmds(struct val_device
*device
,
2408 struct val_queue
*queue
,
2409 struct val_fence
*fence
,
2410 struct val_cmd_buffer
*cmd_buffer
)
2412 struct rendering_state state
;
2413 struct pipe_fence_handle
*handle
= NULL
;
2414 memset(&state
, 0, sizeof(state
));
2415 state
.pctx
= queue
->ctx
;
2416 state
.blend_dirty
= true;
2417 state
.dsa_dirty
= true;
2418 state
.rs_dirty
= true;
2419 /* create a gallium context */
2420 val_execute_cmd_buffer(cmd_buffer
, &state
);
2422 state
.pctx
->flush(state
.pctx
, fence
? &handle
: NULL
, 0);
2424 mtx_lock(&device
->fence_lock
);
2425 fence
->handle
= handle
;
2426 mtx_unlock(&device
->fence_lock
);
2428 state
.start_vb
= -1;
2430 state
.pctx
->set_vertex_buffers(state
.pctx
, 0, PIPE_MAX_ATTRIBS
, NULL
);
2431 state
.pctx
->bind_vertex_elements_state(state
.pctx
, NULL
);
2432 state
.pctx
->bind_vs_state(state
.pctx
, NULL
);
2433 state
.pctx
->bind_fs_state(state
.pctx
, NULL
);
2434 state
.pctx
->bind_gs_state(state
.pctx
, NULL
);
2435 if (state
.pctx
->bind_tcs_state
)
2436 state
.pctx
->bind_tcs_state(state
.pctx
, NULL
);
2437 if (state
.pctx
->bind_tes_state
)
2438 state
.pctx
->bind_tes_state(state
.pctx
, NULL
);
2439 if (state
.pctx
->bind_compute_state
)
2440 state
.pctx
->bind_compute_state(state
.pctx
, NULL
);
2441 if (state
.velems_cso
)
2442 state
.pctx
->delete_vertex_elements_state(state
.pctx
, state
.velems_cso
);
2444 state
.pctx
->bind_rasterizer_state(state
.pctx
, NULL
);
2445 state
.pctx
->delete_rasterizer_state(state
.pctx
, state
.rast_handle
);
2446 if (state
.blend_handle
) {
2447 state
.pctx
->bind_blend_state(state
.pctx
, NULL
);
2448 state
.pctx
->delete_blend_state(state
.pctx
, state
.blend_handle
);
2451 if (state
.dsa_handle
) {
2452 state
.pctx
->bind_depth_stencil_alpha_state(state
.pctx
, NULL
);
2453 state
.pctx
->delete_depth_stencil_alpha_state(state
.pctx
, state
.dsa_handle
);
2456 for (enum pipe_shader_type s
= PIPE_SHADER_VERTEX
; s
< PIPE_SHADER_TYPES
; s
++) {
2457 for (unsigned i
= 0; i
< PIPE_MAX_SAMPLERS
; i
++) {
2459 pipe_sampler_view_reference(&state
.sv
[s
][i
], NULL
);
2460 if (state
.ss_cso
[s
][i
]) {
2461 state
.pctx
->delete_sampler_state(state
.pctx
, state
.ss_cso
[s
][i
]);
2462 state
.ss_cso
[s
][i
] = NULL
;
2465 state
.pctx
->bind_sampler_states(state
.pctx
, s
, 0, PIPE_MAX_SAMPLERS
, state
.ss_cso
[s
]);
2467 state
.pctx
->set_shader_images(state
.pctx
, s
, 0, device
->physical_device
->max_images
, NULL
);