/*
 * Copyright © 2019 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/* use a gallium context to execute a command buffer */

#include "val_private.h"

#include "pipe/p_context.h"
#include "pipe/p_state.h"

#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_text.h"
#include "tgsi/tgsi_parse.h"

#include "util/format/u_format.h"
#include "util/u_surface.h"
#include "util/u_sampler.h"
#include "util/u_box.h"
#include "util/u_inlines.h"
#include "util/format/u_format_zs.h"
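
/* Shadow copy of the gallium state touched while replaying a command
 * buffer.  Each piece of state carries a dirty flag; emit_state() and
 * emit_compute_state() only push the pieces that changed since the last
 * draw or dispatch.
 */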
struct rendering_state {
   struct pipe_context *pctx;

   bool blend_dirty;
   bool rs_dirty;
   bool dsa_dirty;
   bool stencil_ref_dirty;
   bool clip_state_dirty;
   bool blend_color_dirty;
   bool ve_dirty;
   bool vb_dirty;
   bool constbuf_dirty[PIPE_SHADER_TYPES];
   bool pcbuf_dirty[PIPE_SHADER_TYPES];
   bool vp_dirty;
   bool scissor_dirty;
   bool ib_dirty;
   bool sample_mask_dirty;
   bool min_samples_dirty;
   struct pipe_draw_indirect_info indirect_info;
   struct pipe_draw_info info;

   struct pipe_grid_info dispatch_info;
   struct pipe_framebuffer_state framebuffer;

   struct pipe_blend_state blend_state;
   void *blend_handle;
   struct pipe_rasterizer_state rs_state;
   void *rast_handle;
   struct pipe_depth_stencil_alpha_state dsa_state;
   void *dsa_handle;

   struct pipe_blend_color blend_color;
   struct pipe_stencil_ref stencil_ref;
   struct pipe_clip_state clip_state;

   int num_scissors;
   struct pipe_scissor_state scissors[16];

   int num_viewports;
   struct pipe_viewport_state viewports[16];

   ubyte index_size;
   unsigned index_offset;
   struct pipe_resource *index_buffer;
   struct pipe_constant_buffer pc_buffer[PIPE_SHADER_TYPES];
   struct pipe_constant_buffer const_buffer[PIPE_SHADER_TYPES][16];
   int num_const_bufs[PIPE_SHADER_TYPES];

   int num_vb;
   unsigned start_vb;
   struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
   int num_ve;
   struct pipe_vertex_element ve[PIPE_MAX_ATTRIBS];

   struct pipe_sampler_view *sv[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
   int num_sampler_views[PIPE_SHADER_TYPES];
   struct pipe_sampler_state ss[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
   int num_sampler_states[PIPE_SHADER_TYPES];
   bool sv_dirty[PIPE_SHADER_TYPES];
   bool ss_dirty[PIPE_SHADER_TYPES];

   struct pipe_image_view iv[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_IMAGES];
   int num_shader_images[PIPE_SHADER_TYPES];
   struct pipe_shader_buffer sb[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_BUFFERS];
   int num_shader_buffers[PIPE_SHADER_TYPES];
   bool iv_dirty[PIPE_SHADER_TYPES];
   bool sb_dirty[PIPE_SHADER_TYPES];
   void *ss_cso[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
   void *velems_cso;

   uint8_t push_constants[128 * 4];

   struct val_render_pass *pass;
   uint32_t subpass;
   struct val_framebuffer *vk_framebuffer;
   VkRect2D render_area;

   uint32_t sample_mask;
   unsigned min_samples;

   struct val_attachment_state *attachments;
};
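
/* Flush dirty compute state (shader images, constant buffers, SSBOs,
 * sampler views and sampler states) to the gallium context before a
 * dispatch.  Sampler states are immutable CSOs in gallium, so any stale
 * CSO is deleted and recreated from the shadowed pipe_sampler_state.
 */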
static void emit_compute_state(struct rendering_state *state)
{
   if (state->iv_dirty[PIPE_SHADER_COMPUTE]) {
      state->pctx->set_shader_images(state->pctx, PIPE_SHADER_COMPUTE,
                                     0, state->num_shader_images[PIPE_SHADER_COMPUTE],
                                     state->iv[PIPE_SHADER_COMPUTE]);
      state->iv_dirty[PIPE_SHADER_COMPUTE] = false;
   }

   if (state->pcbuf_dirty[PIPE_SHADER_COMPUTE]) {
      state->pctx->set_constant_buffer(state->pctx, PIPE_SHADER_COMPUTE,
                                       0, &state->pc_buffer[PIPE_SHADER_COMPUTE]);
      state->pcbuf_dirty[PIPE_SHADER_COMPUTE] = false;
   }

   if (state->constbuf_dirty[PIPE_SHADER_COMPUTE]) {
      for (unsigned i = 0; i < state->num_const_bufs[PIPE_SHADER_COMPUTE]; i++)
         state->pctx->set_constant_buffer(state->pctx, PIPE_SHADER_COMPUTE,
                                          i + 1, &state->const_buffer[PIPE_SHADER_COMPUTE][i]);
      state->constbuf_dirty[PIPE_SHADER_COMPUTE] = false;
   }

   if (state->sb_dirty[PIPE_SHADER_COMPUTE]) {
      state->pctx->set_shader_buffers(state->pctx, PIPE_SHADER_COMPUTE,
                                      0, state->num_shader_buffers[PIPE_SHADER_COMPUTE],
                                      state->sb[PIPE_SHADER_COMPUTE], 0);
      state->sb_dirty[PIPE_SHADER_COMPUTE] = false;
   }

   if (state->sv_dirty[PIPE_SHADER_COMPUTE]) {
      state->pctx->set_sampler_views(state->pctx, PIPE_SHADER_COMPUTE, 0, state->num_sampler_views[PIPE_SHADER_COMPUTE],
                                     state->sv[PIPE_SHADER_COMPUTE]);
      state->sv_dirty[PIPE_SHADER_COMPUTE] = false;
   }

   if (state->ss_dirty[PIPE_SHADER_COMPUTE]) {
      for (unsigned i = 0; i < state->num_sampler_states[PIPE_SHADER_COMPUTE]; i++) {
         if (state->ss_cso[PIPE_SHADER_COMPUTE][i])
            state->pctx->delete_sampler_state(state->pctx, state->ss_cso[PIPE_SHADER_COMPUTE][i]);
         state->ss_cso[PIPE_SHADER_COMPUTE][i] = state->pctx->create_sampler_state(state->pctx, &state->ss[PIPE_SHADER_COMPUTE][i]);
      }
      state->pctx->bind_sampler_states(state->pctx, PIPE_SHADER_COMPUTE, 0, state->num_sampler_states[PIPE_SHADER_COMPUTE], state->ss_cso[PIPE_SHADER_COMPUTE]);
      state->ss_dirty[PIPE_SHADER_COMPUTE] = false;
   }
}
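
/* Flush all dirty graphics state before a draw.  Blend, rasterizer,
 * depth/stencil/alpha and vertex-element state are immutable CSOs in
 * gallium, so a dirty flag means deleting the old object and creating
 * and binding a new one from the shadowed state.
 */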
static void emit_state(struct rendering_state *state)
{
   int sh;
   if (state->blend_dirty) {
      if (state->blend_handle) {
         state->pctx->bind_blend_state(state->pctx, NULL);
         state->pctx->delete_blend_state(state->pctx, state->blend_handle);
      }
      state->blend_handle = state->pctx->create_blend_state(state->pctx,
                                                            &state->blend_state);
      state->pctx->bind_blend_state(state->pctx, state->blend_handle);
      state->blend_dirty = false;
   }

   if (state->rs_dirty) {
      if (state->rast_handle) {
         state->pctx->bind_rasterizer_state(state->pctx, NULL);
         state->pctx->delete_rasterizer_state(state->pctx, state->rast_handle);
      }
      state->rast_handle = state->pctx->create_rasterizer_state(state->pctx,
                                                                &state->rs_state);
      state->pctx->bind_rasterizer_state(state->pctx, state->rast_handle);
      state->rs_dirty = false;
   }

   if (state->dsa_dirty) {
      if (state->dsa_handle) {
         state->pctx->bind_depth_stencil_alpha_state(state->pctx, NULL);
         state->pctx->delete_depth_stencil_alpha_state(state->pctx, state->dsa_handle);
      }
      state->dsa_handle = state->pctx->create_depth_stencil_alpha_state(state->pctx,
                                                                        &state->dsa_state);
      state->pctx->bind_depth_stencil_alpha_state(state->pctx, state->dsa_handle);
      state->dsa_dirty = false;
   }

   if (state->sample_mask_dirty) {
      state->pctx->set_sample_mask(state->pctx, state->sample_mask);
      state->sample_mask_dirty = false;
   }

   if (state->min_samples_dirty) {
      state->pctx->set_min_samples(state->pctx, state->min_samples);
      state->min_samples_dirty = false;
   }

   if (state->blend_color_dirty) {
      state->pctx->set_blend_color(state->pctx, &state->blend_color);
      state->blend_color_dirty = false;
   }

   if (state->stencil_ref_dirty) {
      state->pctx->set_stencil_ref(state->pctx, &state->stencil_ref);
      state->stencil_ref_dirty = false;
   }

   if (state->vb_dirty) {
      state->pctx->set_vertex_buffers(state->pctx, state->start_vb,
                                      state->num_vb, state->vb);
      state->vb_dirty = false;
   }

   if (state->ve_dirty) {
      void *ve = NULL;
      if (state->velems_cso)
         ve = state->velems_cso;

      state->velems_cso = state->pctx->create_vertex_elements_state(state->pctx, state->num_ve,
                                                                    state->ve);
      state->pctx->bind_vertex_elements_state(state->pctx, state->velems_cso);

      if (ve)
         state->pctx->delete_vertex_elements_state(state->pctx, ve);
      state->ve_dirty = false;
   }

   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
      if (state->constbuf_dirty[sh]) {
         for (unsigned idx = 0; idx < state->num_const_bufs[sh]; idx++)
            state->pctx->set_constant_buffer(state->pctx, sh,
                                             idx + 1, &state->const_buffer[sh][idx]);
      }
      state->constbuf_dirty[sh] = false;
   }

   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
      if (state->pcbuf_dirty[sh]) {
         state->pctx->set_constant_buffer(state->pctx, sh,
                                          0, &state->pc_buffer[sh]);
      }
   }

   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
      if (state->sb_dirty[sh]) {
         state->pctx->set_shader_buffers(state->pctx, sh,
                                         0, state->num_shader_buffers[sh],
                                         state->sb[sh], 0);
      }
   }

   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
      if (state->iv_dirty[sh]) {
         state->pctx->set_shader_images(state->pctx, sh,
                                        0, state->num_shader_images[sh],
                                        state->iv[sh]);
      }
   }

   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {

      if (!state->sv_dirty[sh])
         continue;

      state->pctx->set_sampler_views(state->pctx, sh, 0, state->num_sampler_views[sh],
                                     state->sv[sh]);
      state->sv_dirty[sh] = false;
   }

   for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
      int i;
      if (!state->ss_dirty[sh])
         continue;

      for (i = 0; i < state->num_sampler_states[sh]; i++) {
         if (state->ss_cso[sh][i])
            state->pctx->delete_sampler_state(state->pctx, state->ss_cso[sh][i]);
         state->ss_cso[sh][i] = state->pctx->create_sampler_state(state->pctx, &state->ss[sh][i]);
      }

      state->pctx->bind_sampler_states(state->pctx, sh, 0, state->num_sampler_states[sh], state->ss_cso[sh]);
      state->ss_dirty[sh] = false;
   }

   if (state->vp_dirty) {
      state->pctx->set_viewport_states(state->pctx, 0, state->num_viewports, state->viewports);
      state->vp_dirty = false;
   }

   if (state->scissor_dirty) {
      state->pctx->set_scissor_states(state->pctx, 0, state->num_scissors, state->scissors);
      state->scissor_dirty = false;
   }
}
static void handle_compute_pipeline(struct val_cmd_buffer_entry *cmd,
                                    struct rendering_state *state)
{
   struct val_pipeline *pipeline = cmd->u.pipeline.pipeline;

   state->dispatch_info.block[0] = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.cs.local_size[0];
   state->dispatch_info.block[1] = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.cs.local_size[1];
   state->dispatch_info.block[2] = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.cs.local_size[2];
   state->pctx->bind_compute_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_COMPUTE]);
}
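
/* Convert a Vulkan viewport to the gallium scale/translate form, where
 * window x = translate[0] + scale[0] * ndc_x (and likewise for y):
 * scale is half the viewport extent and translate is its center.
 * Depth maps [minDepth, maxDepth] as z = n + (f - n) * ndc_z.
 */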
static void
get_viewport_xform(const VkViewport *viewport,
                   float scale[3], float translate[3])
{
   float x = viewport->x;
   float y = viewport->y;
   float half_width = 0.5f * viewport->width;
   float half_height = 0.5f * viewport->height;
   double n = viewport->minDepth;
   double f = viewport->maxDepth;

   scale[0] = half_width;
   translate[0] = half_width + x;
   scale[1] = half_height;
   translate[1] = half_height + y;

   scale[2] = (f - n);
   translate[2] = n;
}
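
/* Translate a bound graphics VkPipeline's static state into the
 * shadowed gallium state.  State the pipeline declares dynamic is
 * skipped here and left to the matching vkCmdSet* handler below.
 */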
static void handle_graphics_pipeline(struct val_cmd_buffer_entry *cmd,
                                     struct rendering_state *state)
{
   struct val_pipeline *pipeline = cmd->u.pipeline.pipeline;
   bool dynamic_states[VK_DYNAMIC_STATE_STENCIL_REFERENCE + 1];
   unsigned fb_samples = 0;
   int i;

   memset(dynamic_states, 0, sizeof(dynamic_states));
   if (pipeline->graphics_create_info.pDynamicState) {
      const VkPipelineDynamicStateCreateInfo *dyn = pipeline->graphics_create_info.pDynamicState;

      for (i = 0; i < dyn->dynamicStateCount; i++) {
         if (dyn->pDynamicStates[i] > VK_DYNAMIC_STATE_STENCIL_REFERENCE)
            continue;
         dynamic_states[dyn->pDynamicStates[i]] = true;
      }
   }

   bool has_stage[PIPE_SHADER_TYPES] = { false };

   state->pctx->bind_gs_state(state->pctx, NULL);
   if (state->pctx->bind_tcs_state)
      state->pctx->bind_tcs_state(state->pctx, NULL);
   if (state->pctx->bind_tes_state)
      state->pctx->bind_tes_state(state->pctx, NULL);

   for (i = 0; i < pipeline->graphics_create_info.stageCount; i++) {
      const VkPipelineShaderStageCreateInfo *sh = &pipeline->graphics_create_info.pStages[i];
      switch (sh->stage) {
      case VK_SHADER_STAGE_FRAGMENT_BIT:
         state->pctx->bind_fs_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_FRAGMENT]);
         has_stage[PIPE_SHADER_FRAGMENT] = true;
         break;
      case VK_SHADER_STAGE_VERTEX_BIT:
         state->pctx->bind_vs_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_VERTEX]);
         has_stage[PIPE_SHADER_VERTEX] = true;
         break;
      case VK_SHADER_STAGE_GEOMETRY_BIT:
         state->pctx->bind_gs_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_GEOMETRY]);
         has_stage[PIPE_SHADER_GEOMETRY] = true;
         break;
      case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
         state->pctx->bind_tcs_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_TESS_CTRL]);
         has_stage[PIPE_SHADER_TESS_CTRL] = true;
         break;
      case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
         state->pctx->bind_tes_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_TESS_EVAL]);
         has_stage[PIPE_SHADER_TESS_EVAL] = true;
         break;
      default:
         break;
      }
   }

   /* there should always be a dummy fs. */
   if (!has_stage[PIPE_SHADER_FRAGMENT])
      state->pctx->bind_fs_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_FRAGMENT]);
   if (state->pctx->bind_gs_state && !has_stage[PIPE_SHADER_GEOMETRY])
      state->pctx->bind_gs_state(state->pctx, NULL);
   if (state->pctx->bind_tcs_state && !has_stage[PIPE_SHADER_TESS_CTRL])
      state->pctx->bind_tcs_state(state->pctx, NULL);
   if (state->pctx->bind_tes_state && !has_stage[PIPE_SHADER_TESS_EVAL])
      state->pctx->bind_tes_state(state->pctx, NULL);

   /* rasterization state */
   if (pipeline->graphics_create_info.pRasterizationState) {
      const VkPipelineRasterizationStateCreateInfo *rsc = pipeline->graphics_create_info.pRasterizationState;
      state->rs_state.depth_clip_near = state->rs_state.depth_clip_far = !rsc->depthClampEnable;
      state->rs_state.rasterizer_discard = rsc->rasterizerDiscardEnable;
      state->rs_state.front_ccw = (rsc->frontFace == VK_FRONT_FACE_COUNTER_CLOCKWISE);
      state->rs_state.cull_face = vk_cull_to_pipe(rsc->cullMode);
      state->rs_state.fill_front = vk_polygon_mode_to_pipe(rsc->polygonMode);
      state->rs_state.fill_back = vk_polygon_mode_to_pipe(rsc->polygonMode);
      state->rs_state.point_size_per_vertex = true;
      state->rs_state.flatshade_first = true;
      state->rs_state.point_quad_rasterization = true;
      state->rs_state.clip_halfz = true;
      state->rs_state.half_pixel_center = true;
      state->rs_state.scissor = true;

      if (!dynamic_states[VK_DYNAMIC_STATE_LINE_WIDTH])
         state->rs_state.line_width = rsc->lineWidth;

      if (!dynamic_states[VK_DYNAMIC_STATE_DEPTH_BIAS]) {
         state->rs_state.offset_units = rsc->depthBiasConstantFactor;
         state->rs_state.offset_scale = rsc->depthBiasSlopeFactor;
         state->rs_state.offset_clamp = rsc->depthBiasClamp;
      }
      state->rs_dirty = true;
   }

   if (pipeline->graphics_create_info.pMultisampleState) {
      const VkPipelineMultisampleStateCreateInfo *ms = pipeline->graphics_create_info.pMultisampleState;
      state->rs_state.multisample = ms->rasterizationSamples > 1;
      state->sample_mask = ms->pSampleMask ? ms->pSampleMask[0] : 0xffffffff;
      state->blend_state.alpha_to_coverage = ms->alphaToCoverageEnable;
      state->blend_state.alpha_to_one = ms->alphaToOneEnable;
      state->blend_dirty = true;
      state->rs_dirty = true;
      state->min_samples = 1;
      state->sample_mask_dirty = true;
      fb_samples = ms->rasterizationSamples;
      if (ms->sampleShadingEnable) {
         state->min_samples = ceil(ms->rasterizationSamples * ms->minSampleShading);
         if (state->min_samples > 1)
            state->min_samples = ms->rasterizationSamples;
         if (state->min_samples < 1)
            state->min_samples = 1;
      }
      if (pipeline->force_min_sample)
         state->min_samples = ms->rasterizationSamples;
      state->min_samples_dirty = true;
   } else {
      state->rs_state.multisample = false;
      state->blend_state.alpha_to_coverage = false;
      state->blend_state.alpha_to_one = false;
      state->rs_dirty = true;
   }

   if (pipeline->graphics_create_info.pDepthStencilState) {
      const VkPipelineDepthStencilStateCreateInfo *dsa = pipeline->graphics_create_info.pDepthStencilState;

      state->dsa_state.depth.enabled = dsa->depthTestEnable;
      state->dsa_state.depth.writemask = dsa->depthWriteEnable;
      state->dsa_state.depth.func = dsa->depthCompareOp;
      state->dsa_state.depth.bounds_test = dsa->depthBoundsTestEnable;

      if (!dynamic_states[VK_DYNAMIC_STATE_DEPTH_BOUNDS]) {
         state->dsa_state.depth.bounds_min = dsa->minDepthBounds;
         state->dsa_state.depth.bounds_max = dsa->maxDepthBounds;
      }

      state->dsa_state.stencil[0].enabled = dsa->stencilTestEnable;
      state->dsa_state.stencil[0].func = dsa->front.compareOp;
      state->dsa_state.stencil[0].fail_op = vk_conv_stencil_op(dsa->front.failOp);
      state->dsa_state.stencil[0].zpass_op = vk_conv_stencil_op(dsa->front.passOp);
      state->dsa_state.stencil[0].zfail_op = vk_conv_stencil_op(dsa->front.depthFailOp);

      state->dsa_state.stencil[1].enabled = dsa->stencilTestEnable;
      state->dsa_state.stencil[1].func = dsa->back.compareOp;
      state->dsa_state.stencil[1].fail_op = vk_conv_stencil_op(dsa->back.failOp);
      state->dsa_state.stencil[1].zpass_op = vk_conv_stencil_op(dsa->back.passOp);
      state->dsa_state.stencil[1].zfail_op = vk_conv_stencil_op(dsa->back.depthFailOp);

      if (!dynamic_states[VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK]) {
         state->dsa_state.stencil[0].valuemask = dsa->front.compareMask;
         state->dsa_state.stencil[1].valuemask = dsa->back.compareMask;
      }

      if (!dynamic_states[VK_DYNAMIC_STATE_STENCIL_WRITE_MASK]) {
         state->dsa_state.stencil[0].writemask = dsa->front.writeMask;
         state->dsa_state.stencil[1].writemask = dsa->back.writeMask;
      }

      if (dsa->stencilTestEnable) {
         if (!dynamic_states[VK_DYNAMIC_STATE_STENCIL_REFERENCE]) {
            state->stencil_ref.ref_value[0] = dsa->front.reference;
            state->stencil_ref.ref_value[1] = dsa->back.reference;
            state->stencil_ref_dirty = true;
         }
      }

      state->dsa_dirty = true;
   }

   if (pipeline->graphics_create_info.pColorBlendState) {
      const VkPipelineColorBlendStateCreateInfo *cb = pipeline->graphics_create_info.pColorBlendState;

      if (cb->attachmentCount > 1)
         state->blend_state.independent_blend_enable = true;
      for (i = 0; i < cb->attachmentCount; i++) {
         state->blend_state.rt[i].colormask = cb->pAttachments[i].colorWriteMask;
         state->blend_state.rt[i].blend_enable = cb->pAttachments[i].blendEnable;
         state->blend_state.rt[i].rgb_func = vk_conv_blend_func(cb->pAttachments[i].colorBlendOp);
         state->blend_state.rt[i].rgb_src_factor = vk_conv_blend_factor(cb->pAttachments[i].srcColorBlendFactor);
         state->blend_state.rt[i].rgb_dst_factor = vk_conv_blend_factor(cb->pAttachments[i].dstColorBlendFactor);
         state->blend_state.rt[i].alpha_func = vk_conv_blend_func(cb->pAttachments[i].alphaBlendOp);
         state->blend_state.rt[i].alpha_src_factor = vk_conv_blend_factor(cb->pAttachments[i].srcAlphaBlendFactor);
         state->blend_state.rt[i].alpha_dst_factor = vk_conv_blend_factor(cb->pAttachments[i].dstAlphaBlendFactor);

         /* At least llvmpipe applies the blend factor prior to the blend function,
          * regardless of what function is used. (like i965 hardware).
          * It means for MIN/MAX the blend factor has to be stomped to ONE.
          */
         if (cb->pAttachments[i].colorBlendOp == VK_BLEND_OP_MIN ||
             cb->pAttachments[i].colorBlendOp == VK_BLEND_OP_MAX) {
            state->blend_state.rt[i].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
            state->blend_state.rt[i].rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
         }

         if (cb->pAttachments[i].alphaBlendOp == VK_BLEND_OP_MIN ||
             cb->pAttachments[i].alphaBlendOp == VK_BLEND_OP_MAX) {
            state->blend_state.rt[i].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
            state->blend_state.rt[i].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
         }
      }
      state->blend_dirty = true;
      if (!dynamic_states[VK_DYNAMIC_STATE_BLEND_CONSTANTS]) {
         memcpy(state->blend_color.color, cb->blendConstants, 4 * sizeof(float));
         state->blend_color_dirty = true;
      }
   }

   {
      const VkPipelineVertexInputStateCreateInfo *vi = pipeline->graphics_create_info.pVertexInputState;

      for (i = 0; i < vi->vertexBindingDescriptionCount; i++) {
         state->vb[i].stride = vi->pVertexBindingDescriptions[i].stride;
      }

      int max_location = -1;
      for (i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
         unsigned location = vi->pVertexAttributeDescriptions[i].location;
         state->ve[location].src_offset = vi->pVertexAttributeDescriptions[i].offset;
         state->ve[location].vertex_buffer_index = vi->pVertexAttributeDescriptions[i].binding;
         state->ve[location].src_format = vk_format_to_pipe(vi->pVertexAttributeDescriptions[i].format);
         state->ve[location].instance_divisor = vi->pVertexBindingDescriptions[vi->pVertexAttributeDescriptions[i].binding].inputRate;

         if ((int)location > max_location)
            max_location = location;
      }
      state->num_ve = max_location + 1;
      state->vb_dirty = true;
      state->ve_dirty = true;
   }

   {
      const VkPipelineInputAssemblyStateCreateInfo *ia = pipeline->graphics_create_info.pInputAssemblyState;

      state->info.mode = vk_conv_topology(ia->topology);
      state->info.primitive_restart = ia->primitiveRestartEnable;
   }

   if (pipeline->graphics_create_info.pTessellationState) {
      const VkPipelineTessellationStateCreateInfo *ts = pipeline->graphics_create_info.pTessellationState;
      state->info.vertices_per_patch = ts->patchControlPoints;
   } else
      state->info.vertices_per_patch = 0;

   if (pipeline->graphics_create_info.pViewportState) {
      const VkPipelineViewportStateCreateInfo *vpi = pipeline->graphics_create_info.pViewportState;

      state->num_viewports = vpi->viewportCount;
      state->num_scissors = vpi->scissorCount;
      state->vp_dirty = true;
      if (!dynamic_states[VK_DYNAMIC_STATE_VIEWPORT]) {
         for (i = 0; i < vpi->viewportCount; i++)
            get_viewport_xform(&vpi->pViewports[i], state->viewports[i].scale, state->viewports[i].translate);
         state->vp_dirty = true;
      }
      if (!dynamic_states[VK_DYNAMIC_STATE_SCISSOR]) {
         for (i = 0; i < vpi->scissorCount; i++) {
            const VkRect2D *ss = &vpi->pScissors[i];
            state->scissors[i].minx = ss->offset.x;
            state->scissors[i].miny = ss->offset.y;
            state->scissors[i].maxx = ss->offset.x + ss->extent.width;
            state->scissors[i].maxy = ss->offset.y + ss->extent.height;
            state->scissor_dirty = true;
         }
      }
   }

   if (fb_samples != state->framebuffer.samples) {
      state->framebuffer.samples = fb_samples;
      state->pctx->set_framebuffer_state(state->pctx, &state->framebuffer);
   }
}
static void handle_pipeline(struct val_cmd_buffer_entry *cmd,
                            struct rendering_state *state)
{
   struct val_pipeline *pipeline = cmd->u.pipeline.pipeline;
   if (pipeline->is_compute_pipeline)
      handle_compute_pipeline(cmd, state);
   else
      handle_graphics_pipeline(cmd, state);
}
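
/* vkCmdBindVertexBuffers: update the shadowed vertex-buffer slots and
 * widen the [start_vb, num_vb) range that the next emit_state() will
 * hand to set_vertex_buffers().
 */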
static void handle_vertex_buffers(struct val_cmd_buffer_entry *cmd,
                                  struct rendering_state *state)
{
   int i;
   struct val_cmd_bind_vertex_buffers *vcb = &cmd->u.vertex_buffers;
   for (i = 0; i < vcb->binding_count; i++) {
      int idx = i + vcb->first;

      state->vb[idx].buffer_offset = vcb->offsets[i];
      state->vb[idx].buffer.resource = vcb->buffers[i]->bo;
   }
   if (vcb->first < state->start_vb)
      state->start_vb = vcb->first;
   if (vcb->first + vcb->binding_count >= state->num_vb)
      state->num_vb = vcb->first + vcb->binding_count;
   state->vb_dirty = true;
}
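
/* Running per-stage slot totals accumulated while walking bound
 * descriptor sets; earlier sets in the pipeline layout bump these
 * counts so bindings in later sets land at the right flattened index.
 */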
struct dyn_info {
   struct {
      uint16_t const_buffer_count;
      uint16_t shader_buffer_count;
      uint16_t sampler_count;
      uint16_t sampler_view_count;
      uint16_t image_count;
   } stage[MESA_SHADER_STAGES];

   uint32_t dyn_index;
   const uint32_t *dynamic_offsets;
   uint32_t dynamic_offset_count;
};
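
/* Convert a VkSamplerCreateInfo into a gallium pipe_sampler_state. */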
static void fill_sampler(struct pipe_sampler_state *ss,
                         struct val_sampler *samp)
{
   ss->wrap_s = vk_conv_wrap_mode(samp->create_info.addressModeU);
   ss->wrap_t = vk_conv_wrap_mode(samp->create_info.addressModeV);
   ss->wrap_r = vk_conv_wrap_mode(samp->create_info.addressModeW);
   ss->min_img_filter = samp->create_info.minFilter == VK_FILTER_LINEAR ? PIPE_TEX_FILTER_LINEAR : PIPE_TEX_FILTER_NEAREST;
   ss->min_mip_filter = samp->create_info.mipmapMode == VK_SAMPLER_MIPMAP_MODE_LINEAR ? PIPE_TEX_MIPFILTER_LINEAR : PIPE_TEX_MIPFILTER_NEAREST;
   ss->mag_img_filter = samp->create_info.magFilter == VK_FILTER_LINEAR ? PIPE_TEX_FILTER_LINEAR : PIPE_TEX_FILTER_NEAREST;
   ss->min_lod = samp->create_info.minLod;
   ss->max_lod = samp->create_info.maxLod;
   ss->lod_bias = samp->create_info.mipLodBias;
   ss->max_anisotropy = samp->create_info.maxAnisotropy;
   ss->normalized_coords = !samp->create_info.unnormalizedCoordinates;
   ss->compare_mode = samp->create_info.compareEnable ? PIPE_TEX_COMPARE_R_TO_TEXTURE : PIPE_TEX_COMPARE_NONE;
   ss->compare_func = samp->create_info.compareOp;
   ss->seamless_cube_map = true;

   switch (samp->create_info.borderColor) {
   case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
   case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
   default:
      memset(ss->border_color.f, 0, 4 * sizeof(float));
      break;
   case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
      ss->border_color.f[0] = ss->border_color.f[1] = ss->border_color.f[2] = 0.0f;
      ss->border_color.f[3] = 1.0f;
      break;
   case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
      ss->border_color.i[0] = ss->border_color.i[1] = ss->border_color.i[2] = 0;
      ss->border_color.i[3] = 1;
      break;
   case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
      ss->border_color.f[0] = ss->border_color.f[1] = ss->border_color.f[2] = 1.0f;
      ss->border_color.f[3] = 1.0f;
      break;
   case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
      ss->border_color.i[0] = ss->border_color.i[1] = ss->border_color.i[2] = 1;
      ss->border_color.i[3] = 1;
      break;
   }
}
static void fill_sampler_stage(struct rendering_state *state,
                               struct dyn_info *dyn_info,
                               gl_shader_stage stage,
                               enum pipe_shader_type p_stage,
                               int array_idx,
                               const struct val_descriptor *descriptor,
                               const struct val_descriptor_set_binding_layout *binding)
{
   int ss_idx = binding->stage[stage].sampler_index;
   if (ss_idx == -1)
      return;
   ss_idx += array_idx;
   ss_idx += dyn_info->stage[stage].sampler_count;
   fill_sampler(&state->ss[p_stage][ss_idx], descriptor->sampler);
   if (state->num_sampler_states[p_stage] <= ss_idx)
      state->num_sampler_states[p_stage] = ss_idx + 1;
   state->ss_dirty[p_stage] = true;
}
static void fill_sampler_view_stage(struct rendering_state *state,
                                    struct dyn_info *dyn_info,
                                    gl_shader_stage stage,
                                    enum pipe_shader_type p_stage,
                                    int array_idx,
                                    const struct val_descriptor *descriptor,
                                    const struct val_descriptor_set_binding_layout *binding)
{
   int sv_idx = binding->stage[stage].sampler_view_index;
   if (sv_idx == -1)
      return;
   sv_idx += array_idx;
   sv_idx += dyn_info->stage[stage].sampler_view_count;
   struct val_image_view *iv = descriptor->image_view;
   struct pipe_sampler_view templ;

   enum pipe_format pformat;
   if (iv->subresourceRange.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT)
      pformat = vk_format_to_pipe(iv->format);
   else if (iv->subresourceRange.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT)
      pformat = util_format_stencil_only(vk_format_to_pipe(iv->format));
   else
      pformat = vk_format_to_pipe(iv->format);
   u_sampler_view_default_template(&templ,
                                   iv->image->bo,
                                   pformat);
   if (iv->view_type == VK_IMAGE_VIEW_TYPE_1D)
      templ.target = PIPE_TEXTURE_1D;
   if (iv->view_type == VK_IMAGE_VIEW_TYPE_2D)
      templ.target = PIPE_TEXTURE_2D;
   if (iv->view_type == VK_IMAGE_VIEW_TYPE_CUBE)
      templ.target = PIPE_TEXTURE_CUBE;
   templ.u.tex.first_layer = iv->subresourceRange.baseArrayLayer;
   templ.u.tex.last_layer = iv->subresourceRange.baseArrayLayer + val_get_layerCount(iv->image, &iv->subresourceRange) - 1;
   templ.u.tex.first_level = iv->subresourceRange.baseMipLevel;
   templ.u.tex.last_level = iv->subresourceRange.baseMipLevel + val_get_levelCount(iv->image, &iv->subresourceRange) - 1;
   if (iv->components.r != VK_COMPONENT_SWIZZLE_IDENTITY)
      templ.swizzle_r = vk_conv_swizzle(iv->components.r);
   if (iv->components.g != VK_COMPONENT_SWIZZLE_IDENTITY)
      templ.swizzle_g = vk_conv_swizzle(iv->components.g);
   if (iv->components.b != VK_COMPONENT_SWIZZLE_IDENTITY)
      templ.swizzle_b = vk_conv_swizzle(iv->components.b);
   if (iv->components.a != VK_COMPONENT_SWIZZLE_IDENTITY)
      templ.swizzle_a = vk_conv_swizzle(iv->components.a);

   if (util_format_is_depth_or_stencil(templ.format)) {
      templ.swizzle_r = PIPE_SWIZZLE_X;
      templ.swizzle_g = PIPE_SWIZZLE_0;
      templ.swizzle_b = PIPE_SWIZZLE_0;
   }

   if (state->sv[p_stage][sv_idx])
      pipe_sampler_view_reference(&state->sv[p_stage][sv_idx], NULL);
   state->sv[p_stage][sv_idx] = state->pctx->create_sampler_view(state->pctx, iv->image->bo, &templ);
   if (state->num_sampler_views[p_stage] <= sv_idx)
      state->num_sampler_views[p_stage] = sv_idx + 1;
   state->sv_dirty[p_stage] = true;
}
static void fill_sampler_buffer_view_stage(struct rendering_state *state,
                                           struct dyn_info *dyn_info,
                                           gl_shader_stage stage,
                                           enum pipe_shader_type p_stage,
                                           int array_idx,
                                           const struct val_descriptor *descriptor,
                                           const struct val_descriptor_set_binding_layout *binding)
{
   int sv_idx = binding->stage[stage].sampler_view_index;
   if (sv_idx == -1)
      return;
   sv_idx += array_idx;
   sv_idx += dyn_info->stage[stage].sampler_view_count;
   struct val_buffer_view *bv = descriptor->buffer_view;
   struct pipe_sampler_view templ;
   memset(&templ, 0, sizeof(templ));
   templ.target = PIPE_BUFFER;
   templ.swizzle_r = PIPE_SWIZZLE_X;
   templ.swizzle_g = PIPE_SWIZZLE_Y;
   templ.swizzle_b = PIPE_SWIZZLE_Z;
   templ.swizzle_a = PIPE_SWIZZLE_W;
   templ.format = bv->pformat;
   templ.u.buf.offset = bv->offset + bv->buffer->offset;
   templ.u.buf.size = bv->range == VK_WHOLE_SIZE ? (bv->buffer->size - bv->offset) : bv->range;
   templ.texture = bv->buffer->bo;
   templ.context = state->pctx;

   if (state->sv[p_stage][sv_idx])
      pipe_sampler_view_reference(&state->sv[p_stage][sv_idx], NULL);
   state->sv[p_stage][sv_idx] = state->pctx->create_sampler_view(state->pctx, bv->buffer->bo, &templ);
   if (state->num_sampler_views[p_stage] <= sv_idx)
      state->num_sampler_views[p_stage] = sv_idx + 1;
   state->sv_dirty[p_stage] = true;
}
static void fill_image_view_stage(struct rendering_state *state,
                                  struct dyn_info *dyn_info,
                                  gl_shader_stage stage,
                                  enum pipe_shader_type p_stage,
                                  int array_idx,
                                  const struct val_descriptor *descriptor,
                                  const struct val_descriptor_set_binding_layout *binding)
{
   struct val_image_view *iv = descriptor->image_view;
   int idx = binding->stage[stage].image_index;
   if (idx == -1)
      return;
   idx += array_idx;
   idx += dyn_info->stage[stage].image_count;
   state->iv[p_stage][idx].resource = iv->image->bo;
   if (iv->subresourceRange.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT)
      state->iv[p_stage][idx].format = vk_format_to_pipe(iv->format);
   else if (iv->subresourceRange.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT)
      state->iv[p_stage][idx].format = util_format_stencil_only(vk_format_to_pipe(iv->format));
   else
      state->iv[p_stage][idx].format = vk_format_to_pipe(iv->format);

   if (iv->view_type == VK_IMAGE_VIEW_TYPE_3D) {
      state->iv[p_stage][idx].u.tex.first_layer = 0;
      state->iv[p_stage][idx].u.tex.last_layer = u_minify(iv->image->bo->depth0, iv->subresourceRange.baseMipLevel) - 1;
   } else {
      state->iv[p_stage][idx].u.tex.first_layer = iv->subresourceRange.baseArrayLayer;
      state->iv[p_stage][idx].u.tex.last_layer = iv->subresourceRange.baseArrayLayer + val_get_layerCount(iv->image, &iv->subresourceRange) - 1;
   }
   state->iv[p_stage][idx].u.tex.level = iv->subresourceRange.baseMipLevel;
   if (state->num_shader_images[p_stage] <= idx)
      state->num_shader_images[p_stage] = idx + 1;
   state->iv_dirty[p_stage] = true;
}
static void fill_image_buffer_view_stage(struct rendering_state *state,
                                         struct dyn_info *dyn_info,
                                         gl_shader_stage stage,
                                         enum pipe_shader_type p_stage,
                                         int array_idx,
                                         const struct val_descriptor *descriptor,
                                         const struct val_descriptor_set_binding_layout *binding)
{
   struct val_buffer_view *bv = descriptor->buffer_view;
   int idx = binding->stage[stage].image_index;
   if (idx == -1)
      return;
   idx += array_idx;
   idx += dyn_info->stage[stage].image_count;
   state->iv[p_stage][idx].resource = bv->buffer->bo;
   state->iv[p_stage][idx].format = bv->pformat;
   state->iv[p_stage][idx].u.buf.offset = bv->offset + bv->buffer->offset;
   state->iv[p_stage][idx].u.buf.size = bv->range == VK_WHOLE_SIZE ? (bv->buffer->size - bv->offset) : bv->range;
   if (state->num_shader_images[p_stage] <= idx)
      state->num_shader_images[p_stage] = idx + 1;
   state->iv_dirty[p_stage] = true;
}
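
/* Route one descriptor to the fill_* helper for a single shader stage.
 * Dynamic UBO/SSBO descriptors additionally apply the dynamic offset
 * that was supplied at vkCmdBindDescriptorSets time.
 */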
static void handle_descriptor(struct rendering_state *state,
                              struct dyn_info *dyn_info,
                              const struct val_descriptor_set_binding_layout *binding,
                              gl_shader_stage stage,
                              enum pipe_shader_type p_stage,
                              int array_idx,
                              const struct val_descriptor *descriptor)
{
   bool is_dynamic = descriptor->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
      descriptor->type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;

   switch (descriptor->type) {
   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
   case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
      fill_image_view_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
      break;
   }
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: {
      int idx = binding->stage[stage].const_buffer_index;
      if (idx == -1)
         return;
      idx += array_idx;
      idx += dyn_info->stage[stage].const_buffer_count;
      state->const_buffer[p_stage][idx].buffer = descriptor->buf.buffer->bo;
      state->const_buffer[p_stage][idx].buffer_offset = descriptor->buf.offset + descriptor->buf.buffer->offset;
      if (is_dynamic) {
         uint32_t offset = dyn_info->dynamic_offsets[dyn_info->dyn_index + binding->dynamic_index + array_idx];
         state->const_buffer[p_stage][idx].buffer_offset += offset;
      }
      if (descriptor->buf.range == VK_WHOLE_SIZE)
         state->const_buffer[p_stage][idx].buffer_size = descriptor->buf.buffer->bo->width0 - state->const_buffer[p_stage][idx].buffer_offset;
      else
         state->const_buffer[p_stage][idx].buffer_size = descriptor->buf.range;
      if (state->num_const_bufs[p_stage] <= idx)
         state->num_const_bufs[p_stage] = idx + 1;
      state->constbuf_dirty[p_stage] = true;
      break;
   }
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
      int idx = binding->stage[stage].shader_buffer_index;
      if (idx == -1)
         return;
      idx += array_idx;
      idx += dyn_info->stage[stage].shader_buffer_count;
      state->sb[p_stage][idx].buffer = descriptor->buf.buffer->bo;
      state->sb[p_stage][idx].buffer_offset = descriptor->buf.offset + descriptor->buf.buffer->offset;
      if (is_dynamic) {
         uint32_t offset = dyn_info->dynamic_offsets[dyn_info->dyn_index + binding->dynamic_index + array_idx];
         state->sb[p_stage][idx].buffer_offset += offset;
      }
      if (descriptor->buf.range == VK_WHOLE_SIZE)
         state->sb[p_stage][idx].buffer_size = descriptor->buf.buffer->bo->width0 - state->sb[p_stage][idx].buffer_offset;
      else
         state->sb[p_stage][idx].buffer_size = descriptor->buf.range;
      if (state->num_shader_buffers[p_stage] <= idx)
         state->num_shader_buffers[p_stage] = idx + 1;
      state->sb_dirty[p_stage] = true;
      break;
   }
   case VK_DESCRIPTOR_TYPE_SAMPLER:
      if (!descriptor->sampler)
         return;
      fill_sampler_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
      break;
   case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      fill_sampler_view_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
      break;
   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      fill_sampler_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
      fill_sampler_view_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
      break;
   case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      fill_sampler_buffer_view_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
      break;
   case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      fill_image_buffer_view_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
      break;
   default:
      fprintf(stderr, "Unhandled descriptor set %d\n", descriptor->type);
      break;
   }
}
static void handle_set_stage(struct rendering_state *state,
                             struct dyn_info *dyn_info,
                             const struct val_descriptor_set *set,
                             gl_shader_stage stage,
                             enum pipe_shader_type p_stage)
{
   int j;
   for (j = 0; j < set->layout->binding_count; j++) {
      const struct val_descriptor_set_binding_layout *binding;
      const struct val_descriptor *descriptor;
      binding = &set->layout->binding[j];

      if (binding->valid) {
         for (int i = 0; i < binding->array_size; i++) {
            descriptor = &set->descriptors[binding->descriptor_index + i];
            handle_descriptor(state, dyn_info, binding, stage, p_stage, i, descriptor);
         }
      }
   }
}
static void increment_dyn_info(struct dyn_info *dyn_info,
                               struct val_descriptor_set_layout *layout, bool inc_dyn)
{
   for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < MESA_SHADER_STAGES; stage++) {
      dyn_info->stage[stage].const_buffer_count += layout->stage[stage].const_buffer_count;
      dyn_info->stage[stage].shader_buffer_count += layout->stage[stage].shader_buffer_count;
      dyn_info->stage[stage].sampler_count += layout->stage[stage].sampler_count;
      dyn_info->stage[stage].sampler_view_count += layout->stage[stage].sampler_view_count;
      dyn_info->stage[stage].image_count += layout->stage[stage].image_count;
   }
   if (inc_dyn)
      dyn_info->dyn_index += layout->dynamic_offset_count;
}
static void handle_compute_descriptor_sets(struct val_cmd_buffer_entry *cmd,
                                           struct dyn_info *dyn_info,
                                           struct rendering_state *state)
{
   struct val_cmd_bind_descriptor_sets *bds = &cmd->u.descriptor_sets;
   int i;

   for (i = 0; i < bds->first; i++) {
      increment_dyn_info(dyn_info, bds->layout->set[i].layout, false);
   }
   for (i = 0; i < bds->count; i++) {
      const struct val_descriptor_set *set = bds->sets[i];

      if (set->layout->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT)
         handle_set_stage(state, dyn_info, set, MESA_SHADER_COMPUTE, PIPE_SHADER_COMPUTE);
      increment_dyn_info(dyn_info, bds->layout->set[bds->first + i].layout, true);
   }
}
static void handle_descriptor_sets(struct val_cmd_buffer_entry *cmd,
                                   struct rendering_state *state)
{
   struct val_cmd_bind_descriptor_sets *bds = &cmd->u.descriptor_sets;
   int i;
   struct dyn_info dyn_info;

   dyn_info.dyn_index = 0;
   dyn_info.dynamic_offsets = bds->dynamic_offsets;
   dyn_info.dynamic_offset_count = bds->dynamic_offset_count;

   memset(dyn_info.stage, 0, sizeof(dyn_info.stage));
   if (bds->bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
      handle_compute_descriptor_sets(cmd, &dyn_info, state);
      return;
   }

   for (i = 0; i < bds->first; i++) {
      increment_dyn_info(&dyn_info, bds->layout->set[i].layout, false);
   }

   for (i = 0; i < bds->count; i++) {
      const struct val_descriptor_set *set = bds->sets[i];

      if (set->layout->shader_stages & VK_SHADER_STAGE_VERTEX_BIT)
         handle_set_stage(state, &dyn_info, set, MESA_SHADER_VERTEX, PIPE_SHADER_VERTEX);

      if (set->layout->shader_stages & VK_SHADER_STAGE_FRAGMENT_BIT)
         handle_set_stage(state, &dyn_info, set, MESA_SHADER_FRAGMENT, PIPE_SHADER_FRAGMENT);

      if (set->layout->shader_stages & VK_SHADER_STAGE_GEOMETRY_BIT)
         handle_set_stage(state, &dyn_info, set, MESA_SHADER_GEOMETRY, PIPE_SHADER_GEOMETRY);

      if (set->layout->shader_stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
         handle_set_stage(state, &dyn_info, set, MESA_SHADER_TESS_CTRL, PIPE_SHADER_TESS_CTRL);

      if (set->layout->shader_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
         handle_set_stage(state, &dyn_info, set, MESA_SHADER_TESS_EVAL, PIPE_SHADER_TESS_EVAL);
      increment_dyn_info(&dyn_info, bds->layout->set[bds->first + i].layout, true);
   }
}
static void add_img_view_surface(struct rendering_state *state,
                                 struct val_image_view *imgv, VkFormat format, int width, int height)
{
   if (!imgv->surface) {
      struct pipe_surface template;

      memset(&template, 0, sizeof(struct pipe_surface));

      template.format = vk_format_to_pipe(format);
      template.width = width;
      template.height = height;
      template.u.tex.first_layer = imgv->subresourceRange.baseArrayLayer;
      template.u.tex.last_layer = imgv->subresourceRange.baseArrayLayer + val_get_layerCount(imgv->image, &imgv->subresourceRange) - 1;
      template.u.tex.level = imgv->subresourceRange.baseMipLevel;

      if (template.format == PIPE_FORMAT_NONE)
         return;
      imgv->surface = state->pctx->create_surface(state->pctx,
                                                  imgv->image->bo, &template);
   }
}
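
/* An attachment still needs its load-op clear while its
 * pending_clear_aspects, set at vkCmdBeginRenderPass time, is non-zero.
 */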
static bool
attachment_needs_clear(struct rendering_state *state,
                       uint32_t a)
{
   return (a != VK_ATTACHMENT_UNUSED &&
           state->attachments[a].pending_clear_aspects);
}

static bool
subpass_needs_clear(struct rendering_state *state)
{
   uint32_t a;
   struct val_subpass *subpass = &state->pass->subpasses[state->subpass];
   for (uint32_t i = 0; i < subpass->color_count; i++) {
      a = subpass->color_attachments[i].attachment;
      if (attachment_needs_clear(state, a))
         return true;
   }
   if (subpass->depth_stencil_attachment) {
      a = subpass->depth_stencil_attachment->attachment;
      if (attachment_needs_clear(state, a))
         return true;
   }
   return false;
}
static void render_subpass_clear(struct rendering_state *state)
{
   struct val_subpass *subpass = &state->pass->subpasses[state->subpass];

   if (!subpass_needs_clear(state))
      return;

   for (unsigned i = 0; i < subpass->color_count; i++) {
      uint32_t a = subpass->color_attachments[i].attachment;

      if (!attachment_needs_clear(state, a))
         continue;

      struct val_render_pass_attachment *att = &state->pass->attachments[a];
      struct val_image_view *imgv = state->vk_framebuffer->attachments[a];

      add_img_view_surface(state, imgv, att->format, state->framebuffer.width, state->framebuffer.height);

      union pipe_color_union color_clear_val = { 0 };
      const VkClearValue value = state->attachments[a].clear_value;
      color_clear_val.ui[0] = value.color.uint32[0];
      color_clear_val.ui[1] = value.color.uint32[1];
      color_clear_val.ui[2] = value.color.uint32[2];
      color_clear_val.ui[3] = value.color.uint32[3];
      state->pctx->clear_render_target(state->pctx,
                                       imgv->surface,
                                       &color_clear_val,
                                       state->render_area.offset.x, state->render_area.offset.y,
                                       state->render_area.extent.width, state->render_area.extent.height,
                                       false);

      state->attachments[a].pending_clear_aspects = 0;
   }

   if (subpass->depth_stencil_attachment) {
      uint32_t ds = subpass->depth_stencil_attachment->attachment;

      if (!attachment_needs_clear(state, ds))
         return;

      struct val_render_pass_attachment *att = &state->pass->attachments[ds];
      struct val_image_view *imgv = state->vk_framebuffer->attachments[ds];

      add_img_view_surface(state, imgv, att->format, state->framebuffer.width, state->framebuffer.height);

      if (util_format_is_depth_or_stencil(imgv->surface->format)) {
         const struct util_format_description *desc = util_format_description(imgv->surface->format);
         double dclear_val = 0;
         uint32_t sclear_val = 0;
         uint32_t ds_clear_flags = 0;

         if (util_format_has_stencil(desc) && att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            ds_clear_flags |= PIPE_CLEAR_STENCIL;
            sclear_val = state->attachments[ds].clear_value.depthStencil.stencil;
         }
         if (util_format_has_depth(desc) && att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            ds_clear_flags |= PIPE_CLEAR_DEPTH;
            dclear_val = state->attachments[ds].clear_value.depthStencil.depth;
         }

         if (ds_clear_flags)
            state->pctx->clear_depth_stencil(state->pctx,
                                             imgv->surface,
                                             ds_clear_flags,
                                             dclear_val, sclear_val,
                                             state->render_area.offset.x, state->render_area.offset.y,
                                             state->render_area.extent.width, state->render_area.extent.height,
                                             false);

         state->attachments[ds].pending_clear_aspects = 0;
      }
   }
}
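
/* At the end of a subpass, resolve each multisampled color attachment
 * into its single-sample resolve target with a gallium blit.
 */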
static void render_pass_resolve(struct rendering_state *state)
{
   struct val_subpass *subpass = &state->pass->subpasses[state->subpass];
   if (!subpass->has_color_resolve)
      return;
   for (uint32_t i = 0; i < subpass->color_count; i++) {
      struct val_subpass_attachment src_att = subpass->color_attachments[i];
      struct val_subpass_attachment dst_att = subpass->resolve_attachments[i];

      if (dst_att.attachment == VK_ATTACHMENT_UNUSED)
         continue;

      struct val_image_view *src_imgv = state->vk_framebuffer->attachments[src_att.attachment];
      struct val_image_view *dst_imgv = state->vk_framebuffer->attachments[dst_att.attachment];

      struct pipe_blit_info info;
      memset(&info, 0, sizeof(info));

      info.src.resource = src_imgv->image->bo;
      info.dst.resource = dst_imgv->image->bo;
      info.src.format = src_imgv->pformat;
      info.dst.format = dst_imgv->pformat;
      info.filter = PIPE_TEX_FILTER_NEAREST;
      info.mask = PIPE_MASK_RGBA;
      info.src.box.x = state->render_area.offset.x;
      info.src.box.y = state->render_area.offset.y;
      info.src.box.width = state->render_area.extent.width;
      info.src.box.height = state->render_area.extent.height;
      info.src.box.depth = state->vk_framebuffer->layers;

      info.dst.box = info.src.box;

      state->pctx->blit(state->pctx, &info);
   }
}
static void begin_render_subpass(struct rendering_state *state,
                                 int subpass_idx)
{
   state->subpass = subpass_idx;

   render_subpass_clear(state);

   state->framebuffer.nr_cbufs = 0;

   struct val_subpass *subpass = &state->pass->subpasses[subpass_idx];
   for (unsigned i = 0; i < subpass->color_count; i++) {
      struct val_subpass_attachment *color_att = &subpass->color_attachments[i];
      if (color_att->attachment != VK_ATTACHMENT_UNUSED) {
         struct val_image_view *imgv = state->vk_framebuffer->attachments[color_att->attachment];

         add_img_view_surface(state, imgv, state->pass->attachments[color_att->attachment].format, state->framebuffer.width, state->framebuffer.height);
         state->framebuffer.cbufs[state->framebuffer.nr_cbufs] = imgv->surface;
      } else
         state->framebuffer.cbufs[state->framebuffer.nr_cbufs] = NULL;
      state->framebuffer.nr_cbufs++;
   }

   if (subpass->depth_stencil_attachment) {
      struct val_subpass_attachment *ds_att = subpass->depth_stencil_attachment;

      if (ds_att->attachment != VK_ATTACHMENT_UNUSED) {
         struct val_image_view *imgv = state->vk_framebuffer->attachments[ds_att->attachment];
         add_img_view_surface(state, imgv, state->pass->attachments[ds_att->attachment].format, state->framebuffer.width, state->framebuffer.height);
         state->framebuffer.zsbuf = imgv->surface;
      }
   }

   state->pctx->set_framebuffer_state(state->pctx,
                                      &state->framebuffer);
}
static void handle_begin_render_pass(struct val_cmd_buffer_entry *cmd,
                                     struct rendering_state *state)
{
   state->pass = cmd->u.begin_render_pass.render_pass;
   state->vk_framebuffer = cmd->u.begin_render_pass.framebuffer;
   state->render_area = cmd->u.begin_render_pass.render_area;

   state->attachments = cmd->u.begin_render_pass.attachments;

   state->framebuffer.width = state->vk_framebuffer->width;
   state->framebuffer.height = state->vk_framebuffer->height;
   state->framebuffer.layers = state->vk_framebuffer->layers;

   begin_render_subpass(state, 0);
}

static void handle_end_render_pass(struct val_cmd_buffer_entry *cmd,
                                   struct rendering_state *state)
{
   state->pctx->flush(state->pctx, NULL, 0);

   render_pass_resolve(state);

   state->attachments = NULL;
   state->pass = NULL;
   state->subpass = 0;
}

static void handle_next_subpass(struct val_cmd_buffer_entry *cmd,
                                struct rendering_state *state)
{
   state->pctx->flush(state->pctx, NULL, 0);
   render_pass_resolve(state);
   state->subpass++;
   begin_render_subpass(state, state->subpass);
}
static void handle_draw(struct val_cmd_buffer_entry *cmd,
                        struct rendering_state *state)
{
   state->info.index_size = 0;
   state->info.indirect = NULL;
   state->info.index.resource = NULL;
   state->info.start = cmd->u.draw.first_vertex;
   state->info.count = cmd->u.draw.vertex_count;
   state->info.start_instance = cmd->u.draw.first_instance;
   state->info.instance_count = cmd->u.draw.instance_count;
   state->pctx->draw_vbo(state->pctx, &state->info);
}
static void handle_set_viewport(struct val_cmd_buffer_entry *cmd,
                                struct rendering_state *state)
{
   int i;

   for (i = 0; i < cmd->u.set_viewport.viewport_count; i++) {
      int idx = i + cmd->u.set_viewport.first_viewport;
      const VkViewport *vp = &cmd->u.set_viewport.viewports[i];
      get_viewport_xform(vp, state->viewports[idx].scale, state->viewports[idx].translate);
   }
   state->vp_dirty = true;
}

static void handle_set_scissor(struct val_cmd_buffer_entry *cmd,
                               struct rendering_state *state)
{
   int i;

   for (i = 0; i < cmd->u.set_scissor.scissor_count; i++) {
      int idx = i + cmd->u.set_scissor.first_scissor;
      const VkRect2D *ss = &cmd->u.set_scissor.scissors[i];
      state->scissors[idx].minx = ss->offset.x;
      state->scissors[idx].miny = ss->offset.y;
      state->scissors[idx].maxx = ss->offset.x + ss->extent.width;
      state->scissors[idx].maxy = ss->offset.y + ss->extent.height;
   }
   state->scissor_dirty = true;
}
static void handle_set_line_width(struct val_cmd_buffer_entry *cmd,
                                  struct rendering_state *state)
{
   state->rs_state.line_width = cmd->u.set_line_width.line_width;
   state->rs_dirty = true;
}

static void handle_set_depth_bias(struct val_cmd_buffer_entry *cmd,
                                  struct rendering_state *state)
{
   state->rs_state.offset_units = cmd->u.set_depth_bias.constant_factor;
   state->rs_state.offset_scale = cmd->u.set_depth_bias.slope_factor;
   state->rs_state.offset_clamp = cmd->u.set_depth_bias.clamp;
   state->rs_dirty = true;
}

static void handle_set_blend_constants(struct val_cmd_buffer_entry *cmd,
                                       struct rendering_state *state)
{
   memcpy(state->blend_color.color, cmd->u.set_blend_constants.blend_constants, 4 * sizeof(float));
   state->blend_color_dirty = true;
}

static void handle_set_depth_bounds(struct val_cmd_buffer_entry *cmd,
                                    struct rendering_state *state)
{
   state->dsa_state.depth.bounds_min = cmd->u.set_depth_bounds.min_depth;
   state->dsa_state.depth.bounds_max = cmd->u.set_depth_bounds.max_depth;
   state->dsa_dirty = true;
}

static void handle_set_stencil_compare_mask(struct val_cmd_buffer_entry *cmd,
                                            struct rendering_state *state)
{
   if (cmd->u.stencil_vals.face_mask & VK_STENCIL_FACE_FRONT_BIT)
      state->dsa_state.stencil[0].valuemask = cmd->u.stencil_vals.value;
   if (cmd->u.stencil_vals.face_mask & VK_STENCIL_FACE_BACK_BIT)
      state->dsa_state.stencil[1].valuemask = cmd->u.stencil_vals.value;
   state->dsa_dirty = true;
}

static void handle_set_stencil_write_mask(struct val_cmd_buffer_entry *cmd,
                                          struct rendering_state *state)
{
   if (cmd->u.stencil_vals.face_mask & VK_STENCIL_FACE_FRONT_BIT)
      state->dsa_state.stencil[0].writemask = cmd->u.stencil_vals.value;
   if (cmd->u.stencil_vals.face_mask & VK_STENCIL_FACE_BACK_BIT)
      state->dsa_state.stencil[1].writemask = cmd->u.stencil_vals.value;
   state->dsa_dirty = true;
}

static void handle_set_stencil_reference(struct val_cmd_buffer_entry *cmd,
                                         struct rendering_state *state)
{
   if (cmd->u.stencil_vals.face_mask & VK_STENCIL_FACE_FRONT_BIT)
      state->stencil_ref.ref_value[0] = cmd->u.stencil_vals.value;
   if (cmd->u.stencil_vals.face_mask & VK_STENCIL_FACE_BACK_BIT)
      state->stencil_ref.ref_value[1] = cmd->u.stencil_vals.value;
   state->stencil_ref_dirty = true;
}
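
/* CPU copy of a depth/stencil rectangle between pipe formats.  The
 * buffer side of a Vulkan depth/stencil image copy uses a tightly
 * packed single-aspect layout, so the u_format_zs pack/unpack helpers
 * are used to split or merge the Z and S data.
 */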
static void
copy_depth_rect(ubyte * dst,
                enum pipe_format dst_format,
                unsigned dst_stride,
                unsigned dst_x, unsigned dst_y,
                unsigned width, unsigned height,
                const ubyte * src,
                enum pipe_format src_format,
                int src_stride,
                unsigned src_x, unsigned src_y)
{
   int src_stride_pos = src_stride < 0 ? -src_stride : src_stride;
   int src_blocksize = util_format_get_blocksize(src_format);
   int src_blockwidth = util_format_get_blockwidth(src_format);
   int src_blockheight = util_format_get_blockheight(src_format);
   int dst_blocksize = util_format_get_blocksize(dst_format);
   int dst_blockwidth = util_format_get_blockwidth(dst_format);
   int dst_blockheight = util_format_get_blockheight(dst_format);

   assert(src_blocksize > 0);
   assert(src_blockwidth > 0);
   assert(src_blockheight > 0);

   dst_x /= dst_blockwidth;
   dst_y /= dst_blockheight;
   width = (width + src_blockwidth - 1)/src_blockwidth;
   height = (height + src_blockheight - 1)/src_blockheight;
   src_x /= src_blockwidth;
   src_y /= src_blockheight;

   dst += dst_x * dst_blocksize;
   src += src_x * src_blocksize;
   dst += dst_y * dst_stride;
   src += src_y * src_stride_pos;

   if (dst_format == PIPE_FORMAT_S8_UINT) {
      if (src_format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
         util_format_z32_float_s8x24_uint_unpack_s_8uint(dst, dst_stride,
                                                         src, src_stride,
                                                         width, height);
      } else if (src_format == PIPE_FORMAT_Z24_UNORM_S8_UINT) {
         util_format_z24_unorm_s8_uint_unpack_s_8uint(dst, dst_stride,
                                                      src, src_stride,
                                                      width, height);
      }
   } else if (dst_format == PIPE_FORMAT_Z24X8_UNORM) {
      util_format_z24_unorm_s8_uint_unpack_z24(dst, dst_stride,
                                               src, src_stride,
                                               width, height);
   } else if (dst_format == PIPE_FORMAT_Z32_FLOAT) {
      if (src_format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
         util_format_z32_float_s8x24_uint_unpack_z_float((float *)dst, dst_stride,
                                                         src, src_stride,
                                                         width, height);
      }
   } else if (dst_format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
      if (src_format == PIPE_FORMAT_Z32_FLOAT)
         util_format_z32_float_s8x24_uint_pack_z_float(dst, dst_stride,
                                                       (float *)src, src_stride,
                                                       width, height);
      else if (src_format == PIPE_FORMAT_S8_UINT)
         util_format_z32_float_s8x24_uint_pack_s_8uint(dst, dst_stride,
                                                       src, src_stride,
                                                       width, height);
   } else if (dst_format == PIPE_FORMAT_Z24_UNORM_S8_UINT) {
      if (src_format == PIPE_FORMAT_S8_UINT)
         util_format_z24_unorm_s8_uint_pack_s_8uint(dst, dst_stride,
                                                    src, src_stride,
                                                    width, height);
      if (src_format == PIPE_FORMAT_Z24X8_UNORM)
         util_format_z24_unorm_s8_uint_pack_z24(dst, dst_stride,
                                                src, src_stride,
                                                width, height);
   }
}

static void
copy_depth_box(ubyte *dst,
               enum pipe_format dst_format,
               unsigned dst_stride, unsigned dst_slice_stride,
               unsigned dst_x, unsigned dst_y, unsigned dst_z,
               unsigned width, unsigned height, unsigned depth,
               const ubyte * src,
               enum pipe_format src_format,
               int src_stride, unsigned src_slice_stride,
               unsigned src_x, unsigned src_y, unsigned src_z)
{
   unsigned z;
   dst += dst_z * dst_slice_stride;
   src += src_z * src_slice_stride;
   for (z = 0; z < depth; ++z) {
      copy_depth_rect(dst,
                      dst_format,
                      dst_stride,
                      dst_x, dst_y,
                      width, height,
                      src,
                      src_format,
                      src_stride,
                      src_x, src_y);

      dst += dst_slice_stride;
      src += src_slice_stride;
   }
}
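
/* vkCmdCopyImageToBuffer: flush pending rendering, then map both
 * resources and copy region by region on the CPU.  Depth/stencil images
 * take the copy_depth_box() path so only the requested aspect is
 * written to the buffer.
 */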
static void handle_copy_image_to_buffer(struct val_cmd_buffer_entry *cmd,
                                        struct rendering_state *state)
{
   int i;
   struct val_cmd_copy_image_to_buffer *copycmd = &cmd->u.img_to_buffer;
   struct pipe_box box, dbox;
   struct pipe_transfer *src_t, *dst_t;
   ubyte *src_data, *dst_data;

   state->pctx->flush(state->pctx, NULL, 0);

   for (i = 0; i < copycmd->region_count; i++) {

      box.x = copycmd->regions[i].imageOffset.x;
      box.y = copycmd->regions[i].imageOffset.y;
      box.z = copycmd->src->type == VK_IMAGE_TYPE_3D ? copycmd->regions[i].imageOffset.z : copycmd->regions[i].imageSubresource.baseArrayLayer;
      box.width = copycmd->regions[i].imageExtent.width;
      box.height = copycmd->regions[i].imageExtent.height;
      box.depth = copycmd->src->type == VK_IMAGE_TYPE_3D ? copycmd->regions[i].imageExtent.depth : copycmd->regions[i].imageSubresource.layerCount;

      src_data = state->pctx->transfer_map(state->pctx,
                                           copycmd->src->bo,
                                           copycmd->regions[i].imageSubresource.mipLevel,
                                           PIPE_TRANSFER_READ,
                                           &box,
                                           &src_t);

      dbox.x = copycmd->regions[i].bufferOffset;
      dbox.y = 0;
      dbox.z = 0;
      dbox.width = copycmd->dst->bo->width0;
      dbox.height = 1;
      dbox.depth = 1;
      dst_data = state->pctx->transfer_map(state->pctx,
                                           copycmd->dst->bo,
                                           0,
                                           PIPE_TRANSFER_WRITE,
                                           &dbox,
                                           &dst_t);

      enum pipe_format src_format = copycmd->src->bo->format;
      enum pipe_format dst_format = src_format;
      if (util_format_is_depth_or_stencil(src_format)) {
         if (copycmd->regions[i].imageSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) {
            dst_format = util_format_get_depth_only(src_format);
         } else if (copycmd->regions[i].imageSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) {
            dst_format = PIPE_FORMAT_S8_UINT;
         }
      }

      unsigned buffer_row_len = util_format_get_stride(dst_format, copycmd->regions[i].bufferRowLength);
      if (buffer_row_len == 0)
         buffer_row_len = util_format_get_stride(dst_format, copycmd->regions[i].imageExtent.width);
      unsigned buffer_image_height = copycmd->regions[i].bufferImageHeight;
      if (buffer_image_height == 0)
         buffer_image_height = copycmd->regions[i].imageExtent.height;

      if (src_format != dst_format) {
         copy_depth_box(dst_data, dst_format,
                        buffer_row_len, buffer_row_len * buffer_image_height,
                        0, 0, 0,
                        copycmd->regions[i].imageExtent.width,
                        copycmd->regions[i].imageExtent.height,
                        box.depth,
                        src_data, src_format, src_t->stride, src_t->layer_stride, 0, 0, 0);
      } else {
         util_copy_box((ubyte *)dst_data, src_format,
                       buffer_row_len, buffer_row_len * buffer_image_height,
                       0, 0, 0,
                       copycmd->regions[i].imageExtent.width,
                       copycmd->regions[i].imageExtent.height,
                       box.depth,
                       src_data, src_t->stride, src_t->layer_stride, 0, 0, 0);
      }
      state->pctx->transfer_unmap(state->pctx, src_t);
      state->pctx->transfer_unmap(state->pctx, dst_t);
   }
}
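
/* vkCmdCopyBufferToImage: the mirror of the above -- map the source
 * buffer at bufferOffset and write into the destination image level,
 * repacking through copy_depth_box() when a single aspect of a combined
 * depth/stencil format is written.
 */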
static void handle_copy_buffer_to_image(struct val_cmd_buffer_entry *cmd,
                                        struct rendering_state *state)
{
   int i;
   struct val_cmd_copy_buffer_to_image *copycmd = &cmd->u.buffer_to_img;
   struct pipe_box box, sbox;
   struct pipe_transfer *src_t, *dst_t;
   void *src_data, *dst_data;

   state->pctx->flush(state->pctx, NULL, 0);

   for (i = 0; i < copycmd->region_count; i++) {

      sbox.x = copycmd->regions[i].bufferOffset;
      sbox.y = 0;
      sbox.z = 0;
      sbox.width = copycmd->src->bo->width0;
      sbox.height = 1;
      sbox.depth = 1;
      src_data = state->pctx->transfer_map(state->pctx,
                                           copycmd->src->bo,
                                           0,
                                           PIPE_TRANSFER_READ,
                                           &sbox,
                                           &src_t);

      box.x = copycmd->regions[i].imageOffset.x;
      box.y = copycmd->regions[i].imageOffset.y;
      box.z = copycmd->dst->type == VK_IMAGE_TYPE_3D ? copycmd->regions[i].imageOffset.z : copycmd->regions[i].imageSubresource.baseArrayLayer;
      box.width = copycmd->regions[i].imageExtent.width;
      box.height = copycmd->regions[i].imageExtent.height;
      box.depth = copycmd->dst->type == VK_IMAGE_TYPE_3D ? copycmd->regions[i].imageExtent.depth : copycmd->regions[i].imageSubresource.layerCount;

      dst_data = state->pctx->transfer_map(state->pctx,
                                           copycmd->dst->bo,
                                           copycmd->regions[i].imageSubresource.mipLevel,
                                           PIPE_TRANSFER_WRITE,
                                           &box,
                                           &dst_t);

      enum pipe_format dst_format = copycmd->dst->bo->format;
      enum pipe_format src_format = dst_format;
      if (util_format_is_depth_or_stencil(dst_format)) {
         if (copycmd->regions[i].imageSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) {
            src_format = util_format_get_depth_only(copycmd->dst->bo->format);
         } else if (copycmd->regions[i].imageSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) {
            src_format = PIPE_FORMAT_S8_UINT;
         }
      }

      unsigned buffer_row_len = util_format_get_stride(src_format, copycmd->regions[i].bufferRowLength);
      if (buffer_row_len == 0)
         buffer_row_len = util_format_get_stride(src_format, copycmd->regions[i].imageExtent.width);
      unsigned buffer_image_height = copycmd->regions[i].bufferImageHeight;
      if (buffer_image_height == 0)
         buffer_image_height = copycmd->regions[i].imageExtent.height;

      if (src_format != dst_format) {
         copy_depth_box(dst_data, dst_format,
                        dst_t->stride, dst_t->layer_stride,
                        0, 0, 0,
                        copycmd->regions[i].imageExtent.width,
                        copycmd->regions[i].imageExtent.height,
                        box.depth,
                        src_data, src_format,
                        buffer_row_len, buffer_row_len * buffer_image_height, 0, 0, 0);
      } else {
         util_copy_box(dst_data, dst_format,
                       dst_t->stride, dst_t->layer_stride,
                       0, 0, 0,
                       copycmd->regions[i].imageExtent.width,
                       copycmd->regions[i].imageExtent.height,
                       box.depth,
                       src_data,
                       buffer_row_len, buffer_row_len * buffer_image_height, 0, 0, 0);
      }
      state->pctx->transfer_unmap(state->pctx, src_t);
      state->pctx->transfer_unmap(state->pctx, dst_t);
   }
}
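
/* vkCmdCopyImage maps directly onto gallium's resource_copy_region();
 * array layers are folded into the z dimension of the box.
 */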
static void handle_copy_image(struct val_cmd_buffer_entry *cmd,
                              struct rendering_state *state)
{
   int i;
   struct val_cmd_copy_image *copycmd = &cmd->u.copy_image;

   state->pctx->flush(state->pctx, NULL, 0);

   for (i = 0; i < copycmd->region_count; i++) {
      struct pipe_box src_box;
      src_box.x = copycmd->regions[i].srcOffset.x;
      src_box.y = copycmd->regions[i].srcOffset.y;
      src_box.z = copycmd->regions[i].srcOffset.z + copycmd->regions[i].srcSubresource.baseArrayLayer;
      src_box.width = copycmd->regions[i].extent.width;
      src_box.height = copycmd->regions[i].extent.height;
      src_box.depth = copycmd->regions[i].extent.depth;

      state->pctx->resource_copy_region(state->pctx, copycmd->dst->bo,
                                        copycmd->regions[i].dstSubresource.mipLevel,
                                        copycmd->regions[i].dstOffset.x,
                                        copycmd->regions[i].dstOffset.y,
                                        copycmd->regions[i].dstOffset.z + copycmd->regions[i].dstSubresource.baseArrayLayer,
                                        copycmd->src->bo,
                                        copycmd->regions[i].srcSubresource.mipLevel,
                                        &src_box);
   }
}
static void handle_copy_buffer(struct val_cmd_buffer_entry *cmd,
                               struct rendering_state *state)
{
   int i;
   struct val_cmd_copy_buffer *copycmd = &cmd->u.copy_buffer;

   for (i = 0; i < copycmd->region_count; i++) {
      struct pipe_box box = { 0 };
      u_box_1d(copycmd->regions[i].srcOffset, copycmd->regions[i].size, &box);
      state->pctx->resource_copy_region(state->pctx, copycmd->dst->bo, 0,
                                        copycmd->regions[i].dstOffset, 0, 0,
                                        copycmd->src->bo, 0, &box);
   }
}
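
/* vkCmdBlitImage: translate each VkImageBlit into a pipe_blit_info.
 * Vulkan expresses flips with reversed offsets; the destination box is
 * normalized to a positive extent here, so any flip ends up encoded as
 * a negative source box width/height.
 */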
static void handle_blit_image(struct val_cmd_buffer_entry *cmd,
                              struct rendering_state *state)
{
   int i;
   struct val_cmd_blit_image *blitcmd = &cmd->u.blit_image;
   struct pipe_blit_info info;

   memset(&info, 0, sizeof(info));

   state->pctx->flush(state->pctx, NULL, 0);
   info.src.resource = blitcmd->src->bo;
   info.dst.resource = blitcmd->dst->bo;
   info.src.format = blitcmd->src->bo->format;
   info.dst.format = blitcmd->dst->bo->format;
   info.mask = util_format_is_depth_or_stencil(info.src.format) ? PIPE_MASK_ZS : PIPE_MASK_RGBA;
   info.filter = blitcmd->filter == VK_FILTER_NEAREST ? PIPE_TEX_FILTER_NEAREST : PIPE_TEX_FILTER_LINEAR;
   for (i = 0; i < blitcmd->region_count; i++) {
      int srcX0, srcX1, srcY0, srcY1;
      unsigned dstX0, dstX1, dstY0, dstY1;

      srcX0 = blitcmd->regions[i].srcOffsets[0].x;
      srcX1 = blitcmd->regions[i].srcOffsets[1].x;
      srcY0 = blitcmd->regions[i].srcOffsets[0].y;
      srcY1 = blitcmd->regions[i].srcOffsets[1].y;

      dstX0 = blitcmd->regions[i].dstOffsets[0].x;
      dstX1 = blitcmd->regions[i].dstOffsets[1].x;
      dstY0 = blitcmd->regions[i].dstOffsets[0].y;
      dstY1 = blitcmd->regions[i].dstOffsets[1].y;

      if (dstX0 < dstX1) {
         info.dst.box.x = dstX0;
         info.src.box.x = srcX0;
         info.dst.box.width = dstX1 - dstX0;
         info.src.box.width = srcX1 - srcX0;
      } else {
         info.dst.box.x = dstX1;
         info.src.box.x = srcX1;
         info.dst.box.width = dstX0 - dstX1;
         info.src.box.width = srcX0 - srcX1;
      }

      if (dstY0 < dstY1) {
         info.dst.box.y = dstY0;
         info.src.box.y = srcY0;
         info.dst.box.height = dstY1 - dstY0;
         info.src.box.height = srcY1 - srcY0;
      } else {
         info.dst.box.y = dstY1;
         info.src.box.y = srcY1;
         info.dst.box.height = dstY0 - dstY1;
         info.src.box.height = srcY0 - srcY1;
      }
      info.src.level = blitcmd->regions[i].srcSubresource.mipLevel;
      info.src.box.z = blitcmd->regions[i].srcOffsets[0].z + blitcmd->regions[i].srcSubresource.baseArrayLayer;
      if (blitcmd->src->bo->target == PIPE_TEXTURE_3D)
         info.src.box.depth = blitcmd->regions[i].srcOffsets[1].z - blitcmd->regions[i].srcOffsets[0].z;
      else
         info.src.box.depth = blitcmd->regions[i].srcSubresource.layerCount;

      info.dst.level = blitcmd->regions[i].dstSubresource.mipLevel;
      info.dst.box.z = blitcmd->regions[i].dstOffsets[0].z + blitcmd->regions[i].dstSubresource.baseArrayLayer;
      if (blitcmd->dst->bo->target == PIPE_TEXTURE_3D)
         info.dst.box.depth = blitcmd->regions[i].dstOffsets[1].z - blitcmd->regions[i].dstOffsets[0].z;
      else
         info.dst.box.depth = blitcmd->regions[i].dstSubresource.layerCount;
      state->pctx->blit(state->pctx, &info);
   }
}
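
/* vkCmdFillBuffer: map the range and store the 32-bit pattern;
 * VK_WHOLE_SIZE is resolved against the buffer width here.
 */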
static void handle_fill_buffer(struct val_cmd_buffer_entry *cmd,
                               struct rendering_state *state)
{
   struct val_cmd_fill_buffer *fillcmd = &cmd->u.fill_buffer;
   uint32_t *dst;
   struct pipe_transfer *dst_t;
   struct pipe_box box;
   uint32_t size = fillcmd->fill_size;

   if (fillcmd->fill_size == VK_WHOLE_SIZE)
      size = fillcmd->buffer->bo->width0 - fillcmd->offset;

   u_box_1d(fillcmd->offset, size, &box);
   dst = state->pctx->transfer_map(state->pctx,
                                   fillcmd->buffer->bo,
                                   0,
                                   PIPE_TRANSFER_WRITE,
                                   &box,
                                   &dst_t);

   for (unsigned i = 0; i < size / 4; i++)
      dst[i] = fillcmd->data;
   state->pctx->transfer_unmap(state->pctx, dst_t);
}
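
/* vkCmdUpdateBuffer: the data was recorded into the command entry, so a
 * simple transfer_map + memcpy replays it.
 */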
static void handle_update_buffer(struct val_cmd_buffer_entry *cmd,
                                 struct rendering_state *state)
{
   struct val_cmd_update_buffer *updcmd = &cmd->u.update_buffer;
   uint32_t *dst;
   struct pipe_transfer *dst_t;
   struct pipe_box box;

   u_box_1d(updcmd->offset, updcmd->data_size, &box);
   dst = state->pctx->transfer_map(state->pctx,
                                   updcmd->buffer->bo,
                                   0,
                                   PIPE_TRANSFER_WRITE,
                                   &box,
                                   &dst_t);
   memcpy(dst, updcmd->data, updcmd->data_size);
   state->pctx->transfer_unmap(state->pctx, dst_t);
}
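
/* vkCmdDrawIndexed: gallium has no separate index-buffer offset, so the
 * bound offset is folded into the start index (which assumes the offset
 * is a multiple of the index size).  Vulkan implies the restart index
 * from the index type, so it is picked explicitly here.
 */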
static void handle_draw_indexed(struct val_cmd_buffer_entry *cmd,
                                struct rendering_state *state)
{
   state->info.indirect = NULL;
   state->info.min_index = 0;
   state->info.max_index = ~0;
   state->info.index_size = state->index_size;
   state->info.index.resource = state->index_buffer;
   state->info.start = (state->index_offset / state->index_size) + cmd->u.draw_indexed.first_index;
   state->info.count = cmd->u.draw_indexed.index_count;
   state->info.start_instance = cmd->u.draw_indexed.first_instance;
   state->info.instance_count = cmd->u.draw_indexed.instance_count;
   state->info.index_bias = cmd->u.draw_indexed.vertex_offset;

   if (state->info.primitive_restart) {
      if (state->info.index_size == 4)
         state->info.restart_index = 0xffffffff;
      else
         state->info.restart_index = 0xffff;
   }

   state->pctx->draw_vbo(state->pctx, &state->info);
}
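
/* Indirect draws: point gallium at the indirect buffer; the indexed flag
 * decides whether the current index buffer is used.
 */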
static void handle_draw_indirect(struct val_cmd_buffer_entry *cmd,
                                 struct rendering_state *state, bool indexed)
{
   if (indexed) {
      state->info.index_size = state->index_size;
      state->info.index.resource = state->index_buffer;
      state->info.max_index = ~0;
   } else
      state->info.index_size = 0;
   state->indirect_info.offset = cmd->u.draw_indirect.offset;
   state->indirect_info.stride = cmd->u.draw_indirect.stride;
   state->indirect_info.draw_count = cmd->u.draw_indirect.draw_count;
   state->indirect_info.buffer = cmd->u.draw_indirect.buffer->bo;
   state->info.indirect = &state->indirect_info;
   state->pctx->draw_vbo(state->pctx, &state->info);
}
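
/* vkCmdBindIndexBuffer: only the index size, offset and buffer are
 * latched; the actual pipe state is consumed at draw time.
 */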
static void handle_index_buffer(struct val_cmd_buffer_entry *cmd,
                                struct rendering_state *state)
{
   struct val_cmd_bind_index_buffer *ib = &cmd->u.index_buffer;
   switch (ib->index_type) {
   case VK_INDEX_TYPE_UINT16:
      state->index_size = 2;
      break;
   case VK_INDEX_TYPE_UINT32:
      state->index_size = 4;
      break;
   default:
      break;
   }
   state->index_offset = ib->offset;
   if (ib->buffer)
      state->index_buffer = ib->buffer->bo;
   else
      state->index_buffer = NULL;

   state->ib_dirty = true;
}
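
/* vkCmdDispatch / vkCmdDispatchIndirect map onto launch_grid(). */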
static void handle_dispatch(struct val_cmd_buffer_entry *cmd,
                            struct rendering_state *state)
{
   state->dispatch_info.grid[0] = cmd->u.dispatch.x;
   state->dispatch_info.grid[1] = cmd->u.dispatch.y;
   state->dispatch_info.grid[2] = cmd->u.dispatch.z;
   state->dispatch_info.indirect = NULL;
   state->pctx->launch_grid(state->pctx, &state->dispatch_info);
}
static void handle_dispatch_indirect(struct val_cmd_buffer_entry *cmd,
                                     struct rendering_state *state)
{
   state->dispatch_info.indirect = cmd->u.dispatch_indirect.buffer->bo;
   state->dispatch_info.indirect_offset = cmd->u.dispatch_indirect.offset;
   state->pctx->launch_grid(state->pctx, &state->dispatch_info);
}
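
/* vkCmdPushConstants: update the shadow copy in the state, then expose
 * it to every shader stage as a user constant buffer spanning the whole
 * push-constant storage (128 * 4 bytes).
 */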
static void handle_push_constants(struct val_cmd_buffer_entry *cmd,
                                  struct rendering_state *state)
{
   memcpy(state->push_constants + cmd->u.push_constants.offset, cmd->u.push_constants.val, cmd->u.push_constants.size);

   state->pc_buffer[PIPE_SHADER_VERTEX].buffer_size = 128 * 4;
   state->pc_buffer[PIPE_SHADER_VERTEX].buffer_offset = 0;
   state->pc_buffer[PIPE_SHADER_VERTEX].user_buffer = state->push_constants;
   state->pcbuf_dirty[PIPE_SHADER_VERTEX] = true;
   state->pc_buffer[PIPE_SHADER_FRAGMENT].buffer_size = 128 * 4;
   state->pc_buffer[PIPE_SHADER_FRAGMENT].buffer_offset = 0;
   state->pc_buffer[PIPE_SHADER_FRAGMENT].user_buffer = state->push_constants;
   state->pcbuf_dirty[PIPE_SHADER_FRAGMENT] = true;
   state->pc_buffer[PIPE_SHADER_GEOMETRY].buffer_size = 128 * 4;
   state->pc_buffer[PIPE_SHADER_GEOMETRY].buffer_offset = 0;
   state->pc_buffer[PIPE_SHADER_GEOMETRY].user_buffer = state->push_constants;
   state->pcbuf_dirty[PIPE_SHADER_GEOMETRY] = true;
   state->pc_buffer[PIPE_SHADER_TESS_CTRL].buffer_size = 128 * 4;
   state->pc_buffer[PIPE_SHADER_TESS_CTRL].buffer_offset = 0;
   state->pc_buffer[PIPE_SHADER_TESS_CTRL].user_buffer = state->push_constants;
   state->pcbuf_dirty[PIPE_SHADER_TESS_CTRL] = true;
   state->pc_buffer[PIPE_SHADER_TESS_EVAL].buffer_size = 128 * 4;
   state->pc_buffer[PIPE_SHADER_TESS_EVAL].buffer_offset = 0;
   state->pc_buffer[PIPE_SHADER_TESS_EVAL].user_buffer = state->push_constants;
   state->pcbuf_dirty[PIPE_SHADER_TESS_EVAL] = true;
   state->pc_buffer[PIPE_SHADER_COMPUTE].buffer_size = 128 * 4;
   state->pc_buffer[PIPE_SHADER_COMPUTE].buffer_offset = 0;
   state->pc_buffer[PIPE_SHADER_COMPUTE].user_buffer = state->push_constants;
   state->pcbuf_dirty[PIPE_SHADER_COMPUTE] = true;
}
static void val_execute_cmd_buffer(struct val_cmd_buffer *cmd_buffer,
                                   struct rendering_state *state);

static void handle_execute_commands(struct val_cmd_buffer_entry *cmd,
                                    struct rendering_state *state)
{
   for (unsigned i = 0; i < cmd->u.execute_commands.command_buffer_count; i++) {
      struct val_cmd_buffer *secondary_buf = cmd->u.execute_commands.cmd_buffers[i];
      val_execute_cmd_buffer(secondary_buf, state);
   }
}
static void handle_event_set(struct val_cmd_buffer_entry *cmd,
                             struct rendering_state *state)
{
   struct val_event *event = cmd->u.event_set.event;

   if (cmd->u.event_set.flush)
      state->pctx->flush(state->pctx, NULL, 0);
   event->event_storage = cmd->u.event_set.value ? 1 : 0;
}
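
/* vkCmdWaitEvents: events are plain shared memory here, so waiting is a
 * busy-loop on the event storage.
 */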
static void handle_wait_events(struct val_cmd_buffer_entry *cmd,
                               struct rendering_state *state)
{
   for (unsigned i = 0; i < cmd->u.wait_events.event_count; i++) {
      struct val_event *event = cmd->u.wait_events.events[i];

      while (event->event_storage != true);
   }
}
static void handle_pipeline_barrier(struct val_cmd_buffer_entry *cmd,
                                    struct rendering_state *state)
{
   /* why hello nail, I'm a hammer. - TODO */
   state->pctx->flush(state->pctx, NULL, 0);
}
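
/* Queries are created lazily on first use.  A non-precise occlusion
 * query can be demoted to the cheaper boolean occlusion predicate.
 */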
static void handle_begin_query(struct val_cmd_buffer_entry *cmd,
                               struct rendering_state *state)
{
   struct val_cmd_query_cmd *qcmd = &cmd->u.query;
   struct val_query_pool *pool = qcmd->pool;

   if (!pool->queries[qcmd->query]) {
      enum pipe_query_type qtype = pool->base_type;
      if (qtype == PIPE_QUERY_OCCLUSION_COUNTER && !qcmd->precise)
         qtype = PIPE_QUERY_OCCLUSION_PREDICATE;
      pool->queries[qcmd->query] = state->pctx->create_query(state->pctx,
                                                             qtype, qcmd->index);
   }

   state->pctx->begin_query(state->pctx, pool->queries[qcmd->query]);
}
static void handle_end_query(struct val_cmd_buffer_entry *cmd,
                             struct rendering_state *state)
{
   struct val_cmd_query_cmd *qcmd = &cmd->u.query;
   struct val_query_pool *pool = qcmd->pool;
   assert(pool->queries[qcmd->query]);

   state->pctx->end_query(state->pctx, pool->queries[qcmd->query]);
}
static void handle_reset_query_pool(struct val_cmd_buffer_entry *cmd,
                                    struct rendering_state *state)
{
   struct val_cmd_query_cmd *qcmd = &cmd->u.query;
   struct val_query_pool *pool = qcmd->pool;
   for (unsigned i = qcmd->query; i < qcmd->query + qcmd->index; i++) {
      if (pool->queries[i]) {
         state->pctx->destroy_query(state->pctx, pool->queries[i]);
         pool->queries[i] = NULL;
      }
   }
}
static void handle_write_timestamp(struct val_cmd_buffer_entry *cmd,
                                   struct rendering_state *state)
{
   struct val_cmd_query_cmd *qcmd = &cmd->u.query;
   struct val_query_pool *pool = qcmd->pool;
   if (!pool->queries[qcmd->query]) {
      pool->queries[qcmd->query] = state->pctx->create_query(state->pctx,
                                                             PIPE_QUERY_TIMESTAMP, 0);
   }

   state->pctx->flush(state->pctx, NULL, 0);
   state->pctx->end_query(state->pctx, pool->queries[qcmd->query]);
}
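
/* vkCmdCopyQueryPoolResults: a result_index of -1 asks gallium for the
 * availability word, which is written after the result itself (8 or 4
 * bytes in, depending on VK_QUERY_RESULT_64_BIT).  Queries that were
 * never begun have no pipe query object; their destination range is
 * zeroed instead so availability reads back as 0.
 */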
static void handle_copy_query_pool_results(struct val_cmd_buffer_entry *cmd,
                                           struct rendering_state *state)
{
   struct val_cmd_copy_query_pool_results *copycmd = &cmd->u.copy_query_pool_results;
   struct val_query_pool *pool = copycmd->pool;

   for (unsigned i = copycmd->first_query; i < copycmd->first_query + copycmd->query_count; i++) {
      unsigned offset = copycmd->dst->offset + (copycmd->stride * (i - copycmd->first_query));
      if (pool->queries[i]) {
         if (copycmd->flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
            state->pctx->get_query_result_resource(state->pctx,
                                                   pool->queries[i],
                                                   copycmd->flags & VK_QUERY_RESULT_WAIT_BIT,
                                                   copycmd->flags & VK_QUERY_RESULT_64_BIT ? PIPE_QUERY_TYPE_U64 : PIPE_QUERY_TYPE_U32,
                                                   -1,
                                                   copycmd->dst->bo,
                                                   offset + (copycmd->flags & VK_QUERY_RESULT_64_BIT ? 8 : 4));
         state->pctx->get_query_result_resource(state->pctx,
                                                pool->queries[i],
                                                copycmd->flags & VK_QUERY_RESULT_WAIT_BIT,
                                                copycmd->flags & VK_QUERY_RESULT_64_BIT ? PIPE_QUERY_TYPE_U64 : PIPE_QUERY_TYPE_U32,
                                                0,
                                                copycmd->dst->bo,
                                                offset);
      } else {
         /* if no queries emitted yet, just reset the buffer to 0 so avail is reported correctly */
         if (copycmd->flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
            struct pipe_transfer *src_t;
            uint32_t *map;

            struct pipe_box box = {};
            box.width = copycmd->stride * copycmd->query_count;
            box.height = 1;
            box.depth = 1;
            map = state->pctx->transfer_map(state->pctx,
                                            copycmd->dst->bo, 0, PIPE_TRANSFER_READ, &box,
                                            &src_t);

            memset(map, 0, box.width);
            state->pctx->transfer_unmap(state->pctx, src_t);
         }
      }
   }
}
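
/* Pack a VkClearColorValue into the uint32[4] layout clear_texture()
 * expects: 32-bit channels are copied through, narrower pure-integer
 * channels are packed by shift, and anything else goes through
 * util_pack_color().
 */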
static void pack_clear_color(enum pipe_format pformat, VkClearColorValue *in_val, uint32_t col_val[4])
{
   const struct util_format_description *desc = util_format_description(pformat);
   col_val[0] = col_val[1] = col_val[2] = col_val[3] = 0;
   for (unsigned c = 0; c < 4; c++) {
      if (desc->swizzle[c] >= 4)
         continue;
      const struct util_format_channel_description *channel = &desc->channel[desc->swizzle[c]];
      if (channel->size == 32) {
         col_val[c] = in_val->uint32[c];
         continue;
      }
      if (channel->pure_integer) {
         uint64_t v = in_val->uint32[c] & ((1u << channel->size) - 1);
         switch (channel->size) {
         case 8:
            col_val[0] |= (v << channel->shift);
            break;
         case 16:
            col_val[c / 2] |= (v << (16 * (c % 2)));
            break;
         }
      } else {
         util_pack_color(in_val->float32, pformat, (union util_color *)col_val);
         break;
      }
   }
}
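
/* vkCmdClearColorImage: clear each mip level of each requested range
 * with clear_texture(); 1D arrays keep layers in y, everything else
 * puts them in z.
 */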
static void handle_clear_color_image(struct val_cmd_buffer_entry *cmd,
                                     struct rendering_state *state)
{
   struct val_image *image = cmd->u.clear_color_image.image;
   uint32_t col_val[4];
   pack_clear_color(image->bo->format, &cmd->u.clear_color_image.clear_val, col_val);
   for (unsigned i = 0; i < cmd->u.clear_color_image.range_count; i++) {
      VkImageSubresourceRange *range = &cmd->u.clear_color_image.ranges[i];
      struct pipe_box box;
      box.x = 0;
      box.y = 0;
      box.z = 0;

      uint32_t level_count = val_get_levelCount(image, range);
      for (unsigned j = range->baseMipLevel; j < range->baseMipLevel + level_count; j++) {
         box.width = u_minify(image->bo->width0, j);
         box.height = u_minify(image->bo->height0, j);
         box.depth = 1;
         if (image->bo->target == PIPE_TEXTURE_3D)
            box.depth = u_minify(image->bo->depth0, j);
         else if (image->bo->target == PIPE_TEXTURE_1D_ARRAY) {
            box.y = range->baseArrayLayer;
            box.height = val_get_layerCount(image, range);
            box.depth = 1;
         } else {
            box.z = range->baseArrayLayer;
            box.depth = val_get_layerCount(image, range);
         }

         state->pctx->clear_texture(state->pctx, image->bo,
                                    j, &box, (void *)col_val);
      }
   }
}
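
/* vkCmdClearDepthStencilImage: same mip/layer walk as the color path,
 * using the packed Z/S value from util_pack64_z_stencil().
 */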
static void handle_clear_ds_image(struct val_cmd_buffer_entry *cmd,
                                  struct rendering_state *state)
{
   struct val_image *image = cmd->u.clear_ds_image.image;
   uint64_t col_val;
   col_val = util_pack64_z_stencil(image->bo->format, cmd->u.clear_ds_image.clear_val.depth, cmd->u.clear_ds_image.clear_val.stencil);
   for (unsigned i = 0; i < cmd->u.clear_ds_image.range_count; i++) {
      VkImageSubresourceRange *range = &cmd->u.clear_ds_image.ranges[i];
      struct pipe_box box;
      box.x = 0;
      box.y = 0;
      box.z = 0;

      uint32_t level_count = val_get_levelCount(image, range);
      for (unsigned j = range->baseMipLevel; j < range->baseMipLevel + level_count; j++) {
         box.width = u_minify(image->bo->width0, j);
         box.height = u_minify(image->bo->height0, j);
         box.depth = 1;
         if (image->bo->target == PIPE_TEXTURE_3D)
            box.depth = u_minify(image->bo->depth0, j);
         else if (image->bo->target == PIPE_TEXTURE_1D_ARRAY) {
            box.y = range->baseArrayLayer;
            box.height = val_get_layerCount(image, range);
            box.depth = 1;
         } else {
            box.z = range->baseArrayLayer;
            box.depth = val_get_layerCount(image, range);
         }

         state->pctx->clear_texture(state->pctx, image->bo,
                                    j, &box, (void *)&col_val);
      }
   }
}
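
/* vkCmdClearAttachments: resolve the attachment through the current
 * subpass and framebuffer, then clear each rect at the view's base mip
 * level.
 */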
static void handle_clear_attachments(struct val_cmd_buffer_entry *cmd,
                                     struct rendering_state *state)
{
   for (uint32_t a = 0; a < cmd->u.clear_attachments.attachment_count; a++) {
      VkClearAttachment *att = &cmd->u.clear_attachments.attachments[a];
      struct val_subpass *subpass = &state->pass->subpasses[state->subpass];
      struct val_image_view *imgv;

      if (att->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
         struct val_subpass_attachment *color_att = &subpass->color_attachments[att->colorAttachment];
         if (!color_att || color_att->attachment == VK_ATTACHMENT_UNUSED)
            continue;
         imgv = state->vk_framebuffer->attachments[color_att->attachment];
      } else {
         struct val_subpass_attachment *ds_att = subpass->depth_stencil_attachment;
         if (!ds_att || ds_att->attachment == VK_ATTACHMENT_UNUSED)
            continue;
         imgv = state->vk_framebuffer->attachments[ds_att->attachment];
      }
      uint32_t col_val[4];
      if (util_format_is_depth_or_stencil(imgv->pformat)) {
         int64_t val = util_pack64_z_stencil(imgv->pformat, att->clearValue.depthStencil.depth, att->clearValue.depthStencil.stencil);
         memcpy(col_val, &val, 8);
      } else
         pack_clear_color(imgv->pformat, &att->clearValue.color, col_val);
      for (uint32_t r = 0; r < cmd->u.clear_attachments.rect_count; r++) {
         struct pipe_box box;
         VkClearRect *rect = &cmd->u.clear_attachments.rects[r];
         box.x = rect->rect.offset.x;
         box.y = rect->rect.offset.y;
         box.z = imgv->subresourceRange.baseArrayLayer + rect->baseArrayLayer;
         box.width = rect->rect.extent.width;
         box.height = rect->rect.extent.height;
         box.depth = rect->layerCount;

         state->pctx->clear_texture(state->pctx, imgv->image->bo,
                                    imgv->subresourceRange.baseMipLevel,
                                    &box, col_val);
      }
   }
}
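
/* vkCmdResolveImage: implemented as a nearest-filtered blit from the
 * multisampled source to the single-sample destination.
 */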
static void handle_resolve_image(struct val_cmd_buffer_entry *cmd,
                                 struct rendering_state *state)
{
   int i;
   struct val_cmd_resolve_image *resolvecmd = &cmd->u.resolve_image;
   struct pipe_blit_info info;

   memset(&info, 0, sizeof(info));

   state->pctx->flush(state->pctx, NULL, 0);
   info.src.resource = resolvecmd->src->bo;
   info.dst.resource = resolvecmd->dst->bo;
   info.src.format = resolvecmd->src->bo->format;
   info.dst.format = resolvecmd->dst->bo->format;
   info.mask = util_format_is_depth_or_stencil(info.src.format) ? PIPE_MASK_ZS : PIPE_MASK_RGBA;
   info.filter = PIPE_TEX_FILTER_NEAREST;
   for (i = 0; i < resolvecmd->region_count; i++) {
      int srcX0, srcY0;
      unsigned dstX0, dstY0;

      srcX0 = resolvecmd->regions[i].srcOffset.x;
      srcY0 = resolvecmd->regions[i].srcOffset.y;

      dstX0 = resolvecmd->regions[i].dstOffset.x;
      dstY0 = resolvecmd->regions[i].dstOffset.y;

      info.dst.box.x = dstX0;
      info.dst.box.y = dstY0;
      info.src.box.x = srcX0;
      info.src.box.y = srcY0;

      info.dst.box.width = resolvecmd->regions[i].extent.width;
      info.src.box.width = resolvecmd->regions[i].extent.width;
      info.dst.box.height = resolvecmd->regions[i].extent.height;
      info.src.box.height = resolvecmd->regions[i].extent.height;

      info.dst.box.depth = resolvecmd->regions[i].dstSubresource.layerCount;
      info.src.box.depth = resolvecmd->regions[i].srcSubresource.layerCount;

      info.src.level = resolvecmd->regions[i].srcSubresource.mipLevel;
      info.src.box.z = resolvecmd->regions[i].srcOffset.z + resolvecmd->regions[i].srcSubresource.baseArrayLayer;

      info.dst.level = resolvecmd->regions[i].dstSubresource.mipLevel;
      info.dst.box.z = resolvecmd->regions[i].dstOffset.z + resolvecmd->regions[i].dstSubresource.baseArrayLayer;

      state->pctx->blit(state->pctx, &info);
   }
}
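
/* Replay a recorded command buffer: walk the entry list and dispatch to
 * the handler for each command type.  Draw and dispatch commands first
 * (re)emit any dirty gallium state.
 */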
static void val_execute_cmd_buffer(struct val_cmd_buffer *cmd_buffer,
                                   struct rendering_state *state)
{
   struct val_cmd_buffer_entry *cmd;

   LIST_FOR_EACH_ENTRY(cmd, &cmd_buffer->cmds, cmd_link) {
      switch (cmd->cmd_type) {
      case VAL_CMD_BIND_PIPELINE:
         handle_pipeline(cmd, state);
         break;
      case VAL_CMD_SET_VIEWPORT:
         handle_set_viewport(cmd, state);
         break;
      case VAL_CMD_SET_SCISSOR:
         handle_set_scissor(cmd, state);
         break;
      case VAL_CMD_SET_LINE_WIDTH:
         handle_set_line_width(cmd, state);
         break;
      case VAL_CMD_SET_DEPTH_BIAS:
         handle_set_depth_bias(cmd, state);
         break;
      case VAL_CMD_SET_BLEND_CONSTANTS:
         handle_set_blend_constants(cmd, state);
         break;
      case VAL_CMD_SET_DEPTH_BOUNDS:
         handle_set_depth_bounds(cmd, state);
         break;
      case VAL_CMD_SET_STENCIL_COMPARE_MASK:
         handle_set_stencil_compare_mask(cmd, state);
         break;
      case VAL_CMD_SET_STENCIL_WRITE_MASK:
         handle_set_stencil_write_mask(cmd, state);
         break;
      case VAL_CMD_SET_STENCIL_REFERENCE:
         handle_set_stencil_reference(cmd, state);
         break;
      case VAL_CMD_BIND_DESCRIPTOR_SETS:
         handle_descriptor_sets(cmd, state);
         break;
      case VAL_CMD_BIND_INDEX_BUFFER:
         handle_index_buffer(cmd, state);
         break;
      case VAL_CMD_BIND_VERTEX_BUFFERS:
         handle_vertex_buffers(cmd, state);
         break;
      case VAL_CMD_DRAW:
         emit_state(state);
         handle_draw(cmd, state);
         break;
      case VAL_CMD_DRAW_INDEXED:
         emit_state(state);
         handle_draw_indexed(cmd, state);
         break;
      case VAL_CMD_DRAW_INDIRECT:
         emit_state(state);
         handle_draw_indirect(cmd, state, false);
         break;
      case VAL_CMD_DRAW_INDEXED_INDIRECT:
         emit_state(state);
         handle_draw_indirect(cmd, state, true);
         break;
      case VAL_CMD_DISPATCH:
         emit_compute_state(state);
         handle_dispatch(cmd, state);
         break;
      case VAL_CMD_DISPATCH_INDIRECT:
         emit_compute_state(state);
         handle_dispatch_indirect(cmd, state);
         break;
      case VAL_CMD_COPY_BUFFER:
         handle_copy_buffer(cmd, state);
         break;
      case VAL_CMD_COPY_IMAGE:
         handle_copy_image(cmd, state);
         break;
      case VAL_CMD_BLIT_IMAGE:
         handle_blit_image(cmd, state);
         break;
      case VAL_CMD_COPY_BUFFER_TO_IMAGE:
         handle_copy_buffer_to_image(cmd, state);
         break;
      case VAL_CMD_COPY_IMAGE_TO_BUFFER:
         handle_copy_image_to_buffer(cmd, state);
         break;
      case VAL_CMD_UPDATE_BUFFER:
         handle_update_buffer(cmd, state);
         break;
      case VAL_CMD_FILL_BUFFER:
         handle_fill_buffer(cmd, state);
         break;
      case VAL_CMD_CLEAR_COLOR_IMAGE:
         handle_clear_color_image(cmd, state);
         break;
      case VAL_CMD_CLEAR_DEPTH_STENCIL_IMAGE:
         handle_clear_ds_image(cmd, state);
         break;
      case VAL_CMD_CLEAR_ATTACHMENTS:
         handle_clear_attachments(cmd, state);
         break;
      case VAL_CMD_RESOLVE_IMAGE:
         handle_resolve_image(cmd, state);
         break;
      case VAL_CMD_SET_EVENT:
      case VAL_CMD_RESET_EVENT:
         handle_event_set(cmd, state);
         break;
      case VAL_CMD_WAIT_EVENTS:
         handle_wait_events(cmd, state);
         break;
      case VAL_CMD_PIPELINE_BARRIER:
         handle_pipeline_barrier(cmd, state);
         break;
      case VAL_CMD_BEGIN_QUERY:
         handle_begin_query(cmd, state);
         break;
      case VAL_CMD_END_QUERY:
         handle_end_query(cmd, state);
         break;
      case VAL_CMD_RESET_QUERY_POOL:
         handle_reset_query_pool(cmd, state);
         break;
      case VAL_CMD_WRITE_TIMESTAMP:
         handle_write_timestamp(cmd, state);
         break;
      case VAL_CMD_COPY_QUERY_POOL_RESULTS:
         handle_copy_query_pool_results(cmd, state);
         break;
      case VAL_CMD_PUSH_CONSTANTS:
         handle_push_constants(cmd, state);
         break;
      case VAL_CMD_BEGIN_RENDER_PASS:
         handle_begin_render_pass(cmd, state);
         break;
      case VAL_CMD_NEXT_SUBPASS:
         handle_next_subpass(cmd, state);
         break;
      case VAL_CMD_END_RENDER_PASS:
         handle_end_render_pass(cmd, state);
         break;
      case VAL_CMD_EXECUTE_COMMANDS:
         handle_execute_commands(cmd, state);
         break;
      }
   }
}
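
/* Queue-submission entry point: execute the command buffer on the
 * queue's gallium context, flush (optionally collecting a fence), then
 * unbind and destroy all transient CSO state so nothing leaks between
 * submissions.
 */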
VkResult val_execute_cmds(struct val_device *device,
                          struct val_queue *queue,
                          struct val_fence *fence,
                          struct val_cmd_buffer *cmd_buffer)
{
   struct rendering_state state;
   struct pipe_fence_handle *handle = NULL;
   memset(&state, 0, sizeof(state));
   state.pctx = queue->ctx;
   state.blend_dirty = true;
   state.dsa_dirty = true;
   state.rs_dirty = true;
   /* create a gallium context */
   val_execute_cmd_buffer(cmd_buffer, &state);

   state.pctx->flush(state.pctx, fence ? &handle : NULL, 0);
   if (fence) {
      mtx_lock(&device->fence_lock);
      fence->handle = handle;
      mtx_unlock(&device->fence_lock);
   }
   state.start_vb = -1;
   state.num_vb = 0;
   state.pctx->set_vertex_buffers(state.pctx, 0, PIPE_MAX_ATTRIBS, NULL);
   state.pctx->bind_vertex_elements_state(state.pctx, NULL);
   state.pctx->bind_vs_state(state.pctx, NULL);
   state.pctx->bind_fs_state(state.pctx, NULL);
   state.pctx->bind_gs_state(state.pctx, NULL);
   if (state.pctx->bind_tcs_state)
      state.pctx->bind_tcs_state(state.pctx, NULL);
   if (state.pctx->bind_tes_state)
      state.pctx->bind_tes_state(state.pctx, NULL);
   if (state.pctx->bind_compute_state)
      state.pctx->bind_compute_state(state.pctx, NULL);
   if (state.velems_cso)
      state.pctx->delete_vertex_elements_state(state.pctx, state.velems_cso);

   state.pctx->bind_rasterizer_state(state.pctx, NULL);
   state.pctx->delete_rasterizer_state(state.pctx, state.rast_handle);
   if (state.blend_handle) {
      state.pctx->bind_blend_state(state.pctx, NULL);
      state.pctx->delete_blend_state(state.pctx, state.blend_handle);
   }

   if (state.dsa_handle) {
      state.pctx->bind_depth_stencil_alpha_state(state.pctx, NULL);
      state.pctx->delete_depth_stencil_alpha_state(state.pctx, state.dsa_handle);
   }

   for (enum pipe_shader_type s = PIPE_SHADER_VERTEX; s < PIPE_SHADER_TYPES; s++) {
      for (unsigned i = 0; i < PIPE_MAX_SAMPLERS; i++) {
         if (state.sv[s][i])
            pipe_sampler_view_reference(&state.sv[s][i], NULL);
         if (state.ss_cso[s][i]) {
            state.pctx->delete_sampler_state(state.pctx, state.ss_cso[s][i]);
            state.ss_cso[s][i] = NULL;
         }
      }
      state.pctx->bind_sampler_states(state.pctx, s, 0, PIPE_MAX_SAMPLERS, state.ss_cso[s]);

      state.pctx->set_shader_images(state.pctx, s, 0, device->physical_device->max_images, NULL);