anv: add support for dynamic primitive topology change
mesa.git: src/intel/vulkan/anv_cmd_buffer.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "vk_format_info.h"
33 #include "vk_util.h"
34
35 /** \file anv_cmd_buffer.c
36 *
37 * This file contains the code for emitting commands into a command
38 * buffer, including implementations of most of the vkCmd*
39 * entrypoints. It is concerned entirely with state emission and
40 * not with the command buffer data structure itself; as far as this
41 * file is concerned, most of anv_cmd_buffer is magic.
42 */
43
44 /* TODO: These are taken from GLES. We should check the Vulkan spec */
45 const struct anv_dynamic_state default_dynamic_state = {
46 .viewport = {
47 .count = 0,
48 },
49 .scissor = {
50 .count = 0,
51 },
52 .line_width = 1.0f,
53 .depth_bias = {
54 .bias = 0.0f,
55 .clamp = 0.0f,
56 .slope = 0.0f,
57 },
58 .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
59 .depth_bounds = {
60 .min = 0.0f,
61 .max = 1.0f,
62 },
63 .stencil_compare_mask = {
64 .front = ~0u,
65 .back = ~0u,
66 },
67 .stencil_write_mask = {
68 .front = ~0u,
69 .back = ~0u,
70 },
71 .stencil_reference = {
72 .front = 0u,
73 .back = 0u,
74 },
75 .line_stipple = {
76 .factor = 0u,
77 .pattern = 0u,
78 },
79 .cull_mode = 0,
80 .front_face = 0,
81 .primitive_topology = 0,
82 };
83
84 /**
85 * Copy the dynamic state from src to dest based on the copy_mask.
86 *
87 * States that have not changed are not copied, except for VIEWPORT, SCISSOR,
88 * and BLEND_CONSTANTS, which are always copied when present in copy_mask.
89 *
90 * Returns a mask of the states which changed.
91 */
92 anv_cmd_dirty_mask_t
93 anv_dynamic_state_copy(struct anv_dynamic_state *dest,
94 const struct anv_dynamic_state *src,
95 anv_cmd_dirty_mask_t copy_mask)
96 {
97 anv_cmd_dirty_mask_t changed = 0;
98
99 if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT) {
100 dest->viewport.count = src->viewport.count;
101 typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
102 src->viewport.count);
103 changed |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
104 }
105
106 if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_SCISSOR) {
107 dest->scissor.count = src->scissor.count;
108 typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
109 src->scissor.count);
110 changed |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
111 }
112
113 if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
114 typed_memcpy(dest->blend_constants, src->blend_constants, 4);
115 changed |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
116 }
117
118 #define ANV_CMP_COPY(field, flag) \
119 if (copy_mask & flag) { \
120 if (dest->field != src->field) { \
121 dest->field = src->field; \
122 changed |= flag; \
123 } \
124 }
125
126 ANV_CMP_COPY(line_width, ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH);
127
128 ANV_CMP_COPY(depth_bias.bias, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);
129 ANV_CMP_COPY(depth_bias.clamp, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);
130 ANV_CMP_COPY(depth_bias.slope, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);
131
132 ANV_CMP_COPY(depth_bounds.min, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS);
133 ANV_CMP_COPY(depth_bounds.max, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS);
134
135 ANV_CMP_COPY(stencil_compare_mask.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK);
136 ANV_CMP_COPY(stencil_compare_mask.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK);
137
138 ANV_CMP_COPY(stencil_write_mask.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK);
139 ANV_CMP_COPY(stencil_write_mask.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK);
140
141 ANV_CMP_COPY(stencil_reference.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE);
142 ANV_CMP_COPY(stencil_reference.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE);
143
144 ANV_CMP_COPY(line_stipple.factor, ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE);
145 ANV_CMP_COPY(line_stipple.pattern, ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE);
146
147 ANV_CMP_COPY(cull_mode, ANV_CMD_DIRTY_DYNAMIC_CULL_MODE);
148 ANV_CMP_COPY(front_face, ANV_CMD_DIRTY_DYNAMIC_FRONT_FACE);
149 ANV_CMP_COPY(primitive_topology, ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY);
150
151 #undef ANV_CMP_COPY
152
153 return changed;
154 }
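/* Usage sketch (illustrative, not part of the driver): a caller folds the
 * returned mask into the command buffer's dirty bits so that only states
 * whose values actually changed get re-emitted:
 *
 *    anv_cmd_dirty_mask_t changed =
 *       anv_dynamic_state_copy(&cmd_buffer->state.gfx.dynamic,
 *                              &gfx_pipeline->dynamic_state,
 *                              gfx_pipeline->dynamic_state_mask);
 *    cmd_buffer->state.gfx.dirty |= changed;
 */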
155
156 static void
157 anv_cmd_state_init(struct anv_cmd_buffer *cmd_buffer)
158 {
159 struct anv_cmd_state *state = &cmd_buffer->state;
160
161 memset(state, 0, sizeof(*state));
162
163 state->current_pipeline = UINT32_MAX;
164 state->restart_index = UINT32_MAX;
165 state->gfx.dynamic = default_dynamic_state;
166 }
167
168 static void
169 anv_cmd_pipeline_state_finish(struct anv_cmd_buffer *cmd_buffer,
170 struct anv_cmd_pipeline_state *pipe_state)
171 {
172 for (uint32_t i = 0; i < ARRAY_SIZE(pipe_state->push_descriptors); i++) {
173 if (pipe_state->push_descriptors[i]) {
174 anv_descriptor_set_layout_unref(cmd_buffer->device,
175 pipe_state->push_descriptors[i]->set.layout);
176 vk_free(&cmd_buffer->pool->alloc, pipe_state->push_descriptors[i]);
177 }
178 }
179 }
180
181 static void
182 anv_cmd_state_finish(struct anv_cmd_buffer *cmd_buffer)
183 {
184 struct anv_cmd_state *state = &cmd_buffer->state;
185
186 anv_cmd_pipeline_state_finish(cmd_buffer, &state->gfx.base);
187 anv_cmd_pipeline_state_finish(cmd_buffer, &state->compute.base);
188
189 vk_free(&cmd_buffer->pool->alloc, state->attachments);
190 }
191
192 static void
193 anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
194 {
195 anv_cmd_state_finish(cmd_buffer);
196 anv_cmd_state_init(cmd_buffer);
197 }
198
199 static VkResult anv_create_cmd_buffer(
200 struct anv_device * device,
201 struct anv_cmd_pool * pool,
202 VkCommandBufferLevel level,
203 VkCommandBuffer* pCommandBuffer)
204 {
205 struct anv_cmd_buffer *cmd_buffer;
206 VkResult result;
207
208 cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
209 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
210 if (cmd_buffer == NULL)
211 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
212
213 vk_object_base_init(&device->vk, &cmd_buffer->base,
214 VK_OBJECT_TYPE_COMMAND_BUFFER);
215
216 cmd_buffer->batch.status = VK_SUCCESS;
217
218 cmd_buffer->device = device;
219 cmd_buffer->pool = pool;
220 cmd_buffer->level = level;
221
222 result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
223 if (result != VK_SUCCESS)
224 goto fail;
225
226 anv_state_stream_init(&cmd_buffer->surface_state_stream,
227 &device->surface_state_pool, 4096);
228 anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
229 &device->dynamic_state_pool, 16384);
230
231 anv_cmd_state_init(cmd_buffer);
232
233 if (pool) {
234 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
235 } else {
236 /* Init the pool_link so we can safely call list_del when we destroy
237 * the command buffer.
238 */
239 list_inithead(&cmd_buffer->pool_link);
240 }
241
242 *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);
243
244 return VK_SUCCESS;
245
246 fail:
247 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
248
249 return result;
250 }
251
252 VkResult anv_AllocateCommandBuffers(
253 VkDevice _device,
254 const VkCommandBufferAllocateInfo* pAllocateInfo,
255 VkCommandBuffer* pCommandBuffers)
256 {
257 ANV_FROM_HANDLE(anv_device, device, _device);
258 ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);
259
260 VkResult result = VK_SUCCESS;
261 uint32_t i;
262
263 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
264 result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
265 &pCommandBuffers[i]);
266 if (result != VK_SUCCESS)
267 break;
268 }
269
270 if (result != VK_SUCCESS) {
271 anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
272 i, pCommandBuffers);
273 for (i = 0; i < pAllocateInfo->commandBufferCount; i++)
274 pCommandBuffers[i] = VK_NULL_HANDLE;
275 }
276
277 return result;
278 }
279
280 static void
281 anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
282 {
283 list_del(&cmd_buffer->pool_link);
284
285 anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);
286
287 anv_state_stream_finish(&cmd_buffer->surface_state_stream);
288 anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
289
290 anv_cmd_state_finish(cmd_buffer);
291
292 vk_object_base_finish(&cmd_buffer->base);
293 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
294 }
295
296 void anv_FreeCommandBuffers(
297 VkDevice device,
298 VkCommandPool commandPool,
299 uint32_t commandBufferCount,
300 const VkCommandBuffer* pCommandBuffers)
301 {
302 for (uint32_t i = 0; i < commandBufferCount; i++) {
303 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
304
305 if (!cmd_buffer)
306 continue;
307
308 anv_cmd_buffer_destroy(cmd_buffer);
309 }
310 }
311
312 VkResult
313 anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer)
314 {
315 cmd_buffer->usage_flags = 0;
316 cmd_buffer->perf_query_pool = NULL;
317 anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
318 anv_cmd_state_reset(cmd_buffer);
319
320 anv_state_stream_finish(&cmd_buffer->surface_state_stream);
321 anv_state_stream_init(&cmd_buffer->surface_state_stream,
322 &cmd_buffer->device->surface_state_pool, 4096);
323
324 anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
325 anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
326 &cmd_buffer->device->dynamic_state_pool, 16384);
327 return VK_SUCCESS;
328 }
329
330 VkResult anv_ResetCommandBuffer(
331 VkCommandBuffer commandBuffer,
332 VkCommandBufferResetFlags flags)
333 {
334 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
335 return anv_cmd_buffer_reset(cmd_buffer);
336 }
337
338 #define anv_genX_call(devinfo, func, ...) \
339 switch ((devinfo)->gen) { \
340 case 7: \
341 if ((devinfo)->is_haswell) { \
342 gen75_##func(__VA_ARGS__); \
343 } else { \
344 gen7_##func(__VA_ARGS__); \
345 } \
346 break; \
347 case 8: \
348 gen8_##func(__VA_ARGS__); \
349 break; \
350 case 9: \
351 gen9_##func(__VA_ARGS__); \
352 break; \
353 case 10: \
354 gen10_##func(__VA_ARGS__); \
355 break; \
356 case 11: \
357 gen11_##func(__VA_ARGS__); \
358 break; \
359 case 12: \
360 gen12_##func(__VA_ARGS__); \
361 break; \
362 default: \
363 assert(!"Unknown hardware generation"); \
364 }
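/* Dispatch example (illustrative): on a Gen9 device,
 *
 *    anv_genX_call(&device->info, cmd_buffer_emit_state_base_address, cmd_buffer);
 *
 * resolves to a call to gen9_cmd_buffer_emit_state_base_address(cmd_buffer).
 */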
365
366 void
367 anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
368 {
369 anv_genX_call(&cmd_buffer->device->info,
370 cmd_buffer_emit_state_base_address,
371 cmd_buffer);
372 }
373
374 void
375 anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
376 const struct anv_image *image,
377 VkImageAspectFlagBits aspect,
378 enum isl_aux_usage aux_usage,
379 uint32_t level,
380 uint32_t base_layer,
381 uint32_t layer_count)
382 {
383 anv_genX_call(&cmd_buffer->device->info,
384 cmd_buffer_mark_image_written,
385 cmd_buffer, image, aspect, aux_usage,
386 level, base_layer, layer_count);
387 }
388
389 void
390 anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer *cmd_buffer)
391 {
392 anv_genX_call(&cmd_buffer->device->info,
393 cmd_emit_conditional_render_predicate,
394 cmd_buffer);
395 }
396
397 static bool
398 mem_update(void *dst, const void *src, size_t size)
399 {
400 if (memcmp(dst, src, size) == 0)
401 return false;
402
403 memcpy(dst, src, size);
404 return true;
405 }
406
407 static void
408 set_dirty_for_bind_map(struct anv_cmd_buffer *cmd_buffer,
409 gl_shader_stage stage,
410 const struct anv_pipeline_bind_map *map)
411 {
412 if (mem_update(cmd_buffer->state.surface_sha1s[stage],
413 map->surface_sha1, sizeof(map->surface_sha1)))
414 cmd_buffer->state.descriptors_dirty |= mesa_to_vk_shader_stage(stage);
415
416 if (mem_update(cmd_buffer->state.sampler_sha1s[stage],
417 map->sampler_sha1, sizeof(map->sampler_sha1)))
418 cmd_buffer->state.descriptors_dirty |= mesa_to_vk_shader_stage(stage);
419
420 if (mem_update(cmd_buffer->state.push_sha1s[stage],
421 map->push_sha1, sizeof(map->push_sha1)))
422 cmd_buffer->state.push_constants_dirty |= mesa_to_vk_shader_stage(stage);
423 }
424
425 void anv_CmdBindPipeline(
426 VkCommandBuffer commandBuffer,
427 VkPipelineBindPoint pipelineBindPoint,
428 VkPipeline _pipeline)
429 {
430 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
431 ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
432
433 switch (pipelineBindPoint) {
434 case VK_PIPELINE_BIND_POINT_COMPUTE: {
435 struct anv_compute_pipeline *compute_pipeline =
436 anv_pipeline_to_compute(pipeline);
437 if (cmd_buffer->state.compute.pipeline == compute_pipeline)
438 return;
439
440 cmd_buffer->state.compute.pipeline = compute_pipeline;
441 cmd_buffer->state.compute.pipeline_dirty = true;
442 set_dirty_for_bind_map(cmd_buffer, MESA_SHADER_COMPUTE,
443 &compute_pipeline->cs->bind_map);
444 break;
445 }
446
447 case VK_PIPELINE_BIND_POINT_GRAPHICS: {
448 struct anv_graphics_pipeline *gfx_pipeline =
449 anv_pipeline_to_graphics(pipeline);
450 if (cmd_buffer->state.gfx.pipeline == gfx_pipeline)
451 return;
452
453 cmd_buffer->state.gfx.pipeline = gfx_pipeline;
454 cmd_buffer->state.gfx.vb_dirty |= gfx_pipeline->vb_used;
455 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
456
457 anv_foreach_stage(stage, gfx_pipeline->active_stages) {
458 set_dirty_for_bind_map(cmd_buffer, stage,
459 &gfx_pipeline->shaders[stage]->bind_map);
460 }
461
462 /* Apply the dynamic state from the pipeline */
463 cmd_buffer->state.gfx.dirty |=
464 anv_dynamic_state_copy(&cmd_buffer->state.gfx.dynamic,
465 &gfx_pipeline->dynamic_state,
466 gfx_pipeline->dynamic_state_mask);
467 break;
468 }
469
470 default:
471 assert(!"invalid bind point");
472 break;
473 }
474 }
475
476 void anv_CmdSetViewport(
477 VkCommandBuffer commandBuffer,
478 uint32_t firstViewport,
479 uint32_t viewportCount,
480 const VkViewport* pViewports)
481 {
482 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
483
484 const uint32_t total_count = firstViewport + viewportCount;
485 if (cmd_buffer->state.gfx.dynamic.viewport.count < total_count)
486 cmd_buffer->state.gfx.dynamic.viewport.count = total_count;
487
488 memcpy(cmd_buffer->state.gfx.dynamic.viewport.viewports + firstViewport,
489 pViewports, viewportCount * sizeof(*pViewports));
490
491 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
492 }
493
494 void anv_CmdSetViewportWithCountEXT(
495 VkCommandBuffer commandBuffer,
496 uint32_t viewportCount,
497 const VkViewport* pViewports)
498 {
499 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
500
501 cmd_buffer->state.gfx.dynamic.viewport.count = viewportCount;
502
503 memcpy(cmd_buffer->state.gfx.dynamic.viewport.viewports,
504 pViewports, viewportCount * sizeof(*pViewports));
505
506 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
507 }
508
509 void anv_CmdSetScissor(
510 VkCommandBuffer commandBuffer,
511 uint32_t firstScissor,
512 uint32_t scissorCount,
513 const VkRect2D* pScissors)
514 {
515 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
516
517 const uint32_t total_count = firstScissor + scissorCount;
518 if (cmd_buffer->state.gfx.dynamic.scissor.count < total_count)
519 cmd_buffer->state.gfx.dynamic.scissor.count = total_count;
520
521 memcpy(cmd_buffer->state.gfx.dynamic.scissor.scissors + firstScissor,
522 pScissors, scissorCount * sizeof(*pScissors));
523
524 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
525 }
526
527 void anv_CmdSetScissorWithCountEXT(
528 VkCommandBuffer commandBuffer,
529 uint32_t scissorCount,
530 const VkRect2D* pScissors)
531 {
532 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
533
534 cmd_buffer->state.gfx.dynamic.scissor.count = scissorCount;
535
536 memcpy(cmd_buffer->state.gfx.dynamic.scissor.scissors,
537 pScissors, scissorCount * sizeof(*pScissors));
538
539 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
540 }
541
542 void anv_CmdSetPrimitiveTopologyEXT(
543 VkCommandBuffer commandBuffer,
544 VkPrimitiveTopology primitiveTopology)
545 {
546 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
547
548 cmd_buffer->state.gfx.dynamic.primitive_topology = primitiveTopology;
549
550 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY;
551 }
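/* Application-side sketch (illustrative) of the dynamic primitive topology
 * path this entrypoint serves, assuming VK_EXT_extended_dynamic_state is
 * enabled and the bound pipeline lists VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT:
 *
 *    vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
 *    vkCmdSetPrimitiveTopologyEXT(cmd, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST);
 *    vkCmdDraw(cmd, 3, 1, 0, 0);
 *    vkCmdSetPrimitiveTopologyEXT(cmd, VK_PRIMITIVE_TOPOLOGY_LINE_LIST);
 *    vkCmdDraw(cmd, 2, 1, 0, 0);
 *
 * This function only records the topology and sets the dirty bit; the
 * gen-specific draw-time code consumes
 * ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY when it emits the primitive.
 */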
552
553 void anv_CmdSetLineWidth(
554 VkCommandBuffer commandBuffer,
555 float lineWidth)
556 {
557 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
558
559 cmd_buffer->state.gfx.dynamic.line_width = lineWidth;
560 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
561 }
562
563 void anv_CmdSetDepthBias(
564 VkCommandBuffer commandBuffer,
565 float depthBiasConstantFactor,
566 float depthBiasClamp,
567 float depthBiasSlopeFactor)
568 {
569 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
570
571 cmd_buffer->state.gfx.dynamic.depth_bias.bias = depthBiasConstantFactor;
572 cmd_buffer->state.gfx.dynamic.depth_bias.clamp = depthBiasClamp;
573 cmd_buffer->state.gfx.dynamic.depth_bias.slope = depthBiasSlopeFactor;
574
575 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
576 }
577
578 void anv_CmdSetBlendConstants(
579 VkCommandBuffer commandBuffer,
580 const float blendConstants[4])
581 {
582 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
583
584 memcpy(cmd_buffer->state.gfx.dynamic.blend_constants,
585 blendConstants, sizeof(float) * 4);
586
587 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
588 }
589
590 void anv_CmdSetDepthBounds(
591 VkCommandBuffer commandBuffer,
592 float minDepthBounds,
593 float maxDepthBounds)
594 {
595 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
596
597 cmd_buffer->state.gfx.dynamic.depth_bounds.min = minDepthBounds;
598 cmd_buffer->state.gfx.dynamic.depth_bounds.max = maxDepthBounds;
599
600 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
601 }
602
603 void anv_CmdSetStencilCompareMask(
604 VkCommandBuffer commandBuffer,
605 VkStencilFaceFlags faceMask,
606 uint32_t compareMask)
607 {
608 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
609
610 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
611 cmd_buffer->state.gfx.dynamic.stencil_compare_mask.front = compareMask;
612 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
613 cmd_buffer->state.gfx.dynamic.stencil_compare_mask.back = compareMask;
614
615 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
616 }
617
618 void anv_CmdSetStencilWriteMask(
619 VkCommandBuffer commandBuffer,
620 VkStencilFaceFlags faceMask,
621 uint32_t writeMask)
622 {
623 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
624
625 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
626 cmd_buffer->state.gfx.dynamic.stencil_write_mask.front = writeMask;
627 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
628 cmd_buffer->state.gfx.dynamic.stencil_write_mask.back = writeMask;
629
630 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
631 }
632
633 void anv_CmdSetStencilReference(
634 VkCommandBuffer commandBuffer,
635 VkStencilFaceFlags faceMask,
636 uint32_t reference)
637 {
638 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
639
640 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
641 cmd_buffer->state.gfx.dynamic.stencil_reference.front = reference;
642 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
643 cmd_buffer->state.gfx.dynamic.stencil_reference.back = reference;
644
645 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
646 }
647
648 void anv_CmdSetLineStippleEXT(
649 VkCommandBuffer commandBuffer,
650 uint32_t lineStippleFactor,
651 uint16_t lineStipplePattern)
652 {
653 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
654
655 cmd_buffer->state.gfx.dynamic.line_stipple.factor = lineStippleFactor;
656 cmd_buffer->state.gfx.dynamic.line_stipple.pattern = lineStipplePattern;
657
658 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE;
659 }
660
661 void anv_CmdSetCullModeEXT(
662 VkCommandBuffer commandBuffer,
663 VkCullModeFlags cullMode)
664 {
665 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
666
667 cmd_buffer->state.gfx.dynamic.cull_mode = cullMode;
668
669 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_CULL_MODE;
670 }
671
672 void anv_CmdSetFrontFaceEXT(
673 VkCommandBuffer commandBuffer,
674 VkFrontFace frontFace)
675 {
676 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
677
678 cmd_buffer->state.gfx.dynamic.front_face = frontFace;
679
680 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_FRONT_FACE;
681 }
682
683 static void
684 anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
685 VkPipelineBindPoint bind_point,
686 struct anv_pipeline_layout *layout,
687 uint32_t set_index,
688 struct anv_descriptor_set *set,
689 uint32_t *dynamic_offset_count,
690 const uint32_t **dynamic_offsets)
691 {
692 struct anv_descriptor_set_layout *set_layout =
693 layout->set[set_index].layout;
694
695 VkShaderStageFlags stages = set_layout->shader_stages;
696 struct anv_cmd_pipeline_state *pipe_state;
697
698 switch (bind_point) {
699 case VK_PIPELINE_BIND_POINT_GRAPHICS:
700 stages &= VK_SHADER_STAGE_ALL_GRAPHICS;
701 pipe_state = &cmd_buffer->state.gfx.base;
702 break;
703
704 case VK_PIPELINE_BIND_POINT_COMPUTE:
705 stages &= VK_SHADER_STAGE_COMPUTE_BIT;
706 pipe_state = &cmd_buffer->state.compute.base;
707 break;
708
709 default:
710 unreachable("invalid bind point");
711 }
712
713 VkShaderStageFlags dirty_stages = 0;
714 if (pipe_state->descriptors[set_index] != set) {
715 pipe_state->descriptors[set_index] = set;
716 dirty_stages |= stages;
717 }
718
719 /* If it's a push descriptor set, we have to flag things as dirty
720 * regardless of whether the CPU-side data structure changed, since we
721 * may have edited it in place.
722 */
723 if (set->pool == NULL)
724 dirty_stages |= stages;
725
726 if (dynamic_offsets) {
727 if (set_layout->dynamic_offset_count > 0) {
728 uint32_t dynamic_offset_start =
729 layout->set[set_index].dynamic_offset_start;
730
731 anv_foreach_stage(stage, stages) {
732 struct anv_push_constants *push =
733 &cmd_buffer->state.push_constants[stage];
734 uint32_t *push_offsets =
735 &push->dynamic_offsets[dynamic_offset_start];
736
737 /* Assert that everything is in range */
738 assert(set_layout->dynamic_offset_count <= *dynamic_offset_count);
739 assert(dynamic_offset_start + set_layout->dynamic_offset_count <=
740 ARRAY_SIZE(push->dynamic_offsets));
741
742 unsigned mask = set_layout->stage_dynamic_offsets[stage];
743 STATIC_ASSERT(MAX_DYNAMIC_BUFFERS <= sizeof(mask) * 8);
744 while (mask) {
745 int i = u_bit_scan(&mask);
746 if (push_offsets[i] != (*dynamic_offsets)[i]) {
747 push_offsets[i] = (*dynamic_offsets)[i];
748 dirty_stages |= mesa_to_vk_shader_stage(stage);
749 }
750 }
751 }
752
753 *dynamic_offsets += set_layout->dynamic_offset_count;
754 *dynamic_offset_count -= set_layout->dynamic_offset_count;
755 }
756 }
757
758 cmd_buffer->state.descriptors_dirty |= dirty_stages;
759 cmd_buffer->state.push_constants_dirty |= dirty_stages;
760 }
761
762 void anv_CmdBindDescriptorSets(
763 VkCommandBuffer commandBuffer,
764 VkPipelineBindPoint pipelineBindPoint,
765 VkPipelineLayout _layout,
766 uint32_t firstSet,
767 uint32_t descriptorSetCount,
768 const VkDescriptorSet* pDescriptorSets,
769 uint32_t dynamicOffsetCount,
770 const uint32_t* pDynamicOffsets)
771 {
772 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
773 ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
774
775 assert(firstSet + descriptorSetCount <= MAX_SETS);
776
777 for (uint32_t i = 0; i < descriptorSetCount; i++) {
778 ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
779 anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
780 layout, firstSet + i, set,
781 &dynamicOffsetCount,
782 &pDynamicOffsets);
783 }
784 }
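/* Dynamic offset ordering (illustrative): pDynamicOffsets is consumed in set
 * order by anv_cmd_buffer_bind_descriptor_set() above, which advances the
 * pointer by each set's dynamic_offset_count. If set 0 has two dynamic
 * buffers and set 1 has one:
 *
 *    uint32_t offsets[3] = { 0, 256, 512 };
 *    vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, layout,
 *                            0, 2, sets, 3, offsets);
 *
 * hands offsets[0] and offsets[1] to set 0 and offsets[2] to set 1.
 */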
785
786 void anv_CmdBindVertexBuffers(
787 VkCommandBuffer commandBuffer,
788 uint32_t firstBinding,
789 uint32_t bindingCount,
790 const VkBuffer* pBuffers,
791 const VkDeviceSize* pOffsets)
792 {
793 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
794 struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;
795
796 /* We have to defer setting up the vertex buffers since we need the
797 * buffer stride from the pipeline. */
798
799 assert(firstBinding + bindingCount <= MAX_VBS);
800 for (uint32_t i = 0; i < bindingCount; i++) {
801 vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
802 vb[firstBinding + i].offset = pOffsets[i];
803 cmd_buffer->state.gfx.vb_dirty |= 1 << (firstBinding + i);
804 }
805 }
806
807 void anv_CmdBindTransformFeedbackBuffersEXT(
808 VkCommandBuffer commandBuffer,
809 uint32_t firstBinding,
810 uint32_t bindingCount,
811 const VkBuffer* pBuffers,
812 const VkDeviceSize* pOffsets,
813 const VkDeviceSize* pSizes)
814 {
815 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
816 struct anv_xfb_binding *xfb = cmd_buffer->state.xfb_bindings;
817
818 /* As with vertex buffers, we have to defer setting up the transform
819 * feedback buffers since we need state from the pipeline. */
820
821 assert(firstBinding + bindingCount <= MAX_XFB_BUFFERS);
822 for (uint32_t i = 0; i < bindingCount; i++) {
823 if (pBuffers[i] == VK_NULL_HANDLE) {
824 xfb[firstBinding + i].buffer = NULL;
825 } else {
826 ANV_FROM_HANDLE(anv_buffer, buffer, pBuffers[i]);
827 xfb[firstBinding + i].buffer = buffer;
828 xfb[firstBinding + i].offset = pOffsets[i];
829 xfb[firstBinding + i].size =
830 anv_buffer_get_range(buffer, pOffsets[i],
831 pSizes ? pSizes[i] : VK_WHOLE_SIZE);
832 }
833 }
834 }
835
836 enum isl_format
837 anv_isl_format_for_descriptor_type(VkDescriptorType type)
838 {
839 switch (type) {
840 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
841 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
842 return ISL_FORMAT_R32G32B32A32_FLOAT;
843
844 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
845 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
846 return ISL_FORMAT_RAW;
847
848 default:
849 unreachable("Invalid descriptor type");
850 }
851 }
852
853 struct anv_state
854 anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
855 const void *data, uint32_t size, uint32_t alignment)
856 {
857 struct anv_state state;
858
859 state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
860 memcpy(state.map, data, size);
861
862 VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));
863
864 return state;
865 }
866
867 struct anv_state
868 anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
869 uint32_t *a, uint32_t *b,
870 uint32_t dwords, uint32_t alignment)
871 {
872 struct anv_state state;
873 uint32_t *p;
874
875 state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
876 dwords * 4, alignment);
877 p = state.map;
878 for (uint32_t i = 0; i < dwords; i++)
879 p[i] = a[i] | b[i];
880
881 VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));
882
883 return state;
884 }
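/* Typical use (illustrative): a caller packs the same hardware command twice,
 * once with the pipeline-baked fields and once with the dynamically-set
 * fields, then ORs the two dword arrays here to produce the final packet.
 * This assumes both arrays have the same length and that each field is zero
 * in the array that does not own it.
 */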
885
886 struct anv_state
887 anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
888 gl_shader_stage stage)
889 {
890 struct anv_push_constants *data =
891 &cmd_buffer->state.push_constants[stage];
892
893 struct anv_state state =
894 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
895 sizeof(struct anv_push_constants),
896 32 /* bottom 5 bits MBZ */);
897 memcpy(state.map, data, sizeof(struct anv_push_constants));
898
899 return state;
900 }
901
902 struct anv_state
903 anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
904 {
905 struct anv_push_constants *data =
906 &cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
907 struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
908 const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
909 const struct anv_push_range *range = &pipeline->cs->bind_map.push_ranges[0];
910
911 const struct anv_cs_parameters cs_params = anv_cs_parameters(pipeline);
912 const unsigned total_push_constants_size =
913 brw_cs_push_const_total_size(cs_prog_data, cs_params.threads);
914 if (total_push_constants_size == 0)
915 return (struct anv_state) { .offset = 0 };
916
917 const unsigned push_constant_alignment =
918 cmd_buffer->device->info.gen < 8 ? 32 : 64;
919 const unsigned aligned_total_push_constants_size =
920 ALIGN(total_push_constants_size, push_constant_alignment);
921 struct anv_state state =
922 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
923 aligned_total_push_constants_size,
924 push_constant_alignment);
925
926 void *dst = state.map;
927 const void *src = (char *)data + (range->start * 32);
928
929 if (cs_prog_data->push.cross_thread.size > 0) {
930 memcpy(dst, src, cs_prog_data->push.cross_thread.size);
931 dst += cs_prog_data->push.cross_thread.size;
932 src += cs_prog_data->push.cross_thread.size;
933 }
934
935 if (cs_prog_data->push.per_thread.size > 0) {
936 for (unsigned t = 0; t < cs_params.threads; t++) {
937 memcpy(dst, src, cs_prog_data->push.per_thread.size);
938
939 uint32_t *subgroup_id = dst +
940 offsetof(struct anv_push_constants, cs.subgroup_id) -
941 (range->start * 32 + cs_prog_data->push.cross_thread.size);
942 *subgroup_id = t;
943
944 dst += cs_prog_data->push.per_thread.size;
945 }
946 }
947
948 return state;
949 }
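/* Resulting layout (illustrative, per the copy loop above): one shared
 * cross-thread block followed by one per-thread block per hardware thread,
 * each per-thread copy with its cs.subgroup_id slot patched to the thread
 * index:
 *
 *    [ cross_thread | per_thread(id = 0) | per_thread(id = 1) | ... ]
 *
 * The allocation is aligned to 32 bytes on gen7.x and 64 bytes on gen8+.
 */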
950
951 void anv_CmdPushConstants(
952 VkCommandBuffer commandBuffer,
953 VkPipelineLayout layout,
954 VkShaderStageFlags stageFlags,
955 uint32_t offset,
956 uint32_t size,
957 const void* pValues)
958 {
959 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
960
961 anv_foreach_stage(stage, stageFlags) {
962 memcpy(cmd_buffer->state.push_constants[stage].client_data + offset,
963 pValues, size);
964 }
965
966 cmd_buffer->state.push_constants_dirty |= stageFlags;
967 }
968
969 VkResult anv_CreateCommandPool(
970 VkDevice _device,
971 const VkCommandPoolCreateInfo* pCreateInfo,
972 const VkAllocationCallbacks* pAllocator,
973 VkCommandPool* pCmdPool)
974 {
975 ANV_FROM_HANDLE(anv_device, device, _device);
976 struct anv_cmd_pool *pool;
977
978 pool = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8,
979 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
980 if (pool == NULL)
981 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
982
983 vk_object_base_init(&device->vk, &pool->base, VK_OBJECT_TYPE_COMMAND_POOL);
984
985 if (pAllocator)
986 pool->alloc = *pAllocator;
987 else
988 pool->alloc = device->vk.alloc;
989
990 list_inithead(&pool->cmd_buffers);
991
992 *pCmdPool = anv_cmd_pool_to_handle(pool);
993
994 return VK_SUCCESS;
995 }
996
997 void anv_DestroyCommandPool(
998 VkDevice _device,
999 VkCommandPool commandPool,
1000 const VkAllocationCallbacks* pAllocator)
1001 {
1002 ANV_FROM_HANDLE(anv_device, device, _device);
1003 ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
1004
1005 if (!pool)
1006 return;
1007
1008 list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
1009 &pool->cmd_buffers, pool_link) {
1010 anv_cmd_buffer_destroy(cmd_buffer);
1011 }
1012
1013 vk_object_base_finish(&pool->base);
1014 vk_free2(&device->vk.alloc, pAllocator, pool);
1015 }
1016
1017 VkResult anv_ResetCommandPool(
1018 VkDevice device,
1019 VkCommandPool commandPool,
1020 VkCommandPoolResetFlags flags)
1021 {
1022 ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
1023
1024 list_for_each_entry(struct anv_cmd_buffer, cmd_buffer,
1025 &pool->cmd_buffers, pool_link) {
1026 anv_cmd_buffer_reset(cmd_buffer);
1027 }
1028
1029 return VK_SUCCESS;
1030 }
1031
1032 void anv_TrimCommandPool(
1033 VkDevice device,
1034 VkCommandPool commandPool,
1035 VkCommandPoolTrimFlags flags)
1036 {
1037 /* Nothing for us to do here. Our pools stay pretty tidy. */
1038 }
1039
1040 /**
1041 * Return NULL if the current subpass has no depth/stencil attachment.
1042 */
1043 const struct anv_image_view *
1044 anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
1045 {
1046 const struct anv_subpass *subpass = cmd_buffer->state.subpass;
1047
1048 if (subpass->depth_stencil_attachment == NULL)
1049 return NULL;
1050
1051 const struct anv_image_view *iview =
1052 cmd_buffer->state.attachments[subpass->depth_stencil_attachment->attachment].image_view;
1053
1054 assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
1055 VK_IMAGE_ASPECT_STENCIL_BIT));
1056
1057 return iview;
1058 }
1059
1060 static struct anv_descriptor_set *
1061 anv_cmd_buffer_push_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
1062 VkPipelineBindPoint bind_point,
1063 struct anv_descriptor_set_layout *layout,
1064 uint32_t _set)
1065 {
1066 struct anv_cmd_pipeline_state *pipe_state;
1067 if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
1068 pipe_state = &cmd_buffer->state.compute.base;
1069 } else {
1070 assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
1071 pipe_state = &cmd_buffer->state.gfx.base;
1072 }
1073
1074 struct anv_push_descriptor_set **push_set =
1075 &pipe_state->push_descriptors[_set];
1076
1077 if (*push_set == NULL) {
1078 *push_set = vk_zalloc(&cmd_buffer->pool->alloc,
1079 sizeof(struct anv_push_descriptor_set), 8,
1080 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1081 if (*push_set == NULL) {
1082 anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
1083 return NULL;
1084 }
1085 }
1086
1087 struct anv_descriptor_set *set = &(*push_set)->set;
1088
1089 if (set->layout != layout) {
1090 if (set->layout)
1091 anv_descriptor_set_layout_unref(cmd_buffer->device, set->layout);
1092 anv_descriptor_set_layout_ref(layout);
1093 set->layout = layout;
1094 }
1095 set->size = anv_descriptor_set_layout_size(layout);
1096 set->buffer_view_count = layout->buffer_view_count;
1097 set->buffer_views = (*push_set)->buffer_views;
1098
1099 if (layout->descriptor_buffer_size &&
1100 ((*push_set)->set_used_on_gpu ||
1101 set->desc_mem.alloc_size < layout->descriptor_buffer_size)) {
1102 /* The previous buffer is either actively used by some GPU command (so
1103 * we can't modify it) or is too small. Allocate a new one.
1104 */
1105 struct anv_state desc_mem =
1106 anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
1107 layout->descriptor_buffer_size, 32);
1108 if (set->desc_mem.alloc_size) {
1109 /* TODO: Do we really need to copy all the time? */
1110 memcpy(desc_mem.map, set->desc_mem.map,
1111 MIN2(desc_mem.alloc_size, set->desc_mem.alloc_size));
1112 }
1113 set->desc_mem = desc_mem;
1114
1115 struct anv_address addr = {
1116 .bo = cmd_buffer->dynamic_state_stream.state_pool->block_pool.bo,
1117 .offset = set->desc_mem.offset,
1118 };
1119
1120 const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
1121 set->desc_surface_state =
1122 anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
1123 isl_dev->ss.size, isl_dev->ss.align);
1124 anv_fill_buffer_surface_state(cmd_buffer->device,
1125 set->desc_surface_state,
1126 ISL_FORMAT_R32G32B32A32_FLOAT,
1127 addr, layout->descriptor_buffer_size, 1);
1128 }
1129
1130 return set;
1131 }
1132
1133 void anv_CmdPushDescriptorSetKHR(
1134 VkCommandBuffer commandBuffer,
1135 VkPipelineBindPoint pipelineBindPoint,
1136 VkPipelineLayout _layout,
1137 uint32_t _set,
1138 uint32_t descriptorWriteCount,
1139 const VkWriteDescriptorSet* pDescriptorWrites)
1140 {
1141 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1142 ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
1143
1144 assert(_set < MAX_SETS);
1145
1146 struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;
1147
1148 struct anv_descriptor_set *set =
1149 anv_cmd_buffer_push_descriptor_set(cmd_buffer, pipelineBindPoint,
1150 set_layout, _set);
1151 if (!set)
1152 return;
1153
1154 /* Go through the user supplied descriptors. */
1155 for (uint32_t i = 0; i < descriptorWriteCount; i++) {
1156 const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
1157
1158 switch (write->descriptorType) {
1159 case VK_DESCRIPTOR_TYPE_SAMPLER:
1160 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1161 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1162 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1163 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1164 for (uint32_t j = 0; j < write->descriptorCount; j++) {
1165 anv_descriptor_set_write_image_view(cmd_buffer->device, set,
1166 write->pImageInfo + j,
1167 write->descriptorType,
1168 write->dstBinding,
1169 write->dstArrayElement + j);
1170 }
1171 break;
1172
1173 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1174 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1175 for (uint32_t j = 0; j < write->descriptorCount; j++) {
1176 ANV_FROM_HANDLE(anv_buffer_view, bview,
1177 write->pTexelBufferView[j]);
1178
1179 anv_descriptor_set_write_buffer_view(cmd_buffer->device, set,
1180 write->descriptorType,
1181 bview,
1182 write->dstBinding,
1183 write->dstArrayElement + j);
1184 }
1185 break;
1186
1187 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1188 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1189 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1190 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1191 for (uint32_t j = 0; j < write->descriptorCount; j++) {
1192 ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);
1193
1194 anv_descriptor_set_write_buffer(cmd_buffer->device, set,
1195 &cmd_buffer->surface_state_stream,
1196 write->descriptorType,
1197 buffer,
1198 write->dstBinding,
1199 write->dstArrayElement + j,
1200 write->pBufferInfo[j].offset,
1201 write->pBufferInfo[j].range);
1202 }
1203 break;
1204
1205 default:
1206 break;
1207 }
1208 }
1209
1210 anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
1211 layout, _set, set, NULL, NULL);
1212 }
1213
1214 void anv_CmdPushDescriptorSetWithTemplateKHR(
1215 VkCommandBuffer commandBuffer,
1216 VkDescriptorUpdateTemplate descriptorUpdateTemplate,
1217 VkPipelineLayout _layout,
1218 uint32_t _set,
1219 const void* pData)
1220 {
1221 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1222 ANV_FROM_HANDLE(anv_descriptor_update_template, template,
1223 descriptorUpdateTemplate);
1224 ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
1225
1226 assert(_set < MAX_PUSH_DESCRIPTORS);
1227
1228 struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;
1229
1230 struct anv_descriptor_set *set =
1231 anv_cmd_buffer_push_descriptor_set(cmd_buffer, template->bind_point,
1232 set_layout, _set);
1233 if (!set)
1234 return;
1235
1236 anv_descriptor_set_write_template(cmd_buffer->device, set,
1237 &cmd_buffer->surface_state_stream,
1238 template,
1239 pData);
1240
1241 anv_cmd_buffer_bind_descriptor_set(cmd_buffer, template->bind_point,
1242 layout, _set, set, NULL, NULL);
1243 }
1244
1245 void anv_CmdSetDeviceMask(
1246 VkCommandBuffer commandBuffer,
1247 uint32_t deviceMask)
1248 {
1249 /* No-op */
1250 }