anv: Rework arguments to anv_descriptor_set_write_*
[mesa.git] / src / intel / vulkan / anv_cmd_buffer.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "vk_format_info.h"

/** \file anv_cmd_buffer.c
 *
 * This file contains all of the stuff for emitting commands into a command
 * buffer. This includes implementations of most of the vkCmd*
 * entrypoints. This file is concerned entirely with state emission and
 * not with the command buffer data structure itself. As far as this file
 * is concerned, most of anv_cmd_buffer is magic.
 */

/* TODO: These are taken from GLES. We should check the Vulkan spec */
const struct anv_dynamic_state default_dynamic_state = {
   .viewport = {
      .count = 0,
   },
   .scissor = {
      .count = 0,
   },
   .line_width = 1.0f,
   .depth_bias = {
      .bias = 0.0f,
      .clamp = 0.0f,
      .slope = 0.0f,
   },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds = {
      .min = 0.0f,
      .max = 1.0f,
   },
   .stencil_compare_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_write_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_reference = {
      .front = 0u,
      .back = 0u,
   },
};

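/* Copy the fields of src selected by copy_mask into dest. Each bit in
 * copy_mask corresponds to a VK_DYNAMIC_STATE_* enum value, so only the
 * pieces of state a pipeline actually marks as dynamic get copied.
 */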
void
anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                       const struct anv_dynamic_state *src,
                       uint32_t copy_mask)
{
   if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
      dest->viewport.count = src->viewport.count;
      typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                   src->viewport.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
      dest->scissor.count = src->scissor.count;
      typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                   src->scissor.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
      dest->line_width = src->line_width;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
      dest->depth_bias = src->depth_bias;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
      typed_memcpy(dest->blend_constants, src->blend_constants, 4);

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
      dest->depth_bounds = src->depth_bounds;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
      dest->stencil_compare_mask = src->stencil_compare_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
      dest->stencil_write_mask = src->stencil_write_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
      dest->stencil_reference = src->stencil_reference;
}

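/* Reset all command buffer state to freshly-allocated defaults. The
 * UINT32_MAX values act as "unset" sentinels so the first pipeline bind
 * and draw re-emit the corresponding state.
 */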
static void
anv_cmd_state_init(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   memset(state, 0, sizeof(*state));

   state->current_pipeline = UINT32_MAX;
   state->restart_index = UINT32_MAX;
   state->gfx.dynamic = default_dynamic_state;
}

static void
anv_cmd_pipeline_state_finish(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_cmd_pipeline_state *pipe_state)
{
   for (uint32_t i = 0; i < ARRAY_SIZE(pipe_state->push_descriptors); i++)
      vk_free(&cmd_buffer->pool->alloc, pipe_state->push_descriptors[i]);
}

static void
anv_cmd_state_finish(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   anv_cmd_pipeline_state_finish(cmd_buffer, &state->gfx.base);
   anv_cmd_pipeline_state_finish(cmd_buffer, &state->compute.base);

   for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++)
      vk_free(&cmd_buffer->pool->alloc, state->push_constants[i]);

   vk_free(&cmd_buffer->pool->alloc, state->attachments);
}

static void
anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
{
   anv_cmd_state_finish(cmd_buffer);
   anv_cmd_state_init(cmd_buffer);
}

/**
 * This function updates the size of the push constant buffer we need to emit.
 * This is called in various parts of the driver to ensure that different
 * pieces of push constant data get emitted as needed. However, it is important
 * that we never shrink the size of the buffer. For example, a compute shader
 * dispatch will always call this for the base group id, which has an
 * offset in the push constant buffer that is smaller than the offset for
 * storage image data. If the compute shader has storage images, we will call
 * this again with a larger size during binding table emission. However,
 * if we dispatch the compute shader again without dirtying our descriptors,
 * we would still call this function with a smaller size for the base group
 * id, and not for the images, which would incorrectly shrink the size of the
 * push constant data we emit with that dispatch, making us drop the image data.
 */
VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          gl_shader_stage stage, uint32_t size)
{
   struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];

   if (*ptr == NULL) {
      *ptr = vk_alloc(&cmd_buffer->pool->alloc, size, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL) {
         anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }
      (*ptr)->size = size;
   } else if ((*ptr)->size < size) {
      /* Use a temporary so we don't leak the old allocation if the
       * realloc fails.
       */
      struct anv_push_constants *new_data =
         vk_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (new_data == NULL) {
         anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }
      *ptr = new_data;
      (*ptr)->size = size;
   }

   return VK_SUCCESS;
}

static VkResult anv_create_cmd_buffer(
    struct anv_device *                         device,
    struct anv_cmd_pool *                       pool,
    VkCommandBufferLevel                        level,
    VkCommandBuffer*                            pCommandBuffer)
{
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->batch.status = VK_SUCCESS;

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_pool, 4096);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_pool, 16384);

   anv_cmd_state_init(cmd_buffer);

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
   }

   *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;

 fail:
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);

   return result;
}

VkResult anv_AllocateCommandBuffers(
    VkDevice                                    _device,
    const VkCommandBufferAllocateInfo*          pAllocateInfo,
    VkCommandBuffer*                            pCommandBuffers)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
      result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                     &pCommandBuffers[i]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                             i, pCommandBuffers);
      for (i = 0; i < pAllocateInfo->commandBufferCount; i++)
         pCommandBuffers[i] = VK_NULL_HANDLE;
   }

   return result;
}

static void
anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);

   anv_cmd_state_finish(cmd_buffer);

   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

void anv_FreeCommandBuffers(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (!cmd_buffer)
         continue;

      anv_cmd_buffer_destroy(cmd_buffer);
   }
}

VkResult
anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer)
{
   cmd_buffer->usage_flags = 0;
   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
   anv_cmd_state_reset(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &cmd_buffer->device->surface_state_pool, 4096);

   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &cmd_buffer->device->dynamic_state_pool, 16384);
   return VK_SUCCESS;
}

VkResult anv_ResetCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    VkCommandBufferResetFlags                   flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   return anv_cmd_buffer_reset(cmd_buffer);
}

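/* Dispatch to the hardware-generation-specific implementation of `func`.
 * Each supported gen is compiled with its own copy of the genX functions
 * (gen7_*, gen75_*, gen8_*, ...), and this switch picks the right one at
 * run time from the device info.
 */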
#define anv_genX_call(devinfo, func, ...)       \
   switch ((devinfo)->gen) {                    \
   case 7:                                      \
      if ((devinfo)->is_haswell) {              \
         gen75_##func(__VA_ARGS__);             \
      } else {                                  \
         gen7_##func(__VA_ARGS__);              \
      }                                         \
      break;                                    \
   case 8:                                      \
      gen8_##func(__VA_ARGS__);                 \
      break;                                    \
   case 9:                                      \
      gen9_##func(__VA_ARGS__);                 \
      break;                                    \
   case 10:                                     \
      gen10_##func(__VA_ARGS__);                \
      break;                                    \
   case 11:                                     \
      gen11_##func(__VA_ARGS__);                \
      break;                                    \
   default:                                     \
      assert(!"Unknown hardware generation");   \
   }

void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_buffer_emit_state_base_address,
                 cmd_buffer);
}

void
anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
                                  const struct anv_image *image,
                                  VkImageAspectFlagBits aspect,
                                  enum isl_aux_usage aux_usage,
                                  uint32_t level,
                                  uint32_t base_layer,
                                  uint32_t layer_count)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_buffer_mark_image_written,
                 cmd_buffer, image, aspect, aux_usage,
                 level, base_layer, layer_count);
}

void
anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer *cmd_buffer)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_emit_conditional_render_predicate,
                 cmd_buffer);
}

void anv_CmdBindPipeline(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd_buffer->state.compute.base.pipeline = pipeline;
      cmd_buffer->state.compute.pipeline_dirty = true;
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      break;

   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd_buffer->state.gfx.base.pipeline = pipeline;
      cmd_buffer->state.gfx.vb_dirty |= pipeline->vb_used;
      cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
      cmd_buffer->state.descriptors_dirty |= pipeline->active_stages;

      /* Apply the dynamic state from the pipeline */
      cmd_buffer->state.gfx.dirty |= pipeline->dynamic_state_mask;
      anv_dynamic_state_copy(&cmd_buffer->state.gfx.dynamic,
                             &pipeline->dynamic_state,
                             pipeline->dynamic_state_mask);
      break;

   default:
      assert(!"invalid bind point");
      break;
   }
}

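/* The vkCmdSet* entrypoints below only stash the new values in
 * cmd_buffer->state.gfx.dynamic and flag the corresponding
 * ANV_CMD_DIRTY_DYNAMIC_* bit; the actual hardware state is emitted
 * later, when the next draw flushes state.
 */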
void anv_CmdSetViewport(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstViewport,
    uint32_t                                    viewportCount,
    const VkViewport*                           pViewports)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstViewport + viewportCount;
   if (cmd_buffer->state.gfx.dynamic.viewport.count < total_count)
      cmd_buffer->state.gfx.dynamic.viewport.count = total_count;

   memcpy(cmd_buffer->state.gfx.dynamic.viewport.viewports + firstViewport,
          pViewports, viewportCount * sizeof(*pViewports));

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}

void anv_CmdSetScissor(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstScissor,
    uint32_t                                    scissorCount,
    const VkRect2D*                             pScissors)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstScissor + scissorCount;
   if (cmd_buffer->state.gfx.dynamic.scissor.count < total_count)
      cmd_buffer->state.gfx.dynamic.scissor.count = total_count;

   memcpy(cmd_buffer->state.gfx.dynamic.scissor.scissors + firstScissor,
          pScissors, scissorCount * sizeof(*pScissors));

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}

void anv_CmdSetLineWidth(
    VkCommandBuffer                             commandBuffer,
    float                                       lineWidth)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.line_width = lineWidth;
   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}

void anv_CmdSetDepthBias(
    VkCommandBuffer                             commandBuffer,
    float                                       depthBiasConstantFactor,
    float                                       depthBiasClamp,
    float                                       depthBiasSlopeFactor)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.depth_bias.bias = depthBiasConstantFactor;
   cmd_buffer->state.gfx.dynamic.depth_bias.clamp = depthBiasClamp;
   cmd_buffer->state.gfx.dynamic.depth_bias.slope = depthBiasSlopeFactor;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}

void anv_CmdSetBlendConstants(
    VkCommandBuffer                             commandBuffer,
    const float                                 blendConstants[4])
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   memcpy(cmd_buffer->state.gfx.dynamic.blend_constants,
          blendConstants, sizeof(float) * 4);

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}

void anv_CmdSetDepthBounds(
    VkCommandBuffer                             commandBuffer,
    float                                       minDepthBounds,
    float                                       maxDepthBounds)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.depth_bounds.min = minDepthBounds;
   cmd_buffer->state.gfx.dynamic.depth_bounds.max = maxDepthBounds;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}

void anv_CmdSetStencilCompareMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    compareMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_compare_mask.back = compareMask;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}

void anv_CmdSetStencilWriteMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    writeMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_write_mask.back = writeMask;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void anv_CmdSetStencilReference(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    reference)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_reference.back = reference;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}

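/* Bind one descriptor set at set_index for the given bind point. If the
 * set's layout uses dynamic buffer descriptors, the offsets it consumes
 * are copied out of the caller's array and the array pointer/count are
 * advanced, so that consecutive sets consume consecutive dynamic offsets.
 */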
static void
anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
                                   VkPipelineBindPoint bind_point,
                                   struct anv_pipeline_layout *layout,
                                   uint32_t set_index,
                                   struct anv_descriptor_set *set,
                                   uint32_t *dynamic_offset_count,
                                   const uint32_t **dynamic_offsets)
{
   struct anv_descriptor_set_layout *set_layout =
      layout->set[set_index].layout;

   struct anv_cmd_pipeline_state *pipe_state;
   if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
      pipe_state = &cmd_buffer->state.compute.base;
   } else {
      assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
      pipe_state = &cmd_buffer->state.gfx.base;
   }
   pipe_state->descriptors[set_index] = set;

   if (dynamic_offsets) {
      if (set_layout->dynamic_offset_count > 0) {
         uint32_t dynamic_offset_start =
            layout->set[set_index].dynamic_offset_start;

         /* Assert that everything is in range */
         assert(set_layout->dynamic_offset_count <= *dynamic_offset_count);
         assert(dynamic_offset_start + set_layout->dynamic_offset_count <=
                ARRAY_SIZE(pipe_state->dynamic_offsets));

         typed_memcpy(&pipe_state->dynamic_offsets[dynamic_offset_start],
                      *dynamic_offsets, set_layout->dynamic_offset_count);

         *dynamic_offsets += set_layout->dynamic_offset_count;
         *dynamic_offset_count -= set_layout->dynamic_offset_count;
      }
   }

   if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
   } else {
      assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
      cmd_buffer->state.descriptors_dirty |=
         set_layout->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
   }

   /* Pipeline layout objects are required to live at least while any command
    * buffers that use them are in recording state. We need to grab a reference
    * to the pipeline layout being bound here so we can compute correct dynamic
    * offsets for VK_DESCRIPTOR_TYPE_*_DYNAMIC in dynamic_offset_for_binding()
    * when we record draw commands that come after this.
    */
   pipe_state->layout = layout;
}

void anv_CmdBindDescriptorSets(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    firstSet,
    uint32_t                                    descriptorSetCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(firstSet + descriptorSetCount <= MAX_SETS);

   for (uint32_t i = 0; i < descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
      anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
                                         layout, firstSet + i, set,
                                         &dynamicOffsetCount,
                                         &pDynamicOffsets);
   }
}

void anv_CmdBindVertexBuffers(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

   /* We have to defer setting up the vertex buffers since we need the
    * buffer stride from the pipeline. */

   assert(firstBinding + bindingCount <= MAX_VBS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
      vb[firstBinding + i].offset = pOffsets[i];
      cmd_buffer->state.gfx.vb_dirty |= 1 << (firstBinding + i);
   }
}

void anv_CmdBindTransformFeedbackBuffersEXT(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets,
    const VkDeviceSize*                         pSizes)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_xfb_binding *xfb = cmd_buffer->state.xfb_bindings;

   /* As with vertex buffers, we have to defer the actual transform feedback
    * buffer setup until state emission, when the pipeline is known. */

   assert(firstBinding + bindingCount <= MAX_XFB_BUFFERS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      if (pBuffers[i] == VK_NULL_HANDLE) {
         xfb[firstBinding + i].buffer = NULL;
      } else {
         ANV_FROM_HANDLE(anv_buffer, buffer, pBuffers[i]);
         xfb[firstBinding + i].buffer = buffer;
         xfb[firstBinding + i].offset = pOffsets[i];
         xfb[firstBinding + i].size =
            anv_buffer_get_range(buffer, pOffsets[i],
                                 pSizes ? pSizes[i] : VK_WHOLE_SIZE);
      }
   }
}

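/* Return the ISL format used when building a surface state for a buffer
 * descriptor of the given type: a typed RGBA32_FLOAT surface for uniform
 * buffers and an untyped (RAW) surface for storage buffers.
 */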
enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type)
{
   switch (type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      return ISL_FORMAT_R32G32B32A32_FLOAT;

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return ISL_FORMAT_RAW;

   default:
      unreachable("Invalid descriptor type");
   }
}

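/* Copy `size` bytes of `data` into freshly allocated dynamic state and
 * return that state, ready to be referenced from the batch.
 */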
struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                            const void *data, uint32_t size, uint32_t alignment)
{
   struct anv_state state;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
   memcpy(state.map, data, size);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));

   return state;
}

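/* Allocate dynamic state holding the dword-wise OR of `a` and `b`. This
 * is how state packed ahead of time in the pipeline gets merged with
 * dynamically-set fields before being emitted.
 */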
struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t *a, uint32_t *b,
                             uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;
   uint32_t *p;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   p = state.map;
   for (uint32_t i = 0; i < dwords; i++)
      p[i] = a[i] | b[i];

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));

   return state;
}

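/* Resolve one push constant parameter to its 32-bit value. Built-in
 * params come from driver-side state (e.g. the base workgroup id);
 * everything else is read from the client push constant data at the
 * offset encoded in the param, or 0 if that offset is out of range.
 */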
static uint32_t
anv_push_constant_value(struct anv_push_constants *data, uint32_t param)
{
   if (BRW_PARAM_IS_BUILTIN(param)) {
      switch (param) {
      case BRW_PARAM_BUILTIN_ZERO:
         return 0;
      case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_X:
         return data->base_work_group_id[0];
      case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Y:
         return data->base_work_group_id[1];
      case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Z:
         return data->base_work_group_id[2];
      default:
         unreachable("Invalid param builtin");
      }
   } else {
      uint32_t offset = ANV_PARAM_PUSH_OFFSET(param);
      assert(offset % sizeof(uint32_t) == 0);
      if (offset < data->size)
         return *(uint32_t *)((uint8_t *)data + offset);
      else
         return 0;
   }
}

struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;

   /* If we don't have this stage, bail. */
   if (!anv_pipeline_has_stage(pipeline, stage))
      return (struct anv_state) { .offset = 0 };

   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[stage];
   const struct brw_stage_prog_data *prog_data =
      pipeline->shaders[stage]->prog_data;

   /* If we don't actually have any push constants, bail. */
   if (data == NULL || prog_data == NULL || prog_data->nr_params == 0)
      return (struct anv_state) { .offset = 0 };

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         prog_data->nr_params * sizeof(float),
                                         32 /* bottom 5 bits MBZ */);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;
   for (unsigned i = 0; i < prog_data->nr_params; i++)
      u32_map[i] = anv_push_constant_value(data, prog_data->param[i]);

   return state;
}

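/* Build the compute push constant buffer. The layout is a cross-thread
 * block, shared by every thread, followed by a per-thread block that is
 * replicated once per thread; BRW_PARAM_BUILTIN_SUBGROUP_ID is
 * special-cased so that each thread reads its own index.
 */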
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
   struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   /* If we don't actually have any push constants, bail. */
   if (cs_prog_data->push.total.size == 0)
      return (struct anv_state) { .offset = 0 };

   const unsigned push_constant_alignment =
      cmd_buffer->device->info.gen < 8 ? 32 : 64;
   const unsigned aligned_total_push_constants_size =
      ALIGN(cs_prog_data->push.total.size, push_constant_alignment);
   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         aligned_total_push_constants_size,
                                         push_constant_alignment);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;

   if (cs_prog_data->push.cross_thread.size > 0) {
      for (unsigned i = 0;
           i < cs_prog_data->push.cross_thread.dwords;
           i++) {
         assert(prog_data->param[i] != BRW_PARAM_BUILTIN_SUBGROUP_ID);
         u32_map[i] = anv_push_constant_value(data, prog_data->param[i]);
      }
   }

   if (cs_prog_data->push.per_thread.size > 0) {
      for (unsigned t = 0; t < cs_prog_data->threads; t++) {
         unsigned dst =
            8 * (cs_prog_data->push.per_thread.regs * t +
                 cs_prog_data->push.cross_thread.regs);
         unsigned src = cs_prog_data->push.cross_thread.dwords;
         for ( ; src < prog_data->nr_params; src++, dst++) {
            if (prog_data->param[src] == BRW_PARAM_BUILTIN_SUBGROUP_ID) {
               u32_map[dst] = t;
            } else {
               u32_map[dst] =
                  anv_push_constant_value(data, prog_data->param[src]);
            }
         }
      }
   }

   return state;
}

void anv_CmdPushConstants(
    VkCommandBuffer                             commandBuffer,
    VkPipelineLayout                            layout,
    VkShaderStageFlags                          stageFlags,
    uint32_t                                    offset,
    uint32_t                                    size,
    const void*                                 pValues)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_foreach_stage(stage, stageFlags) {
      VkResult result =
         anv_cmd_buffer_ensure_push_constant_field(cmd_buffer,
                                                   stage, client_data);
      if (result != VK_SUCCESS)
         return;

      memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset,
             pValues, size);
   }

   cmd_buffer->state.push_constants_dirty |= stageFlags;
}

VkResult anv_CreateCommandPool(
    VkDevice                                    _device,
    const VkCommandPoolCreateInfo*              pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkCommandPool*                              pCmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_cmd_pool *pool;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);

   *pCmdPool = anv_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void anv_DestroyCommandPool(
    VkDevice                                    _device,
    VkCommandPool                               commandPool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}

VkResult anv_ResetCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolResetFlags                     flags)
{
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   list_for_each_entry(struct anv_cmd_buffer, cmd_buffer,
                       &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_reset(cmd_buffer);
   }

   return VK_SUCCESS;
}

void anv_TrimCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolTrimFlags                      flags)
{
   /* Nothing for us to do here. Our pools stay pretty tidy. */
}

/**
 * Return NULL if the current subpass has no depthstencil attachment.
 */
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;

   if (subpass->depth_stencil_attachment == NULL)
      return NULL;

   const struct anv_image_view *iview =
      fb->attachments[subpass->depth_stencil_attachment->attachment];

   assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
                                VK_IMAGE_ASPECT_STENCIL_BIT));

   return iview;
}

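/* Return the push descriptor set for the given bind point and set index,
 * allocating it on first use. On allocation failure the batch is marked
 * with VK_ERROR_OUT_OF_HOST_MEMORY and NULL is returned.
 */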
static struct anv_push_descriptor_set *
anv_cmd_buffer_get_push_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
                                       VkPipelineBindPoint bind_point,
                                       uint32_t set)
{
   struct anv_cmd_pipeline_state *pipe_state;
   if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
      pipe_state = &cmd_buffer->state.compute.base;
   } else {
      assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
      pipe_state = &cmd_buffer->state.gfx.base;
   }

   struct anv_push_descriptor_set **push_set =
      &pipe_state->push_descriptors[set];

   if (*push_set == NULL) {
      *push_set = vk_alloc(&cmd_buffer->pool->alloc,
                           sizeof(struct anv_push_descriptor_set), 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*push_set == NULL) {
         anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
         return NULL;
      }
   }

   return *push_set;
}

void anv_CmdPushDescriptorSetKHR(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    _set,
    uint32_t                                    descriptorWriteCount,
    const VkWriteDescriptorSet*                 pDescriptorWrites)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(_set < MAX_SETS);

   struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;

   struct anv_push_descriptor_set *push_set =
      anv_cmd_buffer_get_push_descriptor_set(cmd_buffer,
                                             pipelineBindPoint, _set);
   if (!push_set)
      return;

   struct anv_descriptor_set *set = &push_set->set;

   set->layout = set_layout;
   set->size = anv_descriptor_set_layout_size(set_layout);
   set->buffer_count = set_layout->buffer_count;
   set->buffer_views = push_set->buffer_views;

   /* Go through the user supplied descriptors. */
   for (uint32_t i = 0; i < descriptorWriteCount; i++) {
      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            anv_descriptor_set_write_image_view(cmd_buffer->device, set,
                                                write->pImageInfo + j,
                                                write->descriptorType,
                                                write->dstBinding,
                                                write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_buffer_view, bview,
                            write->pTexelBufferView[j]);

            anv_descriptor_set_write_buffer_view(cmd_buffer->device, set,
                                                 write->descriptorType,
                                                 bview,
                                                 write->dstBinding,
                                                 write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            assert(write->pBufferInfo[j].buffer);
            ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);
            assert(buffer);

            anv_descriptor_set_write_buffer(cmd_buffer->device, set,
                                            &cmd_buffer->surface_state_stream,
                                            write->descriptorType,
                                            buffer,
                                            write->dstBinding,
                                            write->dstArrayElement + j,
                                            write->pBufferInfo[j].offset,
                                            write->pBufferInfo[j].range);
         }
         break;

      default:
         break;
      }
   }

   anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
                                      layout, _set, set, NULL, NULL);
}

void anv_CmdPushDescriptorSetWithTemplateKHR(
    VkCommandBuffer                             commandBuffer,
    VkDescriptorUpdateTemplate                  descriptorUpdateTemplate,
    VkPipelineLayout                            _layout,
    uint32_t                                    _set,
    const void*                                 pData)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_descriptor_update_template, template,
                   descriptorUpdateTemplate);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(_set < MAX_PUSH_DESCRIPTORS);

   struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;

   struct anv_push_descriptor_set *push_set =
      anv_cmd_buffer_get_push_descriptor_set(cmd_buffer,
                                             template->bind_point, _set);
   if (!push_set)
      return;

   struct anv_descriptor_set *set = &push_set->set;

   set->layout = set_layout;
   set->size = anv_descriptor_set_layout_size(set_layout);
   set->buffer_count = set_layout->buffer_count;
   set->buffer_views = push_set->buffer_views;

   anv_descriptor_set_write_template(cmd_buffer->device, set,
                                     &cmd_buffer->surface_state_stream,
                                     template,
                                     pData);

   anv_cmd_buffer_bind_descriptor_set(cmd_buffer, template->bind_point,
                                      layout, _set, set, NULL, NULL);
}

void anv_CmdSetDeviceMask(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    deviceMask)
{
   /* No-op */
}