anv: Move push constant allocation to the command buffer
src/intel/vulkan/anv_cmd_buffer.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "vk_format_info.h"
33
34 /** \file anv_cmd_buffer.c
35 *
36 * This file contains all of the stuff for emitting commands into a command
37 * buffer. This includes implementations of most of the vkCmd*
38 * entrypoints. This file is concerned entirely with state emission and
39 * not with the command buffer data structure itself. As far as this file
40 * is concerned, most of anv_cmd_buffer is magic.
41 */
42
43 /* TODO: These are taken from GLES. We should check the Vulkan spec */
44 const struct anv_dynamic_state default_dynamic_state = {
45 .viewport = {
46 .count = 0,
47 },
48 .scissor = {
49 .count = 0,
50 },
51 .line_width = 1.0f,
52 .depth_bias = {
53 .bias = 0.0f,
54 .clamp = 0.0f,
55 .slope = 0.0f,
56 },
57 .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
58 .depth_bounds = {
59 .min = 0.0f,
60 .max = 1.0f,
61 },
62 .stencil_compare_mask = {
63 .front = ~0u,
64 .back = ~0u,
65 },
66 .stencil_write_mask = {
67 .front = ~0u,
68 .back = ~0u,
69 },
70 .stencil_reference = {
71 .front = 0u,
72 .back = 0u,
73 },
74 };
75
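/* Copy the dynamic state fields selected by copy_mask (a bitmask of
 * VK_DYNAMIC_STATE_* bits) from src into dest, leaving the other fields of
 * dest untouched.
 */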
76 void
77 anv_dynamic_state_copy(struct anv_dynamic_state *dest,
78 const struct anv_dynamic_state *src,
79 uint32_t copy_mask)
80 {
81 if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
82 dest->viewport.count = src->viewport.count;
83 typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
84 src->viewport.count);
85 }
86
87 if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
88 dest->scissor.count = src->scissor.count;
89 typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
90 src->scissor.count);
91 }
92
93 if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
94 dest->line_width = src->line_width;
95
96 if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
97 dest->depth_bias = src->depth_bias;
98
99 if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
100 typed_memcpy(dest->blend_constants, src->blend_constants, 4);
101
102 if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
103 dest->depth_bounds = src->depth_bounds;
104
105 if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
106 dest->stencil_compare_mask = src->stencil_compare_mask;
107
108 if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
109 dest->stencil_write_mask = src->stencil_write_mask;
110
111 if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
112 dest->stencil_reference = src->stencil_reference;
113 }
114
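/* Return the command buffer's recording state to the defaults it has right
 * after creation: no bound pipeline or descriptors, no push constants, all
 * dirty bits cleared, and default dynamic state.
 */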
115 static void
116 anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
117 {
118 struct anv_cmd_state *state = &cmd_buffer->state;
119
120 memset(&state->descriptors, 0, sizeof(state->descriptors));
121 memset(&state->push_constants, 0, sizeof(state->push_constants));
122 memset(state->binding_tables, 0, sizeof(state->binding_tables));
123 memset(state->samplers, 0, sizeof(state->samplers));
124
125 /* 0 isn't a valid config. This ensures that we always configure L3$. */
126 cmd_buffer->state.current_l3_config = 0;
127
128 state->dirty = 0;
129 state->vb_dirty = 0;
130 state->descriptors_dirty = 0;
131 state->push_constants_dirty = 0;
132 state->pipeline = NULL;
133 state->push_constant_stages = 0;
134 state->restart_index = UINT32_MAX;
135 state->dynamic = default_dynamic_state;
136 state->need_query_wa = true;
137
138 if (state->attachments != NULL) {
139 anv_free(&cmd_buffer->pool->alloc, state->attachments);
140 state->attachments = NULL;
141 }
142
143 state->gen7.index_buffer = NULL;
144 }
145
146 /**
147 * Setup anv_cmd_state::attachments for vkCmdBeginRenderPass.
148 */
149 void
150 anv_cmd_state_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
151 const VkRenderPassBeginInfo *info)
152 {
153 struct anv_cmd_state *state = &cmd_buffer->state;
154 ANV_FROM_HANDLE(anv_render_pass, pass, info->renderPass);
155
156 anv_free(&cmd_buffer->pool->alloc, state->attachments);
157
158 if (pass->attachment_count == 0) {
159 state->attachments = NULL;
160 return;
161 }
162
163 state->attachments = anv_alloc(&cmd_buffer->pool->alloc,
164 pass->attachment_count *
165 sizeof(state->attachments[0]),
166 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
167 if (state->attachments == NULL) {
168 /* FIXME: Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
169 abort();
170 }
171
172 for (uint32_t i = 0; i < pass->attachment_count; ++i) {
173 struct anv_render_pass_attachment *att = &pass->attachments[i];
174 VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
175 VkImageAspectFlags clear_aspects = 0;
176
177 if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
178 /* color attachment */
179 if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
180 clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
181 }
182 } else {
183 /* depthstencil attachment */
184 if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
185 att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
186 clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
187 }
188 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
189 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
190 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
191 }
192 }
193
194 state->attachments[i].pending_clear_aspects = clear_aspects;
195 if (clear_aspects) {
196 assert(info->clearValueCount > i);
197 state->attachments[i].clear_value = info->pClearValues[i];
198 }
199 }
200 }
201
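/* Ensure that the push constant block this command buffer owns for the given
 * stage is at least `size` bytes, (re)allocating it from the command pool's
 * allocator as needed and updating its recorded size.
 */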
202 static VkResult
203 anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
204 gl_shader_stage stage, uint32_t size)
205 {
206 struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];
207
208 if (*ptr == NULL) {
209 *ptr = anv_alloc(&cmd_buffer->pool->alloc, size, 8,
210 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
211 if (*ptr == NULL)
212 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
213 } else if ((*ptr)->size < size) {
214 *ptr = anv_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
215 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
216 if (*ptr == NULL)
217 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
218 }
219 (*ptr)->size = size;
220
221 return VK_SUCCESS;
222 }
223
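/* Convenience wrapper: grow the stage's push constant block just enough to
 * cover everything up to and including `field`.
 */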
224 #define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
225 anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
226 (offsetof(struct anv_push_constants, field) + \
227 sizeof(cmd_buffer->state.push_constants[0]->field)))
228
229 static VkResult anv_create_cmd_buffer(
230 struct anv_device * device,
231 struct anv_cmd_pool * pool,
232 VkCommandBufferLevel level,
233 VkCommandBuffer* pCommandBuffer)
234 {
235 struct anv_cmd_buffer *cmd_buffer;
236 VkResult result;
237
238 cmd_buffer = anv_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
239 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
240 if (cmd_buffer == NULL)
241 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
242
243 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
244 cmd_buffer->device = device;
245 cmd_buffer->pool = pool;
246 cmd_buffer->level = level;
247 cmd_buffer->state.attachments = NULL;
248
249 result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
250 if (result != VK_SUCCESS)
251 goto fail;
252
253 anv_state_stream_init(&cmd_buffer->surface_state_stream,
254 &device->surface_state_block_pool);
255 anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
256 &device->dynamic_state_block_pool);
257
258 if (pool) {
259 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
260 } else {
261       /* Init the pool_link so we can safely call list_del when we destroy
262 * the command buffer
263 */
264 list_inithead(&cmd_buffer->pool_link);
265 }
266
267 *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);
268
269 return VK_SUCCESS;
270
271 fail:
272 anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
273
274 return result;
275 }
276
277 VkResult anv_AllocateCommandBuffers(
278 VkDevice _device,
279 const VkCommandBufferAllocateInfo* pAllocateInfo,
280 VkCommandBuffer* pCommandBuffers)
281 {
282 ANV_FROM_HANDLE(anv_device, device, _device);
283 ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);
284
285 VkResult result = VK_SUCCESS;
286 uint32_t i;
287
288 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
289 result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
290 &pCommandBuffers[i]);
291 if (result != VK_SUCCESS)
292 break;
293 }
294
295 if (result != VK_SUCCESS)
296 anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
297 i, pCommandBuffers);
298
299 return result;
300 }
301
302 static void
303 anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
304 {
305 list_del(&cmd_buffer->pool_link);
306
307 anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);
308
309 anv_state_stream_finish(&cmd_buffer->surface_state_stream);
310 anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
311
312 anv_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
313 anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
314 }
315
316 void anv_FreeCommandBuffers(
317 VkDevice device,
318 VkCommandPool commandPool,
319 uint32_t commandBufferCount,
320 const VkCommandBuffer* pCommandBuffers)
321 {
322 for (uint32_t i = 0; i < commandBufferCount; i++) {
323 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
324
325 anv_cmd_buffer_destroy(cmd_buffer);
326 }
327 }
328
329 VkResult anv_ResetCommandBuffer(
330 VkCommandBuffer commandBuffer,
331 VkCommandBufferResetFlags flags)
332 {
333 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
334
335 cmd_buffer->usage_flags = 0;
336 cmd_buffer->state.current_pipeline = UINT32_MAX;
337 anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
338 anv_cmd_state_reset(cmd_buffer);
339
340 anv_state_stream_finish(&cmd_buffer->surface_state_stream);
341 anv_state_stream_init(&cmd_buffer->surface_state_stream,
342 &cmd_buffer->device->surface_state_block_pool);
343
344 anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
345 anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
346 &cmd_buffer->device->dynamic_state_block_pool);
347
348 return VK_SUCCESS;
349 }
350
351 void
352 anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
353 {
354 switch (cmd_buffer->device->info.gen) {
355 case 7:
356 if (cmd_buffer->device->info.is_haswell)
357          return gen75_cmd_buffer_emit_state_base_address(cmd_buffer);
358 else
359 return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
360 case 8:
361 return gen8_cmd_buffer_emit_state_base_address(cmd_buffer);
362 case 9:
363 return gen9_cmd_buffer_emit_state_base_address(cmd_buffer);
364 default:
365 unreachable("unsupported gen\n");
366 }
367 }
368
369 VkResult anv_BeginCommandBuffer(
370 VkCommandBuffer commandBuffer,
371 const VkCommandBufferBeginInfo* pBeginInfo)
372 {
373 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
374
375 /* If this is the first vkBeginCommandBuffer, we must *initialize* the
376 * command buffer's state. Otherwise, we must *reset* its state. In both
377 * cases we reset it.
378 *
379 * From the Vulkan 1.0 spec:
380 *
381 * If a command buffer is in the executable state and the command buffer
382 * was allocated from a command pool with the
383 * VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then
384 * vkBeginCommandBuffer implicitly resets the command buffer, behaving
385 * as if vkResetCommandBuffer had been called with
386 * VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts
387 * the command buffer in the recording state.
388 */
389 anv_ResetCommandBuffer(commandBuffer, /*flags*/ 0);
390
391 cmd_buffer->usage_flags = pBeginInfo->flags;
392
393 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
394 !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT));
395
396 anv_cmd_buffer_emit_state_base_address(cmd_buffer);
397
398 if (cmd_buffer->usage_flags &
399 VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
400 cmd_buffer->state.framebuffer =
401 anv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
402 cmd_buffer->state.pass =
403 anv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
404
405 struct anv_subpass *subpass =
406 &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
407
408 anv_cmd_buffer_set_subpass(cmd_buffer, subpass);
409 }
410
411 return VK_SUCCESS;
412 }
413
414 VkResult anv_EndCommandBuffer(
415 VkCommandBuffer commandBuffer)
416 {
417 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
418 struct anv_device *device = cmd_buffer->device;
419
420 anv_cmd_buffer_end_batch_buffer(cmd_buffer);
421
422 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
423 /* The algorithm used to compute the validate list is not threadsafe as
424 * it uses the bo->index field. We have to lock the device around it.
425 * Fortunately, the chances for contention here are probably very low.
426 */
427 pthread_mutex_lock(&device->mutex);
428 anv_cmd_buffer_prepare_execbuf(cmd_buffer);
429 pthread_mutex_unlock(&device->mutex);
430 }
431
432 return VK_SUCCESS;
433 }
434
435 void anv_CmdBindPipeline(
436 VkCommandBuffer commandBuffer,
437 VkPipelineBindPoint pipelineBindPoint,
438 VkPipeline _pipeline)
439 {
440 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
441 ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
442
443 switch (pipelineBindPoint) {
444 case VK_PIPELINE_BIND_POINT_COMPUTE:
445 cmd_buffer->state.compute_pipeline = pipeline;
446 cmd_buffer->state.compute_dirty |= ANV_CMD_DIRTY_PIPELINE;
447 cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
448 cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
449 break;
450
451 case VK_PIPELINE_BIND_POINT_GRAPHICS:
452 cmd_buffer->state.pipeline = pipeline;
453 cmd_buffer->state.vb_dirty |= pipeline->vb_used;
454 cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
455 cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
456 cmd_buffer->state.descriptors_dirty |= pipeline->active_stages;
457
458 /* Apply the dynamic state from the pipeline */
459 cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
460 anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
461 &pipeline->dynamic_state,
462 pipeline->dynamic_state_mask);
463 break;
464
465 default:
466 assert(!"invalid bind point");
467 break;
468 }
469 }
470
471 void anv_CmdSetViewport(
472 VkCommandBuffer commandBuffer,
473 uint32_t firstViewport,
474 uint32_t viewportCount,
475 const VkViewport* pViewports)
476 {
477 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
478
479 const uint32_t total_count = firstViewport + viewportCount;
480 if (cmd_buffer->state.dynamic.viewport.count < total_count)
481 cmd_buffer->state.dynamic.viewport.count = total_count;
482
483 memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
484 pViewports, viewportCount * sizeof(*pViewports));
485
486 cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
487 }
488
489 void anv_CmdSetScissor(
490 VkCommandBuffer commandBuffer,
491 uint32_t firstScissor,
492 uint32_t scissorCount,
493 const VkRect2D* pScissors)
494 {
495 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
496
497 const uint32_t total_count = firstScissor + scissorCount;
498 if (cmd_buffer->state.dynamic.scissor.count < total_count)
499 cmd_buffer->state.dynamic.scissor.count = total_count;
500
501 memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
502 pScissors, scissorCount * sizeof(*pScissors));
503
504 cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
505 }
506
507 void anv_CmdSetLineWidth(
508 VkCommandBuffer commandBuffer,
509 float lineWidth)
510 {
511 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
512
513 cmd_buffer->state.dynamic.line_width = lineWidth;
514 cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
515 }
516
517 void anv_CmdSetDepthBias(
518 VkCommandBuffer commandBuffer,
519 float depthBiasConstantFactor,
520 float depthBiasClamp,
521 float depthBiasSlopeFactor)
522 {
523 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
524
525 cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
526 cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
527 cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;
528
529 cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
530 }
531
532 void anv_CmdSetBlendConstants(
533 VkCommandBuffer commandBuffer,
534 const float blendConstants[4])
535 {
536 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
537
538 memcpy(cmd_buffer->state.dynamic.blend_constants,
539 blendConstants, sizeof(float) * 4);
540
541 cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
542 }
543
544 void anv_CmdSetDepthBounds(
545 VkCommandBuffer commandBuffer,
546 float minDepthBounds,
547 float maxDepthBounds)
548 {
549 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
550
551 cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
552 cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;
553
554 cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
555 }
556
557 void anv_CmdSetStencilCompareMask(
558 VkCommandBuffer commandBuffer,
559 VkStencilFaceFlags faceMask,
560 uint32_t compareMask)
561 {
562 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
563
564 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
565 cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
566 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
567 cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;
568
569 cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
570 }
571
572 void anv_CmdSetStencilWriteMask(
573 VkCommandBuffer commandBuffer,
574 VkStencilFaceFlags faceMask,
575 uint32_t writeMask)
576 {
577 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
578
579 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
580 cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
581 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
582 cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;
583
584 cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
585 }
586
587 void anv_CmdSetStencilReference(
588 VkCommandBuffer commandBuffer,
589 VkStencilFaceFlags faceMask,
590 uint32_t reference)
591 {
592 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
593
594 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
595 cmd_buffer->state.dynamic.stencil_reference.front = reference;
596 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
597 cmd_buffer->state.dynamic.stencil_reference.back = reference;
598
599 cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
600 }
601
602 void anv_CmdBindDescriptorSets(
603 VkCommandBuffer commandBuffer,
604 VkPipelineBindPoint pipelineBindPoint,
605 VkPipelineLayout _layout,
606 uint32_t firstSet,
607 uint32_t descriptorSetCount,
608 const VkDescriptorSet* pDescriptorSets,
609 uint32_t dynamicOffsetCount,
610 const uint32_t* pDynamicOffsets)
611 {
612 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
613 ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
614 struct anv_descriptor_set_layout *set_layout;
615
616 assert(firstSet + descriptorSetCount < MAX_SETS);
617
618 uint32_t dynamic_slot = 0;
619 for (uint32_t i = 0; i < descriptorSetCount; i++) {
620 ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
621 set_layout = layout->set[firstSet + i].layout;
622
623 if (cmd_buffer->state.descriptors[firstSet + i] != set) {
624 cmd_buffer->state.descriptors[firstSet + i] = set;
625 cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;
626 }
627
628 if (set_layout->dynamic_offset_count > 0) {
629 anv_foreach_stage(s, set_layout->shader_stages) {
630 anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, s, dynamic);
631
632 struct anv_push_constants *push =
633 cmd_buffer->state.push_constants[s];
634
635 unsigned d = layout->set[firstSet + i].dynamic_offset_start;
636 const uint32_t *offsets = pDynamicOffsets + dynamic_slot;
637 struct anv_descriptor *desc = set->descriptors;
638
639 for (unsigned b = 0; b < set_layout->binding_count; b++) {
640 if (set_layout->binding[b].dynamic_offset_index < 0)
641 continue;
642
643 unsigned array_size = set_layout->binding[b].array_size;
644 for (unsigned j = 0; j < array_size; j++) {
645 uint32_t range = 0;
646 if (desc->buffer_view)
647 range = desc->buffer_view->range;
648 push->dynamic[d].offset = *(offsets++);
649 push->dynamic[d].range = range;
650 desc++;
651 d++;
652 }
653 }
654 }
655 cmd_buffer->state.push_constants_dirty |= set_layout->shader_stages;
656 }
657 }
658 }
659
660 void anv_CmdBindVertexBuffers(
661 VkCommandBuffer commandBuffer,
662 uint32_t firstBinding,
663 uint32_t bindingCount,
664 const VkBuffer* pBuffers,
665 const VkDeviceSize* pOffsets)
666 {
667 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
668 struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;
669
670    /* We have to defer setting up vertex buffers since we need the buffer
671 * stride from the pipeline. */
672
673 assert(firstBinding + bindingCount < MAX_VBS);
674 for (uint32_t i = 0; i < bindingCount; i++) {
675 vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
676 vb[firstBinding + i].offset = pOffsets[i];
677 cmd_buffer->state.vb_dirty |= 1 << (firstBinding + i);
678 }
679 }
680
681 static void
682 add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
683 struct anv_state state, struct anv_bo *bo, uint32_t offset)
684 {
685 /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8 and
686 * 9 for gen8+. We only write the first dword for gen8+ here and rely on
687 * the initial state to set the high bits to 0. */
688
689 const uint32_t dword = cmd_buffer->device->info.gen < 8 ? 1 : 8;
690
691 anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
692 state.offset + dword * 4, bo, offset);
693 }
694
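/* Surface format to use when filling a buffer surface state for the given
 * descriptor type: a typed RGBA32 float format for uniform buffers, RAW for
 * storage buffers.
 */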
695 enum isl_format
696 anv_isl_format_for_descriptor_type(VkDescriptorType type)
697 {
698 switch (type) {
699 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
700 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
701 return ISL_FORMAT_R32G32B32A32_FLOAT;
702
703 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
704 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
705 return ISL_FORMAT_RAW;
706
707 default:
708 unreachable("Invalid descriptor type");
709 }
710 }
711
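/* Per-gen dispatch for allocating a null RENDER_SURFACE_STATE, used below
 * when a color attachment binding has no corresponding render target.
 */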
712 static struct anv_state
713 anv_cmd_buffer_alloc_null_surface_state(struct anv_cmd_buffer *cmd_buffer,
714 struct anv_framebuffer *fb)
715 {
716 switch (cmd_buffer->device->info.gen) {
717 case 7:
718 if (cmd_buffer->device->info.is_haswell) {
719 return gen75_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
720 } else {
721 return gen7_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
722 }
723 case 8:
724 return gen8_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
725 case 9:
726 return gen9_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
727 default:
728 unreachable("Invalid hardware generation");
729 }
730 }
731
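/* Build the binding table for one shader stage: allocate binding table space
 * from the command buffer, then fill it with the offsets of the surface
 * states for the stage's color attachments and descriptors (plus, for
 * compute, the num_workgroups buffer), adding relocations as needed.
 */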
732 VkResult
733 anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
734 gl_shader_stage stage,
735 struct anv_state *bt_state)
736 {
737 struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
738 struct anv_subpass *subpass = cmd_buffer->state.subpass;
739 struct anv_pipeline_bind_map *map;
740 uint32_t bias, state_offset;
741
742 switch (stage) {
743 case MESA_SHADER_COMPUTE:
744 map = &cmd_buffer->state.compute_pipeline->bindings[stage];
745 bias = 1;
746 break;
747 default:
748 map = &cmd_buffer->state.pipeline->bindings[stage];
749 bias = 0;
750 break;
751 }
752
753 if (bias + map->surface_count == 0) {
754 *bt_state = (struct anv_state) { 0, };
755 return VK_SUCCESS;
756 }
757
758 *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
759 bias + map->surface_count,
760 &state_offset);
761 uint32_t *bt_map = bt_state->map;
762
763 if (bt_state->map == NULL)
764 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
765
766 if (stage == MESA_SHADER_COMPUTE &&
767 get_cs_prog_data(cmd_buffer->state.compute_pipeline)->uses_num_work_groups) {
768 struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
769 uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;
770
771 struct anv_state surface_state;
772 surface_state =
773 anv_cmd_buffer_alloc_surface_state(cmd_buffer);
774
775 const enum isl_format format =
776 anv_isl_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
777 anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
778 format, bo_offset, 12, 1);
779
780 bt_map[0] = surface_state.offset + state_offset;
781 add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
782 }
783
784 if (map->surface_count == 0)
785 goto out;
786
787 if (map->image_count > 0) {
788 VkResult result =
789 anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
790 if (result != VK_SUCCESS)
791 return result;
792
793 cmd_buffer->state.push_constants_dirty |= 1 << stage;
794 }
795
796 uint32_t image = 0;
797 for (uint32_t s = 0; s < map->surface_count; s++) {
798 struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];
799
800 struct anv_state surface_state;
801 struct anv_bo *bo;
802 uint32_t bo_offset;
803
804 if (binding->set == ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS) {
805 /* Color attachment binding */
806 assert(stage == MESA_SHADER_FRAGMENT);
807 if (binding->offset < subpass->color_count) {
808 const struct anv_image_view *iview =
809 fb->attachments[subpass->color_attachments[binding->offset]];
810
811 assert(iview->color_rt_surface_state.alloc_size);
812 surface_state = iview->color_rt_surface_state;
813 add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
814 iview->bo, iview->offset);
815 } else {
816 /* Null render target */
817 struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
818 surface_state =
819 anv_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
820 }
821
822 bt_map[bias + s] = surface_state.offset + state_offset;
823 continue;
824 }
825
826 struct anv_descriptor_set *set =
827 cmd_buffer->state.descriptors[binding->set];
828 struct anv_descriptor *desc = &set->descriptors[binding->offset];
829
830 switch (desc->type) {
831 case VK_DESCRIPTOR_TYPE_SAMPLER:
832 /* Nothing for us to do here */
833 continue;
834
835 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
836 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
837 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
838 surface_state = desc->image_view->sampler_surface_state;
839 assert(surface_state.alloc_size);
840 bo = desc->image_view->bo;
841 bo_offset = desc->image_view->offset;
842 break;
843
844 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
845 surface_state = desc->image_view->storage_surface_state;
846 assert(surface_state.alloc_size);
847 bo = desc->image_view->bo;
848 bo_offset = desc->image_view->offset;
849
850 struct brw_image_param *image_param =
851 &cmd_buffer->state.push_constants[stage]->images[image++];
852
853 *image_param = desc->image_view->storage_image_param;
854 image_param->surface_idx = bias + s;
855 break;
856 }
857
858 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
859 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
860 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
861 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
862 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
863 surface_state = desc->buffer_view->surface_state;
864 assert(surface_state.alloc_size);
865 bo = desc->buffer_view->bo;
866 bo_offset = desc->buffer_view->offset;
867 break;
868
869 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
870 surface_state = desc->buffer_view->storage_surface_state;
871 assert(surface_state.alloc_size);
872 bo = desc->buffer_view->bo;
873 bo_offset = desc->buffer_view->offset;
874
875 struct brw_image_param *image_param =
876 &cmd_buffer->state.push_constants[stage]->images[image++];
877
878 *image_param = desc->buffer_view->storage_image_param;
879 image_param->surface_idx = bias + s;
880 break;
881
882 default:
883 assert(!"Invalid descriptor type");
884 continue;
885 }
886
887 bt_map[bias + s] = surface_state.offset + state_offset;
888 add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
889 }
890 assert(image == map->image_count);
891
892 out:
893 if (!cmd_buffer->device->info.has_llc)
894 anv_state_clflush(*bt_state);
895
896 return VK_SUCCESS;
897 }
898
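/* Write the SAMPLER_STATE structures (16 bytes each) for every sampler the
 * given stage uses into a dynamic state allocation and return it in *state.
 */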
899 VkResult
900 anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
901 gl_shader_stage stage, struct anv_state *state)
902 {
903 struct anv_pipeline_bind_map *map;
904
905 if (stage == MESA_SHADER_COMPUTE)
906 map = &cmd_buffer->state.compute_pipeline->bindings[stage];
907 else
908 map = &cmd_buffer->state.pipeline->bindings[stage];
909
910 if (map->sampler_count == 0) {
911 *state = (struct anv_state) { 0, };
912 return VK_SUCCESS;
913 }
914
915 uint32_t size = map->sampler_count * 16;
916 *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);
917
918 if (state->map == NULL)
919 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
920
921 for (uint32_t s = 0; s < map->sampler_count; s++) {
922 struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
923 struct anv_descriptor_set *set =
924 cmd_buffer->state.descriptors[binding->set];
925 struct anv_descriptor *desc = &set->descriptors[binding->offset];
926
927 if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
928 desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
929 continue;
930
931 struct anv_sampler *sampler = desc->sampler;
932
933 /* This can happen if we have an unfilled slot since TYPE_SAMPLER
934 * happens to be zero.
935 */
936 if (sampler == NULL)
937 continue;
938
939 memcpy(state->map + (s * 16),
940 sampler->state, sizeof(sampler->state));
941 }
942
943 if (!cmd_buffer->device->info.has_llc)
944 anv_state_clflush(*state);
945
946 return VK_SUCCESS;
947 }
948
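/* Copy `size` bytes of CPU data into a fresh dynamic state allocation,
 * flushing the CPU cache on non-LLC platforms so the GPU sees the data.
 */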
949 struct anv_state
950 anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
951 const void *data, uint32_t size, uint32_t alignment)
952 {
953 struct anv_state state;
954
955 state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
956 memcpy(state.map, data, size);
957
958 if (!cmd_buffer->device->info.has_llc)
959 anv_state_clflush(state);
960
961 VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));
962
963 return state;
964 }
965
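/* OR together two arrays of `dwords` dwords (typically a pipeline's
 * pre-packed state and the matching dynamically packed dwords) into a single
 * dynamic state allocation.
 */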
966 struct anv_state
967 anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
968 uint32_t *a, uint32_t *b,
969 uint32_t dwords, uint32_t alignment)
970 {
971 struct anv_state state;
972 uint32_t *p;
973
974 state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
975 dwords * 4, alignment);
976 p = state.map;
977 for (uint32_t i = 0; i < dwords; i++)
978 p[i] = a[i] | b[i];
979
980 if (!cmd_buffer->device->info.has_llc)
981 anv_state_clflush(state);
982
983 VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));
984
985 return state;
986 }
987
988 /**
989 * @brief Setup the command buffer for recording commands inside the given
990 * subpass.
991 *
992 * This does not record all commands needed for starting the subpass.
993 * Starting the subpass may require additional commands.
994 *
995 * Note that vkCmdBeginRenderPass, vkCmdNextSubpass, and vkBeginCommandBuffer
996 * with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT, all setup the
997 * command buffer for recording commands for some subpass. But only the first
998 * two, vkCmdBeginRenderPass and vkCmdNextSubpass, can start a subpass.
999 */
1000 void
1001 anv_cmd_buffer_set_subpass(struct anv_cmd_buffer *cmd_buffer,
1002 struct anv_subpass *subpass)
1003 {
1004 switch (cmd_buffer->device->info.gen) {
1005 case 7:
1006 if (cmd_buffer->device->info.is_haswell) {
1007 gen75_cmd_buffer_set_subpass(cmd_buffer, subpass);
1008 } else {
1009 gen7_cmd_buffer_set_subpass(cmd_buffer, subpass);
1010 }
1011 break;
1012 case 8:
1013 gen8_cmd_buffer_set_subpass(cmd_buffer, subpass);
1014 break;
1015 case 9:
1016 gen9_cmd_buffer_set_subpass(cmd_buffer, subpass);
1017 break;
1018 default:
1019 unreachable("unsupported gen\n");
1020 }
1021 }
1022
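/* Gather the push constants for a graphics stage into dynamic state. Each
 * entry of prog_data->param is treated as a byte offset into the stage's
 * anv_push_constants block; the referenced dwords are copied out in order.
 */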
1023 struct anv_state
1024 anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
1025 gl_shader_stage stage)
1026 {
1027 struct anv_push_constants *data =
1028 cmd_buffer->state.push_constants[stage];
1029 const struct brw_stage_prog_data *prog_data =
1030 cmd_buffer->state.pipeline->prog_data[stage];
1031
1032 /* If we don't actually have any push constants, bail. */
1033 if (data == NULL || prog_data->nr_params == 0)
1034 return (struct anv_state) { .offset = 0 };
1035
1036 struct anv_state state =
1037 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
1038 prog_data->nr_params * sizeof(float),
1039 32 /* bottom 5 bits MBZ */);
1040
1041 /* Walk through the param array and fill the buffer with data */
1042 uint32_t *u32_map = state.map;
1043 for (unsigned i = 0; i < prog_data->nr_params; i++) {
1044 uint32_t offset = (uintptr_t)prog_data->param[i];
1045 u32_map[i] = *(uint32_t *)((uint8_t *)data + offset);
1046 }
1047
1048 if (!cmd_buffer->device->info.has_llc)
1049 anv_state_clflush(state);
1050
1051 return state;
1052 }
1053
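/* Build the compute push constant buffer: one register-aligned copy of the
 * uniforms per hardware thread, each preceded by that thread's local
 * invocation ID payload.
 */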
1054 struct anv_state
1055 anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
1056 {
1057 struct anv_push_constants *data =
1058 cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
1059 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
1060 const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
1061 const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
1062
1063 const unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
1064 const unsigned push_constant_data_size =
1065 (local_id_dwords + prog_data->nr_params) * 4;
1066 const unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
1067 const unsigned param_aligned_count =
1068 reg_aligned_constant_size / sizeof(uint32_t);
1069
1070 /* If we don't actually have any push constants, bail. */
1071 if (reg_aligned_constant_size == 0)
1072 return (struct anv_state) { .offset = 0 };
1073
1074 const unsigned threads = pipeline->cs_thread_width_max;
1075 const unsigned total_push_constants_size =
1076 reg_aligned_constant_size * threads;
1077 const unsigned push_constant_alignment =
1078 cmd_buffer->device->info.gen < 8 ? 32 : 64;
1079 const unsigned aligned_total_push_constants_size =
1080 ALIGN(total_push_constants_size, push_constant_alignment);
1081 struct anv_state state =
1082 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
1083 aligned_total_push_constants_size,
1084 push_constant_alignment);
1085
1086 /* Walk through the param array and fill the buffer with data */
1087 uint32_t *u32_map = state.map;
1088
1089 brw_cs_fill_local_id_payload(cs_prog_data, u32_map, threads,
1090 reg_aligned_constant_size);
1091
1092 /* Setup uniform data for the first thread */
1093 for (unsigned i = 0; i < prog_data->nr_params; i++) {
1094 uint32_t offset = (uintptr_t)prog_data->param[i];
1095 u32_map[local_id_dwords + i] = *(uint32_t *)((uint8_t *)data + offset);
1096 }
1097
1098 /* Copy uniform data from the first thread to every other thread */
1099 const size_t uniform_data_size = prog_data->nr_params * sizeof(uint32_t);
1100 for (unsigned t = 1; t < threads; t++) {
1101 memcpy(&u32_map[t * param_aligned_count + local_id_dwords],
1102 &u32_map[local_id_dwords],
1103 uniform_data_size);
1104 }
1105
1106 if (!cmd_buffer->device->info.has_llc)
1107 anv_state_clflush(state);
1108
1109 return state;
1110 }
1111
1112 void anv_CmdPushConstants(
1113 VkCommandBuffer commandBuffer,
1114 VkPipelineLayout layout,
1115 VkShaderStageFlags stageFlags,
1116 uint32_t offset,
1117 uint32_t size,
1118 const void* pValues)
1119 {
1120 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1121
1122 anv_foreach_stage(stage, stageFlags) {
1123 anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, client_data);
1124
1125 memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset,
1126 pValues, size);
1127 }
1128
1129 cmd_buffer->state.push_constants_dirty |= stageFlags;
1130 }
1131
1132 void anv_CmdExecuteCommands(
1133 VkCommandBuffer commandBuffer,
1134 uint32_t commandBufferCount,
1135 const VkCommandBuffer* pCmdBuffers)
1136 {
1137 ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);
1138
1139 assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1140
1141 for (uint32_t i = 0; i < commandBufferCount; i++) {
1142 ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);
1143
1144 assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
1145
1146 anv_cmd_buffer_add_secondary(primary, secondary);
1147 }
1148 }
1149
1150 VkResult anv_CreateCommandPool(
1151 VkDevice _device,
1152 const VkCommandPoolCreateInfo* pCreateInfo,
1153 const VkAllocationCallbacks* pAllocator,
1154 VkCommandPool* pCmdPool)
1155 {
1156 ANV_FROM_HANDLE(anv_device, device, _device);
1157 struct anv_cmd_pool *pool;
1158
1159 pool = anv_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
1160 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1161 if (pool == NULL)
1162 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1163
1164 if (pAllocator)
1165 pool->alloc = *pAllocator;
1166 else
1167 pool->alloc = device->alloc;
1168
1169 list_inithead(&pool->cmd_buffers);
1170
1171 *pCmdPool = anv_cmd_pool_to_handle(pool);
1172
1173 return VK_SUCCESS;
1174 }
1175
1176 void anv_DestroyCommandPool(
1177 VkDevice _device,
1178 VkCommandPool commandPool,
1179 const VkAllocationCallbacks* pAllocator)
1180 {
1181 ANV_FROM_HANDLE(anv_device, device, _device);
1182 ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
1183
1184 anv_ResetCommandPool(_device, commandPool, 0);
1185
1186 anv_free2(&device->alloc, pAllocator, pool);
1187 }
1188
1189 VkResult anv_ResetCommandPool(
1190 VkDevice device,
1191 VkCommandPool commandPool,
1192 VkCommandPoolResetFlags flags)
1193 {
1194 ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
1195
1196 /* FIXME: vkResetCommandPool must not destroy its command buffers. The
1197 * Vulkan 1.0 spec requires that it only reset them:
1198 *
1199 * Resetting a command pool recycles all of the resources from all of
1200 * the command buffers allocated from the command pool back to the
1201 * command pool. All command buffers that have been allocated from the
1202 * command pool are put in the initial state.
1203 */
1204 list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
1205 &pool->cmd_buffers, pool_link) {
1206 anv_cmd_buffer_destroy(cmd_buffer);
1207 }
1208
1209 return VK_SUCCESS;
1210 }
1211
1212 /**
1213 * Return NULL if the current subpass has no depthstencil attachment.
1214 */
1215 const struct anv_image_view *
1216 anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
1217 {
1218 const struct anv_subpass *subpass = cmd_buffer->state.subpass;
1219 const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
1220
1221 if (subpass->depth_stencil_attachment == VK_ATTACHMENT_UNUSED)
1222 return NULL;
1223
1224 const struct anv_image_view *iview =
1225 fb->attachments[subpass->depth_stencil_attachment];
1226
1227 assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
1228 VK_IMAGE_ASPECT_STENCIL_BIT));
1229
1230 return iview;
1231 }