Added a few more stubs so that control reaches DestroyDevice().
mesa.git: src/intel/vulkan/anv_cmd_buffer.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "vk_format_info.h"
33 #include "vk_util.h"
34
35 /** \file anv_cmd_buffer.c
36 *
37 * This file contains all of the stuff for emitting commands into a command
38 * buffer. This includes implementations of most of the vkCmd*
39 * entrypoints. This file is concerned entirely with state emission and
40 * not with the command buffer data structure itself. As far as this file
41 * is concerned, most of anv_cmd_buffer is magic.
42 */
43
44 /* TODO: These are taken from GLES. We should check the Vulkan spec */
45 const struct anv_dynamic_state default_dynamic_state = {
46 .viewport = {
47 .count = 0,
48 },
49 .scissor = {
50 .count = 0,
51 },
52 .line_width = 1.0f,
53 .depth_bias = {
54 .bias = 0.0f,
55 .clamp = 0.0f,
56 .slope = 0.0f,
57 },
58 .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
59 .depth_bounds = {
60 .min = 0.0f,
61 .max = 1.0f,
62 },
63 .stencil_compare_mask = {
64 .front = ~0u,
65 .back = ~0u,
66 },
67 .stencil_write_mask = {
68 .front = ~0u,
69 .back = ~0u,
70 },
71 .stencil_reference = {
72 .front = 0u,
73 .back = 0u,
74 },
75 .stencil_op = {
76 .front = {
77 .fail_op = 0,
78 .pass_op = 0,
79 .depth_fail_op = 0,
80 .compare_op = 0,
81 },
82 .back = {
83 .fail_op = 0,
84 .pass_op = 0,
85 .depth_fail_op = 0,
86 .compare_op = 0,
87 },
88 },
89 .line_stipple = {
90 .factor = 0u,
91 .pattern = 0u,
92 },
93 .cull_mode = 0,
94 .front_face = 0,
95 .primitive_topology = 0,
96 .depth_test_enable = 0,
97 .depth_write_enable = 0,
98 .depth_compare_op = 0,
99 .depth_bounds_test_enable = 0,
100 .stencil_test_enable = 0,
101 .dyn_vbo_stride = 0,
102 .dyn_vbo_size = 0,
103 };
104
105 /**
106 * Copy the dynamic state from src to dest based on the copy_mask.
107 *
108 * Avoid copying states that have not changed, except for VIEWPORT, SCISSOR and
109 * BLEND_CONSTANTS (always copy them if they are in the copy_mask).
110 *
111 * Returns a mask of the states which changed.
112 */
113 anv_cmd_dirty_mask_t
114 anv_dynamic_state_copy(struct anv_dynamic_state *dest,
115 const struct anv_dynamic_state *src,
116 anv_cmd_dirty_mask_t copy_mask)
117 {
118 anv_cmd_dirty_mask_t changed = 0;
119
120 if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT) {
121 dest->viewport.count = src->viewport.count;
122 typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
123 src->viewport.count);
124 changed |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
125 }
126
127 if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_SCISSOR) {
128 dest->scissor.count = src->scissor.count;
129 typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
130 src->scissor.count);
131 changed |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
132 }
133
134 if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
135 typed_memcpy(dest->blend_constants, src->blend_constants, 4);
136 changed |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
137 }
138
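/* Helper for the scalar dynamic state fields below: when `flag` is in
 * copy_mask and the field value actually differs, copy it from src to
 * dest and record the flag in `changed`.  For example,
 * ANV_CMP_COPY(line_width, ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH) only marks
 * the line width dirty when the incoming value changes it.
 */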
139 #define ANV_CMP_COPY(field, flag) \
140 if (copy_mask & flag) { \
141 if (dest->field != src->field) { \
142 dest->field = src->field; \
143 changed |= flag; \
144 } \
145 }
146
147 ANV_CMP_COPY(line_width, ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH);
148
149 ANV_CMP_COPY(depth_bias.bias, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);
150 ANV_CMP_COPY(depth_bias.clamp, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);
151 ANV_CMP_COPY(depth_bias.slope, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);
152
153 ANV_CMP_COPY(depth_bounds.min, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS);
154 ANV_CMP_COPY(depth_bounds.max, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS);
155
156 ANV_CMP_COPY(stencil_compare_mask.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK);
157 ANV_CMP_COPY(stencil_compare_mask.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK);
158
159 ANV_CMP_COPY(stencil_write_mask.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK);
160 ANV_CMP_COPY(stencil_write_mask.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK);
161
162 ANV_CMP_COPY(stencil_reference.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE);
163 ANV_CMP_COPY(stencil_reference.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE);
164
165 ANV_CMP_COPY(line_stipple.factor, ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE);
166 ANV_CMP_COPY(line_stipple.pattern, ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE);
167
168 ANV_CMP_COPY(cull_mode, ANV_CMD_DIRTY_DYNAMIC_CULL_MODE);
169 ANV_CMP_COPY(front_face, ANV_CMD_DIRTY_DYNAMIC_FRONT_FACE);
170 ANV_CMP_COPY(primitive_topology, ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY);
171 ANV_CMP_COPY(depth_test_enable, ANV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE);
172 ANV_CMP_COPY(depth_write_enable, ANV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE);
173 ANV_CMP_COPY(depth_compare_op, ANV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP);
174 ANV_CMP_COPY(depth_bounds_test_enable, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE);
175 ANV_CMP_COPY(stencil_test_enable, ANV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE);
176
177 if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP) {
178 ANV_CMP_COPY(stencil_op.front.fail_op, ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP);
179 ANV_CMP_COPY(stencil_op.front.pass_op, ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP);
180 ANV_CMP_COPY(stencil_op.front.depth_fail_op, ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP);
181 ANV_CMP_COPY(stencil_op.front.compare_op, ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP);
182 ANV_CMP_COPY(stencil_op.back.fail_op, ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP);
183 ANV_CMP_COPY(stencil_op.back.pass_op, ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP);
184 ANV_CMP_COPY(stencil_op.back.depth_fail_op, ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP);
185 ANV_CMP_COPY(stencil_op.back.compare_op, ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP);
186 }
187
188 ANV_CMP_COPY(dyn_vbo_stride, ANV_CMD_DIRTY_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE);
189 ANV_CMP_COPY(dyn_vbo_size, ANV_CMD_DIRTY_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE);
190
191 #undef ANV_CMP_COPY
192
193 return changed;
194 }
195
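/* (Re)initialize the command buffer state: zero everything, mark the
 * current pipeline and restart index as unknown (UINT32_MAX), and load
 * the default dynamic state.
 */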
196 static void
197 anv_cmd_state_init(struct anv_cmd_buffer *cmd_buffer)
198 {
199 struct anv_cmd_state *state = &cmd_buffer->state;
200
201 memset(state, 0, sizeof(*state));
202
203 state->current_pipeline = UINT32_MAX;
204 state->restart_index = UINT32_MAX;
205 state->gfx.dynamic = default_dynamic_state;
206 }
207
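/* Free the push descriptor sets owned by a per-bind-point pipeline
 * state, dropping the reference each one holds on its set layout.
 */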
208 static void
209 anv_cmd_pipeline_state_finish(struct anv_cmd_buffer *cmd_buffer,
210 struct anv_cmd_pipeline_state *pipe_state)
211 {
212 for (uint32_t i = 0; i < ARRAY_SIZE(pipe_state->push_descriptors); i++) {
213 if (pipe_state->push_descriptors[i]) {
214 anv_descriptor_set_layout_unref(cmd_buffer->device,
215 pipe_state->push_descriptors[i]->set.layout);
216 vk_free(&cmd_buffer->pool->alloc, pipe_state->push_descriptors[i]);
217 }
218 }
219 }
220
221 static void
222 anv_cmd_state_finish(struct anv_cmd_buffer *cmd_buffer)
223 {
224 struct anv_cmd_state *state = &cmd_buffer->state;
225
226 anv_cmd_pipeline_state_finish(cmd_buffer, &state->gfx.base);
227 anv_cmd_pipeline_state_finish(cmd_buffer, &state->compute.base);
228
229 vk_free(&cmd_buffer->pool->alloc, state->attachments);
230 }
231
232 static void
233 anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
234 {
235 anv_cmd_state_finish(cmd_buffer);
236 anv_cmd_state_init(cmd_buffer);
237 }
238
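/* Allocate and initialize one command buffer: set up the batch BO chain
 * and the surface/dynamic state streams, initialize the command buffer
 * state, and link the buffer into its pool (or init pool_link so that
 * destruction can still safely call list_del).
 */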
239 static VkResult anv_create_cmd_buffer(
240 struct anv_device * device,
241 struct anv_cmd_pool * pool,
242 VkCommandBufferLevel level,
243 VkCommandBuffer* pCommandBuffer)
244 {
245 struct anv_cmd_buffer *cmd_buffer;
246 VkResult result;
247
248 cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
249 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
250 if (cmd_buffer == NULL)
251 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
252
253 vk_object_base_init(&device->vk, &cmd_buffer->base,
254 VK_OBJECT_TYPE_COMMAND_BUFFER);
255
256 cmd_buffer->batch.status = VK_SUCCESS;
257
258 cmd_buffer->device = device;
259 cmd_buffer->pool = pool;
260 cmd_buffer->level = level;
261
262 result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
263 if (result != VK_SUCCESS)
264 goto fail;
265
266 anv_state_stream_init(&cmd_buffer->surface_state_stream,
267 &device->surface_state_pool, 4096);
268 anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
269 &device->dynamic_state_pool, 16384);
270
271 anv_cmd_state_init(cmd_buffer);
272
273 if (pool) {
274 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
275 } else {
276 /* Init the pool_link so we can safely call list_del when we destroy
277 * the command buffer
278 */
279 list_inithead(&cmd_buffer->pool_link);
280 }
281
282 *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);
283
284 return VK_SUCCESS;
285
286 fail:
287 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
288
289 return result;
290 }
291
292 VkResult anv_AllocateCommandBuffers(
293 VkDevice _device,
294 const VkCommandBufferAllocateInfo* pAllocateInfo,
295 VkCommandBuffer* pCommandBuffers)
296 {
297 ANV_FROM_HANDLE(anv_device, device, _device);
298 ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);
299
300 VkResult result = VK_SUCCESS;
301 uint32_t i;
302
303 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
304 result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
305 &pCommandBuffers[i]);
306 if (result != VK_SUCCESS)
307 break;
308 }
309
310 if (result != VK_SUCCESS) {
311 anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
312 i, pCommandBuffers);
313 for (i = 0; i < pAllocateInfo->commandBufferCount; i++)
314 pCommandBuffers[i] = VK_NULL_HANDLE;
315 }
316
317 return result;
318 }
319
320 static void
321 anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
322 {
323 list_del(&cmd_buffer->pool_link);
324
325 anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);
326
327 anv_state_stream_finish(&cmd_buffer->surface_state_stream);
328 anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
329
330 anv_cmd_state_finish(cmd_buffer);
331
332 vk_object_base_finish(&cmd_buffer->base);
333 vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
334 }
335
336 void anv_FreeCommandBuffers(
337 VkDevice device,
338 VkCommandPool commandPool,
339 uint32_t commandBufferCount,
340 const VkCommandBuffer* pCommandBuffers)
341 {
342 for (uint32_t i = 0; i < commandBufferCount; i++) {
343 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
344
345 if (!cmd_buffer)
346 continue;
347
348 anv_cmd_buffer_destroy(cmd_buffer);
349 }
350 }
351
352 VkResult
353 anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer)
354 {
355 cmd_buffer->usage_flags = 0;
356 cmd_buffer->perf_query_pool = NULL;
357 anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
358 anv_cmd_state_reset(cmd_buffer);
359
360 anv_state_stream_finish(&cmd_buffer->surface_state_stream);
361 anv_state_stream_init(&cmd_buffer->surface_state_stream,
362 &cmd_buffer->device->surface_state_pool, 4096);
363
364 anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
365 anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
366 &cmd_buffer->device->dynamic_state_pool, 16384);
367 return VK_SUCCESS;
368 }
369
370 VkResult anv_ResetCommandBuffer(
371 VkCommandBuffer commandBuffer,
372 VkCommandBufferResetFlags flags)
373 {
374 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
375 return anv_cmd_buffer_reset(cmd_buffer);
376 }
377
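/* Dispatch `func` to its per-generation implementation (gen7 through
 * gen12, with gen75_ used for Haswell) based on the device info.
 */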
378 #define anv_genX_call(devinfo, func, ...) \
379 switch ((devinfo)->gen) { \
380 case 7: \
381 if ((devinfo)->is_haswell) { \
382 gen75_##func(__VA_ARGS__); \
383 } else { \
384 gen7_##func(__VA_ARGS__); \
385 } \
386 break; \
387 case 8: \
388 gen8_##func(__VA_ARGS__); \
389 break; \
390 case 9: \
391 gen9_##func(__VA_ARGS__); \
392 break; \
393 case 10: \
394 gen10_##func(__VA_ARGS__); \
395 break; \
396 case 11: \
397 gen11_##func(__VA_ARGS__); \
398 break; \
399 case 12: \
400 gen12_##func(__VA_ARGS__); \
401 break; \
402 default: \
403 assert(!"Unknown hardware generation"); \
404 }
405
406 void
407 anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
408 {
409 anv_genX_call(&cmd_buffer->device->info,
410 cmd_buffer_emit_state_base_address,
411 cmd_buffer);
412 }
413
414 void
415 anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
416 const struct anv_image *image,
417 VkImageAspectFlagBits aspect,
418 enum isl_aux_usage aux_usage,
419 uint32_t level,
420 uint32_t base_layer,
421 uint32_t layer_count)
422 {
423 anv_genX_call(&cmd_buffer->device->info,
424 cmd_buffer_mark_image_written,
425 cmd_buffer, image, aspect, aux_usage,
426 level, base_layer, layer_count);
427 }
428
429 void
430 anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer *cmd_buffer)
431 {
432 anv_genX_call(&cmd_buffer->device->info,
433 cmd_emit_conditional_render_predicate,
434 cmd_buffer);
435 }
436
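/* Copy src over dst only if the contents differ; returns true when an
 * update was made.
 */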
437 static bool
438 mem_update(void *dst, const void *src, size_t size)
439 {
440 if (memcmp(dst, src, size) == 0)
441 return false;
442
443 memcpy(dst, src, size);
444 return true;
445 }
446
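/* Compare the bind map's surface/sampler/push SHA1s against the last
 * ones seen for this stage and flag the stage's descriptors or push
 * constants dirty when they changed.
 */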
447 static void
448 set_dirty_for_bind_map(struct anv_cmd_buffer *cmd_buffer,
449 gl_shader_stage stage,
450 const struct anv_pipeline_bind_map *map)
451 {
452 if (mem_update(cmd_buffer->state.surface_sha1s[stage],
453 map->surface_sha1, sizeof(map->surface_sha1)))
454 cmd_buffer->state.descriptors_dirty |= mesa_to_vk_shader_stage(stage);
455
456 if (mem_update(cmd_buffer->state.sampler_sha1s[stage],
457 map->sampler_sha1, sizeof(map->sampler_sha1)))
458 cmd_buffer->state.descriptors_dirty |= mesa_to_vk_shader_stage(stage);
459
460 if (mem_update(cmd_buffer->state.push_sha1s[stage],
461 map->push_sha1, sizeof(map->push_sha1)))
462 cmd_buffer->state.push_constants_dirty |= mesa_to_vk_shader_stage(stage);
463 }
464
465 void anv_CmdBindPipeline(
466 VkCommandBuffer commandBuffer,
467 VkPipelineBindPoint pipelineBindPoint,
468 VkPipeline _pipeline)
469 {
470 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
471 ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
472
473 switch (pipelineBindPoint) {
474 case VK_PIPELINE_BIND_POINT_COMPUTE: {
475 struct anv_compute_pipeline *compute_pipeline =
476 anv_pipeline_to_compute(pipeline);
477 if (cmd_buffer->state.compute.pipeline == compute_pipeline)
478 return;
479
480 cmd_buffer->state.compute.pipeline = compute_pipeline;
481 cmd_buffer->state.compute.pipeline_dirty = true;
482 set_dirty_for_bind_map(cmd_buffer, MESA_SHADER_COMPUTE,
483 &compute_pipeline->cs->bind_map);
484 break;
485 }
486
487 case VK_PIPELINE_BIND_POINT_GRAPHICS: {
488 struct anv_graphics_pipeline *gfx_pipeline =
489 anv_pipeline_to_graphics(pipeline);
490 if (cmd_buffer->state.gfx.pipeline == gfx_pipeline)
491 return;
492
493 cmd_buffer->state.gfx.pipeline = gfx_pipeline;
494 cmd_buffer->state.gfx.vb_dirty |= gfx_pipeline->vb_used;
495 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
496
497 anv_foreach_stage(stage, gfx_pipeline->active_stages) {
498 set_dirty_for_bind_map(cmd_buffer, stage,
499 &gfx_pipeline->shaders[stage]->bind_map);
500 }
501
502 /* Apply the dynamic state from the pipeline */
503 cmd_buffer->state.gfx.dirty |=
504 anv_dynamic_state_copy(&cmd_buffer->state.gfx.dynamic,
505 &gfx_pipeline->dynamic_state,
506 gfx_pipeline->dynamic_state_mask);
507 break;
508 }
509
510 default:
511 assert(!"invalid bind point");
512 break;
513 }
514 }
515
516 void anv_CmdSetViewport(
517 VkCommandBuffer commandBuffer,
518 uint32_t firstViewport,
519 uint32_t viewportCount,
520 const VkViewport* pViewports)
521 {
522 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
523
524 const uint32_t total_count = firstViewport + viewportCount;
525 if (cmd_buffer->state.gfx.dynamic.viewport.count < total_count)
526 cmd_buffer->state.gfx.dynamic.viewport.count = total_count;
527
528 memcpy(cmd_buffer->state.gfx.dynamic.viewport.viewports + firstViewport,
529 pViewports, viewportCount * sizeof(*pViewports));
530
531 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
532 }
533
534 void anv_CmdSetViewportWithCountEXT(
535 VkCommandBuffer commandBuffer,
536 uint32_t viewportCount,
537 const VkViewport* pViewports)
538 {
539 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
540
541 cmd_buffer->state.gfx.dynamic.viewport.count = viewportCount;
542
543 memcpy(cmd_buffer->state.gfx.dynamic.viewport.viewports,
544 pViewports, viewportCount * sizeof(*pViewports));
545
546 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
547 }
548
549 void anv_CmdSetScissor(
550 VkCommandBuffer commandBuffer,
551 uint32_t firstScissor,
552 uint32_t scissorCount,
553 const VkRect2D* pScissors)
554 {
555 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
556
557 const uint32_t total_count = firstScissor + scissorCount;
558 if (cmd_buffer->state.gfx.dynamic.scissor.count < total_count)
559 cmd_buffer->state.gfx.dynamic.scissor.count = total_count;
560
561 memcpy(cmd_buffer->state.gfx.dynamic.scissor.scissors + firstScissor,
562 pScissors, scissorCount * sizeof(*pScissors));
563
564 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
565 }
566
567 void anv_CmdSetScissorWithCountEXT(
568 VkCommandBuffer commandBuffer,
569 uint32_t scissorCount,
570 const VkRect2D* pScissors)
571 {
572 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
573
574 cmd_buffer->state.gfx.dynamic.scissor.count = scissorCount;
575
576 memcpy(cmd_buffer->state.gfx.dynamic.scissor.scissors,
577 pScissors, scissorCount * sizeof(*pScissors));
578
579 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
580 }
581
582 void anv_CmdSetPrimitiveTopologyEXT(
583 VkCommandBuffer commandBuffer,
584 VkPrimitiveTopology primitiveTopology)
585 {
586 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
587
588 cmd_buffer->state.gfx.dynamic.primitive_topology = primitiveTopology;
589
590 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY;
591 }
592
593 void anv_CmdSetLineWidth(
594 VkCommandBuffer commandBuffer,
595 float lineWidth)
596 {
597 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
598
599 cmd_buffer->state.gfx.dynamic.line_width = lineWidth;
600 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
601 }
602
603 void anv_CmdSetDepthBias(
604 VkCommandBuffer commandBuffer,
605 float depthBiasConstantFactor,
606 float depthBiasClamp,
607 float depthBiasSlopeFactor)
608 {
609 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
610
611 cmd_buffer->state.gfx.dynamic.depth_bias.bias = depthBiasConstantFactor;
612 cmd_buffer->state.gfx.dynamic.depth_bias.clamp = depthBiasClamp;
613 cmd_buffer->state.gfx.dynamic.depth_bias.slope = depthBiasSlopeFactor;
614
615 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
616 }
617
618 void anv_CmdSetBlendConstants(
619 VkCommandBuffer commandBuffer,
620 const float blendConstants[4])
621 {
622 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
623
624 memcpy(cmd_buffer->state.gfx.dynamic.blend_constants,
625 blendConstants, sizeof(float) * 4);
626
627 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
628 }
629
630 void anv_CmdSetDepthBounds(
631 VkCommandBuffer commandBuffer,
632 float minDepthBounds,
633 float maxDepthBounds)
634 {
635 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
636
637 cmd_buffer->state.gfx.dynamic.depth_bounds.min = minDepthBounds;
638 cmd_buffer->state.gfx.dynamic.depth_bounds.max = maxDepthBounds;
639
640 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
641 }
642
643 void anv_CmdSetStencilCompareMask(
644 VkCommandBuffer commandBuffer,
645 VkStencilFaceFlags faceMask,
646 uint32_t compareMask)
647 {
648 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
649
650 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
651 cmd_buffer->state.gfx.dynamic.stencil_compare_mask.front = compareMask;
652 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
653 cmd_buffer->state.gfx.dynamic.stencil_compare_mask.back = compareMask;
654
655 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
656 }
657
658 void anv_CmdSetStencilWriteMask(
659 VkCommandBuffer commandBuffer,
660 VkStencilFaceFlags faceMask,
661 uint32_t writeMask)
662 {
663 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
664
665 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
666 cmd_buffer->state.gfx.dynamic.stencil_write_mask.front = writeMask;
667 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
668 cmd_buffer->state.gfx.dynamic.stencil_write_mask.back = writeMask;
669
670 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
671 }
672
673 void anv_CmdSetStencilReference(
674 VkCommandBuffer commandBuffer,
675 VkStencilFaceFlags faceMask,
676 uint32_t reference)
677 {
678 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
679
680 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
681 cmd_buffer->state.gfx.dynamic.stencil_reference.front = reference;
682 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
683 cmd_buffer->state.gfx.dynamic.stencil_reference.back = reference;
684
685 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
686 }
687
688 void anv_CmdSetLineStippleEXT(
689 VkCommandBuffer commandBuffer,
690 uint32_t lineStippleFactor,
691 uint16_t lineStipplePattern)
692 {
693 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
694
695 cmd_buffer->state.gfx.dynamic.line_stipple.factor = lineStippleFactor;
696 cmd_buffer->state.gfx.dynamic.line_stipple.pattern = lineStipplePattern;
697
698 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE;
699 }
700
701 void anv_CmdSetCullModeEXT(
702 VkCommandBuffer commandBuffer,
703 VkCullModeFlags cullMode)
704 {
705 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
706
707 cmd_buffer->state.gfx.dynamic.cull_mode = cullMode;
708
709 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_CULL_MODE;
710 }
711
712 void anv_CmdSetFrontFaceEXT(
713 VkCommandBuffer commandBuffer,
714 VkFrontFace frontFace)
715 {
716 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
717
718 cmd_buffer->state.gfx.dynamic.front_face = frontFace;
719
720 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_FRONT_FACE;
721 }
722
723 void anv_CmdSetDepthTestEnableEXT(
724 VkCommandBuffer commandBuffer,
725 VkBool32 depthTestEnable)
726
727 {
728 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
729
730 cmd_buffer->state.gfx.dynamic.depth_test_enable = depthTestEnable;
731
732 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE;
733 }
734
735 void anv_CmdSetDepthWriteEnableEXT(
736 VkCommandBuffer commandBuffer,
737 VkBool32 depthWriteEnable)
738 {
739 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
740
741 cmd_buffer->state.gfx.dynamic.depth_write_enable = depthWriteEnable;
742
743 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE;
744 }
745
746 void anv_CmdSetDepthCompareOpEXT(
747 VkCommandBuffer commandBuffer,
748 VkCompareOp depthCompareOp)
749 {
750 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
751
752 cmd_buffer->state.gfx.dynamic.depth_compare_op = depthCompareOp;
753
754 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP;
755 }
756
757 void anv_CmdSetDepthBoundsTestEnableEXT(
758 VkCommandBuffer commandBuffer,
759 VkBool32 depthBoundsTestEnable)
760 {
761 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
762
763 cmd_buffer->state.gfx.dynamic.depth_bounds_test_enable = depthBoundsTestEnable;
764
765 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE;
766 }
767
768 void anv_CmdSetStencilTestEnableEXT(
769 VkCommandBuffer commandBuffer,
770 VkBool32 stencilTestEnable)
771 {
772 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
773
774 cmd_buffer->state.gfx.dynamic.stencil_test_enable = stencilTestEnable;
775
776 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE;
777 }
778
779 void anv_CmdSetStencilOpEXT(
780 VkCommandBuffer commandBuffer,
781 VkStencilFaceFlags faceMask,
782 VkStencilOp failOp,
783 VkStencilOp passOp,
784 VkStencilOp depthFailOp,
785 VkCompareOp compareOp)
786 {
787 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
788
789 if (faceMask & VK_STENCIL_FACE_FRONT_BIT) {
790 cmd_buffer->state.gfx.dynamic.stencil_op.front.fail_op = failOp;
791 cmd_buffer->state.gfx.dynamic.stencil_op.front.pass_op = passOp;
792 cmd_buffer->state.gfx.dynamic.stencil_op.front.depth_fail_op = depthFailOp;
793 cmd_buffer->state.gfx.dynamic.stencil_op.front.compare_op = compareOp;
794 }
795
796 if (faceMask & VK_STENCIL_FACE_BACK_BIT) {
797 cmd_buffer->state.gfx.dynamic.stencil_op.back.fail_op = failOp;
798 cmd_buffer->state.gfx.dynamic.stencil_op.back.pass_op = passOp;
799 cmd_buffer->state.gfx.dynamic.stencil_op.back.depth_fail_op = depthFailOp;
800 cmd_buffer->state.gfx.dynamic.stencil_op.back.compare_op = compareOp;
801 }
802
803 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP;
804 }
805
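/* Bind one descriptor set at `set_index` for the graphics or compute
 * bind point.  Dynamic offsets, if any, are consumed from
 * *dynamic_offsets and copied into the push constants.  Only the shader
 * stages that use the set (or whose dynamic offsets actually changed)
 * are flagged as having dirty descriptors/push constants; push
 * descriptor sets are always flagged since they may have been edited in
 * place.
 */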
806 static void
807 anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
808 VkPipelineBindPoint bind_point,
809 struct anv_pipeline_layout *layout,
810 uint32_t set_index,
811 struct anv_descriptor_set *set,
812 uint32_t *dynamic_offset_count,
813 const uint32_t **dynamic_offsets)
814 {
815 struct anv_descriptor_set_layout *set_layout =
816 layout->set[set_index].layout;
817
818 VkShaderStageFlags stages = set_layout->shader_stages;
819 struct anv_cmd_pipeline_state *pipe_state;
820
821 switch (bind_point) {
822 case VK_PIPELINE_BIND_POINT_GRAPHICS:
823 stages &= VK_SHADER_STAGE_ALL_GRAPHICS;
824 pipe_state = &cmd_buffer->state.gfx.base;
825 break;
826
827 case VK_PIPELINE_BIND_POINT_COMPUTE:
828 stages &= VK_SHADER_STAGE_COMPUTE_BIT;
829 pipe_state = &cmd_buffer->state.compute.base;
830 break;
831
832 default:
833 unreachable("invalid bind point");
834 }
835
836 VkShaderStageFlags dirty_stages = 0;
837 if (pipe_state->descriptors[set_index] != set) {
838 pipe_state->descriptors[set_index] = set;
839 dirty_stages |= stages;
840 }
841
842 /* If it's a push descriptor set, we have to flag things as dirty
843 * regardless of whether or not the CPU-side data structure changed as we
844 * may have edited in-place.
845 */
846 if (set->pool == NULL)
847 dirty_stages |= stages;
848
849 if (dynamic_offsets) {
850 if (set_layout->dynamic_offset_count > 0) {
851 struct anv_push_constants *push = &pipe_state->push_constants;
852 uint32_t dynamic_offset_start =
853 layout->set[set_index].dynamic_offset_start;
854 uint32_t *push_offsets =
855 &push->dynamic_offsets[dynamic_offset_start];
856
857 /* Assert that everything is in range */
858 assert(set_layout->dynamic_offset_count <= *dynamic_offset_count);
859 assert(dynamic_offset_start + set_layout->dynamic_offset_count <=
860 ARRAY_SIZE(push->dynamic_offsets));
861
862 for (uint32_t i = 0; i < set_layout->dynamic_offset_count; i++) {
863 if (push_offsets[i] != (*dynamic_offsets)[i]) {
864 push_offsets[i] = (*dynamic_offsets)[i];
865 /* dynamic_offset_stages[] elements could contain blanket
866 * values like VK_SHADER_STAGE_ALL, so limit this to the
867 * binding point's bits.
868 */
869 dirty_stages |= set_layout->dynamic_offset_stages[i] & stages;
870 }
871 }
872
873 *dynamic_offsets += set_layout->dynamic_offset_count;
874 *dynamic_offset_count -= set_layout->dynamic_offset_count;
875 }
876 }
877
878 cmd_buffer->state.descriptors_dirty |= dirty_stages;
879 cmd_buffer->state.push_constants_dirty |= dirty_stages;
880 }
881
882 void anv_CmdBindDescriptorSets(
883 VkCommandBuffer commandBuffer,
884 VkPipelineBindPoint pipelineBindPoint,
885 VkPipelineLayout _layout,
886 uint32_t firstSet,
887 uint32_t descriptorSetCount,
888 const VkDescriptorSet* pDescriptorSets,
889 uint32_t dynamicOffsetCount,
890 const uint32_t* pDynamicOffsets)
891 {
892 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
893 ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
894
895 assert(firstSet + descriptorSetCount <= MAX_SETS);
896
897 for (uint32_t i = 0; i < descriptorSetCount; i++) {
898 ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
899 anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
900 layout, firstSet + i, set,
901 &dynamicOffsetCount,
902 &pDynamicOffsets);
903 }
904 }
905
906 void anv_CmdBindVertexBuffers2EXT(
907 VkCommandBuffer commandBuffer,
908 uint32_t firstBinding,
909 uint32_t bindingCount,
910 const VkBuffer* pBuffers,
911 const VkDeviceSize* pOffsets,
912 const VkDeviceSize* pSizes,
913 const VkDeviceSize* pStrides)
914 {
915 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
916 struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;
917
918 /* We have to defer setting up the vertex buffers since we need the buffer
919 * stride from the pipeline. */
920
921 if (pSizes)
922 cmd_buffer->state.gfx.dynamic.dyn_vbo_size = true;
923 if (pStrides)
924 cmd_buffer->state.gfx.dynamic.dyn_vbo_stride = true;
925
926 assert(firstBinding + bindingCount <= MAX_VBS);
927 for (uint32_t i = 0; i < bindingCount; i++) {
928 vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
929 vb[firstBinding + i].offset = pOffsets[i];
930 vb[firstBinding + i].size = pSizes ? pSizes[i] : 0;
931 vb[firstBinding + i].stride = pStrides ? pStrides[i] : 0;
932 cmd_buffer->state.gfx.vb_dirty |= 1 << (firstBinding + i);
933 }
934 }
935
936 void anv_CmdBindVertexBuffers(
937 VkCommandBuffer commandBuffer,
938 uint32_t firstBinding,
939 uint32_t bindingCount,
940 const VkBuffer* pBuffers,
941 const VkDeviceSize* pOffsets)
942 {
943 return anv_CmdBindVertexBuffers2EXT(commandBuffer, firstBinding,
944 bindingCount, pBuffers, pOffsets,
945 NULL, NULL);
946 }
947
948 void anv_CmdBindTransformFeedbackBuffersEXT(
949 VkCommandBuffer commandBuffer,
950 uint32_t firstBinding,
951 uint32_t bindingCount,
952 const VkBuffer* pBuffers,
953 const VkDeviceSize* pOffsets,
954 const VkDeviceSize* pSizes)
955 {
956 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
957 struct anv_xfb_binding *xfb = cmd_buffer->state.xfb_bindings;
958
959 /* As with vertex buffers, we only record the bindings here; the actual
960 * hardware setup is deferred to state emission. */
961
962 assert(firstBinding + bindingCount <= MAX_XFB_BUFFERS);
963 for (uint32_t i = 0; i < bindingCount; i++) {
964 if (pBuffers[i] == VK_NULL_HANDLE) {
965 xfb[firstBinding + i].buffer = NULL;
966 } else {
967 ANV_FROM_HANDLE(anv_buffer, buffer, pBuffers[i]);
968 xfb[firstBinding + i].buffer = buffer;
969 xfb[firstBinding + i].offset = pOffsets[i];
970 xfb[firstBinding + i].size =
971 anv_buffer_get_range(buffer, pOffsets[i],
972 pSizes ? pSizes[i] : VK_WHOLE_SIZE);
973 }
974 }
975 }
976
977 enum isl_format
978 anv_isl_format_for_descriptor_type(VkDescriptorType type)
979 {
980 switch (type) {
981 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
982 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
983 return ISL_FORMAT_R32G32B32A32_FLOAT;
984
985 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
986 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
987 return ISL_FORMAT_RAW;
988
989 default:
990 unreachable("Invalid descriptor type");
991 }
992 }
993
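/* Copy `size` bytes of `data` into freshly allocated dynamic state with
 * the requested alignment and return the resulting state.
 */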
994 struct anv_state
995 anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
996 const void *data, uint32_t size, uint32_t alignment)
997 {
998 struct anv_state state;
999
1000 state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
1001 memcpy(state.map, data, size);
1002
1003 VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));
1004
1005 return state;
1006 }
1007
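/* Allocate dynamic state for `dwords` DWORDs and fill it with the
 * bitwise OR of arrays `a` and `b`.  Callers typically use this to
 * combine partially packed state from the pipeline with dynamically
 * packed fields.
 */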
1008 struct anv_state
1009 anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
1010 uint32_t *a, uint32_t *b,
1011 uint32_t dwords, uint32_t alignment)
1012 {
1013 struct anv_state state;
1014 uint32_t *p;
1015
1016 state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
1017 dwords * 4, alignment);
1018 p = state.map;
1019 for (uint32_t i = 0; i < dwords; i++)
1020 p[i] = a[i] | b[i];
1021
1022 VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));
1023
1024 return state;
1025 }
1026
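/* Upload a snapshot of the current graphics push constants into dynamic
 * state (32-byte aligned; see the "bottom 5 bits MBZ" note below).
 */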
1027 struct anv_state
1028 anv_cmd_buffer_gfx_push_constants(struct anv_cmd_buffer *cmd_buffer)
1029 {
1030 struct anv_push_constants *data =
1031 &cmd_buffer->state.gfx.base.push_constants;
1032
1033 struct anv_state state =
1034 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
1035 sizeof(struct anv_push_constants),
1036 32 /* bottom 5 bits MBZ */);
1037 memcpy(state.map, data, sizeof(struct anv_push_constants));
1038
1039 return state;
1040 }
1041
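/* Build the compute push constant buffer: the cross-thread portion is
 * copied once, then the per-thread portion is replicated for each HW
 * thread with cs.subgroup_id patched to the thread index.
 */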
1042 struct anv_state
1043 anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
1044 {
1045 struct anv_push_constants *data =
1046 &cmd_buffer->state.compute.base.push_constants;
1047 struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
1048 const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
1049 const struct anv_push_range *range = &pipeline->cs->bind_map.push_ranges[0];
1050
1051 const struct anv_cs_parameters cs_params = anv_cs_parameters(pipeline);
1052 const unsigned total_push_constants_size =
1053 brw_cs_push_const_total_size(cs_prog_data, cs_params.threads);
1054 if (total_push_constants_size == 0)
1055 return (struct anv_state) { .offset = 0 };
1056
1057 const unsigned push_constant_alignment =
1058 cmd_buffer->device->info.gen < 8 ? 32 : 64;
1059 const unsigned aligned_total_push_constants_size =
1060 ALIGN(total_push_constants_size, push_constant_alignment);
1061 struct anv_state state =
1062 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
1063 aligned_total_push_constants_size,
1064 push_constant_alignment);
1065
1066 void *dst = state.map;
1067 const void *src = (char *)data + (range->start * 32);
1068
1069 if (cs_prog_data->push.cross_thread.size > 0) {
1070 memcpy(dst, src, cs_prog_data->push.cross_thread.size);
1071 dst += cs_prog_data->push.cross_thread.size;
1072 src += cs_prog_data->push.cross_thread.size;
1073 }
1074
1075 if (cs_prog_data->push.per_thread.size > 0) {
1076 for (unsigned t = 0; t < cs_params.threads; t++) {
1077 memcpy(dst, src, cs_prog_data->push.per_thread.size);
1078
1079 uint32_t *subgroup_id = dst +
1080 offsetof(struct anv_push_constants, cs.subgroup_id) -
1081 (range->start * 32 + cs_prog_data->push.cross_thread.size);
1082 *subgroup_id = t;
1083
1084 dst += cs_prog_data->push.per_thread.size;
1085 }
1086 }
1087
1088 return state;
1089 }
1090
1091 void anv_CmdPushConstants(
1092 VkCommandBuffer commandBuffer,
1093 VkPipelineLayout layout,
1094 VkShaderStageFlags stageFlags,
1095 uint32_t offset,
1096 uint32_t size,
1097 const void* pValues)
1098 {
1099 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1100
1101 if (stageFlags & VK_SHADER_STAGE_ALL_GRAPHICS) {
1102 struct anv_cmd_pipeline_state *pipe_state =
1103 &cmd_buffer->state.gfx.base;
1104
1105 memcpy(pipe_state->push_constants.client_data + offset, pValues, size);
1106 }
1107 if (stageFlags & VK_SHADER_STAGE_COMPUTE_BIT) {
1108 struct anv_cmd_pipeline_state *pipe_state =
1109 &cmd_buffer->state.compute.base;
1110
1111 memcpy(pipe_state->push_constants.client_data + offset, pValues, size);
1112 }
1113
1114 cmd_buffer->state.push_constants_dirty |= stageFlags;
1115 }
1116
1117 VkResult anv_CreateCommandPool(
1118 VkDevice _device,
1119 const VkCommandPoolCreateInfo* pCreateInfo,
1120 const VkAllocationCallbacks* pAllocator,
1121 VkCommandPool* pCmdPool)
1122 {
1123 ANV_FROM_HANDLE(anv_device, device, _device);
1124 struct anv_cmd_pool *pool;
1125
1126 pool = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8,
1127 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1128 if (pool == NULL)
1129 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1130
1131 vk_object_base_init(&device->vk, &pool->base, VK_OBJECT_TYPE_COMMAND_POOL);
1132
1133 if (pAllocator)
1134 pool->alloc = *pAllocator;
1135 else
1136 pool->alloc = device->vk.alloc;
1137
1138 list_inithead(&pool->cmd_buffers);
1139
1140 *pCmdPool = anv_cmd_pool_to_handle(pool);
1141
1142 return VK_SUCCESS;
1143 }
1144
1145 void anv_DestroyCommandPool(
1146 VkDevice _device,
1147 VkCommandPool commandPool,
1148 const VkAllocationCallbacks* pAllocator)
1149 {
1150 ANV_FROM_HANDLE(anv_device, device, _device);
1151 ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
1152
1153 if (!pool)
1154 return;
1155
1156 list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
1157 &pool->cmd_buffers, pool_link) {
1158 anv_cmd_buffer_destroy(cmd_buffer);
1159 }
1160
1161 vk_object_base_finish(&pool->base);
1162 vk_free2(&device->vk.alloc, pAllocator, pool);
1163 }
1164
1165 VkResult anv_ResetCommandPool(
1166 VkDevice device,
1167 VkCommandPool commandPool,
1168 VkCommandPoolResetFlags flags)
1169 {
1170 ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
1171
1172 list_for_each_entry(struct anv_cmd_buffer, cmd_buffer,
1173 &pool->cmd_buffers, pool_link) {
1174 anv_cmd_buffer_reset(cmd_buffer);
1175 }
1176
1177 return VK_SUCCESS;
1178 }
1179
1180 void anv_TrimCommandPool(
1181 VkDevice device,
1182 VkCommandPool commandPool,
1183 VkCommandPoolTrimFlags flags)
1184 {
1185 /* Nothing for us to do here. Our pools stay pretty tidy. */
1186 }
1187
1188 /**
1189 * Return NULL if the current subpass has no depth/stencil attachment.
1190 */
1191 const struct anv_image_view *
1192 anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
1193 {
1194 const struct anv_subpass *subpass = cmd_buffer->state.subpass;
1195
1196 if (subpass->depth_stencil_attachment == NULL)
1197 return NULL;
1198
1199 const struct anv_image_view *iview =
1200 cmd_buffer->state.attachments[subpass->depth_stencil_attachment->attachment].image_view;
1201
1202 assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
1203 VK_IMAGE_ASPECT_STENCIL_BIT));
1204
1205 return iview;
1206 }
1207
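/* Return (lazily allocating) the push descriptor set for the given bind
 * point and set index.  The descriptor buffer backing the set is
 * reallocated from the dynamic state stream whenever the existing one is
 * too small or has already been consumed by the GPU, so the CPU side can
 * keep updating descriptors in place.
 */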
1208 static struct anv_descriptor_set *
1209 anv_cmd_buffer_push_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
1210 VkPipelineBindPoint bind_point,
1211 struct anv_descriptor_set_layout *layout,
1212 uint32_t _set)
1213 {
1214 struct anv_cmd_pipeline_state *pipe_state;
1215 if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
1216 pipe_state = &cmd_buffer->state.compute.base;
1217 } else {
1218 assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
1219 pipe_state = &cmd_buffer->state.gfx.base;
1220 }
1221
1222 struct anv_push_descriptor_set **push_set =
1223 &pipe_state->push_descriptors[_set];
1224
1225 if (*push_set == NULL) {
1226 *push_set = vk_zalloc(&cmd_buffer->pool->alloc,
1227 sizeof(struct anv_push_descriptor_set), 8,
1228 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1229 if (*push_set == NULL) {
1230 anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
1231 return NULL;
1232 }
1233 }
1234
1235 struct anv_descriptor_set *set = &(*push_set)->set;
1236
1237 if (set->layout != layout) {
1238 if (set->layout)
1239 anv_descriptor_set_layout_unref(cmd_buffer->device, set->layout);
1240 anv_descriptor_set_layout_ref(layout);
1241 set->layout = layout;
1242 }
1243 set->size = anv_descriptor_set_layout_size(layout);
1244 set->buffer_view_count = layout->buffer_view_count;
1245 set->buffer_views = (*push_set)->buffer_views;
1246
1247 if (layout->descriptor_buffer_size &&
1248 ((*push_set)->set_used_on_gpu ||
1249 set->desc_mem.alloc_size < layout->descriptor_buffer_size)) {
1250 /* The previous buffer is either actively used by some GPU command (so
1251 * we can't modify it) or is too small. Allocate a new one.
1252 */
1253 struct anv_state desc_mem =
1254 anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
1255 layout->descriptor_buffer_size, 32);
1256 if (set->desc_mem.alloc_size) {
1257 /* TODO: Do we really need to copy all the time? */
1258 memcpy(desc_mem.map, set->desc_mem.map,
1259 MIN2(desc_mem.alloc_size, set->desc_mem.alloc_size));
1260 }
1261 set->desc_mem = desc_mem;
1262
1263 struct anv_address addr = {
1264 .bo = cmd_buffer->dynamic_state_stream.state_pool->block_pool.bo,
1265 .offset = set->desc_mem.offset,
1266 };
1267
1268 const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
1269 set->desc_surface_state =
1270 anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
1271 isl_dev->ss.size, isl_dev->ss.align);
1272 anv_fill_buffer_surface_state(cmd_buffer->device,
1273 set->desc_surface_state,
1274 ISL_FORMAT_R32G32B32A32_FLOAT,
1275 addr, layout->descriptor_buffer_size, 1);
1276 }
1277
1278 return set;
1279 }
1280
1281 void anv_CmdPushDescriptorSetKHR(
1282 VkCommandBuffer commandBuffer,
1283 VkPipelineBindPoint pipelineBindPoint,
1284 VkPipelineLayout _layout,
1285 uint32_t _set,
1286 uint32_t descriptorWriteCount,
1287 const VkWriteDescriptorSet* pDescriptorWrites)
1288 {
1289 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1290 ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
1291
1292 assert(_set < MAX_SETS);
1293
1294 struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;
1295
1296 struct anv_descriptor_set *set =
1297 anv_cmd_buffer_push_descriptor_set(cmd_buffer, pipelineBindPoint,
1298 set_layout, _set);
1299 if (!set)
1300 return;
1301
1302 /* Go through the user supplied descriptors. */
1303 for (uint32_t i = 0; i < descriptorWriteCount; i++) {
1304 const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
1305
1306 switch (write->descriptorType) {
1307 case VK_DESCRIPTOR_TYPE_SAMPLER:
1308 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1309 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
1310 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1311 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
1312 for (uint32_t j = 0; j < write->descriptorCount; j++) {
1313 anv_descriptor_set_write_image_view(cmd_buffer->device, set,
1314 write->pImageInfo + j,
1315 write->descriptorType,
1316 write->dstBinding,
1317 write->dstArrayElement + j);
1318 }
1319 break;
1320
1321 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1322 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1323 for (uint32_t j = 0; j < write->descriptorCount; j++) {
1324 ANV_FROM_HANDLE(anv_buffer_view, bview,
1325 write->pTexelBufferView[j]);
1326
1327 anv_descriptor_set_write_buffer_view(cmd_buffer->device, set,
1328 write->descriptorType,
1329 bview,
1330 write->dstBinding,
1331 write->dstArrayElement + j);
1332 }
1333 break;
1334
1335 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1336 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1337 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1338 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1339 for (uint32_t j = 0; j < write->descriptorCount; j++) {
1340 ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);
1341
1342 anv_descriptor_set_write_buffer(cmd_buffer->device, set,
1343 &cmd_buffer->surface_state_stream,
1344 write->descriptorType,
1345 buffer,
1346 write->dstBinding,
1347 write->dstArrayElement + j,
1348 write->pBufferInfo[j].offset,
1349 write->pBufferInfo[j].range);
1350 }
1351 break;
1352
1353 default:
1354 break;
1355 }
1356 }
1357
1358 anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
1359 layout, _set, set, NULL, NULL);
1360 }
1361
1362 void anv_CmdPushDescriptorSetWithTemplateKHR(
1363 VkCommandBuffer commandBuffer,
1364 VkDescriptorUpdateTemplate descriptorUpdateTemplate,
1365 VkPipelineLayout _layout,
1366 uint32_t _set,
1367 const void* pData)
1368 {
1369 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1370 ANV_FROM_HANDLE(anv_descriptor_update_template, template,
1371 descriptorUpdateTemplate);
1372 ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
1373
1374 assert(_set < MAX_PUSH_DESCRIPTORS);
1375
1376 struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;
1377
1378 struct anv_descriptor_set *set =
1379 anv_cmd_buffer_push_descriptor_set(cmd_buffer, template->bind_point,
1380 set_layout, _set);
1381 if (!set)
1382 return;
1383
1384 anv_descriptor_set_write_template(cmd_buffer->device, set,
1385 &cmd_buffer->surface_state_stream,
1386 template,
1387 pData);
1388
1389 anv_cmd_buffer_bind_descriptor_set(cmd_buffer, template->bind_point,
1390 layout, _set, set, NULL, NULL);
1391 }
1392
1393 void anv_CmdSetDeviceMask(
1394 VkCommandBuffer commandBuffer,
1395 uint32_t deviceMask)
1396 {
1397 /* No-op */
1398 }