/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "vk_format_info.h"
#include "vk_util.h"

/** \file anv_cmd_buffer.c
 *
 * This file contains the code for emitting commands into a command buffer,
 * including implementations of most of the vkCmd* entrypoints.  It is
 * concerned entirely with state emission and not with the command buffer
 * data structure itself; as far as this file is concerned, most of
 * anv_cmd_buffer is magic.
 */

/* TODO: These are taken from GLES. We should check the Vulkan spec */
const struct anv_dynamic_state default_dynamic_state = {
   .viewport = {
      .count = 0,
   },
   .scissor = {
      .count = 0,
   },
   .line_width = 1.0f,
   .depth_bias = {
      .bias = 0.0f,
      .clamp = 0.0f,
      .slope = 0.0f,
   },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds = {
      .min = 0.0f,
      .max = 1.0f,
   },
   .stencil_compare_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_write_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_reference = {
      .front = 0u,
      .back = 0u,
   },
   .line_stipple = {
      .factor = 0u,
      .pattern = 0u,
   },
};

/**
 * Copy the dynamic state from src to dest based on the copy_mask.
 *
 * Avoid copying states that have not changed, except for VIEWPORT, SCISSOR and
 * BLEND_CONSTANTS (always copy them if they are in the copy_mask).
 *
 * Returns a mask of the states which changed.
 */
anv_cmd_dirty_mask_t
anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                       const struct anv_dynamic_state *src,
                       anv_cmd_dirty_mask_t copy_mask)
{
   anv_cmd_dirty_mask_t changed = 0;

   if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT) {
      dest->viewport.count = src->viewport.count;
      typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                   src->viewport.count);
      changed |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
   }

   if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_SCISSOR) {
      dest->scissor.count = src->scissor.count;
      typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                   src->scissor.count);
      changed |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
   }

   if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) {
      typed_memcpy(dest->blend_constants, src->blend_constants, 4);
      changed |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
   }

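/* The helper below copies a single scalar field only when it differs and
 * accumulates the corresponding dirty flag.  For example,
 * ANV_CMP_COPY(line_width, ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH) expands to:
 *
 *    if (copy_mask & ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH) {
 *       if (dest->line_width != src->line_width) {
 *          dest->line_width = src->line_width;
 *          changed |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
 *       }
 *    }
 */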
#define ANV_CMP_COPY(field, flag)                       \
   if (copy_mask & flag) {                              \
      if (dest->field != src->field) {                  \
         dest->field = src->field;                      \
         changed |= flag;                               \
      }                                                 \
   }

   ANV_CMP_COPY(line_width, ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH);

   ANV_CMP_COPY(depth_bias.bias, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);
   ANV_CMP_COPY(depth_bias.clamp, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);
   ANV_CMP_COPY(depth_bias.slope, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS);

   ANV_CMP_COPY(depth_bounds.min, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS);
   ANV_CMP_COPY(depth_bounds.max, ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS);

   ANV_CMP_COPY(stencil_compare_mask.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK);
   ANV_CMP_COPY(stencil_compare_mask.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK);

   ANV_CMP_COPY(stencil_write_mask.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK);
   ANV_CMP_COPY(stencil_write_mask.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK);

   ANV_CMP_COPY(stencil_reference.front, ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE);
   ANV_CMP_COPY(stencil_reference.back, ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE);

   ANV_CMP_COPY(line_stipple.factor, ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE);
   ANV_CMP_COPY(line_stipple.pattern, ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE);

#undef ANV_CMP_COPY

   return changed;
}

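/* Reset the software-side command buffer state: no current pipeline, an
 * all-ones restart index, and the default dynamic state.
 */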
static void
anv_cmd_state_init(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   memset(state, 0, sizeof(*state));

   state->current_pipeline = UINT32_MAX;
   state->restart_index = UINT32_MAX;
   state->gfx.dynamic = default_dynamic_state;
}

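/* Free the push descriptor sets owned by a per-bind-point pipeline state,
 * dropping the reference each one holds on its descriptor set layout.
 */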
static void
anv_cmd_pipeline_state_finish(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_cmd_pipeline_state *pipe_state)
{
   for (uint32_t i = 0; i < ARRAY_SIZE(pipe_state->push_descriptors); i++) {
      if (pipe_state->push_descriptors[i]) {
         anv_descriptor_set_layout_unref(cmd_buffer->device,
                                         pipe_state->push_descriptors[i]->set.layout);
         vk_free(&cmd_buffer->pool->alloc, pipe_state->push_descriptors[i]);
      }
   }
}

static void
anv_cmd_state_finish(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   anv_cmd_pipeline_state_finish(cmd_buffer, &state->gfx.base);
   anv_cmd_pipeline_state_finish(cmd_buffer, &state->compute.base);

   vk_free(&cmd_buffer->pool->alloc, state->attachments);
}

static void
anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
{
   anv_cmd_state_finish(cmd_buffer);
   anv_cmd_state_init(cmd_buffer);
}

static VkResult anv_create_cmd_buffer(
    struct anv_device *                         device,
    struct anv_cmd_pool *                       pool,
    VkCommandBufferLevel                        level,
    VkCommandBuffer*                            pCommandBuffer)
{
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->batch.status = VK_SUCCESS;

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_pool, 4096);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_pool, 16384);

   anv_cmd_state_init(cmd_buffer);

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
   }

   *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;

 fail:
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);

   return result;
}

VkResult anv_AllocateCommandBuffers(
    VkDevice                                    _device,
    const VkCommandBufferAllocateInfo*          pAllocateInfo,
    VkCommandBuffer*                            pCommandBuffers)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
      result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                     &pCommandBuffers[i]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                             i, pCommandBuffers);
      for (i = 0; i < pAllocateInfo->commandBufferCount; i++)
         pCommandBuffers[i] = VK_NULL_HANDLE;
   }

   return result;
}

static void
anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);

   anv_cmd_state_finish(cmd_buffer);

   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

void anv_FreeCommandBuffers(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (!cmd_buffer)
         continue;

      anv_cmd_buffer_destroy(cmd_buffer);
   }
}

VkResult
anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer)
{
   cmd_buffer->usage_flags = 0;
   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
   anv_cmd_state_reset(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &cmd_buffer->device->surface_state_pool, 4096);

   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &cmd_buffer->device->dynamic_state_pool, 16384);
   return VK_SUCCESS;
}

VkResult anv_ResetCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    VkCommandBufferResetFlags                   flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   return anv_cmd_buffer_reset(cmd_buffer);
}

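/* Dispatch to the per-generation implementation of a genX() helper based on
 * the hardware generation in devinfo.  For example,
 *
 *    anv_genX_call(&cmd_buffer->device->info,
 *                  cmd_buffer_emit_state_base_address, cmd_buffer);
 *
 * resolves to gen9_cmd_buffer_emit_state_base_address(cmd_buffer) on Gen9.
 */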
#define anv_genX_call(devinfo, func, ...)          \
   switch ((devinfo)->gen) {                       \
   case 7:                                         \
      if ((devinfo)->is_haswell) {                 \
         gen75_##func(__VA_ARGS__);                \
      } else {                                     \
         gen7_##func(__VA_ARGS__);                 \
      }                                            \
      break;                                       \
   case 8:                                         \
      gen8_##func(__VA_ARGS__);                    \
      break;                                       \
   case 9:                                         \
      gen9_##func(__VA_ARGS__);                    \
      break;                                       \
   case 10:                                        \
      gen10_##func(__VA_ARGS__);                   \
      break;                                       \
   case 11:                                        \
      gen11_##func(__VA_ARGS__);                   \
      break;                                       \
   case 12:                                        \
      gen12_##func(__VA_ARGS__);                   \
      break;                                       \
   default:                                        \
      assert(!"Unknown hardware generation");      \
   }

void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_buffer_emit_state_base_address,
                 cmd_buffer);
}

void
anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
                                  const struct anv_image *image,
                                  VkImageAspectFlagBits aspect,
                                  enum isl_aux_usage aux_usage,
                                  uint32_t level,
                                  uint32_t base_layer,
                                  uint32_t layer_count)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_buffer_mark_image_written,
                 cmd_buffer, image, aspect, aux_usage,
                 level, base_layer, layer_count);
}

void
anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer *cmd_buffer)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_emit_conditional_render_predicate,
                 cmd_buffer);
}

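/* Overwrite dst with src only if the contents differ.  Returns true when dst
 * was actually updated so the caller can flag the corresponding state dirty.
 */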
static bool
mem_update(void *dst, const void *src, size_t size)
{
   if (memcmp(dst, src, size) == 0)
      return false;

   memcpy(dst, src, size);
   return true;
}

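/* Compare the bind map SHA1s of a freshly bound pipeline against the ones
 * last seen by this command buffer and mark the stage's descriptors and/or
 * push constants dirty when they changed.
 */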
static void
set_dirty_for_bind_map(struct anv_cmd_buffer *cmd_buffer,
                       gl_shader_stage stage,
                       const struct anv_pipeline_bind_map *map)
{
   if (mem_update(cmd_buffer->state.surface_sha1s[stage],
                  map->surface_sha1, sizeof(map->surface_sha1)))
      cmd_buffer->state.descriptors_dirty |= mesa_to_vk_shader_stage(stage);

   if (mem_update(cmd_buffer->state.sampler_sha1s[stage],
                  map->sampler_sha1, sizeof(map->sampler_sha1)))
      cmd_buffer->state.descriptors_dirty |= mesa_to_vk_shader_stage(stage);

   if (mem_update(cmd_buffer->state.push_sha1s[stage],
                  map->push_sha1, sizeof(map->push_sha1)))
      cmd_buffer->state.push_constants_dirty |= mesa_to_vk_shader_stage(stage);
}

void anv_CmdBindPipeline(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_COMPUTE: {
      struct anv_compute_pipeline *compute_pipeline =
         anv_pipeline_to_compute(pipeline);
      if (cmd_buffer->state.compute.pipeline == compute_pipeline)
         return;

      cmd_buffer->state.compute.pipeline = compute_pipeline;
      cmd_buffer->state.compute.pipeline_dirty = true;
      set_dirty_for_bind_map(cmd_buffer, MESA_SHADER_COMPUTE,
                             &compute_pipeline->cs->bind_map);
      break;
   }

   case VK_PIPELINE_BIND_POINT_GRAPHICS: {
      struct anv_graphics_pipeline *gfx_pipeline =
         anv_pipeline_to_graphics(pipeline);
      if (cmd_buffer->state.gfx.pipeline == gfx_pipeline)
         return;

      cmd_buffer->state.gfx.pipeline = gfx_pipeline;
      cmd_buffer->state.gfx.vb_dirty |= gfx_pipeline->vb_used;
      cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;

      anv_foreach_stage(stage, gfx_pipeline->active_stages) {
         set_dirty_for_bind_map(cmd_buffer, stage,
                                &gfx_pipeline->shaders[stage]->bind_map);
      }

      /* Apply the dynamic state from the pipeline */
      cmd_buffer->state.gfx.dirty |=
         anv_dynamic_state_copy(&cmd_buffer->state.gfx.dynamic,
                                &gfx_pipeline->dynamic_state,
                                gfx_pipeline->dynamic_state_mask);
      break;
   }

   default:
      assert(!"invalid bind point");
      break;
   }
}

void anv_CmdSetViewport(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstViewport,
    uint32_t                                    viewportCount,
    const VkViewport*                           pViewports)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstViewport + viewportCount;
   if (cmd_buffer->state.gfx.dynamic.viewport.count < total_count)
      cmd_buffer->state.gfx.dynamic.viewport.count = total_count;

   memcpy(cmd_buffer->state.gfx.dynamic.viewport.viewports + firstViewport,
          pViewports, viewportCount * sizeof(*pViewports));

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}

void anv_CmdSetScissor(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstScissor,
    uint32_t                                    scissorCount,
    const VkRect2D*                             pScissors)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstScissor + scissorCount;
   if (cmd_buffer->state.gfx.dynamic.scissor.count < total_count)
      cmd_buffer->state.gfx.dynamic.scissor.count = total_count;

   memcpy(cmd_buffer->state.gfx.dynamic.scissor.scissors + firstScissor,
          pScissors, scissorCount * sizeof(*pScissors));

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}

void anv_CmdSetLineWidth(
    VkCommandBuffer                             commandBuffer,
    float                                       lineWidth)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.line_width = lineWidth;
   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}

void anv_CmdSetDepthBias(
    VkCommandBuffer                             commandBuffer,
    float                                       depthBiasConstantFactor,
    float                                       depthBiasClamp,
    float                                       depthBiasSlopeFactor)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.depth_bias.bias = depthBiasConstantFactor;
   cmd_buffer->state.gfx.dynamic.depth_bias.clamp = depthBiasClamp;
   cmd_buffer->state.gfx.dynamic.depth_bias.slope = depthBiasSlopeFactor;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}

void anv_CmdSetBlendConstants(
    VkCommandBuffer                             commandBuffer,
    const float                                 blendConstants[4])
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   memcpy(cmd_buffer->state.gfx.dynamic.blend_constants,
          blendConstants, sizeof(float) * 4);

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}

void anv_CmdSetDepthBounds(
    VkCommandBuffer                             commandBuffer,
    float                                       minDepthBounds,
    float                                       maxDepthBounds)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.depth_bounds.min = minDepthBounds;
   cmd_buffer->state.gfx.dynamic.depth_bounds.max = maxDepthBounds;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}

void anv_CmdSetStencilCompareMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    compareMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_compare_mask.back = compareMask;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}

void anv_CmdSetStencilWriteMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    writeMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_write_mask.back = writeMask;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void anv_CmdSetStencilReference(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    reference)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_reference.back = reference;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}

void anv_CmdSetLineStippleEXT(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    lineStippleFactor,
    uint16_t                                    lineStipplePattern)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.line_stipple.factor = lineStippleFactor;
   cmd_buffer->state.gfx.dynamic.line_stipple.pattern = lineStipplePattern;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE;
}

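/* Bind a descriptor set at set_index for the given bind point, consuming its
 * dynamic offsets (if any) from the caller-provided array and marking the
 * affected shader stages' descriptors and push constants dirty.
 */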
static void
anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
                                   VkPipelineBindPoint bind_point,
                                   struct anv_pipeline_layout *layout,
                                   uint32_t set_index,
                                   struct anv_descriptor_set *set,
                                   uint32_t *dynamic_offset_count,
                                   const uint32_t **dynamic_offsets)
{
   struct anv_descriptor_set_layout *set_layout =
      layout->set[set_index].layout;

   VkShaderStageFlags stages = set_layout->shader_stages;
   struct anv_cmd_pipeline_state *pipe_state;

   switch (bind_point) {
   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      stages &= VK_SHADER_STAGE_ALL_GRAPHICS;
      pipe_state = &cmd_buffer->state.gfx.base;
      break;

   case VK_PIPELINE_BIND_POINT_COMPUTE:
      stages &= VK_SHADER_STAGE_COMPUTE_BIT;
      pipe_state = &cmd_buffer->state.compute.base;
      break;

   default:
      unreachable("invalid bind point");
   }

   VkShaderStageFlags dirty_stages = 0;
   if (pipe_state->descriptors[set_index] != set) {
      pipe_state->descriptors[set_index] = set;
      dirty_stages |= stages;
   }

   /* If it's a push descriptor set, we have to flag things as dirty
    * regardless of whether or not the CPU-side data structure changed as we
    * may have edited in-place.
    */
   if (set->pool == NULL)
      dirty_stages |= stages;

   if (dynamic_offsets) {
      if (set_layout->dynamic_offset_count > 0) {
         uint32_t dynamic_offset_start =
            layout->set[set_index].dynamic_offset_start;

         anv_foreach_stage(stage, stages) {
            struct anv_push_constants *push =
               &cmd_buffer->state.push_constants[stage];
            uint32_t *push_offsets =
               &push->dynamic_offsets[dynamic_offset_start];

            /* Assert that everything is in range */
            assert(set_layout->dynamic_offset_count <= *dynamic_offset_count);
            assert(dynamic_offset_start + set_layout->dynamic_offset_count <=
                   ARRAY_SIZE(push->dynamic_offsets));

            unsigned mask = set_layout->stage_dynamic_offsets[stage];
            STATIC_ASSERT(MAX_DYNAMIC_BUFFERS <= sizeof(mask) * 8);
            while (mask) {
               int i = u_bit_scan(&mask);
               if (push_offsets[i] != (*dynamic_offsets)[i]) {
                  push_offsets[i] = (*dynamic_offsets)[i];
                  dirty_stages |= mesa_to_vk_shader_stage(stage);
               }
            }
         }

         *dynamic_offsets += set_layout->dynamic_offset_count;
         *dynamic_offset_count -= set_layout->dynamic_offset_count;
      }
   }

   cmd_buffer->state.descriptors_dirty |= dirty_stages;
   cmd_buffer->state.push_constants_dirty |= dirty_stages;
}

void anv_CmdBindDescriptorSets(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    firstSet,
    uint32_t                                    descriptorSetCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(firstSet + descriptorSetCount <= MAX_SETS);

   for (uint32_t i = 0; i < descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
      anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
                                         layout, firstSet + i, set,
                                         &dynamicOffsetCount,
                                         &pDynamicOffsets);
   }
}

void anv_CmdBindVertexBuffers(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

   /* We have to defer setting up the vertex buffers since we need the buffer
    * stride from the pipeline.
    */

   assert(firstBinding + bindingCount <= MAX_VBS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
      vb[firstBinding + i].offset = pOffsets[i];
      cmd_buffer->state.gfx.vb_dirty |= 1 << (firstBinding + i);
   }
}

void anv_CmdBindTransformFeedbackBuffersEXT(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets,
    const VkDeviceSize*                         pSizes)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_xfb_binding *xfb = cmd_buffer->state.xfb_bindings;

   /* As with vertex buffers, we have to defer setting up the transform
    * feedback buffers since we need state from the pipeline.
    */

   assert(firstBinding + bindingCount <= MAX_XFB_BUFFERS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      if (pBuffers[i] == VK_NULL_HANDLE) {
         xfb[firstBinding + i].buffer = NULL;
      } else {
         ANV_FROM_HANDLE(anv_buffer, buffer, pBuffers[i]);
         xfb[firstBinding + i].buffer = buffer;
         xfb[firstBinding + i].offset = pOffsets[i];
         xfb[firstBinding + i].size =
            anv_buffer_get_range(buffer, pOffsets[i],
                                 pSizes ? pSizes[i] : VK_WHOLE_SIZE);
      }
   }
}

enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type)
{
   switch (type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      return ISL_FORMAT_R32G32B32A32_FLOAT;

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return ISL_FORMAT_RAW;

   default:
      unreachable("Invalid descriptor type");
   }
}

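/* Copy size bytes of data into freshly allocated dynamic state and return
 * the resulting allocation.
 */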
struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                            const void *data, uint32_t size, uint32_t alignment)
{
   struct anv_state state;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
   memcpy(state.map, data, size);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));

   return state;
}

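/* OR together two pre-packed dword arrays (typically state packed at
 * pipeline creation time and state packed from the current dynamic values)
 * into newly allocated dynamic state.
 */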
struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t *a, uint32_t *b,
                             uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;
   uint32_t *p;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   p = state.map;
   for (uint32_t i = 0; i < dwords; i++)
      p[i] = a[i] | b[i];

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));

   return state;
}

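/* Allocate dynamic state for a stage's push constants and copy the CPU-side
 * anv_push_constants structure into it.
 */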
struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage)
{
   struct anv_push_constants *data =
      &cmd_buffer->state.push_constants[stage];

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         sizeof(struct anv_push_constants),
                                         32 /* bottom 5 bits MBZ */);
   memcpy(state.map, data, sizeof(struct anv_push_constants));

   return state;
}

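/* Build the push constant data for a compute dispatch: the cross-thread
 * constants are written once, followed by one per-thread copy for each HW
 * thread with cs.subgroup_id patched to the thread index.
 */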
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_push_constants *data =
      &cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
   struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct anv_push_range *range = &pipeline->cs->bind_map.push_ranges[0];

   const uint32_t threads = anv_cs_threads(pipeline);
   const unsigned total_push_constants_size =
      brw_cs_push_const_total_size(cs_prog_data, threads);
   if (total_push_constants_size == 0)
      return (struct anv_state) { .offset = 0 };

   const unsigned push_constant_alignment =
      cmd_buffer->device->info.gen < 8 ? 32 : 64;
   const unsigned aligned_total_push_constants_size =
      ALIGN(total_push_constants_size, push_constant_alignment);
   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         aligned_total_push_constants_size,
                                         push_constant_alignment);

   void *dst = state.map;
   const void *src = (char *)data + (range->start * 32);

   if (cs_prog_data->push.cross_thread.size > 0) {
      memcpy(dst, src, cs_prog_data->push.cross_thread.size);
      dst += cs_prog_data->push.cross_thread.size;
      src += cs_prog_data->push.cross_thread.size;
   }

   if (cs_prog_data->push.per_thread.size > 0) {
      for (unsigned t = 0; t < threads; t++) {
         memcpy(dst, src, cs_prog_data->push.per_thread.size);

         uint32_t *subgroup_id = dst +
            offsetof(struct anv_push_constants, cs.subgroup_id) -
            (range->start * 32 + cs_prog_data->push.cross_thread.size);
         *subgroup_id = t;

         dst += cs_prog_data->push.per_thread.size;
      }
   }

   return state;
}

void anv_CmdPushConstants(
    VkCommandBuffer                             commandBuffer,
    VkPipelineLayout                            layout,
    VkShaderStageFlags                          stageFlags,
    uint32_t                                    offset,
    uint32_t                                    size,
    const void*                                 pValues)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_foreach_stage(stage, stageFlags) {
      memcpy(cmd_buffer->state.push_constants[stage].client_data + offset,
             pValues, size);
   }

   cmd_buffer->state.push_constants_dirty |= stageFlags;
}

VkResult anv_CreateCommandPool(
    VkDevice                                    _device,
    const VkCommandPoolCreateInfo*              pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkCommandPool*                              pCmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_cmd_pool *pool;

   pool = vk_alloc2(&device->vk.alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->vk.alloc;

   list_inithead(&pool->cmd_buffers);

   *pCmdPool = anv_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void anv_DestroyCommandPool(
    VkDevice                                    _device,
    VkCommandPool                               commandPool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->vk.alloc, pAllocator, pool);
}

VkResult anv_ResetCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolResetFlags                     flags)
{
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   list_for_each_entry(struct anv_cmd_buffer, cmd_buffer,
                       &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_reset(cmd_buffer);
   }

   return VK_SUCCESS;
}

void anv_TrimCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolTrimFlags                      flags)
{
   /* Nothing for us to do here. Our pools stay pretty tidy. */
}

/**
 * Return NULL if the current subpass has no depth/stencil attachment.
 */
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;

   if (subpass->depth_stencil_attachment == NULL)
      return NULL;

   const struct anv_image_view *iview =
      cmd_buffer->state.attachments[subpass->depth_stencil_attachment->attachment].image_view;

   assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
                                VK_IMAGE_ASPECT_STENCIL_BIT));

   return iview;
}

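/* Return (allocating on first use) the push descriptor set for the given
 * bind point and set index, re-allocating its descriptor buffer when it is
 * too small or may still be in use by the GPU.
 */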
static struct anv_descriptor_set *
anv_cmd_buffer_push_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
                                   VkPipelineBindPoint bind_point,
                                   struct anv_descriptor_set_layout *layout,
                                   uint32_t _set)
{
   struct anv_cmd_pipeline_state *pipe_state;
   if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
      pipe_state = &cmd_buffer->state.compute.base;
   } else {
      assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
      pipe_state = &cmd_buffer->state.gfx.base;
   }

   struct anv_push_descriptor_set **push_set =
      &pipe_state->push_descriptors[_set];

   if (*push_set == NULL) {
      *push_set = vk_zalloc(&cmd_buffer->pool->alloc,
                            sizeof(struct anv_push_descriptor_set), 8,
                            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*push_set == NULL) {
         anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
         return NULL;
      }
   }

   struct anv_descriptor_set *set = &(*push_set)->set;

   if (set->layout != layout) {
      if (set->layout)
         anv_descriptor_set_layout_unref(cmd_buffer->device, set->layout);
      anv_descriptor_set_layout_ref(layout);
      set->layout = layout;
   }
   set->size = anv_descriptor_set_layout_size(layout);
   set->buffer_view_count = layout->buffer_view_count;
   set->buffer_views = (*push_set)->buffer_views;

   if (layout->descriptor_buffer_size &&
       ((*push_set)->set_used_on_gpu ||
        set->desc_mem.alloc_size < layout->descriptor_buffer_size)) {
      /* The previous buffer is either actively used by some GPU command (so
       * we can't modify it) or is too small. Allocate a new one.
       */
      struct anv_state desc_mem =
         anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
                                layout->descriptor_buffer_size, 32);
      if (set->desc_mem.alloc_size) {
         /* TODO: Do we really need to copy all the time? */
         memcpy(desc_mem.map, set->desc_mem.map,
                MIN2(desc_mem.alloc_size, set->desc_mem.alloc_size));
      }
      set->desc_mem = desc_mem;

      struct anv_address addr = {
         .bo = cmd_buffer->dynamic_state_stream.state_pool->block_pool.bo,
         .offset = set->desc_mem.offset,
      };

      const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
      set->desc_surface_state =
         anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
                                isl_dev->ss.size, isl_dev->ss.align);
      anv_fill_buffer_surface_state(cmd_buffer->device,
                                    set->desc_surface_state,
                                    ISL_FORMAT_R32G32B32A32_FLOAT,
                                    addr, layout->descriptor_buffer_size, 1);
   }

   return set;
}

void anv_CmdPushDescriptorSetKHR(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    _set,
    uint32_t                                    descriptorWriteCount,
    const VkWriteDescriptorSet*                 pDescriptorWrites)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(_set < MAX_SETS);

   struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;

   struct anv_descriptor_set *set =
      anv_cmd_buffer_push_descriptor_set(cmd_buffer, pipelineBindPoint,
                                         set_layout, _set);
   if (!set)
      return;

   /* Go through the user supplied descriptors. */
   for (uint32_t i = 0; i < descriptorWriteCount; i++) {
      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            anv_descriptor_set_write_image_view(cmd_buffer->device, set,
                                                write->pImageInfo + j,
                                                write->descriptorType,
                                                write->dstBinding,
                                                write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_buffer_view, bview,
                            write->pTexelBufferView[j]);

            anv_descriptor_set_write_buffer_view(cmd_buffer->device, set,
                                                 write->descriptorType,
                                                 bview,
                                                 write->dstBinding,
                                                 write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);

            anv_descriptor_set_write_buffer(cmd_buffer->device, set,
                                            &cmd_buffer->surface_state_stream,
                                            write->descriptorType,
                                            buffer,
                                            write->dstBinding,
                                            write->dstArrayElement + j,
                                            write->pBufferInfo[j].offset,
                                            write->pBufferInfo[j].range);
         }
         break;

      default:
         break;
      }
   }

   anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
                                      layout, _set, set, NULL, NULL);
}

void anv_CmdPushDescriptorSetWithTemplateKHR(
    VkCommandBuffer                             commandBuffer,
    VkDescriptorUpdateTemplate                  descriptorUpdateTemplate,
    VkPipelineLayout                            _layout,
    uint32_t                                    _set,
    const void*                                 pData)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_descriptor_update_template, template,
                   descriptorUpdateTemplate);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(_set < MAX_PUSH_DESCRIPTORS);

   struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;

   struct anv_descriptor_set *set =
      anv_cmd_buffer_push_descriptor_set(cmd_buffer, template->bind_point,
                                         set_layout, _set);
   if (!set)
      return;

   anv_descriptor_set_write_template(cmd_buffer->device, set,
                                     &cmd_buffer->surface_state_stream,
                                     template,
                                     pData);

   anv_cmd_buffer_bind_descriptor_set(cmd_buffer, template->bind_point,
                                      layout, _set, set, NULL, NULL);
}

void anv_CmdSetDeviceMask(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    deviceMask)
{
   /* No-op */
}