src/vulkan/anv_cmd_buffer.c (mesa.git)
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

/** \file anv_cmd_buffer.c
 *
 * This file contains all of the stuff for emitting commands into a command
 * buffer.  This includes implementations of most of the vkCmd*
 * entrypoints.  This file is concerned entirely with state emission and
 * not with the command buffer data structure itself.  As far as this file
 * is concerned, most of anv_cmd_buffer is magic.
 */

/* TODO: These are taken from GLES.  We should check the Vulkan spec */
const struct anv_dynamic_state default_dynamic_state = {
   .viewport = {
      .count = 0,
   },
   .scissor = {
      .count = 0,
   },
   .line_width = 1.0f,
   .depth_bias = {
      .bias = 0.0f,
      .clamp = 0.0f,
      .slope = 0.0f,
   },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds = {
      .min = 0.0f,
      .max = 1.0f,
   },
   .stencil_compare_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_write_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_reference = {
      .front = 0u,
      .back = 0u,
   },
};

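/* Copy the dynamic state selected by copy_mask from src to dest.  Each bit
 * in copy_mask corresponds to one of the VK_DYNAMIC_STATE_* enum values;
 * state not covered by the mask is left untouched in dest.
 */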
void
anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                       const struct anv_dynamic_state *src,
                       uint32_t copy_mask)
{
   if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
      dest->viewport.count = src->viewport.count;
      typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                   src->viewport.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
      dest->scissor.count = src->scissor.count;
      typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                   src->scissor.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
      dest->line_width = src->line_width;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
      dest->depth_bias = src->depth_bias;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
      typed_memcpy(dest->blend_constants, src->blend_constants, 4);

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
      dest->depth_bounds = src->depth_bounds;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
      dest->stencil_compare_mask = src->stencil_compare_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
      dest->stencil_write_mask = src->stencil_write_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
      dest->stencil_reference = src->stencil_reference;
}

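/* Reset per-command-buffer state tracking to its defaults.  Called both when
 * a command buffer is first created and when it is reset.
 */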
static void
anv_cmd_state_init(struct anv_cmd_state *state)
{
   memset(&state->descriptors, 0, sizeof(state->descriptors));
   memset(&state->push_constants, 0, sizeof(state->push_constants));

   state->dirty = ~0;
   state->vb_dirty = 0;
   state->descriptors_dirty = 0;
   state->push_constants_dirty = 0;
   state->pipeline = NULL;
   state->restart_index = UINT32_MAX;
   state->dynamic = default_dynamic_state;

   state->gen7.index_buffer = NULL;
}

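/* Grow-only (re)allocation of the push constant block for a shader stage.
 * The block is allocated lazily on first use and only reallocated when a
 * larger size is requested.
 */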
static VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          gl_shader_stage stage, uint32_t size)
{
   struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];

   if (*ptr == NULL) {
      *ptr = anv_alloc(&cmd_buffer->pool->alloc, size, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   } else if ((*ptr)->size < size) {
      *ptr = anv_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   (*ptr)->size = size;

   return VK_SUCCESS;
}

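/* Ensure that the push constant block for the given stage is large enough to
 * hold everything up to and including `field` of struct anv_push_constants.
 */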
#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
   anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
      (offsetof(struct anv_push_constants, field) + \
       sizeof(cmd_buffer->state.push_constants[0]->field)))

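/* Worker for vkAllocateCommandBuffers: allocates one command buffer along
 * with its batch BO chain and state streams, and links it into the pool.
 */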
static VkResult anv_create_cmd_buffer(
    struct anv_device *                          device,
    struct anv_cmd_pool *                        pool,
    VkCommandBufferLevel                         level,
    VkCommandBuffer*                             pCommandBuffer)
{
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = anv_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;

   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_block_pool);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_block_pool);

   cmd_buffer->level = level;
   cmd_buffer->usage_flags = 0;

   anv_cmd_state_init(&cmd_buffer->state);

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
   }

   *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;

 fail:
   anv_free(&cmd_buffer->pool->alloc, cmd_buffer);

   return result;
}

VkResult anv_AllocateCommandBuffers(
    VkDevice                                    _device,
    const VkCommandBufferAllocateInfo*          pAllocateInfo,
    VkCommandBuffer*                            pCommandBuffers)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->bufferCount; i++) {
      result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                     &pCommandBuffers[i]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS)
      anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                             i, pCommandBuffers);

   return result;
}

static void
anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);

   anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

void anv_FreeCommandBuffers(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      anv_cmd_buffer_destroy(cmd_buffer);
   }
}

VkResult anv_ResetCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    VkCommandBufferResetFlags                   flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);

   anv_cmd_state_init(&cmd_buffer->state);

   return VK_SUCCESS;
}

void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      /* Ivybridge and Haswell share the gen7 path. */
      return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
   case 8:
      return gen8_cmd_buffer_emit_state_base_address(cmd_buffer);
   case 9:
      return gen9_cmd_buffer_emit_state_base_address(cmd_buffer);
   default:
      unreachable("unsupported gen");
   }
}

VkResult anv_BeginCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    const VkCommandBufferBeginInfo*             pBeginInfo)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);

   cmd_buffer->usage_flags = pBeginInfo->flags;

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
      cmd_buffer->state.framebuffer =
         anv_framebuffer_from_handle(pBeginInfo->framebuffer);
      cmd_buffer->state.pass =
         anv_render_pass_from_handle(pBeginInfo->renderPass);

      struct anv_subpass *subpass =
         &cmd_buffer->state.pass->subpasses[pBeginInfo->subpass];

      anv_cmd_buffer_begin_subpass(cmd_buffer, subpass);
   }

   anv_cmd_buffer_emit_state_base_address(cmd_buffer);
   cmd_buffer->state.current_pipeline = UINT32_MAX;

   return VK_SUCCESS;
}

VkResult anv_EndCommandBuffer(
    VkCommandBuffer                             commandBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_device *device = cmd_buffer->device;

   anv_cmd_buffer_end_batch_buffer(cmd_buffer);

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      /* The algorithm used to compute the validate list is not threadsafe as
       * it uses the bo->index field.  We have to lock the device around it.
       * Fortunately, the chances for contention here are probably very low.
       */
      pthread_mutex_lock(&device->mutex);
      anv_cmd_buffer_prepare_execbuf(cmd_buffer);
      pthread_mutex_unlock(&device->mutex);
   }

   return VK_SUCCESS;
}

void anv_CmdBindPipeline(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd_buffer->state.compute_pipeline = pipeline;
      cmd_buffer->state.compute_dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      break;

   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd_buffer->state.pipeline = pipeline;
      cmd_buffer->state.vb_dirty |= pipeline->vb_used;
      cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;

      /* Apply the dynamic state from the pipeline */
      cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
      anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
                             &pipeline->dynamic_state,
                             pipeline->dynamic_state_mask);
      break;

   default:
      assert(!"invalid bind point");
      break;
   }
}

void anv_CmdSetViewport(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    viewportCount,
    const VkViewport*                           pViewports)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.viewport.count = viewportCount;
   memcpy(cmd_buffer->state.dynamic.viewport.viewports,
          pViewports, viewportCount * sizeof(*pViewports));

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}

void anv_CmdSetScissor(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    scissorCount,
    const VkRect2D*                             pScissors)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.scissor.count = scissorCount;
   memcpy(cmd_buffer->state.dynamic.scissor.scissors,
          pScissors, scissorCount * sizeof(*pScissors));

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}

void anv_CmdSetLineWidth(
    VkCommandBuffer                             commandBuffer,
    float                                       lineWidth)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.line_width = lineWidth;
   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}

void anv_CmdSetDepthBias(
    VkCommandBuffer                             commandBuffer,
    float                                       depthBiasConstantFactor,
    float                                       depthBiasClamp,
    float                                       depthBiasSlopeFactor)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
   cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
   cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}

void anv_CmdSetBlendConstants(
    VkCommandBuffer                             commandBuffer,
    const float                                 blendConstants[4])
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   memcpy(cmd_buffer->state.dynamic.blend_constants,
          blendConstants, sizeof(float) * 4);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}

void anv_CmdSetDepthBounds(
    VkCommandBuffer                             commandBuffer,
    float                                       minDepthBounds,
    float                                       maxDepthBounds)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
   cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}

void anv_CmdSetStencilCompareMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    compareMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}

void anv_CmdSetStencilWriteMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    writeMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void anv_CmdSetStencilReference(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    reference)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_reference.back = reference;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}

void anv_CmdBindDescriptorSets(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    firstSet,
    uint32_t                                    descriptorSetCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
   struct anv_descriptor_set_layout *set_layout;

   assert(firstSet + descriptorSetCount <= MAX_SETS);

   uint32_t dynamic_slot = 0;
   for (uint32_t i = 0; i < descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
      set_layout = layout->set[firstSet + i].layout;

      if (cmd_buffer->state.descriptors[firstSet + i] != set) {
         cmd_buffer->state.descriptors[firstSet + i] = set;
         cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;
      }

      if (set_layout->dynamic_offset_count > 0) {
         anv_foreach_stage(s, set_layout->shader_stages) {
            anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, s, dynamic);

            struct anv_push_constants *push =
               cmd_buffer->state.push_constants[s];

            unsigned d = layout->set[firstSet + i].dynamic_offset_start;
            const uint32_t *offsets = pDynamicOffsets + dynamic_slot;
            struct anv_descriptor *desc = set->descriptors;

            for (unsigned b = 0; b < set_layout->binding_count; b++) {
               if (set_layout->binding[b].dynamic_offset_index < 0)
                  continue;

               unsigned array_size = set_layout->binding[b].array_size;
               for (unsigned j = 0; j < array_size; j++) {
                  uint32_t range = 0;
                  if (desc->buffer_view)
                     range = desc->buffer_view->range;
                  push->dynamic[d].offset = *(offsets++);
                  push->dynamic[d].range = range;
                  desc++;
                  d++;
               }
            }
         }

         /* Each set consumes its block of pDynamicOffsets exactly once,
          * no matter how many stages reference it.
          */
         dynamic_slot += set_layout->dynamic_offset_count;

         cmd_buffer->state.push_constants_dirty |= set_layout->shader_stages;
      }
   }
}

void anv_CmdBindVertexBuffers(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    startBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

   /* We have to defer setting up vertex buffers since we need the buffer
    * stride from the pipeline. */

   assert(startBinding + bindingCount <= MAX_VBS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      vb[startBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
      vb[startBinding + i].offset = pOffsets[i];
      cmd_buffer->state.vb_dirty |= 1 << (startBinding + i);
   }
}

static void
add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_state state, struct anv_bo *bo, uint32_t offset)
{
   /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8 and
    * 9 for gen8+.  We only write the first dword for gen8+ here and rely on
    * the initial state to set the high bits to 0. */

   const uint32_t dword = cmd_buffer->device->info.gen < 8 ? 1 : 8;

   anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
                      state.offset + dword * 4, bo, offset);
}

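/* Map a descriptor type to the format used for its buffer surface state:
 * uniform buffers are set up as RGBA32 float vectors, while storage buffers
 * use the untyped format that VK_FORMAT_UNDEFINED maps to so that raw
 * read/write access works.
 */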
const struct anv_format *
anv_format_for_descriptor_type(VkDescriptorType type)
{
   switch (type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      return anv_format_for_vk_format(VK_FORMAT_R32G32B32A32_SFLOAT);

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return anv_format_for_vk_format(VK_FORMAT_UNDEFINED);

   default:
      unreachable("Invalid descriptor type");
   }
}

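/* Emit the binding table for a single shader stage.  The first `bias`
 * entries are reserved: MAX_RTS render-target entries for the fragment
 * stage, and one NumWorkgroups surface for compute.  The remaining entries
 * come from the pipeline layout's surface-to-descriptor mapping.
 */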
VkResult
anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                  gl_shader_stage stage,
                                  struct anv_state *bt_state)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_pipeline_layout *layout;
   uint32_t color_count, bias, state_offset;

   switch (stage) {
   case MESA_SHADER_FRAGMENT:
      layout = cmd_buffer->state.pipeline->layout;
      bias = MAX_RTS;
      color_count = subpass->color_count;
      break;
   case MESA_SHADER_COMPUTE:
      layout = cmd_buffer->state.compute_pipeline->layout;
      bias = 1;
      color_count = 0;
      break;
   default:
      layout = cmd_buffer->state.pipeline->layout;
      bias = 0;
      color_count = 0;
      break;
   }

   /* This is a little awkward: layout can be NULL but we still have to
    * allocate and set a binding table for the PS stage for render
    * targets. */
   uint32_t surface_count = layout ? layout->stage[stage].surface_count : 0;

   if (color_count + surface_count == 0) {
      *bt_state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
                                                  bias + surface_count,
                                                  &state_offset);
   uint32_t *bt_map = bt_state->map;

   if (bt_state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   for (uint32_t a = 0; a < color_count; a++) {
      const struct anv_image_view *iview =
         fb->attachments[subpass->color_attachments[a]];

      bt_map[a] = iview->color_rt_surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
                              iview->bo, iview->offset);
   }

   if (stage == MESA_SHADER_COMPUTE &&
       cmd_buffer->state.compute_pipeline->cs_prog_data.uses_num_work_groups) {
      struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
      uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;

      struct anv_state surface_state;
      surface_state =
         anv_cmd_buffer_alloc_surface_state(cmd_buffer);

      const struct anv_format *format =
         anv_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
      anv_fill_buffer_surface_state(cmd_buffer->device, surface_state.map,
                                    format->surface_format, bo_offset, 12, 1);

      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(surface_state);

      bt_map[0] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }

   if (layout == NULL)
      goto out;

   if (layout->stage[stage].image_count > 0) {
      VkResult result =
         anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
      if (result != VK_SUCCESS)
         return result;

      cmd_buffer->state.push_constants_dirty |= 1 << stage;
   }

   uint32_t image = 0;
   for (uint32_t s = 0; s < layout->stage[stage].surface_count; s++) {
      struct anv_pipeline_binding *binding =
         &layout->stage[stage].surface_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      struct anv_descriptor *desc = &set->descriptors[binding->offset];

      struct anv_state surface_state;
      struct anv_bo *bo;
      uint32_t bo_offset;

      switch (desc->type) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         /* Nothing for us to do here */
         continue;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         surface_state = desc->image_view->nonrt_surface_state;
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
         surface_state = desc->image_view->storage_surface_state;
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         anv_image_view_fill_image_param(cmd_buffer->device, desc->image_view,
                                         image_param);
         image_param->surface_idx = bias + s;
         break;
      }

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         surface_state = desc->buffer_view->surface_state;
         bo = desc->buffer_view->bo;
         bo_offset = desc->buffer_view->offset;
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
         surface_state = desc->buffer_view->storage_surface_state;
         bo = desc->buffer_view->bo;
         bo_offset = desc->buffer_view->offset;

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         anv_buffer_view_fill_image_param(cmd_buffer->device, desc->buffer_view,
                                          image_param);
         image_param->surface_idx = bias + s;
         break;
      }

      default:
         assert(!"Invalid descriptor type");
         continue;
      }

      bt_map[bias + s] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }
   assert(image == layout->stage[stage].image_count);

 out:
   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*bt_state);

   return VK_SUCCESS;
}

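/* Emit the sampler state table for a stage: 16 bytes of SAMPLER_STATE per
 * sampler, copied from each bound anv_sampler.
 */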
VkResult
anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
                             gl_shader_stage stage, struct anv_state *state)
{
   struct anv_pipeline_layout *layout;
   uint32_t sampler_count;

   if (stage == MESA_SHADER_COMPUTE)
      layout = cmd_buffer->state.compute_pipeline->layout;
   else
      layout = cmd_buffer->state.pipeline->layout;

   sampler_count = layout ? layout->stage[stage].sampler_count : 0;
   if (sampler_count == 0) {
      *state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   uint32_t size = sampler_count * 16;
   *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);

   if (state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   for (uint32_t s = 0; s < layout->stage[stage].sampler_count; s++) {
      struct anv_pipeline_binding *binding =
         &layout->stage[stage].sampler_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      struct anv_descriptor *desc = &set->descriptors[binding->offset];

      if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
          desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
         continue;

      struct anv_sampler *sampler = desc->sampler;

      /* This can happen if we have an unfilled slot since TYPE_SAMPLER
       * happens to be zero.
       */
      if (sampler == NULL)
         continue;

      memcpy(state->map + (s * 16),
             sampler->state, sizeof(sampler->state));
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*state);

   return VK_SUCCESS;
}

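/* Copy arbitrary data into freshly allocated dynamic state, flushing the CPU
 * cache on non-LLC platforms so the GPU sees it.
 */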
struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                            const void *data, uint32_t size, uint32_t alignment)
{
   struct anv_state state;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
   memcpy(state.map, data, size);

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));

   return state;
}

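/* OR two DWord streams together into freshly allocated dynamic state.  This
 * is used to combine partial state packets, e.g. one half baked into the
 * pipeline at creation time and the other filled out from dynamic state at
 * draw time.
 */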
struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t *a, uint32_t *b,
                             uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;
   uint32_t *p;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   p = state.map;
   for (uint32_t i = 0; i < dwords; i++)
      p[i] = a[i] | b[i];

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));

   return state;
}

void
anv_cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_subpass *subpass)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      gen7_cmd_buffer_begin_subpass(cmd_buffer, subpass);
      break;
   case 8:
      gen8_cmd_buffer_begin_subpass(cmd_buffer, subpass);
      break;
   case 9:
      gen9_cmd_buffer_begin_subpass(cmd_buffer, subpass);
      break;
   default:
      unreachable("unsupported gen");
   }
}

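/* Gather the push constant values for a stage into dynamic state.  Each
 * element of prog_data->param encodes a byte offset into the stage's
 * anv_push_constants block.
 */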
struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[stage];
   struct brw_stage_prog_data *prog_data =
      cmd_buffer->state.pipeline->prog_data[stage];

   /* If we don't actually have any push constants, bail. */
   if (data == NULL || prog_data->nr_params == 0)
      return (struct anv_state) { .offset = 0 };

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         prog_data->nr_params * sizeof(float),
                                         32 /* bottom 5 bits MBZ */);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;
   for (unsigned i = 0; i < prog_data->nr_params; i++) {
      uint32_t offset = (uintptr_t)prog_data->param[i];
      u32_map[i] = *(uint32_t *)((uint8_t *)data + offset);
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}

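/* Compute shaders get a per-thread copy of the push constants, each prefixed
 * with that thread's local invocation ID payload: IDs are written for every
 * thread, uniforms are filled in for thread 0 and then replicated to the
 * other threads.
 */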
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   const unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
   const unsigned push_constant_data_size =
      (local_id_dwords + prog_data->nr_params) * 4;
   const unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   const unsigned param_aligned_count =
      reg_aligned_constant_size / sizeof(uint32_t);

   /* If we don't actually have any push constants, bail. */
   if (reg_aligned_constant_size == 0)
      return (struct anv_state) { .offset = 0 };

   const unsigned threads = pipeline->cs_thread_width_max;
   const unsigned total_push_constants_size =
      reg_aligned_constant_size * threads;
   const unsigned push_constant_alignment =
      cmd_buffer->device->info.gen < 8 ? 32 : 64;
   const unsigned aligned_total_push_constants_size =
      ALIGN(total_push_constants_size, push_constant_alignment);
   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         aligned_total_push_constants_size,
                                         push_constant_alignment);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;

   brw_cs_fill_local_id_payload(cs_prog_data, u32_map, threads,
                                reg_aligned_constant_size);

   /* Setup uniform data for the first thread */
   for (unsigned i = 0; i < prog_data->nr_params; i++) {
      uint32_t offset = (uintptr_t)prog_data->param[i];
      u32_map[local_id_dwords + i] = *(uint32_t *)((uint8_t *)data + offset);
   }

   /* Copy uniform data from the first thread to every other thread */
   const size_t uniform_data_size = prog_data->nr_params * sizeof(uint32_t);
   for (unsigned t = 1; t < threads; t++) {
      memcpy(&u32_map[t * param_aligned_count + local_id_dwords],
             &u32_map[local_id_dwords],
             uniform_data_size);
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}

void anv_CmdPushConstants(
    VkCommandBuffer                             commandBuffer,
    VkPipelineLayout                            layout,
    VkShaderStageFlags                          stageFlags,
    uint32_t                                    offset,
    uint32_t                                    size,
    const void*                                 pValues)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_foreach_stage(stage, stageFlags) {
      anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, client_data);

      memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset,
             pValues, size);
   }

   cmd_buffer->state.push_constants_dirty |= stageFlags;
}

void anv_CmdExecuteCommands(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    commandBuffersCount,
    const VkCommandBuffer*                      pCmdBuffers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);

   assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

   anv_assert(primary->state.subpass == &primary->state.pass->subpasses[0]);

   for (uint32_t i = 0; i < commandBuffersCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);

      assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);

      anv_cmd_buffer_add_secondary(primary, secondary);
   }
}

VkResult anv_CreateCommandPool(
    VkDevice                                    _device,
    const VkCommandPoolCreateInfo*              pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkCommandPool*                              pCmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_cmd_pool *pool;

   pool = anv_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);

   *pCmdPool = anv_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void anv_DestroyCommandPool(
    VkDevice                                    _device,
    VkCommandPool                               commandPool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   anv_ResetCommandPool(_device, commandPool, 0);

   anv_free2(&device->alloc, pAllocator, pool);
}

VkResult anv_ResetCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolResetFlags                     flags)
{
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_destroy(cmd_buffer);
   }

   return VK_SUCCESS;
}

/**
 * Return NULL if the current subpass has no depth/stencil attachment.
 */
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;

   if (subpass->depth_stencil_attachment == VK_ATTACHMENT_UNUSED)
      return NULL;

   const struct anv_image_view *iview =
      fb->attachments[subpass->depth_stencil_attachment];

   assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
                                VK_IMAGE_ASPECT_STENCIL_BIT));

   return iview;
}