vk: Add gen7 support
[mesa.git] / src / vulkan / anv_cmd_buffer.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

/** \file anv_cmd_buffer.c
 *
 * This file contains all of the stuff for emitting commands into a command
 * buffer.  This includes implementations of most of the vkCmd*
 * entrypoints.  This file is concerned entirely with state emission and
 * not with the command buffer data structure itself.  As far as this file
 * is concerned, most of anv_cmd_buffer is magic.
 */

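/* Reset all of the command buffer's state tracking to its defaults: nothing
 * bound, nothing dirty.  Called when a command buffer is created and again
 * when it is reset.
 */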
static void
anv_cmd_state_init(struct anv_cmd_state *state)
{
   state->rs_state = NULL;
   state->vp_state = NULL;
   state->cb_state = NULL;
   state->ds_state = NULL;
   memset(&state->state_vf, 0, sizeof(state->state_vf));
   memset(&state->descriptors, 0, sizeof(state->descriptors));

   state->dirty = 0;
   state->vb_dirty = 0;
   state->descriptors_dirty = 0;
   state->pipeline = NULL;
   state->vp_state = NULL;
   state->rs_state = NULL;
   state->ds_state = NULL;

   state->gen7.index_buffer = NULL;
}

VkResult anv_CreateCommandBuffer(
    VkDevice _device,
    const VkCmdBufferCreateInfo* pCreateInfo,
    VkCmdBuffer* pCmdBuffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, pCreateInfo->cmdPool);
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = anv_device_alloc(device, sizeof(*cmd_buffer), 8,
                                 VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->device = device;

   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_block_pool);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_block_pool);

   cmd_buffer->level = pCreateInfo->level;
   cmd_buffer->opt_flags = 0;

   anv_cmd_state_init(&cmd_buffer->state);

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
   }

   *pCmdBuffer = anv_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;

 fail:
   anv_device_free(device, cmd_buffer);

   return result;
}

VkResult anv_DestroyCommandBuffer(
    VkDevice _device,
    VkCmdBuffer _cmd_buffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, _cmd_buffer);

   list_del(&cmd_buffer->pool_link);

   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
   anv_device_free(device, cmd_buffer);

   return VK_SUCCESS;
}

VkResult anv_ResetCommandBuffer(
    VkCmdBuffer cmdBuffer,
    VkCmdBufferResetFlags flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);

   anv_cmd_state_init(&cmd_buffer->state);

   return VK_SUCCESS;
}

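/* STATE_BASE_ADDRESS is emitted by gen-specific code; dispatch based on the
 * hardware generation of the device the command buffer belongs to.
 */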
void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
   case 8:
      return gen8_cmd_buffer_emit_state_base_address(cmd_buffer);
   default:
      unreachable("unsupported gen\n");
   }
}

VkResult anv_BeginCommandBuffer(
    VkCmdBuffer cmdBuffer,
    const VkCmdBufferBeginInfo* pBeginInfo)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);

   cmd_buffer->opt_flags = pBeginInfo->flags;

   if (cmd_buffer->level == VK_CMD_BUFFER_LEVEL_SECONDARY) {
      cmd_buffer->state.framebuffer =
         anv_framebuffer_from_handle(pBeginInfo->framebuffer);
      cmd_buffer->state.pass =
         anv_render_pass_from_handle(pBeginInfo->renderPass);

      /* FIXME: We shouldn't be starting on the first subpass */
      anv_cmd_buffer_begin_subpass(cmd_buffer,
                                   &cmd_buffer->state.pass->subpasses[0]);
   }

   anv_cmd_buffer_emit_state_base_address(cmd_buffer);
   cmd_buffer->state.current_pipeline = UINT32_MAX;

   return VK_SUCCESS;
}

VkResult anv_EndCommandBuffer(
    VkCmdBuffer cmdBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   struct anv_device *device = cmd_buffer->device;

   anv_cmd_buffer_end_batch_buffer(cmd_buffer);

   if (cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY) {
      /* The algorithm used to compute the validate list is not threadsafe as
       * it uses the bo->index field.  We have to lock the device around it.
       * Fortunately, the chances for contention here are probably very low.
       */
      pthread_mutex_lock(&device->mutex);
      anv_cmd_buffer_prepare_execbuf(cmd_buffer);
      pthread_mutex_unlock(&device->mutex);
   }

   return VK_SUCCESS;
}

void anv_CmdBindPipeline(
    VkCmdBuffer cmdBuffer,
    VkPipelineBindPoint pipelineBindPoint,
    VkPipeline _pipeline)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd_buffer->state.compute_pipeline = pipeline;
      cmd_buffer->state.compute_dirty |= ANV_CMD_BUFFER_PIPELINE_DIRTY;
      break;

   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd_buffer->state.pipeline = pipeline;
      cmd_buffer->state.vb_dirty |= pipeline->vb_used;
      cmd_buffer->state.dirty |= ANV_CMD_BUFFER_PIPELINE_DIRTY;
      break;

   default:
      assert(!"invalid bind point");
      break;
   }
}

void anv_CmdBindDynamicViewportState(
    VkCmdBuffer cmdBuffer,
    VkDynamicViewportState dynamicViewportState)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_dynamic_vp_state, vp_state, dynamicViewportState);

   cmd_buffer->state.vp_state = vp_state;
   cmd_buffer->state.dirty |= ANV_CMD_BUFFER_VP_DIRTY;
}

void anv_CmdBindDynamicRasterState(
    VkCmdBuffer cmdBuffer,
    VkDynamicRasterState dynamicRasterState)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_dynamic_rs_state, rs_state, dynamicRasterState);

   cmd_buffer->state.rs_state = rs_state;
   cmd_buffer->state.dirty |= ANV_CMD_BUFFER_RS_DIRTY;
}

void anv_CmdBindDynamicColorBlendState(
    VkCmdBuffer cmdBuffer,
    VkDynamicColorBlendState dynamicColorBlendState)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_dynamic_cb_state, cb_state, dynamicColorBlendState);

   cmd_buffer->state.cb_state = cb_state;
   cmd_buffer->state.dirty |= ANV_CMD_BUFFER_CB_DIRTY;
}

void anv_CmdBindDynamicDepthStencilState(
    VkCmdBuffer cmdBuffer,
    VkDynamicDepthStencilState dynamicDepthStencilState)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_dynamic_ds_state, ds_state, dynamicDepthStencilState);

   cmd_buffer->state.ds_state = ds_state;
   cmd_buffer->state.dirty |= ANV_CMD_BUFFER_DS_DIRTY;
}

void anv_CmdBindDescriptorSets(
    VkCmdBuffer cmdBuffer,
    VkPipelineBindPoint pipelineBindPoint,
    VkPipelineLayout _layout,
    uint32_t firstSet,
    uint32_t setCount,
    const VkDescriptorSet* pDescriptorSets,
    uint32_t dynamicOffsetCount,
    const uint32_t* pDynamicOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
   struct anv_descriptor_set_layout *set_layout;

   assert(firstSet + setCount < MAX_SETS);

   uint32_t dynamic_slot = 0;
   for (uint32_t i = 0; i < setCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
      set_layout = layout->set[firstSet + i].layout;

      cmd_buffer->state.descriptors[firstSet + i].set = set;

      assert(set_layout->num_dynamic_buffers <
             ARRAY_SIZE(cmd_buffer->state.descriptors[0].dynamic_offsets));
      memcpy(cmd_buffer->state.descriptors[firstSet + i].dynamic_offsets,
             pDynamicOffsets + dynamic_slot,
             set_layout->num_dynamic_buffers * sizeof(*pDynamicOffsets));

      cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;

      dynamic_slot += set_layout->num_dynamic_buffers;
   }
}

void anv_CmdBindVertexBuffers(
    VkCmdBuffer cmdBuffer,
    uint32_t startBinding,
    uint32_t bindingCount,
    const VkBuffer* pBuffers,
    const VkDeviceSize* pOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, cmdBuffer);
   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

   /* We have to defer setting up vertex buffer state since we need the
    * buffer stride from the pipeline.
    */

   assert(startBinding + bindingCount < MAX_VBS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      vb[startBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
      vb[startBinding + i].offset = pOffsets[i];
      cmd_buffer->state.vb_dirty |= 1 << (startBinding + i);
   }
}

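/* Write the buffer's address into an already-copied SURFACE_STATE and record
 * a relocation for it in the command buffer's surface relocation list so the
 * kernel can patch in the final address at execbuf time.
 */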
static void
add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_state state, struct anv_bo *bo, uint32_t offset)
{
   /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8 and
    * 9 for gen8+.  We only write the first dword for gen8+ here and rely on
    * the initial state to set the high bits to 0.
    */

   const uint32_t dword = cmd_buffer->device->info.gen < 8 ? 1 : 8;

   *(uint32_t *)(state.map + dword * 4) =
      anv_reloc_list_add(anv_cmd_buffer_current_surface_relocs(cmd_buffer),
                         cmd_buffer->device, state.offset + dword * 4, bo, offset);
}

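/* Build the binding table for one shader stage.  For the fragment stage the
 * subpass color attachments come first and the descriptor surfaces are
 * placed after a MAX_RTS-entry bias; for other stages only the descriptor
 * surfaces are emitted.  Returns VK_ERROR_OUT_OF_DEVICE_MEMORY when the
 * current surface state block is full so the caller can allocate a fresh
 * block and try again.
 */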
VkResult
anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                  unsigned stage, struct anv_state *bt_state)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_pipeline_layout *layout;
   uint32_t attachments, bias, size;

   if (stage == VK_SHADER_STAGE_COMPUTE)
      layout = cmd_buffer->state.compute_pipeline->layout;
   else
      layout = cmd_buffer->state.pipeline->layout;

   if (stage == VK_SHADER_STAGE_FRAGMENT) {
      bias = MAX_RTS;
      attachments = subpass->color_count;
   } else {
      bias = 0;
      attachments = 0;
   }

   /* This is a little awkward: layout can be NULL but we still have to
    * allocate and set a binding table for the PS stage for render
    * targets.
    */
   uint32_t surface_count = layout ? layout->stage[stage].surface_count : 0;

   if (attachments + surface_count == 0)
      return VK_SUCCESS;

   size = (bias + surface_count) * sizeof(uint32_t);
   *bt_state = anv_cmd_buffer_alloc_surface_state(cmd_buffer, size, 32);
   uint32_t *bt_map = bt_state->map;

   if (bt_state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   /* This is highly annoying.  The Vulkan spec puts the depth-stencil
    * attachments in with the color attachments.  Unfortunately, thanks to
    * other aspects of the API, we can't really separate them before this
    * point.  Therefore, we have to walk all of the attachments but only
    * put the color attachments into the binding table.
    */
   for (uint32_t a = 0; a < attachments; a++) {
      const struct anv_attachment_view *attachment =
         fb->attachments[subpass->color_attachments[a]];

      assert(attachment->attachment_type == ANV_ATTACHMENT_VIEW_TYPE_COLOR);
      const struct anv_color_attachment_view *view =
         (const struct anv_color_attachment_view *)attachment;

      struct anv_state state =
         anv_cmd_buffer_alloc_surface_state(cmd_buffer, 64, 64);

      if (state.map == NULL)
         return VK_ERROR_OUT_OF_DEVICE_MEMORY;

      memcpy(state.map, view->view.surface_state.map, 64);

      add_surface_state_reloc(cmd_buffer, state, view->view.bo, view->view.offset);

      bt_map[a] = state.offset;
   }

   if (layout == NULL)
      return VK_SUCCESS;

   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_binding *d = &cmd_buffer->state.descriptors[set];
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
      struct anv_descriptor_slot *surface_slots =
         set_layout->stage[stage].surface_start;

      uint32_t start = bias + layout->set[set].surface_start[stage];

      for (uint32_t b = 0; b < set_layout->stage[stage].surface_count; b++) {
         struct anv_surface_view *view =
            d->set->descriptors[surface_slots[b].index].view;

         if (!view)
            continue;

         struct anv_state state =
            anv_cmd_buffer_alloc_surface_state(cmd_buffer, 64, 64);

         if (state.map == NULL)
            return VK_ERROR_OUT_OF_DEVICE_MEMORY;

         uint32_t offset;
         if (surface_slots[b].dynamic_slot >= 0) {
            uint32_t dynamic_offset =
               d->dynamic_offsets[surface_slots[b].dynamic_slot];

            offset = view->offset + dynamic_offset;
            anv_fill_buffer_surface_state(cmd_buffer->device,
                                          state.map, view->format, offset,
                                          view->range - dynamic_offset);
         } else {
            offset = view->offset;
            memcpy(state.map, view->surface_state.map, 64);
         }

         add_surface_state_reloc(cmd_buffer, state, view->bo, offset);

         bt_map[start + b] = state.offset;
      }
   }

   return VK_SUCCESS;
}

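/* Gather the SAMPLER_STATE for every sampler bound to the given stage into a
 * single dynamic state allocation, 16 bytes per sampler, laid out in the
 * order defined by the pipeline layout.
 */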
VkResult
anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
                             unsigned stage, struct anv_state *state)
{
   struct anv_pipeline_layout *layout;
   uint32_t sampler_count;

   if (stage == VK_SHADER_STAGE_COMPUTE)
      layout = cmd_buffer->state.compute_pipeline->layout;
   else
      layout = cmd_buffer->state.pipeline->layout;

   sampler_count = layout ? layout->stage[stage].sampler_count : 0;
   if (sampler_count == 0)
      return VK_SUCCESS;

   uint32_t size = sampler_count * 16;
   *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);

   if (state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   for (uint32_t set = 0; set < layout->num_sets; set++) {
      struct anv_descriptor_set_binding *d = &cmd_buffer->state.descriptors[set];
      struct anv_descriptor_set_layout *set_layout = layout->set[set].layout;
      struct anv_descriptor_slot *sampler_slots =
         set_layout->stage[stage].sampler_start;

      uint32_t start = layout->set[set].sampler_start[stage];

      for (uint32_t b = 0; b < set_layout->stage[stage].sampler_count; b++) {
         struct anv_sampler *sampler =
            d->set->descriptors[sampler_slots[b].index].sampler;

         if (!sampler)
            continue;

         memcpy(state->map + (start + b) * 16,
                sampler->state, sizeof(sampler->state));
      }
   }

   return VK_SUCCESS;
}

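/* Emit the sampler and binding table state for one stage and point the
 * hardware at it.  The pointer commands only differ by their sub-opcode, so
 * we reuse the VS packet layout and patch in the per-stage sub-opcode.
 */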
static VkResult
flush_descriptor_set(struct anv_cmd_buffer *cmd_buffer, uint32_t stage)
{
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer, stage, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = anv_cmd_buffer_emit_binding_table(cmd_buffer, stage, &surfaces);
   if (result != VK_SUCCESS)
      return result;

   static const uint32_t sampler_state_opcodes[] = {
      [VK_SHADER_STAGE_VERTEX] = 43,
      [VK_SHADER_STAGE_TESS_CONTROL] = 44, /* HS */
      [VK_SHADER_STAGE_TESS_EVALUATION] = 45, /* DS */
      [VK_SHADER_STAGE_GEOMETRY] = 46,
      [VK_SHADER_STAGE_FRAGMENT] = 47,
      [VK_SHADER_STAGE_COMPUTE] = 0,
   };

   static const uint32_t binding_table_opcodes[] = {
      [VK_SHADER_STAGE_VERTEX] = 38,
      [VK_SHADER_STAGE_TESS_CONTROL] = 39,
      [VK_SHADER_STAGE_TESS_EVALUATION] = 40,
      [VK_SHADER_STAGE_GEOMETRY] = 41,
      [VK_SHADER_STAGE_FRAGMENT] = 42,
      [VK_SHADER_STAGE_COMPUTE] = 0,
   };

   if (samplers.alloc_size > 0) {
      anv_batch_emit(&cmd_buffer->batch,
                     GEN7_3DSTATE_SAMPLER_STATE_POINTERS_VS,
                     ._3DCommandSubOpcode = sampler_state_opcodes[stage],
                     .PointertoVSSamplerState = samplers.offset);
   }

   if (surfaces.alloc_size > 0) {
      anv_batch_emit(&cmd_buffer->batch,
                     GEN7_3DSTATE_BINDING_TABLE_POINTERS_VS,
                     ._3DCommandSubOpcode = binding_table_opcodes[stage],
                     .PointertoVSBindingTable = surfaces.offset);
   }

   return VK_SUCCESS;
}

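/* Flush binding tables and samplers for every stage with dirty descriptors.
 * If we run out of room in the current surface state block, switch to a new
 * block, re-emit STATE_BASE_ADDRESS, and then re-emit the binding tables for
 * all active stages relative to the new surface state base address.
 */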
void
anv_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t s, dirty = cmd_buffer->state.descriptors_dirty &
                       cmd_buffer->state.pipeline->active_stages;

   VkResult result = VK_SUCCESS;
   for_each_bit(s, dirty) {
      result = flush_descriptor_set(cmd_buffer, s);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);

      result = anv_cmd_buffer_new_surface_state_bo(cmd_buffer);
      assert(result == VK_SUCCESS);

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      /* Re-emit all active binding tables */
      for_each_bit(s, cmd_buffer->state.pipeline->active_stages) {
         result = flush_descriptor_set(cmd_buffer, s);

         /* It had better succeed this time */
         assert(result == VK_SUCCESS);
      }
   }

   cmd_buffer->state.descriptors_dirty &= ~cmd_buffer->state.pipeline->active_stages;
}

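/* Copy a block of dwords into the dynamic state stream and return the
 * allocation so the caller can point a state packet at it.
 */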
struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                            uint32_t *a, uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   memcpy(state.map, a, dwords * 4);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, dwords * 4));

   return state;
}

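/* Like anv_cmd_buffer_emit_dynamic(), but merges two dword arrays by ORing
 * them together element-wise before writing the result out.
 */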
struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t *a, uint32_t *b,
                             uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;
   uint32_t *p;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   p = state.map;
   for (uint32_t i = 0; i < dwords; i++)
      p[i] = a[i] | b[i];

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));

   return state;
}

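/* Per-subpass setup is handled by the gen-specific code; dispatch on the
 * device's hardware generation.
 */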
void
anv_cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_subpass *subpass)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      gen7_cmd_buffer_begin_subpass(cmd_buffer, subpass);
      break;
   case 8:
      gen8_cmd_buffer_begin_subpass(cmd_buffer, subpass);
      break;
   default:
      unreachable("unsupported gen\n");
   }
}

void anv_CmdSetEvent(
    VkCmdBuffer cmdBuffer,
    VkEvent event,
    VkPipelineStageFlags stageMask)
{
   stub();
}

void anv_CmdResetEvent(
    VkCmdBuffer cmdBuffer,
    VkEvent event,
    VkPipelineStageFlags stageMask)
{
   stub();
}

void anv_CmdWaitEvents(
    VkCmdBuffer cmdBuffer,
    uint32_t eventCount,
    const VkEvent* pEvents,
    VkPipelineStageFlags srcStageMask,
    VkPipelineStageFlags destStageMask,
    uint32_t memBarrierCount,
    const void* const* ppMemBarriers)
{
   stub();
}

void anv_CmdPushConstants(
    VkCmdBuffer cmdBuffer,
    VkPipelineLayout layout,
    VkShaderStageFlags stageFlags,
    uint32_t start,
    uint32_t length,
    const void* values)
{
   stub();
}

void anv_CmdExecuteCommands(
    VkCmdBuffer cmdBuffer,
    uint32_t cmdBuffersCount,
    const VkCmdBuffer* pCmdBuffers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, primary, cmdBuffer);

   assert(primary->level == VK_CMD_BUFFER_LEVEL_PRIMARY);

   anv_assert(primary->state.subpass == &primary->state.pass->subpasses[0]);

   for (uint32_t i = 0; i < cmdBuffersCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);

      assert(secondary->level == VK_CMD_BUFFER_LEVEL_SECONDARY);

      anv_cmd_buffer_add_secondary(primary, secondary);
   }
}

VkResult anv_CreateCommandPool(
    VkDevice _device,
    const VkCmdPoolCreateInfo* pCreateInfo,
    VkCmdPool* pCmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_cmd_pool *pool;

   pool = anv_device_alloc(device, sizeof(*pool), 8,
                           VK_SYSTEM_ALLOC_TYPE_API_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   list_inithead(&pool->cmd_buffers);

   *pCmdPool = anv_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

VkResult anv_DestroyCommandPool(
    VkDevice _device,
    VkCmdPool cmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, cmdPool);

   anv_ResetCommandPool(_device, cmdPool, 0);

   anv_device_free(device, pool);

   return VK_SUCCESS;
}

VkResult anv_ResetCommandPool(
    VkDevice device,
    VkCmdPool cmdPool,
    VkCmdPoolResetFlags flags)
{
   ANV_FROM_HANDLE(anv_cmd_pool, pool, cmdPool);

   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      anv_DestroyCommandBuffer(device, anv_cmd_buffer_to_handle(cmd_buffer));
   }

   return VK_SUCCESS;
}