anv: Fix a relocation race condition
[mesa.git] / src / intel / vulkan / anv_batch_chain.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "genxml/gen8_pack.h"
33
34 #include "util/debug.h"
35
36 /** \file anv_batch_chain.c
37 *
38 * This file contains functions related to anv_cmd_buffer as a data
39 * structure. This involves everything required to create and destroy
40 * the actual batch buffers as well as link them together and handle
41 * relocations and surface state. It specifically does *not* contain any
42 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
43 */
44
45 /*-----------------------------------------------------------------------*
46 * Functions related to anv_reloc_list
47 *-----------------------------------------------------------------------*/
48
49 VkResult
50 anv_reloc_list_init(struct anv_reloc_list *list,
51 const VkAllocationCallbacks *alloc)
52 {
53 memset(list, 0, sizeof(*list));
54 return VK_SUCCESS;
55 }
56
57 static VkResult
58 anv_reloc_list_init_clone(struct anv_reloc_list *list,
59 const VkAllocationCallbacks *alloc,
60 const struct anv_reloc_list *other_list)
61 {
62 list->num_relocs = other_list->num_relocs;
63 list->array_length = other_list->array_length;
64
65 if (list->num_relocs > 0) {
66 list->relocs =
67 vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
68 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
69 if (list->relocs == NULL)
70 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
71
72 list->reloc_bos =
73 vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
74 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
75 if (list->reloc_bos == NULL) {
76 vk_free(alloc, list->relocs);
77 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
78 }
79
80 memcpy(list->relocs, other_list->relocs,
81 list->array_length * sizeof(*list->relocs));
82 memcpy(list->reloc_bos, other_list->reloc_bos,
83 list->array_length * sizeof(*list->reloc_bos));
84 } else {
85 list->relocs = NULL;
86 list->reloc_bos = NULL;
87 }
88
89 if (other_list->deps) {
90 list->deps = _mesa_set_clone(other_list->deps, NULL);
91 if (!list->deps) {
92 vk_free(alloc, list->relocs);
93 vk_free(alloc, list->reloc_bos);
94 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
95 }
96 } else {
97 list->deps = NULL;
98 }
99
100 return VK_SUCCESS;
101 }
102
103 void
104 anv_reloc_list_finish(struct anv_reloc_list *list,
105 const VkAllocationCallbacks *alloc)
106 {
107 vk_free(alloc, list->relocs);
108 vk_free(alloc, list->reloc_bos);
109 if (list->deps != NULL)
110 _mesa_set_destroy(list->deps, NULL);
111 }
112
113 static VkResult
114 anv_reloc_list_grow(struct anv_reloc_list *list,
115 const VkAllocationCallbacks *alloc,
116 size_t num_additional_relocs)
117 {
118 if (list->num_relocs + num_additional_relocs <= list->array_length)
119 return VK_SUCCESS;
120
121 size_t new_length = MAX2(16, list->array_length * 2);
122 while (new_length < list->num_relocs + num_additional_relocs)
123 new_length *= 2;
124
125 struct drm_i915_gem_relocation_entry *new_relocs =
126 vk_realloc(alloc, list->relocs,
127 new_length * sizeof(*list->relocs), 8,
128 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
129 if (new_relocs == NULL)
130 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
131 list->relocs = new_relocs;
132
133 struct anv_bo **new_reloc_bos =
134 vk_realloc(alloc, list->reloc_bos,
135 new_length * sizeof(*list->reloc_bos), 8,
136 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
137 if (new_reloc_bos == NULL)
138 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
139 list->reloc_bos = new_reloc_bos;
140
141 list->array_length = new_length;
142
143 return VK_SUCCESS;
144 }
145
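/* Read x exactly once (via a volatile access) so the compiler cannot re-load
 * it.  anv_reloc_list_add() relies on this for target_bo->offset, which
 * another thread may update concurrently while processing an execbuf; using
 * a single snapshot keeps the returned address and the recorded
 * presumed_offset consistent with each other.
 */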
146 #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
147
148 VkResult
149 anv_reloc_list_add(struct anv_reloc_list *list,
150 const VkAllocationCallbacks *alloc,
151 uint32_t offset, struct anv_bo *target_bo, uint32_t delta,
152 uint64_t *address_u64_out)
153 {
154 struct drm_i915_gem_relocation_entry *entry;
155 int index;
156
157 uint64_t target_bo_offset = READ_ONCE(target_bo->offset);
158 if (address_u64_out)
159 *address_u64_out = target_bo_offset + delta;
160
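/* Pinned (softpin) BOs never move, so no relocation entry is needed; just
 * record the BO in the deps set so it still lands in the execbuf's
 * validation list at submit time.
 */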
161 if (target_bo->flags & EXEC_OBJECT_PINNED) {
162 if (list->deps == NULL) {
163 list->deps = _mesa_pointer_set_create(NULL);
164 if (unlikely(list->deps == NULL))
165 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
166 }
167 _mesa_set_add(list->deps, target_bo);
168 return VK_SUCCESS;
169 }
170
171 VkResult result = anv_reloc_list_grow(list, alloc, 1);
172 if (result != VK_SUCCESS)
173 return result;
174
175 /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
176 index = list->num_relocs++;
177 list->reloc_bos[index] = target_bo;
178 entry = &list->relocs[index];
179 entry->target_handle = target_bo->gem_handle;
180 entry->delta = delta;
181 entry->offset = offset;
182 entry->presumed_offset = target_bo_offset;
183 entry->read_domains = 0;
184 entry->write_domain = 0;
185 VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
186
187 return VK_SUCCESS;
188 }
189
190 static VkResult
191 anv_reloc_list_append(struct anv_reloc_list *list,
192 const VkAllocationCallbacks *alloc,
193 struct anv_reloc_list *other, uint32_t offset)
194 {
195 VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
196 if (result != VK_SUCCESS)
197 return result;
198
199 if (other->num_relocs > 0) {
200 memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
201 other->num_relocs * sizeof(other->relocs[0]));
202 memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
203 other->num_relocs * sizeof(other->reloc_bos[0]));
204
205 for (uint32_t i = 0; i < other->num_relocs; i++)
206 list->relocs[i + list->num_relocs].offset += offset;
207
208 list->num_relocs += other->num_relocs;
209 }
210
211 if (other->deps) {
212 if (list->deps == NULL) {
213 list->deps = _mesa_pointer_set_create(NULL);
214 if (unlikely(list->deps == NULL))
215 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
216 }
217 set_foreach(other->deps, entry)
218 _mesa_set_add_pre_hashed(list->deps, entry->hash, entry->key);
219 }
220
221 return VK_SUCCESS;
222 }
223
224 /*-----------------------------------------------------------------------*
225 * Functions related to anv_batch
226 *-----------------------------------------------------------------------*/
227
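/* Reserve room for num_dwords dwords in the batch and return a pointer to
 * write them at.  If the batch is full, extend_cb is invoked first; for
 * command buffers that callback either chains to a fresh batch BO or grows
 * the current one.  On failure the batch is marked errored and NULL is
 * returned.
 */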
228 void *
229 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
230 {
231 if (batch->next + num_dwords * 4 > batch->end) {
232 VkResult result = batch->extend_cb(batch, batch->user_data);
233 if (result != VK_SUCCESS) {
234 anv_batch_set_error(batch, result);
235 return NULL;
236 }
237 }
238
239 void *p = batch->next;
240
241 batch->next += num_dwords * 4;
242 assert(batch->next <= batch->end);
243
244 return p;
245 }
246
247 uint64_t
248 anv_batch_emit_reloc(struct anv_batch *batch,
249 void *location, struct anv_bo *bo, uint32_t delta)
250 {
251 uint64_t address_u64 = 0;
252 VkResult result = anv_reloc_list_add(batch->relocs, batch->alloc,
253 location - batch->start, bo, delta,
254 &address_u64);
255 if (result != VK_SUCCESS) {
256 anv_batch_set_error(batch, result);
257 return 0;
258 }
259
260 return address_u64;
261 }
262
263 void
264 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
265 {
266 uint32_t size, offset;
267
268 size = other->next - other->start;
269 assert(size % 4 == 0);
270
271 if (batch->next + size > batch->end) {
272 VkResult result = batch->extend_cb(batch, batch->user_data);
273 if (result != VK_SUCCESS) {
274 anv_batch_set_error(batch, result);
275 return;
276 }
277 }
278
279 assert(batch->next + size <= batch->end);
280
281 VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
282 memcpy(batch->next, other->start, size);
283
284 offset = batch->next - batch->start;
285 VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
286 other->relocs, offset);
287 if (result != VK_SUCCESS) {
288 anv_batch_set_error(batch, result);
289 return;
290 }
291
292 batch->next += size;
293 }
294
295 /*-----------------------------------------------------------------------*
296 * Functions related to anv_batch_bo
297 *-----------------------------------------------------------------------*/
298
299 static VkResult
300 anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
301 struct anv_batch_bo **bbo_out)
302 {
303 VkResult result;
304
305 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
306 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
307 if (bbo == NULL)
308 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
309
310 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
311 ANV_CMD_BUFFER_BATCH_SIZE);
312 if (result != VK_SUCCESS)
313 goto fail_alloc;
314
315 result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
316 if (result != VK_SUCCESS)
317 goto fail_bo_alloc;
318
319 *bbo_out = bbo;
320
321 return VK_SUCCESS;
322
323 fail_bo_alloc:
324 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
325 fail_alloc:
326 vk_free(&cmd_buffer->pool->alloc, bbo);
327
328 return result;
329 }
330
331 static VkResult
332 anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
333 const struct anv_batch_bo *other_bbo,
334 struct anv_batch_bo **bbo_out)
335 {
336 VkResult result;
337
338 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
339 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
340 if (bbo == NULL)
341 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
342
343 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
344 other_bbo->bo.size);
345 if (result != VK_SUCCESS)
346 goto fail_alloc;
347
348 result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
349 &other_bbo->relocs);
350 if (result != VK_SUCCESS)
351 goto fail_bo_alloc;
352
353 bbo->length = other_bbo->length;
354 memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);
355
356 *bbo_out = bbo;
357
358 return VK_SUCCESS;
359
360 fail_bo_alloc:
361 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
362 fail_alloc:
363 vk_free(&cmd_buffer->pool->alloc, bbo);
364
365 return result;
366 }
367
368 static void
369 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
370 size_t batch_padding)
371 {
372 batch->next = batch->start = bbo->bo.map;
373 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
374 batch->relocs = &bbo->relocs;
375 bbo->relocs.num_relocs = 0;
376 _mesa_set_clear(bbo->relocs.deps, NULL);
377 }
378
379 static void
380 anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
381 size_t batch_padding)
382 {
383 batch->start = bbo->bo.map;
384 batch->next = bbo->bo.map + bbo->length;
385 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
386 batch->relocs = &bbo->relocs;
387 }
388
389 static void
390 anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
391 {
392 assert(batch->start == bbo->bo.map);
393 bbo->length = batch->next - batch->start;
394 VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
395 }
396
397 static VkResult
398 anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
399                   struct anv_batch *batch, size_t additional,
400 size_t batch_padding)
401 {
402 assert(batch->start == bbo->bo.map);
403 bbo->length = batch->next - batch->start;
404
405 size_t new_size = bbo->bo.size;
406    while (new_size <= bbo->length + additional + batch_padding)
407 new_size *= 2;
408
409 if (new_size == bbo->bo.size)
410 return VK_SUCCESS;
411
412 struct anv_bo new_bo;
413 VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
414 &new_bo, new_size);
415 if (result != VK_SUCCESS)
416 return result;
417
418 memcpy(new_bo.map, bbo->bo.map, bbo->length);
419
420 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
421
422 bbo->bo = new_bo;
423 anv_batch_bo_continue(bbo, batch, batch_padding);
424
425 return VK_SUCCESS;
426 }
427
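/* Point the MI_BATCH_BUFFER_START at the end of prev_bbo at an offset of
 * next_bbo_offset bytes into next_bbo.  With softpin we can write the final
 * address directly; otherwise we retarget the existing relocation entry and
 * give it a bogus presumed offset so the kernel is forced to relocate it.
 */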
428 static void
429 anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
430 struct anv_batch_bo *prev_bbo,
431 struct anv_batch_bo *next_bbo,
432 uint32_t next_bbo_offset)
433 {
434 const uint32_t bb_start_offset =
435 prev_bbo->length - GEN8_MI_BATCH_BUFFER_START_length * 4;
436 ASSERTED const uint32_t *bb_start = prev_bbo->bo.map + bb_start_offset;
437
438 /* Make sure we're looking at a MI_BATCH_BUFFER_START */
439 assert(((*bb_start >> 29) & 0x07) == 0);
440 assert(((*bb_start >> 23) & 0x3f) == 49);
441
442 if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
443 assert(prev_bbo->bo.flags & EXEC_OBJECT_PINNED);
444 assert(next_bbo->bo.flags & EXEC_OBJECT_PINNED);
445
446 write_reloc(cmd_buffer->device,
447 prev_bbo->bo.map + bb_start_offset + 4,
448 next_bbo->bo.offset + next_bbo_offset, true);
449 } else {
450 uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
451 assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);
452
453 prev_bbo->relocs.reloc_bos[reloc_idx] = &next_bbo->bo;
454 prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;
455
456 /* Use a bogus presumed offset to force a relocation */
457 prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
458 }
459 }
460
461 static void
462 anv_batch_bo_destroy(struct anv_batch_bo *bbo,
463 struct anv_cmd_buffer *cmd_buffer)
464 {
465 anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
466 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
467 vk_free(&cmd_buffer->pool->alloc, bbo);
468 }
469
470 static VkResult
471 anv_batch_bo_list_clone(const struct list_head *list,
472 struct anv_cmd_buffer *cmd_buffer,
473 struct list_head *new_list)
474 {
475 VkResult result = VK_SUCCESS;
476
477 list_inithead(new_list);
478
479 struct anv_batch_bo *prev_bbo = NULL;
480 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
481 struct anv_batch_bo *new_bbo = NULL;
482 result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
483 if (result != VK_SUCCESS)
484 break;
485 list_addtail(&new_bbo->link, new_list);
486
487 if (prev_bbo)
488 anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);
489
490 prev_bbo = new_bbo;
491 }
492
493 if (result != VK_SUCCESS) {
494 list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
495 anv_batch_bo_destroy(bbo, cmd_buffer);
496 }
497
498 return result;
499 }
500
501 /*-----------------------------------------------------------------------*
502 * Functions related to anv_batch_bo
503 *-----------------------------------------------------------------------*/
504
505 static struct anv_batch_bo *
506 anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
507 {
508 return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
509 }
510
511 struct anv_address
512 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
513 {
514 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
515 return (struct anv_address) {
516 .bo = anv_binding_table_pool(cmd_buffer->device)->block_pool.bo,
517 .offset = bt_block->offset,
518 };
519 }
520
521 static void
522 emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
523 struct anv_bo *bo, uint32_t offset)
524 {
525    /* In gen8+ the address field grew to two dwords to accommodate 48 bit
526 * offsets. The high 16 bits are in the last dword, so we can use the gen8
527 * version in either case, as long as we set the instruction length in the
528 * header accordingly. This means that we always emit three dwords here
529 * and all the padding and adjustment we do in this file works for all
530 * gens.
531 */
532
533 #define GEN7_MI_BATCH_BUFFER_START_length 2
534 #define GEN7_MI_BATCH_BUFFER_START_length_bias 2
535
536 const uint32_t gen7_length =
537 GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
538 const uint32_t gen8_length =
539 GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;
540
541 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
542 bbs.DWordLength = cmd_buffer->device->info.gen < 8 ?
543 gen7_length : gen8_length;
544 bbs.SecondLevelBatchBuffer = Firstlevelbatch;
545 bbs.AddressSpaceIndicator = ASI_PPGTT;
546 bbs.BatchBufferStartAddress = (struct anv_address) { bo, offset };
547 }
548 }
549
550 static void
551 cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
552 struct anv_batch_bo *bbo)
553 {
554 struct anv_batch *batch = &cmd_buffer->batch;
555 struct anv_batch_bo *current_bbo =
556 anv_cmd_buffer_current_batch_bo(cmd_buffer);
557
558 /* We set the end of the batch a little short so we would be sure we
559 * have room for the chaining command. Since we're about to emit the
560 * chaining command, let's set it back where it should go.
561 */
562 batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
563 assert(batch->end == current_bbo->bo.map + current_bbo->bo.size);
564
565 emit_batch_buffer_start(cmd_buffer, &bbo->bo, 0);
566
567 anv_batch_bo_finish(current_bbo, batch);
568 }
569
570 static VkResult
571 anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
572 {
573 struct anv_cmd_buffer *cmd_buffer = _data;
574 struct anv_batch_bo *new_bbo;
575
576 VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
577 if (result != VK_SUCCESS)
578 return result;
579
580 struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
581 if (seen_bbo == NULL) {
582 anv_batch_bo_destroy(new_bbo, cmd_buffer);
583 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
584 }
585 *seen_bbo = new_bbo;
586
587 cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
588
589 list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
590
591 anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);
592
593 return VK_SUCCESS;
594 }
595
596 static VkResult
597 anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
598 {
599 struct anv_cmd_buffer *cmd_buffer = _data;
600 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
601
602 anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
603 GEN8_MI_BATCH_BUFFER_START_length * 4);
604
605 return VK_SUCCESS;
606 }
607
608 /** Allocate a binding table
609 *
610 * This function allocates a binding table. This is a bit more complicated
611 * than one would think due to a combination of Vulkan driver design and some
612 * unfortunate hardware restrictions.
613 *
614 * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
615 * the binding table pointer which means that all binding tables need to live
616 * in the bottom 64k of surface state base address. The way the GL driver has
617 * classically dealt with this restriction is to emit all surface states
618 * on-the-fly into the batch and have a batch buffer smaller than 64k. This
619 * isn't really an option in Vulkan for a couple of reasons:
620 *
621 * 1) In Vulkan, we have growing (or chaining) batches so surface states have
622 * to live in their own buffer and we have to be able to re-emit
623 * STATE_BASE_ADDRESS as needed which requires a full pipeline stall. In
624 * order to avoid emitting STATE_BASE_ADDRESS any more often than needed
625 * (it's not that hard to hit 64k of just binding tables), we allocate
626 * surface state objects up-front when VkImageView is created. In order
627 * for this to work, surface state objects need to be allocated from a
628 * global buffer.
629 *
630 * 2) We tried to design the surface state system in such a way that it's
631 * already ready for bindless texturing. The way bindless texturing works
632 * on our hardware is that you have a big pool of surface state objects
633 * (with its own state base address) and the bindless handles are simply
634 * offsets into that pool. With the architecture we chose, we already
635 * have that pool and it's exactly the same pool that we use for regular
636 * surface states so we should already be ready for bindless.
637 *
638 * 3) For render targets, we need to be able to fill out the surface states
639 * later in vkBeginRenderPass so that we can assign clear colors
640 * correctly. One way to do this would be to just create the surface
641 * state data and then repeatedly copy it into the surface state BO every
642  *       time we have to re-emit STATE_BASE_ADDRESS.  While this works, it's
643  *       rather annoying and it's much nicer to just allocate them up-front
644  *       and re-use them for the entire render pass.
645 *
646 * While none of these are technically blockers for emitting state on the fly
647  * like we do in GL, the ability to have a single surface state pool
648  * simplifies things greatly.  Unfortunately, it comes at a cost...
649 *
650 * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
651 * place the binding tables just anywhere in surface state base address.
652 * Because 64k isn't a whole lot of space, we can't simply restrict the
653 * surface state buffer to 64k, we have to be more clever. The solution we've
654 * chosen is to have a block pool with a maximum size of 2G that starts at
655 * zero and grows in both directions. All surface states are allocated from
656 * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
657 * binding tables from the bottom of the pool (negative offsets). Every time
658 * we allocate a new binding table block, we set surface state base address to
659 * point to the bottom of the binding table block. This way all of the
660 * binding tables in the block are in the bottom 64k of surface state base
661 * address. When we fill out the binding table, we add the distance between
662 * the bottom of our binding table block and zero of the block pool to the
663  * surface state offsets so that they are correct relative to our new surface
664 * state base address at the bottom of the binding table block.
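 *
 * For example (with made-up numbers): if a binding table block was handed
 * out at block pool offset -4096 and a surface state lives at offset +256,
 * surface state base address points 4096 bytes below the pool's zero point
 * and the value written into the binding table entry is 256 + 4096 = 4352,
 * i.e. the surface state offset plus the state_offset returned by
 * anv_cmd_buffer_alloc_binding_table() below.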
665 *
666  * \see adjust_relocations_from_state_pool()
667  * \see adjust_relocations_to_state_pool()
668 *
669 * \param[in] entries The number of surface state entries the binding
670 * table should be able to hold.
671 *
672  * \param[out] state_offset   The offset from surface state base address
673 * where the surface states live. This must be
674 * added to the surface state offset when it is
675 * written into the binding table entry.
676 *
677 * \return An anv_state representing the binding table
678 */
679 struct anv_state
680 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
681 uint32_t entries, uint32_t *state_offset)
682 {
683 struct anv_device *device = cmd_buffer->device;
684 struct anv_state_pool *state_pool = &device->surface_state_pool;
685 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
686 struct anv_state state;
687
688 state.alloc_size = align_u32(entries * 4, 32);
689
690 if (cmd_buffer->bt_next + state.alloc_size > state_pool->block_size)
691 return (struct anv_state) { 0 };
692
693 state.offset = cmd_buffer->bt_next;
694 state.map = anv_block_pool_map(&anv_binding_table_pool(device)->block_pool,
695 bt_block->offset + state.offset);
696
697 cmd_buffer->bt_next += state.alloc_size;
698
699 if (device->instance->physicalDevice.use_softpin) {
700 assert(bt_block->offset >= 0);
701 *state_offset = device->surface_state_pool.block_pool.start_address -
702 device->binding_table_pool.block_pool.start_address - bt_block->offset;
703 } else {
704 assert(bt_block->offset < 0);
705 *state_offset = -bt_block->offset;
706 }
707
708 return state;
709 }
710
711 struct anv_state
712 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
713 {
714 struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
715 return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
716 isl_dev->ss.size, isl_dev->ss.align);
717 }
718
719 struct anv_state
720 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
721 uint32_t size, uint32_t alignment)
722 {
723 return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
724 size, alignment);
725 }
726
727 VkResult
728 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
729 {
730 struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
731 if (bt_block == NULL) {
732 anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
733 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
734 }
735
736 *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
737 cmd_buffer->bt_next = 0;
738
739 return VK_SUCCESS;
740 }
741
742 VkResult
743 anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
744 {
745 struct anv_batch_bo *batch_bo;
746 VkResult result;
747
748 list_inithead(&cmd_buffer->batch_bos);
749
750 result = anv_batch_bo_create(cmd_buffer, &batch_bo);
751 if (result != VK_SUCCESS)
752 return result;
753
754 list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
755
756 cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
757 cmd_buffer->batch.user_data = cmd_buffer;
758
759 if (cmd_buffer->device->can_chain_batches) {
760 cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
761 } else {
762 cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
763 }
764
765 anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
766 GEN8_MI_BATCH_BUFFER_START_length * 4);
767
768 int success = u_vector_init(&cmd_buffer->seen_bbos,
769 sizeof(struct anv_bo *),
770 8 * sizeof(struct anv_bo *));
771 if (!success)
772 goto fail_batch_bo;
773
774 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
775
776 /* u_vector requires power-of-two size elements */
777 unsigned pow2_state_size = util_next_power_of_two(sizeof(struct anv_state));
778 success = u_vector_init(&cmd_buffer->bt_block_states,
779 pow2_state_size, 8 * pow2_state_size);
780 if (!success)
781 goto fail_seen_bbos;
782
783 result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
784 &cmd_buffer->pool->alloc);
785 if (result != VK_SUCCESS)
786 goto fail_bt_blocks;
787 cmd_buffer->last_ss_pool_center = 0;
788
789 result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
790 if (result != VK_SUCCESS)
791 goto fail_bt_blocks;
792
793 return VK_SUCCESS;
794
795 fail_bt_blocks:
796 u_vector_finish(&cmd_buffer->bt_block_states);
797 fail_seen_bbos:
798 u_vector_finish(&cmd_buffer->seen_bbos);
799 fail_batch_bo:
800 anv_batch_bo_destroy(batch_bo, cmd_buffer);
801
802 return result;
803 }
804
805 void
806 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
807 {
808 struct anv_state *bt_block;
809 u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
810 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
811 u_vector_finish(&cmd_buffer->bt_block_states);
812
813 anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
814
815 u_vector_finish(&cmd_buffer->seen_bbos);
816
817 /* Destroy all of the batch buffers */
818 list_for_each_entry_safe(struct anv_batch_bo, bbo,
819 &cmd_buffer->batch_bos, link) {
820 anv_batch_bo_destroy(bbo, cmd_buffer);
821 }
822 }
823
824 void
825 anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
826 {
827 /* Delete all but the first batch bo */
828 assert(!list_is_empty(&cmd_buffer->batch_bos));
829 while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
830 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
831 list_del(&bbo->link);
832 anv_batch_bo_destroy(bbo, cmd_buffer);
833 }
834 assert(!list_is_empty(&cmd_buffer->batch_bos));
835
836 anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
837 &cmd_buffer->batch,
838 GEN8_MI_BATCH_BUFFER_START_length * 4);
839
840 while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
841 struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
842 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
843 }
844 assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
845 cmd_buffer->bt_next = 0;
846
847 cmd_buffer->surface_relocs.num_relocs = 0;
848 _mesa_set_clear(cmd_buffer->surface_relocs.deps, NULL);
849 cmd_buffer->last_ss_pool_center = 0;
850
851 /* Reset the list of seen buffers */
852 cmd_buffer->seen_bbos.head = 0;
853 cmd_buffer->seen_bbos.tail = 0;
854
855 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) =
856 anv_cmd_buffer_current_batch_bo(cmd_buffer);
857 }
858
859 void
860 anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
861 {
862 struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
863
864 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
865 /* When we start a batch buffer, we subtract a certain amount of
866 * padding from the end to ensure that we always have room to emit a
867 * BATCH_BUFFER_START to chain to the next BO. We need to remove
868 * that padding before we end the batch; otherwise, we may end up
869 * with our BATCH_BUFFER_END in another BO.
870 */
871 cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
872 assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);
873
874 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END, bbe);
875
876 /* Round batch up to an even number of dwords. */
877 if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
878 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);
879
880 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
881 } else {
882 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
883 /* If this is a secondary command buffer, we need to determine the
884 * mode in which it will be executed with vkExecuteCommands. We
885 * determine this statically here so that this stays in sync with the
886 * actual ExecuteCommands implementation.
887 */
888 const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
889 if (!cmd_buffer->device->can_chain_batches) {
890 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
891 } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
892 (length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
893 /* If the secondary has exactly one batch buffer in its list *and*
894 * that batch buffer is less than half of the maximum size, we're
895        * probably better off simply copying it into our batch.
896 */
897 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
898 } else if (!(cmd_buffer->usage_flags &
899 VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
900 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
901
902 /* In order to chain, we need this command buffer to contain an
903 * MI_BATCH_BUFFER_START which will jump back to the calling batch.
904        * It doesn't matter where it points now so long as it has a valid
905 * relocation. We'll adjust it later as part of the chaining
906 * process.
907 *
908 * We set the end of the batch a little short so we would be sure we
909 * have room for the chaining command. Since we're about to emit the
910 * chaining command, let's set it back where it should go.
911 */
912 cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
913 assert(cmd_buffer->batch.start == batch_bo->bo.map);
914 assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);
915
916 emit_batch_buffer_start(cmd_buffer, &batch_bo->bo, 0);
917 assert(cmd_buffer->batch.start == batch_bo->bo.map);
918 } else {
919 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
920 }
921 }
922
923 anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
924 }
925
926 static VkResult
927 anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
928 struct list_head *list)
929 {
930 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
931 struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
932 if (bbo_ptr == NULL)
933 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
934
935 *bbo_ptr = bbo;
936 }
937
938 return VK_SUCCESS;
939 }
940
941 void
942 anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
943 struct anv_cmd_buffer *secondary)
944 {
945 switch (secondary->exec_mode) {
946 case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
947 anv_batch_emit_batch(&primary->batch, &secondary->batch);
948 break;
949 case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
950 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
951 unsigned length = secondary->batch.end - secondary->batch.start;
952 anv_batch_bo_grow(primary, bbo, &primary->batch, length,
953 GEN8_MI_BATCH_BUFFER_START_length * 4);
954 anv_batch_emit_batch(&primary->batch, &secondary->batch);
955 break;
956 }
957 case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
958 struct anv_batch_bo *first_bbo =
959 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
960 struct anv_batch_bo *last_bbo =
961 list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
962
963 emit_batch_buffer_start(primary, &first_bbo->bo, 0);
964
965 struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
966 assert(primary->batch.start == this_bbo->bo.map);
967 uint32_t offset = primary->batch.next - primary->batch.start;
968
969 /* Make the tail of the secondary point back to right after the
970 * MI_BATCH_BUFFER_START in the primary batch.
971 */
972 anv_batch_bo_link(primary, last_bbo, this_bbo, offset);
973
974 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
975 break;
976 }
977 case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
978 struct list_head copy_list;
979 VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
980 secondary,
981 &copy_list);
982 if (result != VK_SUCCESS)
983 return; /* FIXME */
984
985 anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
986
987 struct anv_batch_bo *first_bbo =
988 list_first_entry(&copy_list, struct anv_batch_bo, link);
989 struct anv_batch_bo *last_bbo =
990 list_last_entry(&copy_list, struct anv_batch_bo, link);
991
992 cmd_buffer_chain_to_batch_bo(primary, first_bbo);
993
994 list_splicetail(&copy_list, &primary->batch_bos);
995
996 anv_batch_bo_continue(last_bbo, &primary->batch,
997 GEN8_MI_BATCH_BUFFER_START_length * 4);
998 break;
999 }
1000 default:
1001 assert(!"Invalid execution mode");
1002 }
1003
1004 anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
1005 &secondary->surface_relocs, 0);
1006 }
1007
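/* Accumulates everything needed for one DRM_IOCTL_I915_GEM_EXECBUFFER2
 * submission: the deduplicated validation list of exec objects, the anv_bos
 * they were built from (kept in lock-step), and any syncobj fences to wait
 * on or signal.
 */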
1008 struct anv_execbuf {
1009 struct drm_i915_gem_execbuffer2 execbuf;
1010
1011 struct drm_i915_gem_exec_object2 * objects;
1012 uint32_t bo_count;
1013 struct anv_bo ** bos;
1014
1015 /* Allocated length of the 'objects' and 'bos' arrays */
1016 uint32_t array_length;
1017
1018 bool has_relocs;
1019
1020 uint32_t fence_count;
1021 uint32_t fence_array_length;
1022 struct drm_i915_gem_exec_fence * fences;
1023 struct anv_syncobj ** syncobjs;
1024 };
1025
1026 static void
1027 anv_execbuf_init(struct anv_execbuf *exec)
1028 {
1029 memset(exec, 0, sizeof(*exec));
1030 }
1031
1032 static void
1033 anv_execbuf_finish(struct anv_execbuf *exec,
1034 const VkAllocationCallbacks *alloc)
1035 {
1036 vk_free(alloc, exec->objects);
1037 vk_free(alloc, exec->bos);
1038 vk_free(alloc, exec->fences);
1039 vk_free(alloc, exec->syncobjs);
1040 }
1041
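/* qsort comparator ordering BOs by GEM handle so that dependency sets are
 * added to the execbuf in a deterministic order rather than hash-set
 * iteration order.
 */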
1042 static int
1043 _compare_bo_handles(const void *_bo1, const void *_bo2)
1044 {
1045 struct anv_bo * const *bo1 = _bo1;
1046 struct anv_bo * const *bo2 = _bo2;
1047
1048 return (*bo1)->gem_handle - (*bo2)->gem_handle;
1049 }
1050
1051 static VkResult
1052 anv_execbuf_add_bo_set(struct anv_execbuf *exec,
1053 struct set *deps,
1054 uint32_t extra_flags,
1055 const VkAllocationCallbacks *alloc);
1056
1057 static VkResult
1058 anv_execbuf_add_bo(struct anv_execbuf *exec,
1059 struct anv_bo *bo,
1060 struct anv_reloc_list *relocs,
1061 uint32_t extra_flags,
1062 const VkAllocationCallbacks *alloc)
1063 {
1064 struct drm_i915_gem_exec_object2 *obj = NULL;
1065
1066 if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
1067 obj = &exec->objects[bo->index];
1068
1069 if (obj == NULL) {
1070 /* We've never seen this one before. Add it to the list and assign
1071 * an id that we can use later.
1072 */
1073 if (exec->bo_count >= exec->array_length) {
1074 uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
1075
1076 struct drm_i915_gem_exec_object2 *new_objects =
1077 vk_alloc(alloc, new_len * sizeof(*new_objects),
1078 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1079 if (new_objects == NULL)
1080 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1081
1082 struct anv_bo **new_bos =
1083 vk_alloc(alloc, new_len * sizeof(*new_bos),
1084 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1085 if (new_bos == NULL) {
1086 vk_free(alloc, new_objects);
1087 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1088 }
1089
1090 if (exec->objects) {
1091 memcpy(new_objects, exec->objects,
1092 exec->bo_count * sizeof(*new_objects));
1093 memcpy(new_bos, exec->bos,
1094 exec->bo_count * sizeof(*new_bos));
1095 }
1096
1097 vk_free(alloc, exec->objects);
1098 vk_free(alloc, exec->bos);
1099
1100 exec->objects = new_objects;
1101 exec->bos = new_bos;
1102 exec->array_length = new_len;
1103 }
1104
1105 assert(exec->bo_count < exec->array_length);
1106
1107 bo->index = exec->bo_count++;
1108 obj = &exec->objects[bo->index];
1109 exec->bos[bo->index] = bo;
1110
1111 obj->handle = bo->gem_handle;
1112 obj->relocation_count = 0;
1113 obj->relocs_ptr = 0;
1114 obj->alignment = 0;
1115 obj->offset = bo->offset;
1116 obj->flags = (bo->flags & ~ANV_BO_FLAG_MASK) | extra_flags;
1117 obj->rsvd1 = 0;
1118 obj->rsvd2 = 0;
1119 }
1120
1121 if (relocs != NULL) {
1122 assert(obj->relocation_count == 0);
1123
1124 if (relocs->num_relocs > 0) {
1125 /* This is the first time we've ever seen a list of relocations for
1126 * this BO. Go ahead and set the relocations and then walk the list
1127 * of relocations and add them all.
1128 */
1129 exec->has_relocs = true;
1130 obj->relocation_count = relocs->num_relocs;
1131 obj->relocs_ptr = (uintptr_t) relocs->relocs;
1132
1133 for (size_t i = 0; i < relocs->num_relocs; i++) {
1134 VkResult result;
1135
1136 /* A quick sanity check on relocations */
1137 assert(relocs->relocs[i].offset < bo->size);
1138 result = anv_execbuf_add_bo(exec, relocs->reloc_bos[i], NULL,
1139 extra_flags, alloc);
1140
1141 if (result != VK_SUCCESS)
1142 return result;
1143 }
1144 }
1145
1146 return anv_execbuf_add_bo_set(exec, relocs->deps, extra_flags, alloc);
1147 }
1148
1149 return VK_SUCCESS;
1150 }
1151
1152 /* Add BO dependencies to execbuf */
1153 static VkResult
1154 anv_execbuf_add_bo_set(struct anv_execbuf *exec,
1155 struct set *deps,
1156 uint32_t extra_flags,
1157 const VkAllocationCallbacks *alloc)
1158 {
1159 if (!deps || deps->entries <= 0)
1160 return VK_SUCCESS;
1161
1162 const uint32_t entries = deps->entries;
1163 struct anv_bo **bos =
1164 vk_alloc(alloc, entries * sizeof(*bos),
1165 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1166 if (bos == NULL)
1167 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1168
1169 struct anv_bo **bo = bos;
1170 set_foreach(deps, entry) {
1171 *bo++ = (void *)entry->key;
1172 }
1173
1174 qsort(bos, entries, sizeof(struct anv_bo*), _compare_bo_handles);
1175
1176 VkResult result = VK_SUCCESS;
1177 for (bo = bos; bo < bos + entries; bo++) {
1178 result = anv_execbuf_add_bo(exec, *bo, NULL, extra_flags, alloc);
1179 if (result != VK_SUCCESS)
1180 break;
1181 }
1182
1183 vk_free(alloc, bos);
1184
1185 return result;
1186 }
1187
1188 static VkResult
1189 anv_execbuf_add_syncobj(struct anv_execbuf *exec,
1190 uint32_t handle, uint32_t flags,
1191 const VkAllocationCallbacks *alloc)
1192 {
1193 assert(flags != 0);
1194
1195 if (exec->fence_count >= exec->fence_array_length) {
1196 uint32_t new_len = MAX2(exec->fence_array_length * 2, 64);
1197
1198 exec->fences = vk_realloc(alloc, exec->fences,
1199 new_len * sizeof(*exec->fences),
1200 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1201 if (exec->fences == NULL)
1202 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1203
1204 exec->fence_array_length = new_len;
1205 }
1206
1207 exec->fences[exec->fence_count] = (struct drm_i915_gem_exec_fence) {
1208 .handle = handle,
1209 .flags = flags,
1210 };
1211
1212 exec->fence_count++;
1213
1214 return VK_SUCCESS;
1215 }
1216
1217 static void
1218 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
1219 struct anv_reloc_list *list)
1220 {
1221 for (size_t i = 0; i < list->num_relocs; i++)
1222 list->relocs[i].target_handle = list->reloc_bos[i]->index;
1223 }
1224
1225 static void
1226 adjust_relocations_from_state_pool(struct anv_state_pool *pool,
1227 struct anv_reloc_list *relocs,
1228 uint32_t last_pool_center_bo_offset)
1229 {
1230 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1231 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1232
1233 for (size_t i = 0; i < relocs->num_relocs; i++) {
1234 /* All of the relocations from this block pool to other BO's should
1235 * have been emitted relative to the surface block pool center. We
1236 * need to add the center offset to make them relative to the
1237 * beginning of the actual GEM bo.
1238 */
1239 relocs->relocs[i].offset += delta;
1240 }
1241 }
1242
1243 static void
1244 adjust_relocations_to_state_pool(struct anv_state_pool *pool,
1245 struct anv_bo *from_bo,
1246 struct anv_reloc_list *relocs,
1247 uint32_t last_pool_center_bo_offset)
1248 {
1249 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1250 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1251
1252 /* When we initially emit relocations into a block pool, we don't
1253 * actually know what the final center_bo_offset will be so we just emit
1254 * it as if center_bo_offset == 0. Now that we know what the center
1255 * offset is, we need to walk the list of relocations and adjust any
1256 * relocations that point to the pool bo with the correct offset.
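 *
 * For example (made-up numbers): if these relocations were last adjusted
 * when center_bo_offset was 0 and it has since grown to 8192, every
 * relocation that targets the pool BO gets its delta bumped by 8192 and the
 * corresponding presumed address in the batch rewritten to match.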
1257 */
1258 for (size_t i = 0; i < relocs->num_relocs; i++) {
1259 if (relocs->reloc_bos[i] == pool->block_pool.bo) {
1260 /* Adjust the delta value in the relocation to correctly
1261 * correspond to the new delta. Initially, this value may have
1262 * been negative (if treated as unsigned), but we trust in
1263 * uint32_t roll-over to fix that for us at this point.
1264 */
1265 relocs->relocs[i].delta += delta;
1266
1267 /* Since the delta has changed, we need to update the actual
1268 * relocated value with the new presumed value. This function
1269 * should only be called on batch buffers, so we know it isn't in
1270 * use by the GPU at the moment.
1271 */
1272 assert(relocs->relocs[i].offset < from_bo->size);
1273 write_reloc(pool->block_pool.device,
1274 from_bo->map + relocs->relocs[i].offset,
1275 relocs->relocs[i].presumed_offset +
1276 relocs->relocs[i].delta, false);
1277 }
1278 }
1279 }
1280
1281 static void
1282 anv_reloc_list_apply(struct anv_device *device,
1283 struct anv_reloc_list *list,
1284 struct anv_bo *bo,
1285 bool always_relocate)
1286 {
1287 for (size_t i = 0; i < list->num_relocs; i++) {
1288 struct anv_bo *target_bo = list->reloc_bos[i];
1289 if (list->relocs[i].presumed_offset == target_bo->offset &&
1290 !always_relocate)
1291 continue;
1292
1293 void *p = bo->map + list->relocs[i].offset;
1294 write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
1295 list->relocs[i].presumed_offset = target_bo->offset;
1296 }
1297 }
1298
1299 /**
1300  * This function applies the relocations for a command buffer and writes the
1301 * actual addresses into the buffers as per what we were told by the kernel on
1302 * the previous execbuf2 call. This should be safe to do because, for each
1303 * relocated address, we have two cases:
1304 *
1305 * 1) The target BO is inactive (as seen by the kernel). In this case, it is
1306 * not in use by the GPU so updating the address is 100% ok. It won't be
1307 * in-use by the GPU (from our context) again until the next execbuf2
1308 * happens. If the kernel decides to move it in the next execbuf2, it
1309 * will have to do the relocations itself, but that's ok because it should
1310 * have all of the information needed to do so.
1311 *
1312 * 2) The target BO is active (as seen by the kernel). In this case, it
1313 * hasn't moved since the last execbuffer2 call because GTT shuffling
1314 * *only* happens when the BO is idle. (From our perspective, it only
1315 * happens inside the execbuffer2 ioctl, but the shuffling may be
1316 * triggered by another ioctl, with full-ppgtt this is limited to only
1317 * execbuffer2 ioctls on the same context, or memory pressure.) Since the
1318 * target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
1319 * address and the relocated value we are writing into the BO will be the
1320 * same as the value that is already there.
1321 *
1322 * There is also a possibility that the target BO is active but the exact
1323 * RENDER_SURFACE_STATE object we are writing the relocation into isn't in
1324 * use. In this case, the address currently in the RENDER_SURFACE_STATE
1325 * may be stale but it's still safe to write the relocation because that
1326 * particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
1327 * won't be until the next execbuf2 call.
1328 *
1329 * By doing relocations on the CPU, we can tell the kernel that it doesn't
1330 * need to bother. We want to do this because the surface state buffer is
1331 * used by every command buffer so, if the kernel does the relocations, it
1332 * will always be busy and the kernel will always stall. This is also
1333 * probably the fastest mechanism for doing relocations since the kernel would
1334 * have to make a full copy of all the relocations lists.
1335 */
1336 static bool
1337 relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
1338 struct anv_execbuf *exec)
1339 {
1340 if (!exec->has_relocs)
1341 return true;
1342
1343 static int userspace_relocs = -1;
1344 if (userspace_relocs < 0)
1345 userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
1346 if (!userspace_relocs)
1347 return false;
1348
1349 /* First, we have to check to see whether or not we can even do the
1350 * relocation. New buffers which have never been submitted to the kernel
1351 * don't have a valid offset so we need to let the kernel do relocations so
1352 * that we can get offsets for them. On future execbuf2 calls, those
1353 * buffers will have offsets and we will be able to skip relocating.
1354 * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
1355 */
1356 for (uint32_t i = 0; i < exec->bo_count; i++) {
1357 if (exec->bos[i]->offset == (uint64_t)-1)
1358 return false;
1359 }
1360
1361 /* Since surface states are shared between command buffers and we don't
1362 * know what order they will be submitted to the kernel, we don't know
1363 * what address is actually written in the surface state object at any
1364 * given time. The only option is to always relocate them.
1365 */
1366 anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
1367 cmd_buffer->device->surface_state_pool.block_pool.bo,
1368 true /* always relocate surface states */);
1369
1370 /* Since we own all of the batch buffers, we know what values are stored
1371 * in the relocated addresses and only have to update them if the offsets
1372 * have changed.
1373 */
1374 struct anv_batch_bo **bbo;
1375 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1376 anv_reloc_list_apply(cmd_buffer->device,
1377 &(*bbo)->relocs, &(*bbo)->bo, false);
1378 }
1379
1380 for (uint32_t i = 0; i < exec->bo_count; i++)
1381 exec->objects[i].offset = exec->bos[i]->offset;
1382
1383 return true;
1384 }
1385
1386 static VkResult
1387 setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
1388 struct anv_cmd_buffer *cmd_buffer)
1389 {
1390 struct anv_batch *batch = &cmd_buffer->batch;
1391 struct anv_state_pool *ss_pool =
1392 &cmd_buffer->device->surface_state_pool;
1393
1394 adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
1395 cmd_buffer->last_ss_pool_center);
1396 VkResult result;
1397 struct anv_bo *bo;
1398 if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
1399 anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
1400 result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
1401 &cmd_buffer->device->alloc);
1402 if (result != VK_SUCCESS)
1403 return result;
1404 }
1405 /* Add surface dependencies (BOs) to the execbuf */
1406 anv_execbuf_add_bo_set(execbuf, cmd_buffer->surface_relocs.deps, 0,
1407 &cmd_buffer->device->alloc);
1408
1409 /* Add the BOs for all memory objects */
1410 list_for_each_entry(struct anv_device_memory, mem,
1411 &cmd_buffer->device->memory_objects, link) {
1412 result = anv_execbuf_add_bo(execbuf, mem->bo, NULL, 0,
1413 &cmd_buffer->device->alloc);
1414 if (result != VK_SUCCESS)
1415 return result;
1416 }
1417
1418 struct anv_block_pool *pool;
1419 pool = &cmd_buffer->device->dynamic_state_pool.block_pool;
1420 anv_block_pool_foreach_bo(bo, pool) {
1421 result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
1422 &cmd_buffer->device->alloc);
1423 if (result != VK_SUCCESS)
1424 return result;
1425 }
1426
1427 pool = &cmd_buffer->device->instruction_state_pool.block_pool;
1428 anv_block_pool_foreach_bo(bo, pool) {
1429 result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
1430 &cmd_buffer->device->alloc);
1431 if (result != VK_SUCCESS)
1432 return result;
1433 }
1434
1435 pool = &cmd_buffer->device->binding_table_pool.block_pool;
1436 anv_block_pool_foreach_bo(bo, pool) {
1437 result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
1438 &cmd_buffer->device->alloc);
1439 if (result != VK_SUCCESS)
1440 return result;
1441 }
1442 } else {
1443 /* Since we aren't in the softpin case, all of our STATE_BASE_ADDRESS BOs
1444 * will get added automatically by processing relocations on the batch
1445 * buffer. We have to add the surface state BO manually because it has
1446        * relocations of its own that we need to be sure are processed.
1447 */
1448 result = anv_execbuf_add_bo(execbuf, ss_pool->block_pool.bo,
1449 &cmd_buffer->surface_relocs, 0,
1450 &cmd_buffer->device->alloc);
1451 if (result != VK_SUCCESS)
1452 return result;
1453 }
1454
1455 /* First, we walk over all of the bos we've seen and add them and their
1456 * relocations to the validate list.
1457 */
1458 struct anv_batch_bo **bbo;
1459 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1460 adjust_relocations_to_state_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
1461 cmd_buffer->last_ss_pool_center);
1462
1463 result = anv_execbuf_add_bo(execbuf, &(*bbo)->bo, &(*bbo)->relocs, 0,
1464 &cmd_buffer->device->alloc);
1465 if (result != VK_SUCCESS)
1466 return result;
1467 }
1468
1469 /* Now that we've adjusted all of the surface state relocations, we need to
1470 * record the surface state pool center so future executions of the command
1471 * buffer can adjust correctly.
1472 */
1473 cmd_buffer->last_ss_pool_center = ss_pool->block_pool.center_bo_offset;
1474
1475 struct anv_batch_bo *first_batch_bo =
1476 list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
1477
1478 /* The kernel requires that the last entry in the validation list be the
1479 * batch buffer to execute. We can simply swap the element
1480 * corresponding to the first batch_bo in the chain with the last
1481 * element in the list.
1482 */
1483 if (first_batch_bo->bo.index != execbuf->bo_count - 1) {
1484 uint32_t idx = first_batch_bo->bo.index;
1485 uint32_t last_idx = execbuf->bo_count - 1;
1486
1487 struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
1488 assert(execbuf->bos[idx] == &first_batch_bo->bo);
1489
1490 execbuf->objects[idx] = execbuf->objects[last_idx];
1491 execbuf->bos[idx] = execbuf->bos[last_idx];
1492 execbuf->bos[idx]->index = idx;
1493
1494 execbuf->objects[last_idx] = tmp_obj;
1495 execbuf->bos[last_idx] = &first_batch_bo->bo;
1496 first_batch_bo->bo.index = last_idx;
1497 }
1498
1499 /* If we are pinning our BOs, we shouldn't have to relocate anything */
1500 if (cmd_buffer->device->instance->physicalDevice.use_softpin)
1501 assert(!execbuf->has_relocs);
1502
1503 /* Now we go through and fixup all of the relocation lists to point to
1504 * the correct indices in the object array. We have to do this after we
1505 * reorder the list above as some of the indices may have changed.
1506 */
1507 if (execbuf->has_relocs) {
1508 u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
1509 anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
1510
1511 anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
1512 }
1513
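/* Without LLC the CPU cache is not coherent with the GPU, so flush every
 * batch BO's cachelines (preceded by an mfence) before handing the batch
 * off to the kernel.
 */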
1514 if (!cmd_buffer->device->info.has_llc) {
1515 __builtin_ia32_mfence();
1516 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1517 for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
1518 __builtin_ia32_clflush((*bbo)->bo.map + i);
1519 }
1520 }
1521
1522 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1523 .buffers_ptr = (uintptr_t) execbuf->objects,
1524 .buffer_count = execbuf->bo_count,
1525 .batch_start_offset = 0,
1526 .batch_len = batch->next - batch->start,
1527 .cliprects_ptr = 0,
1528 .num_cliprects = 0,
1529 .DR1 = 0,
1530 .DR4 = 0,
1531 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
1532 .rsvd1 = cmd_buffer->device->context_id,
1533 .rsvd2 = 0,
1534 };
1535
1536 if (relocate_cmd_buffer(cmd_buffer, execbuf)) {
1537 /* If we were able to successfully relocate everything, tell the kernel
1538 * that it can skip doing relocations. The requirement for using
1539 * NO_RELOC is:
1540 *
1541 * 1) The addresses written in the objects must match the corresponding
1542 * reloc.presumed_offset which in turn must match the corresponding
1543 * execobject.offset.
1544 *
1545 * 2) To avoid stalling, execobject.offset should match the current
1546 * address of that object within the active context.
1547 *
1548 * In order to satisfy all of the invariants that make userspace
1549 * relocations to be safe (see relocate_cmd_buffer()), we need to
1550 * further ensure that the addresses we use match those used by the
1551 * kernel for the most recent execbuf2.
1552 *
1553 * The kernel may still choose to do relocations anyway if something has
1554 * moved in the GTT. In this case, the relocation list still needs to be
1555 * valid. All relocations on the batch buffers are already valid and
1556 * kept up-to-date. For surface state relocations, by applying the
1557 * relocations in relocate_cmd_buffer, we ensured that the address in
1558 * the RENDER_SURFACE_STATE matches presumed_offset, so it should be
1559 * safe for the kernel to relocate them as needed.
1560 */
1561 execbuf->execbuf.flags |= I915_EXEC_NO_RELOC;
1562 } else {
1563 /* In the case where we fall back to doing kernel relocations, we need
1564 * to ensure that the relocation list is valid. All relocations on the
1565 * batch buffers are already valid and kept up-to-date. Since surface
1566 * states are shared between command buffers and we don't know what
1567 * order they will be submitted to the kernel, we don't know what
1568 * address is actually written in the surface state object at any given
1569 * time. The only option is to set a bogus presumed offset and let the
1570 * kernel relocate them.
1571 */
1572 for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
1573 cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
1574 }
1575
1576 return VK_SUCCESS;
1577 }
1578
static VkResult
setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_device *device)
{
   VkResult result = anv_execbuf_add_bo(execbuf, &device->trivial_batch_bo,
                                        NULL, 0, &device->alloc);
   if (result != VK_SUCCESS)
      return result;

   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) execbuf->objects,
      .buffer_count = execbuf->bo_count,
      .batch_start_offset = 0,
      .batch_len = 8, /* GEN7_MI_BATCH_BUFFER_END and NOOP */
      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
      .rsvd1 = device->context_id,
      .rsvd2 = 0,
   };

   return VK_SUCCESS;
}

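/* Submit a single command buffer (or the trivial batch when cmd_buffer is
 * NULL) to the kernel, wiring up the given wait/signal semaphores and the
 * optional fence along the way. A call from a queue-submit path might look
 * like this (illustrative only; parameter names are from VkSubmitInfo):
 *
 *    result = anv_cmd_buffer_execbuf(device, cmd_buffer,
 *                                    pSubmit->pWaitSemaphores,
 *                                    pSubmit->waitSemaphoreCount,
 *                                    pSubmit->pSignalSemaphores,
 *                                    pSubmit->signalSemaphoreCount,
 *                                    fence);
 */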
VkResult
anv_cmd_buffer_execbuf(struct anv_device *device,
                       struct anv_cmd_buffer *cmd_buffer,
                       const VkSemaphore *in_semaphores,
                       uint32_t num_in_semaphores,
                       const VkSemaphore *out_semaphores,
                       uint32_t num_out_semaphores,
                       VkFence _fence)
{
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   UNUSED struct anv_physical_device *pdevice = &device->instance->physicalDevice;

   struct anv_execbuf execbuf;
   anv_execbuf_init(&execbuf);

   int in_fence = -1;
   VkResult result = VK_SUCCESS;
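   /* Accumulate the wait semaphores. BO semaphores become extra entries in
    * the exec object list, sync-file semaphores are merged into a single
    * in-fence fd, and DRM syncobjs are added to the fence array with
    * I915_EXEC_FENCE_WAIT.
    */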
   for (uint32_t i = 0; i < num_in_semaphores; i++) {
      ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
      struct anv_semaphore_impl *impl =
         semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
         &semaphore->temporary : &semaphore->permanent;

      switch (impl->type) {
      case ANV_SEMAPHORE_TYPE_BO:
         assert(!pdevice->has_syncobj);
         result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
                                     0, &device->alloc);
         if (result != VK_SUCCESS)
            return result;
         break;

      case ANV_SEMAPHORE_TYPE_SYNC_FILE:
         assert(!pdevice->has_syncobj);
         if (in_fence == -1) {
            in_fence = impl->fd;
         } else {
            int merge = anv_gem_sync_file_merge(device, in_fence, impl->fd);
            if (merge == -1)
               return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);

            close(impl->fd);
            close(in_fence);
            in_fence = merge;
         }

         impl->fd = -1;
         break;

      case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
         result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
                                          I915_EXEC_FENCE_WAIT,
                                          &device->alloc);
         if (result != VK_SUCCESS)
            return result;
         break;

      default:
         break;
      }
   }

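   /* Now the signal semaphores. BO semaphores are added as writable exec
    * objects, sync-file semaphores require asking the kernel for an
    * out-fence, and DRM syncobjs go into the fence array with
    * I915_EXEC_FENCE_SIGNAL.
    */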
   bool need_out_fence = false;
   for (uint32_t i = 0; i < num_out_semaphores; i++) {
      ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);

      /* Under most circumstances, semaphores that we signal won't be
       * temporary. However, the spec does allow it for opaque_fd. From the
       * Vulkan 1.0.53 spec:
       *
       *    "If the import is temporary, the implementation must restore the
       *    semaphore to its prior permanent state after submitting the next
       *    semaphore wait operation."
       *
       * The spec says nothing whatsoever about signal operations on
       * temporarily imported semaphores, so it appears they are allowed.
       * There are also CTS tests that require this to work.
       */
      struct anv_semaphore_impl *impl =
         semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
         &semaphore->temporary : &semaphore->permanent;

      switch (impl->type) {
      case ANV_SEMAPHORE_TYPE_BO:
         assert(!pdevice->has_syncobj);
         result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
                                     EXEC_OBJECT_WRITE, &device->alloc);
         if (result != VK_SUCCESS)
            return result;
         break;

      case ANV_SEMAPHORE_TYPE_SYNC_FILE:
         assert(!pdevice->has_syncobj);
         need_out_fence = true;
         break;

      case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
         result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
                                          I915_EXEC_FENCE_SIGNAL,
                                          &device->alloc);
         if (result != VK_SUCCESS)
            return result;
         break;

      default:
         break;
      }
   }

   if (fence) {
      /* Under most circumstances, fences won't be temporary. However, the
       * spec does allow it for opaque_fd. From the Vulkan 1.0.53 spec
       * (speaking about semaphores, but the same reasoning applies to
       * fences):
       *
       *    "If the import is temporary, the implementation must restore the
       *    semaphore to its prior permanent state after submitting the next
       *    semaphore wait operation."
       *
       * The spec says nothing whatsoever about signal operations on
       * temporarily imported fences, so they appear to be allowed. There are
       * also CTS tests that require this to work.
       */
      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;

      switch (impl->type) {
      case ANV_FENCE_TYPE_BO:
         assert(!pdevice->has_syncobj_wait);
         result = anv_execbuf_add_bo(&execbuf, &impl->bo.bo, NULL,
                                     EXEC_OBJECT_WRITE, &device->alloc);
         if (result != VK_SUCCESS)
            return result;
         break;

      case ANV_FENCE_TYPE_SYNCOBJ:
         result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
                                          I915_EXEC_FENCE_SIGNAL,
                                          &device->alloc);
         if (result != VK_SUCCESS)
            return result;
         break;

      default:
         unreachable("Invalid fence type");
      }
   }

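   /* When batch debugging is enabled (INTEL_DEBUG & DEBUG_BATCH), decode and
    * print the batch we are about to submit before building the execbuf.
    */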
   if (cmd_buffer) {
      if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
         struct anv_batch_bo **bo = u_vector_tail(&cmd_buffer->seen_bbos);

         device->cmd_buffer_being_decoded = cmd_buffer;
         gen_print_batch(&device->decoder_ctx, (*bo)->bo.map,
                         (*bo)->bo.size, (*bo)->bo.offset, false);
         device->cmd_buffer_being_decoded = NULL;
      }

      result = setup_execbuf_for_cmd_buffer(&execbuf, cmd_buffer);
   } else {
      result = setup_empty_execbuf(&execbuf, device);
   }

   if (result != VK_SUCCESS)
      return result;

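   /* DRM syncobjs are passed to the kernel through the otherwise unused
    * cliprects fields; I915_EXEC_FENCE_ARRAY tells it to interpret them as
    * an array of drm_i915_gem_exec_fence entries.
    */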
   if (execbuf.fence_count > 0) {
      assert(device->instance->physicalDevice.has_syncobj);
      execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
      execbuf.execbuf.num_cliprects = execbuf.fence_count;
      execbuf.execbuf.cliprects_ptr = (uintptr_t) execbuf.fences;
   }

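   /* With I915_EXEC_FENCE_IN, the lower 32 bits of rsvd2 carry the sync-file
    * fd to wait on; with I915_EXEC_FENCE_OUT, the kernel returns a new
    * sync-file fd for this submission in the upper 32 bits.
    */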
   if (in_fence != -1) {
      execbuf.execbuf.flags |= I915_EXEC_FENCE_IN;
      execbuf.execbuf.rsvd2 |= (uint32_t)in_fence;
   }

   if (need_out_fence)
      execbuf.execbuf.flags |= I915_EXEC_FENCE_OUT;

   result = anv_device_execbuf(device, &execbuf.execbuf, execbuf.bos);

   /* Execbuf does not consume the in_fence. It's our job to close it. */
   if (in_fence != -1)
      close(in_fence);

   for (uint32_t i = 0; i < num_in_semaphores; i++) {
      ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
      /* From the Vulkan 1.0.53 spec:
       *
       *    "If the import is temporary, the implementation must restore the
       *    semaphore to its prior permanent state after submitting the next
       *    semaphore wait operation."
       *
       * This has to happen after the execbuf in case we close any syncobjs in
       * the process.
       */
      anv_semaphore_reset_temporary(device, semaphore);
   }

   if (fence && fence->permanent.type == ANV_FENCE_TYPE_BO) {
      assert(!pdevice->has_syncobj_wait);
      /* BO fences can't be shared, so they can't be temporary. */
      assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);

      /* Once the execbuf has returned, we need to set the fence state to
       * SUBMITTED. We can't do this before calling execbuf because
       * anv_GetFenceStatus does not take the global device lock before
       * checking fence->state, so it could otherwise observe a SUBMITTED
       * fence whose batch has not actually been submitted yet.
       *
       * We set the fence state to SUBMITTED regardless of whether or not the
       * execbuf succeeds because we need to ensure that vkWaitForFences() and
       * vkGetFenceStatus() return a valid result (VK_ERROR_DEVICE_LOST or
       * VK_SUCCESS) in a finite amount of time even if execbuf fails.
       */
      fence->permanent.bo.state = ANV_BO_FENCE_STATE_SUBMITTED;
   }

   if (result == VK_SUCCESS && need_out_fence) {
      assert(!pdevice->has_syncobj_wait);
      int out_fence = execbuf.execbuf.rsvd2 >> 32;
      for (uint32_t i = 0; i < num_out_semaphores; i++) {
         ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
         /* Out fences can't have temporary state because that would imply
          * that we imported a sync file and are trying to signal it.
          */
         assert(semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE);
         struct anv_semaphore_impl *impl = &semaphore->permanent;

         if (impl->type == ANV_SEMAPHORE_TYPE_SYNC_FILE) {
            assert(impl->fd == -1);
            impl->fd = dup(out_fence);
         }
      }
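      /* Each sync-file semaphore above got its own dup() of the out-fence,
       * so the fd handed back by the kernel can be closed now.
       */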
      close(out_fence);
   }

   anv_execbuf_finish(&execbuf, &device->alloc);

   return result;
}