anv: remove list items on batch fini
src/intel/vulkan/anv_batch_chain.c (mesa.git)
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "genxml/gen8_pack.h"
33
34 #include "util/debug.h"
35
36 /** \file anv_batch_chain.c
37 *
38 * This file contains functions related to anv_cmd_buffer as a data
39 * structure. This involves everything required to create and destroy
40 * the actual batch buffers as well as link them together and handle
41 * relocations and surface state. It specifically does *not* contain any
42 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
43 */
44
45 /*-----------------------------------------------------------------------*
46 * Functions related to anv_reloc_list
47 *-----------------------------------------------------------------------*/
48
49 VkResult
50 anv_reloc_list_init(struct anv_reloc_list *list,
51 const VkAllocationCallbacks *alloc)
52 {
53 memset(list, 0, sizeof(*list));
54 return VK_SUCCESS;
55 }
56
57 static VkResult
58 anv_reloc_list_init_clone(struct anv_reloc_list *list,
59 const VkAllocationCallbacks *alloc,
60 const struct anv_reloc_list *other_list)
61 {
62 list->num_relocs = other_list->num_relocs;
63 list->array_length = other_list->array_length;
64
65 if (list->num_relocs > 0) {
66 list->relocs =
67 vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
68 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
69 if (list->relocs == NULL)
70 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
71
72 list->reloc_bos =
73 vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
74 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
75 if (list->reloc_bos == NULL) {
76 vk_free(alloc, list->relocs);
77 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
78 }
79
80 memcpy(list->relocs, other_list->relocs,
81 list->array_length * sizeof(*list->relocs));
82 memcpy(list->reloc_bos, other_list->reloc_bos,
83 list->array_length * sizeof(*list->reloc_bos));
84 } else {
85 list->relocs = NULL;
86 list->reloc_bos = NULL;
87 }
88
89 list->dep_words = other_list->dep_words;
90
91 if (list->dep_words > 0) {
92 list->deps =
93 vk_alloc(alloc, list->dep_words * sizeof(BITSET_WORD), 8,
94 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
95 memcpy(list->deps, other_list->deps,
96 list->dep_words * sizeof(BITSET_WORD));
97 } else {
98 list->deps = NULL;
99 }
100
101 return VK_SUCCESS;
102 }
103
104 void
105 anv_reloc_list_finish(struct anv_reloc_list *list,
106 const VkAllocationCallbacks *alloc)
107 {
108 vk_free(alloc, list->relocs);
109 vk_free(alloc, list->reloc_bos);
110 vk_free(alloc, list->deps);
111 }
112
113 static VkResult
114 anv_reloc_list_grow(struct anv_reloc_list *list,
115 const VkAllocationCallbacks *alloc,
116 size_t num_additional_relocs)
117 {
118 if (list->num_relocs + num_additional_relocs <= list->array_length)
119 return VK_SUCCESS;
120
121 size_t new_length = MAX2(16, list->array_length * 2);
122 while (new_length < list->num_relocs + num_additional_relocs)
123 new_length *= 2;
124
125 struct drm_i915_gem_relocation_entry *new_relocs =
126 vk_realloc(alloc, list->relocs,
127 new_length * sizeof(*list->relocs), 8,
128 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
129 if (new_relocs == NULL)
130 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
131 list->relocs = new_relocs;
132
133 struct anv_bo **new_reloc_bos =
134 vk_realloc(alloc, list->reloc_bos,
135 new_length * sizeof(*list->reloc_bos), 8,
136 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
137 if (new_reloc_bos == NULL)
138 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
139 list->reloc_bos = new_reloc_bos;
140
141 list->array_length = new_length;
142
143 return VK_SUCCESS;
144 }
145
146 static VkResult
147 anv_reloc_list_grow_deps(struct anv_reloc_list *list,
148 const VkAllocationCallbacks *alloc,
149 uint32_t min_num_words)
150 {
151 if (min_num_words <= list->dep_words)
152 return VK_SUCCESS;
153
154 uint32_t new_length = MAX2(32, list->dep_words * 2);
155 while (new_length < min_num_words)
156 new_length *= 2;
157
158 BITSET_WORD *new_deps =
159 vk_realloc(alloc, list->deps, new_length * sizeof(BITSET_WORD), 8,
160 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
161 if (new_deps == NULL)
162 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
163 list->deps = new_deps;
164
165 /* Zero out the new data */
166 memset(list->deps + list->dep_words, 0,
167 (new_length - list->dep_words) * sizeof(BITSET_WORD));
168 list->dep_words = new_length;
169
170 return VK_SUCCESS;
171 }
172
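/* Force a single, volatile load so the compiler cannot tear or re-load the
 * value.  anv_reloc_list_add() below uses this on bo->offset, which another
 * thread may update after an execbuf, so that the returned address and the
 * recorded presumed_offset come from the same snapshot of the offset. */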
173 #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
174
175 VkResult
176 anv_reloc_list_add(struct anv_reloc_list *list,
177 const VkAllocationCallbacks *alloc,
178 uint32_t offset, struct anv_bo *target_bo, uint32_t delta,
179 uint64_t *address_u64_out)
180 {
181 struct drm_i915_gem_relocation_entry *entry;
182 int index;
183
184 struct anv_bo *unwrapped_target_bo = anv_bo_unwrap(target_bo);
185 uint64_t target_bo_offset = READ_ONCE(unwrapped_target_bo->offset);
186 if (address_u64_out)
187 *address_u64_out = target_bo_offset + delta;
188
189 if (unwrapped_target_bo->flags & EXEC_OBJECT_PINNED) {
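      /* Softpinned (EXEC_OBJECT_PINNED) BOs never move, so no relocation
       * entry is needed.  We only record a dependency on the target BO,
       * keyed by its GEM handle, in the deps bitset so that it still ends up
       * on the execbuf validation list (see anv_execbuf_add_bo_bitset()). */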
190 assert(!target_bo->is_wrapper);
191 uint32_t idx = unwrapped_target_bo->gem_handle;
192 anv_reloc_list_grow_deps(list, alloc, (idx / BITSET_WORDBITS) + 1);
193 BITSET_SET(list->deps, unwrapped_target_bo->gem_handle);
194 return VK_SUCCESS;
195 }
196
197 VkResult result = anv_reloc_list_grow(list, alloc, 1);
198 if (result != VK_SUCCESS)
199 return result;
200
201 /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
202 index = list->num_relocs++;
203 list->reloc_bos[index] = target_bo;
204 entry = &list->relocs[index];
205 entry->target_handle = -1; /* See also anv_cmd_buffer_process_relocs() */
206 entry->delta = delta;
207 entry->offset = offset;
208 entry->presumed_offset = target_bo_offset;
209 entry->read_domains = 0;
210 entry->write_domain = 0;
211 VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
212
213 return VK_SUCCESS;
214 }
215
216 static void
217 anv_reloc_list_clear(struct anv_reloc_list *list)
218 {
219 list->num_relocs = 0;
220 if (list->dep_words > 0)
221 memset(list->deps, 0, list->dep_words * sizeof(BITSET_WORD));
222 }
223
224 static VkResult
225 anv_reloc_list_append(struct anv_reloc_list *list,
226 const VkAllocationCallbacks *alloc,
227 struct anv_reloc_list *other, uint32_t offset)
228 {
229 VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
230 if (result != VK_SUCCESS)
231 return result;
232
233 if (other->num_relocs > 0) {
234 memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
235 other->num_relocs * sizeof(other->relocs[0]));
236 memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
237 other->num_relocs * sizeof(other->reloc_bos[0]));
238
239 for (uint32_t i = 0; i < other->num_relocs; i++)
240 list->relocs[i + list->num_relocs].offset += offset;
241
242 list->num_relocs += other->num_relocs;
243 }
244
245 anv_reloc_list_grow_deps(list, alloc, other->dep_words);
246 for (uint32_t w = 0; w < other->dep_words; w++)
247 list->deps[w] |= other->deps[w];
248
249 return VK_SUCCESS;
250 }
251
252 /*-----------------------------------------------------------------------*
253 * Functions related to anv_batch
254 *-----------------------------------------------------------------------*/
255
256 void *
257 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
258 {
259 if (batch->next + num_dwords * 4 > batch->end) {
260 VkResult result = batch->extend_cb(batch, batch->user_data);
261 if (result != VK_SUCCESS) {
262 anv_batch_set_error(batch, result);
263 return NULL;
264 }
265 }
266
267 void *p = batch->next;
268
269 batch->next += num_dwords * 4;
270 assert(batch->next <= batch->end);
271
272 return p;
273 }
274
275 uint64_t
276 anv_batch_emit_reloc(struct anv_batch *batch,
277 void *location, struct anv_bo *bo, uint32_t delta)
278 {
279 uint64_t address_u64 = 0;
280 VkResult result = anv_reloc_list_add(batch->relocs, batch->alloc,
281 location - batch->start, bo, delta,
282 &address_u64);
283 if (result != VK_SUCCESS) {
284 anv_batch_set_error(batch, result);
285 return 0;
286 }
287
288 return address_u64;
289 }
290
291 void
292 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
293 {
294 uint32_t size, offset;
295
296 size = other->next - other->start;
297 assert(size % 4 == 0);
298
299 if (batch->next + size > batch->end) {
300 VkResult result = batch->extend_cb(batch, batch->user_data);
301 if (result != VK_SUCCESS) {
302 anv_batch_set_error(batch, result);
303 return;
304 }
305 }
306
307 assert(batch->next + size <= batch->end);
308
309 VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
310 memcpy(batch->next, other->start, size);
311
312 offset = batch->next - batch->start;
313 VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
314 other->relocs, offset);
315 if (result != VK_SUCCESS) {
316 anv_batch_set_error(batch, result);
317 return;
318 }
319
320 batch->next += size;
321 }
322
323 /*-----------------------------------------------------------------------*
324 * Functions related to anv_batch_bo
325 *-----------------------------------------------------------------------*/
326
327 static VkResult
328 anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
329 struct anv_batch_bo **bbo_out)
330 {
331 VkResult result;
332
333 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
334 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
335 if (bbo == NULL)
336 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
337
338 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
339 ANV_CMD_BUFFER_BATCH_SIZE, &bbo->bo);
340 if (result != VK_SUCCESS)
341 goto fail_alloc;
342
343 result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
344 if (result != VK_SUCCESS)
345 goto fail_bo_alloc;
346
347 *bbo_out = bbo;
348
349 return VK_SUCCESS;
350
351 fail_bo_alloc:
352 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
353 fail_alloc:
354 vk_free(&cmd_buffer->pool->alloc, bbo);
355
356 return result;
357 }
358
359 static VkResult
360 anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
361 const struct anv_batch_bo *other_bbo,
362 struct anv_batch_bo **bbo_out)
363 {
364 VkResult result;
365
366 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
367 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
368 if (bbo == NULL)
369 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
370
371 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
372 other_bbo->bo->size, &bbo->bo);
373 if (result != VK_SUCCESS)
374 goto fail_alloc;
375
376 result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
377 &other_bbo->relocs);
378 if (result != VK_SUCCESS)
379 goto fail_bo_alloc;
380
381 bbo->length = other_bbo->length;
382 memcpy(bbo->bo->map, other_bbo->bo->map, other_bbo->length);
383 *bbo_out = bbo;
384
385 return VK_SUCCESS;
386
387 fail_bo_alloc:
388 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
389 fail_alloc:
390 vk_free(&cmd_buffer->pool->alloc, bbo);
391
392 return result;
393 }
394
395 static void
396 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
397 size_t batch_padding)
398 {
399 batch->next = batch->start = bbo->bo->map;
400 batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
401 batch->relocs = &bbo->relocs;
402 anv_reloc_list_clear(&bbo->relocs);
403 }
404
405 static void
406 anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
407 size_t batch_padding)
408 {
409 batch->start = bbo->bo->map;
410 batch->next = bbo->bo->map + bbo->length;
411 batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
412 batch->relocs = &bbo->relocs;
413 }
414
415 static void
416 anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
417 {
418 assert(batch->start == bbo->bo->map);
419 bbo->length = batch->next - batch->start;
420 VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
421 }
422
423 static VkResult
424 anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
 425                   struct anv_batch *batch, size_t additional,
426 size_t batch_padding)
427 {
428 assert(batch->start == bbo->bo->map);
429 bbo->length = batch->next - batch->start;
430
431 size_t new_size = bbo->bo->size;
 432    while (new_size <= bbo->length + additional + batch_padding)
433 new_size *= 2;
434
435 if (new_size == bbo->bo->size)
436 return VK_SUCCESS;
437
438 struct anv_bo *new_bo;
439 VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
440 new_size, &new_bo);
441 if (result != VK_SUCCESS)
442 return result;
443
444 memcpy(new_bo->map, bbo->bo->map, bbo->length);
445
446 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
447
448 bbo->bo = new_bo;
449 anv_batch_bo_continue(bbo, batch, batch_padding);
450
451 return VK_SUCCESS;
452 }
453
454 static void
455 anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
456 struct anv_batch_bo *prev_bbo,
457 struct anv_batch_bo *next_bbo,
458 uint32_t next_bbo_offset)
459 {
460 const uint32_t bb_start_offset =
461 prev_bbo->length - GEN8_MI_BATCH_BUFFER_START_length * 4;
462 ASSERTED const uint32_t *bb_start = prev_bbo->bo->map + bb_start_offset;
463
464 /* Make sure we're looking at a MI_BATCH_BUFFER_START */
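   /* (Command type in bits 31:29 must be 0 == MI; the MI opcode in bits
    * 28:23 must be 49 == 0x31, which is MI_BATCH_BUFFER_START.) */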
465 assert(((*bb_start >> 29) & 0x07) == 0);
466 assert(((*bb_start >> 23) & 0x3f) == 49);
467
468 if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
469 assert(prev_bbo->bo->flags & EXEC_OBJECT_PINNED);
470 assert(next_bbo->bo->flags & EXEC_OBJECT_PINNED);
471
472 write_reloc(cmd_buffer->device,
473 prev_bbo->bo->map + bb_start_offset + 4,
474 next_bbo->bo->offset + next_bbo_offset, true);
475 } else {
476 uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
477 assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);
478
479 prev_bbo->relocs.reloc_bos[reloc_idx] = next_bbo->bo;
480 prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;
481
482 /* Use a bogus presumed offset to force a relocation */
483 prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
484 }
485 }
486
487 static void
488 anv_batch_bo_destroy(struct anv_batch_bo *bbo,
489 struct anv_cmd_buffer *cmd_buffer)
490 {
491 anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
492 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
493 vk_free(&cmd_buffer->pool->alloc, bbo);
494 }
495
496 static VkResult
497 anv_batch_bo_list_clone(const struct list_head *list,
498 struct anv_cmd_buffer *cmd_buffer,
499 struct list_head *new_list)
500 {
501 VkResult result = VK_SUCCESS;
502
503 list_inithead(new_list);
504
505 struct anv_batch_bo *prev_bbo = NULL;
506 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
507 struct anv_batch_bo *new_bbo = NULL;
508 result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
509 if (result != VK_SUCCESS)
510 break;
511 list_addtail(&new_bbo->link, new_list);
512
513 if (prev_bbo)
514 anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);
515
516 prev_bbo = new_bbo;
517 }
518
519 if (result != VK_SUCCESS) {
520 list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link) {
521 list_del(&bbo->link);
522 anv_batch_bo_destroy(bbo, cmd_buffer);
523 }
524 }
525
526 return result;
527 }
528
529 /*-----------------------------------------------------------------------*
530 * Functions related to anv_batch_bo
531 *-----------------------------------------------------------------------*/
532
533 static struct anv_batch_bo *
534 anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
535 {
536 return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
537 }
538
539 struct anv_address
540 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
541 {
542 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
543 return (struct anv_address) {
544 .bo = anv_binding_table_pool(cmd_buffer->device)->block_pool.bo,
545 .offset = bt_block->offset,
546 };
547 }
548
549 static void
550 emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
551 struct anv_bo *bo, uint32_t offset)
552 {
 553    /* In gen8+ the address field grew to two dwords to accommodate 48 bit
554 * offsets. The high 16 bits are in the last dword, so we can use the gen8
555 * version in either case, as long as we set the instruction length in the
556 * header accordingly. This means that we always emit three dwords here
557 * and all the padding and adjustment we do in this file works for all
558 * gens.
559 */
560
561 #define GEN7_MI_BATCH_BUFFER_START_length 2
562 #define GEN7_MI_BATCH_BUFFER_START_length_bias 2
563
564 const uint32_t gen7_length =
565 GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
566 const uint32_t gen8_length =
567 GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;
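   /* With GEN7_MI_BATCH_BUFFER_START_length == 2 (defined above) and the
    * genxml GEN8_MI_BATCH_BUFFER_START_length == 3, both with a bias of 2,
    * these work out to 0 and 1 respectively: the DWordLength field always
    * encodes the total instruction length in dwords minus two. */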
568
569 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
570 bbs.DWordLength = cmd_buffer->device->info.gen < 8 ?
571 gen7_length : gen8_length;
572 bbs.SecondLevelBatchBuffer = Firstlevelbatch;
573 bbs.AddressSpaceIndicator = ASI_PPGTT;
574 bbs.BatchBufferStartAddress = (struct anv_address) { bo, offset };
575 }
576 }
577
578 static void
579 cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
580 struct anv_batch_bo *bbo)
581 {
582 struct anv_batch *batch = &cmd_buffer->batch;
583 struct anv_batch_bo *current_bbo =
584 anv_cmd_buffer_current_batch_bo(cmd_buffer);
585
586 /* We set the end of the batch a little short so we would be sure we
587 * have room for the chaining command. Since we're about to emit the
588 * chaining command, let's set it back where it should go.
589 */
590 batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
591 assert(batch->end == current_bbo->bo->map + current_bbo->bo->size);
592
593 emit_batch_buffer_start(cmd_buffer, bbo->bo, 0);
594
595 anv_batch_bo_finish(current_bbo, batch);
596 }
597
598 static VkResult
599 anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
600 {
601 struct anv_cmd_buffer *cmd_buffer = _data;
602 struct anv_batch_bo *new_bbo;
603
604 VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
605 if (result != VK_SUCCESS)
606 return result;
607
608 struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
609 if (seen_bbo == NULL) {
610 anv_batch_bo_destroy(new_bbo, cmd_buffer);
611 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
612 }
613 *seen_bbo = new_bbo;
614
615 cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
616
617 list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
618
619 anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);
620
621 return VK_SUCCESS;
622 }
623
624 static VkResult
625 anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
626 {
627 struct anv_cmd_buffer *cmd_buffer = _data;
628 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
629
630 anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
631 GEN8_MI_BATCH_BUFFER_START_length * 4);
632
633 return VK_SUCCESS;
634 }
635
636 /** Allocate a binding table
637 *
638 * This function allocates a binding table. This is a bit more complicated
639 * than one would think due to a combination of Vulkan driver design and some
640 * unfortunate hardware restrictions.
641 *
642 * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
643 * the binding table pointer which means that all binding tables need to live
644 * in the bottom 64k of surface state base address. The way the GL driver has
645 * classically dealt with this restriction is to emit all surface states
646 * on-the-fly into the batch and have a batch buffer smaller than 64k. This
647 * isn't really an option in Vulkan for a couple of reasons:
648 *
649 * 1) In Vulkan, we have growing (or chaining) batches so surface states have
650 * to live in their own buffer and we have to be able to re-emit
651 * STATE_BASE_ADDRESS as needed which requires a full pipeline stall. In
652 * order to avoid emitting STATE_BASE_ADDRESS any more often than needed
653 * (it's not that hard to hit 64k of just binding tables), we allocate
654 * surface state objects up-front when VkImageView is created. In order
655 * for this to work, surface state objects need to be allocated from a
656 * global buffer.
657 *
658 * 2) We tried to design the surface state system in such a way that it's
659 * already ready for bindless texturing. The way bindless texturing works
660 * on our hardware is that you have a big pool of surface state objects
661 * (with its own state base address) and the bindless handles are simply
662 * offsets into that pool. With the architecture we chose, we already
663 * have that pool and it's exactly the same pool that we use for regular
664 * surface states so we should already be ready for bindless.
665 *
666 * 3) For render targets, we need to be able to fill out the surface states
667 * later in vkBeginRenderPass so that we can assign clear colors
668 * correctly. One way to do this would be to just create the surface
669 * state data and then repeatedly copy it into the surface state BO every
670 * time we have to re-emit STATE_BASE_ADDRESS. While this works, it's
 671  *    rather annoying, and just being able to allocate them up-front and
 672  *    re-use them for the entire render pass is much nicer.
673 *
674 * While none of these are technically blockers for emitting state on the fly
 675  * like we do in GL, the ability to have a single surface state pool
676 * simplifies things greatly. Unfortunately, it comes at a cost...
677 *
678 * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
679 * place the binding tables just anywhere in surface state base address.
680 * Because 64k isn't a whole lot of space, we can't simply restrict the
 681  * surface state buffer to 64k; we have to be more clever. The solution we've
682 * chosen is to have a block pool with a maximum size of 2G that starts at
683 * zero and grows in both directions. All surface states are allocated from
684 * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
685 * binding tables from the bottom of the pool (negative offsets). Every time
686 * we allocate a new binding table block, we set surface state base address to
687 * point to the bottom of the binding table block. This way all of the
688 * binding tables in the block are in the bottom 64k of surface state base
689 * address. When we fill out the binding table, we add the distance between
690 * the bottom of our binding table block and zero of the block pool to the
 691  * surface state offsets so that they are correct relative to our new surface
692 * state base address at the bottom of the binding table block.
693 *
 694  * \see adjust_relocations_from_state_pool()
 695  * \see adjust_relocations_to_state_pool()
696 *
697 * \param[in] entries The number of surface state entries the binding
698 * table should be able to hold.
699 *
 700  * \param[out] state_offset The offset from surface state base address
701 * where the surface states live. This must be
702 * added to the surface state offset when it is
703 * written into the binding table entry.
704 *
705 * \return An anv_state representing the binding table
706 */
707 struct anv_state
708 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
709 uint32_t entries, uint32_t *state_offset)
710 {
711 struct anv_device *device = cmd_buffer->device;
712 struct anv_state_pool *state_pool = &device->surface_state_pool;
713 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
714 struct anv_state state;
715
716 state.alloc_size = align_u32(entries * 4, 32);
717
718 if (cmd_buffer->bt_next + state.alloc_size > state_pool->block_size)
719 return (struct anv_state) { 0 };
720
721 state.offset = cmd_buffer->bt_next;
722 state.map = anv_block_pool_map(&anv_binding_table_pool(device)->block_pool,
723 bt_block->offset + state.offset);
724
725 cmd_buffer->bt_next += state.alloc_size;
726
727 if (device->instance->physicalDevice.use_softpin) {
728 assert(bt_block->offset >= 0);
729 *state_offset = device->surface_state_pool.block_pool.start_address -
730 device->binding_table_pool.block_pool.start_address - bt_block->offset;
731 } else {
732 assert(bt_block->offset < 0);
733 *state_offset = -bt_block->offset;
734 }
735
736 return state;
737 }
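/* Worked example (non-softpin, made-up numbers): if the current binding table
 * block was allocated at bt_block->offset == -4096 in the bidirectional block
 * pool, *state_offset is 4096.  A surface state living at pool offset X is
 * then written into the binding table as X + 4096, which is its offset
 * relative to the surface state base address that points at the bottom of the
 * binding table block. */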
738
739 struct anv_state
740 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
741 {
742 struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
743 return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
744 isl_dev->ss.size, isl_dev->ss.align);
745 }
746
747 struct anv_state
748 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
749 uint32_t size, uint32_t alignment)
750 {
751 return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
752 size, alignment);
753 }
754
755 VkResult
756 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
757 {
758 struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
759 if (bt_block == NULL) {
760 anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
761 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
762 }
763
764 *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
765 cmd_buffer->bt_next = 0;
766
767 return VK_SUCCESS;
768 }
769
770 VkResult
771 anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
772 {
773 struct anv_batch_bo *batch_bo;
774 VkResult result;
775
776 list_inithead(&cmd_buffer->batch_bos);
777
778 result = anv_batch_bo_create(cmd_buffer, &batch_bo);
779 if (result != VK_SUCCESS)
780 return result;
781
782 list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
783
784 cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
785 cmd_buffer->batch.user_data = cmd_buffer;
786
787 if (cmd_buffer->device->can_chain_batches) {
788 cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
789 } else {
790 cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
791 }
792
793 anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
794 GEN8_MI_BATCH_BUFFER_START_length * 4);
795
796 int success = u_vector_init(&cmd_buffer->seen_bbos,
797 sizeof(struct anv_bo *),
798 8 * sizeof(struct anv_bo *));
799 if (!success)
800 goto fail_batch_bo;
801
802 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
803
804 /* u_vector requires power-of-two size elements */
805 unsigned pow2_state_size = util_next_power_of_two(sizeof(struct anv_state));
806 success = u_vector_init(&cmd_buffer->bt_block_states,
807 pow2_state_size, 8 * pow2_state_size);
808 if (!success)
809 goto fail_seen_bbos;
810
811 result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
812 &cmd_buffer->pool->alloc);
813 if (result != VK_SUCCESS)
814 goto fail_bt_blocks;
815 cmd_buffer->last_ss_pool_center = 0;
816
817 result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
818 if (result != VK_SUCCESS)
819 goto fail_bt_blocks;
820
821 return VK_SUCCESS;
822
823 fail_bt_blocks:
824 u_vector_finish(&cmd_buffer->bt_block_states);
825 fail_seen_bbos:
826 u_vector_finish(&cmd_buffer->seen_bbos);
827 fail_batch_bo:
828 anv_batch_bo_destroy(batch_bo, cmd_buffer);
829
830 return result;
831 }
832
833 void
834 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
835 {
836 struct anv_state *bt_block;
837 u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
838 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
839 u_vector_finish(&cmd_buffer->bt_block_states);
840
841 anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
842
843 u_vector_finish(&cmd_buffer->seen_bbos);
844
845 /* Destroy all of the batch buffers */
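   /* Each entry is unlinked before it is destroyed so the list never holds
    * pointers to freed batch BOs (presumably the point of "remove list items
    * on batch fini" in the commit title above). */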
846 list_for_each_entry_safe(struct anv_batch_bo, bbo,
847 &cmd_buffer->batch_bos, link) {
848 list_del(&bbo->link);
849 anv_batch_bo_destroy(bbo, cmd_buffer);
850 }
851 }
852
853 void
854 anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
855 {
856 /* Delete all but the first batch bo */
857 assert(!list_is_empty(&cmd_buffer->batch_bos));
858 while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
859 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
860 list_del(&bbo->link);
861 anv_batch_bo_destroy(bbo, cmd_buffer);
862 }
863 assert(!list_is_empty(&cmd_buffer->batch_bos));
864
865 anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
866 &cmd_buffer->batch,
867 GEN8_MI_BATCH_BUFFER_START_length * 4);
868
869 while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
870 struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
871 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
872 }
873 assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
874 cmd_buffer->bt_next = 0;
875
876 anv_reloc_list_clear(&cmd_buffer->surface_relocs);
877 cmd_buffer->last_ss_pool_center = 0;
878
879 /* Reset the list of seen buffers */
880 cmd_buffer->seen_bbos.head = 0;
881 cmd_buffer->seen_bbos.tail = 0;
882
883 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) =
884 anv_cmd_buffer_current_batch_bo(cmd_buffer);
885 }
886
887 void
888 anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
889 {
890 struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
891
892 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
893 /* When we start a batch buffer, we subtract a certain amount of
894 * padding from the end to ensure that we always have room to emit a
895 * BATCH_BUFFER_START to chain to the next BO. We need to remove
896 * that padding before we end the batch; otherwise, we may end up
897 * with our BATCH_BUFFER_END in another BO.
898 */
899 cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
900 assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
901
902 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END, bbe);
903
904 /* Round batch up to an even number of dwords. */
905 if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
906 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);
907
908 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
909 } else {
910 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
911 /* If this is a secondary command buffer, we need to determine the
912 * mode in which it will be executed with vkExecuteCommands. We
913 * determine this statically here so that this stays in sync with the
914 * actual ExecuteCommands implementation.
915 */
916 const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
917 if (!cmd_buffer->device->can_chain_batches) {
918 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
919 } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
920 (length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
921 /* If the secondary has exactly one batch buffer in its list *and*
922 * that batch buffer is less than half of the maximum size, we're
 923        * probably better off simply copying it into our batch.
924 */
925 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
926 } else if (!(cmd_buffer->usage_flags &
927 VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
928 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
929
930 /* In order to chain, we need this command buffer to contain an
931 * MI_BATCH_BUFFER_START which will jump back to the calling batch.
 932        * It doesn't matter where it points now so long as it has a valid
933 * relocation. We'll adjust it later as part of the chaining
934 * process.
935 *
936 * We set the end of the batch a little short so we would be sure we
937 * have room for the chaining command. Since we're about to emit the
938 * chaining command, let's set it back where it should go.
939 */
940 cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
941 assert(cmd_buffer->batch.start == batch_bo->bo->map);
942 assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
943
944 emit_batch_buffer_start(cmd_buffer, batch_bo->bo, 0);
945 assert(cmd_buffer->batch.start == batch_bo->bo->map);
946 } else {
947 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
948 }
949 }
950
951 anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
952 }
953
954 static VkResult
955 anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
956 struct list_head *list)
957 {
958 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
959 struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
960 if (bbo_ptr == NULL)
961 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
962
963 *bbo_ptr = bbo;
964 }
965
966 return VK_SUCCESS;
967 }
968
969 void
970 anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
971 struct anv_cmd_buffer *secondary)
972 {
973 switch (secondary->exec_mode) {
974 case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
975 anv_batch_emit_batch(&primary->batch, &secondary->batch);
976 break;
977 case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
978 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
979 unsigned length = secondary->batch.end - secondary->batch.start;
980 anv_batch_bo_grow(primary, bbo, &primary->batch, length,
981 GEN8_MI_BATCH_BUFFER_START_length * 4);
982 anv_batch_emit_batch(&primary->batch, &secondary->batch);
983 break;
984 }
985 case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
986 struct anv_batch_bo *first_bbo =
987 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
988 struct anv_batch_bo *last_bbo =
989 list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
990
991 emit_batch_buffer_start(primary, first_bbo->bo, 0);
992
993 struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
994 assert(primary->batch.start == this_bbo->bo->map);
995 uint32_t offset = primary->batch.next - primary->batch.start;
996
997 /* Make the tail of the secondary point back to right after the
998 * MI_BATCH_BUFFER_START in the primary batch.
999 */
1000 anv_batch_bo_link(primary, last_bbo, this_bbo, offset);
1001
1002 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
1003 break;
1004 }
1005 case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
1006 struct list_head copy_list;
1007 VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
1008 secondary,
1009 &copy_list);
1010 if (result != VK_SUCCESS)
1011 return; /* FIXME */
1012
1013 anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
1014
1015 struct anv_batch_bo *first_bbo =
1016 list_first_entry(&copy_list, struct anv_batch_bo, link);
1017 struct anv_batch_bo *last_bbo =
1018 list_last_entry(&copy_list, struct anv_batch_bo, link);
1019
1020 cmd_buffer_chain_to_batch_bo(primary, first_bbo);
1021
1022 list_splicetail(&copy_list, &primary->batch_bos);
1023
1024 anv_batch_bo_continue(last_bbo, &primary->batch,
1025 GEN8_MI_BATCH_BUFFER_START_length * 4);
1026 break;
1027 }
1028 default:
1029 assert(!"Invalid execution mode");
1030 }
1031
1032 anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
1033 &secondary->surface_relocs, 0);
1034 }
1035
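/* Everything needed to build a single execbuffer2 submission: the
 * drm_i915_gem_exec_object2 validation list, a parallel array of the
 * corresponding anv_bos (used together with anv_bo::index to de-duplicate
 * entries), and an optional array of syncobj fences. */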
1036 struct anv_execbuf {
1037 struct drm_i915_gem_execbuffer2 execbuf;
1038
1039 struct drm_i915_gem_exec_object2 * objects;
1040 uint32_t bo_count;
1041 struct anv_bo ** bos;
1042
1043 /* Allocated length of the 'objects' and 'bos' arrays */
1044 uint32_t array_length;
1045
1046 bool has_relocs;
1047
1048 uint32_t fence_count;
1049 uint32_t fence_array_length;
1050 struct drm_i915_gem_exec_fence * fences;
1051 struct anv_syncobj ** syncobjs;
1052 };
1053
1054 static void
1055 anv_execbuf_init(struct anv_execbuf *exec)
1056 {
1057 memset(exec, 0, sizeof(*exec));
1058 }
1059
1060 static void
1061 anv_execbuf_finish(struct anv_execbuf *exec,
1062 const VkAllocationCallbacks *alloc)
1063 {
1064 vk_free(alloc, exec->objects);
1065 vk_free(alloc, exec->bos);
1066 vk_free(alloc, exec->fences);
1067 vk_free(alloc, exec->syncobjs);
1068 }
1069
1070 static VkResult
1071 anv_execbuf_add_bo_bitset(struct anv_device *device,
1072 struct anv_execbuf *exec,
1073 uint32_t dep_words,
1074 BITSET_WORD *deps,
1075 uint32_t extra_flags,
1076 const VkAllocationCallbacks *alloc);
1077
1078 static VkResult
1079 anv_execbuf_add_bo(struct anv_device *device,
1080 struct anv_execbuf *exec,
1081 struct anv_bo *bo,
1082 struct anv_reloc_list *relocs,
1083 uint32_t extra_flags,
1084 const VkAllocationCallbacks *alloc)
1085 {
1086 struct drm_i915_gem_exec_object2 *obj = NULL;
1087
1088 bo = anv_bo_unwrap(bo);
1089
1090 if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
1091 obj = &exec->objects[bo->index];
1092
1093 if (obj == NULL) {
1094 /* We've never seen this one before. Add it to the list and assign
1095 * an id that we can use later.
1096 */
1097 if (exec->bo_count >= exec->array_length) {
1098 uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
1099
1100 struct drm_i915_gem_exec_object2 *new_objects =
1101 vk_alloc(alloc, new_len * sizeof(*new_objects),
1102 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1103 if (new_objects == NULL)
1104 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1105
1106 struct anv_bo **new_bos =
1107 vk_alloc(alloc, new_len * sizeof(*new_bos),
1108 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1109 if (new_bos == NULL) {
1110 vk_free(alloc, new_objects);
1111 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1112 }
1113
1114 if (exec->objects) {
1115 memcpy(new_objects, exec->objects,
1116 exec->bo_count * sizeof(*new_objects));
1117 memcpy(new_bos, exec->bos,
1118 exec->bo_count * sizeof(*new_bos));
1119 }
1120
1121 vk_free(alloc, exec->objects);
1122 vk_free(alloc, exec->bos);
1123
1124 exec->objects = new_objects;
1125 exec->bos = new_bos;
1126 exec->array_length = new_len;
1127 }
1128
1129 assert(exec->bo_count < exec->array_length);
1130
1131 bo->index = exec->bo_count++;
1132 obj = &exec->objects[bo->index];
1133 exec->bos[bo->index] = bo;
1134
1135 obj->handle = bo->gem_handle;
1136 obj->relocation_count = 0;
1137 obj->relocs_ptr = 0;
1138 obj->alignment = 0;
1139 obj->offset = bo->offset;
1140 obj->flags = bo->flags | extra_flags;
1141 obj->rsvd1 = 0;
1142 obj->rsvd2 = 0;
1143 }
1144
1145 if (relocs != NULL) {
1146 assert(obj->relocation_count == 0);
1147
1148 if (relocs->num_relocs > 0) {
1149 /* This is the first time we've ever seen a list of relocations for
1150 * this BO. Go ahead and set the relocations and then walk the list
1151 * of relocations and add them all.
1152 */
1153 exec->has_relocs = true;
1154 obj->relocation_count = relocs->num_relocs;
1155 obj->relocs_ptr = (uintptr_t) relocs->relocs;
1156
1157 for (size_t i = 0; i < relocs->num_relocs; i++) {
1158 VkResult result;
1159
1160 /* A quick sanity check on relocations */
1161 assert(relocs->relocs[i].offset < bo->size);
1162 result = anv_execbuf_add_bo(device, exec, relocs->reloc_bos[i],
1163 NULL, extra_flags, alloc);
1164
1165 if (result != VK_SUCCESS)
1166 return result;
1167 }
1168 }
1169
1170 return anv_execbuf_add_bo_bitset(device, exec, relocs->dep_words,
1171 relocs->deps, extra_flags, alloc);
1172 }
1173
1174 return VK_SUCCESS;
1175 }
1176
1177 /* Add BO dependencies to execbuf */
1178 static VkResult
1179 anv_execbuf_add_bo_bitset(struct anv_device *device,
1180 struct anv_execbuf *exec,
1181 uint32_t dep_words,
1182 BITSET_WORD *deps,
1183 uint32_t extra_flags,
1184 const VkAllocationCallbacks *alloc)
1185 {
1186 for (uint32_t w = 0; w < dep_words; w++) {
1187 BITSET_WORD mask = deps[w];
1188 while (mask) {
1189 int i = u_bit_scan(&mask);
1190 uint32_t gem_handle = w * BITSET_WORDBITS + i;
1191 struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
1192 assert(bo->refcount > 0);
1193 VkResult result = anv_execbuf_add_bo(device, exec,
1194 bo, NULL, extra_flags, alloc);
1195 if (result != VK_SUCCESS)
1196 return result;
1197 }
1198 }
1199
1200 return VK_SUCCESS;
1201 }
1202
1203 static VkResult
1204 anv_execbuf_add_syncobj(struct anv_execbuf *exec,
1205 uint32_t handle, uint32_t flags,
1206 const VkAllocationCallbacks *alloc)
1207 {
1208 assert(flags != 0);
1209
1210 if (exec->fence_count >= exec->fence_array_length) {
1211 uint32_t new_len = MAX2(exec->fence_array_length * 2, 64);
1212
1213 exec->fences = vk_realloc(alloc, exec->fences,
1214 new_len * sizeof(*exec->fences),
1215 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1216 if (exec->fences == NULL)
1217 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1218
1219 exec->fence_array_length = new_len;
1220 }
1221
1222 exec->fences[exec->fence_count] = (struct drm_i915_gem_exec_fence) {
1223 .handle = handle,
1224 .flags = flags,
1225 };
1226
1227 exec->fence_count++;
1228
1229 return VK_SUCCESS;
1230 }
1231
1232 static void
1233 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
1234 struct anv_reloc_list *list)
1235 {
1236 for (size_t i = 0; i < list->num_relocs; i++)
1237 list->relocs[i].target_handle = anv_bo_unwrap(list->reloc_bos[i])->index;
1238 }
1239
1240 static void
1241 adjust_relocations_from_state_pool(struct anv_state_pool *pool,
1242 struct anv_reloc_list *relocs,
1243 uint32_t last_pool_center_bo_offset)
1244 {
1245 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1246 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1247
1248 for (size_t i = 0; i < relocs->num_relocs; i++) {
1249 /* All of the relocations from this block pool to other BO's should
1250 * have been emitted relative to the surface block pool center. We
1251 * need to add the center offset to make them relative to the
1252 * beginning of the actual GEM bo.
1253 */
1254 relocs->relocs[i].offset += delta;
1255 }
1256 }
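/* A concrete sketch: the state block pool grows downward at the front, so if
 * it grew by 64k since the last execbuf, center_bo_offset increases by 64k
 * and previously-written state now sits 64k further from the start of the GEM
 * BO.  Adding that delta to each relocation's offset (above) and to the delta
 * of relocations targeting the pool BO itself (below) keeps both views
 * consistent. */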
1257
1258 static void
1259 adjust_relocations_to_state_pool(struct anv_state_pool *pool,
1260 struct anv_bo *from_bo,
1261 struct anv_reloc_list *relocs,
1262 uint32_t last_pool_center_bo_offset)
1263 {
1264 assert(!from_bo->is_wrapper);
1265 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1266 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1267
1268 /* When we initially emit relocations into a block pool, we don't
1269 * actually know what the final center_bo_offset will be so we just emit
1270 * it as if center_bo_offset == 0. Now that we know what the center
1271 * offset is, we need to walk the list of relocations and adjust any
1272 * relocations that point to the pool bo with the correct offset.
1273 */
1274 for (size_t i = 0; i < relocs->num_relocs; i++) {
1275 if (relocs->reloc_bos[i] == pool->block_pool.bo) {
1276 /* Adjust the delta value in the relocation to correctly
1277 * correspond to the new delta. Initially, this value may have
 1278       * been negative (if treated as signed), but we trust in
1279 * uint32_t roll-over to fix that for us at this point.
1280 */
1281 relocs->relocs[i].delta += delta;
1282
1283 /* Since the delta has changed, we need to update the actual
1284 * relocated value with the new presumed value. This function
1285 * should only be called on batch buffers, so we know it isn't in
1286 * use by the GPU at the moment.
1287 */
1288 assert(relocs->relocs[i].offset < from_bo->size);
1289 write_reloc(pool->block_pool.device,
1290 from_bo->map + relocs->relocs[i].offset,
1291 relocs->relocs[i].presumed_offset +
1292 relocs->relocs[i].delta, false);
1293 }
1294 }
1295 }
1296
1297 static void
1298 anv_reloc_list_apply(struct anv_device *device,
1299 struct anv_reloc_list *list,
1300 struct anv_bo *bo,
1301 bool always_relocate)
1302 {
1303 bo = anv_bo_unwrap(bo);
1304
1305 for (size_t i = 0; i < list->num_relocs; i++) {
1306 struct anv_bo *target_bo = anv_bo_unwrap(list->reloc_bos[i]);
1307 if (list->relocs[i].presumed_offset == target_bo->offset &&
1308 !always_relocate)
1309 continue;
1310
1311 void *p = bo->map + list->relocs[i].offset;
1312 write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
1313 list->relocs[i].presumed_offset = target_bo->offset;
1314 }
1315 }
1316
1317 /**
1318 * This function applies the relocation for a command buffer and writes the
1319 * actual addresses into the buffers as per what we were told by the kernel on
1320 * the previous execbuf2 call. This should be safe to do because, for each
1321 * relocated address, we have two cases:
1322 *
1323 * 1) The target BO is inactive (as seen by the kernel). In this case, it is
1324 * not in use by the GPU so updating the address is 100% ok. It won't be
1325 * in-use by the GPU (from our context) again until the next execbuf2
1326 * happens. If the kernel decides to move it in the next execbuf2, it
1327 * will have to do the relocations itself, but that's ok because it should
1328 * have all of the information needed to do so.
1329 *
1330 * 2) The target BO is active (as seen by the kernel). In this case, it
1331 * hasn't moved since the last execbuffer2 call because GTT shuffling
1332 * *only* happens when the BO is idle. (From our perspective, it only
1333 * happens inside the execbuffer2 ioctl, but the shuffling may be
1334 * triggered by another ioctl, with full-ppgtt this is limited to only
1335 * execbuffer2 ioctls on the same context, or memory pressure.) Since the
1336 * target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
1337 * address and the relocated value we are writing into the BO will be the
1338 * same as the value that is already there.
1339 *
1340 * There is also a possibility that the target BO is active but the exact
1341 * RENDER_SURFACE_STATE object we are writing the relocation into isn't in
1342 * use. In this case, the address currently in the RENDER_SURFACE_STATE
1343 * may be stale but it's still safe to write the relocation because that
1344 * particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
1345 * won't be until the next execbuf2 call.
1346 *
1347 * By doing relocations on the CPU, we can tell the kernel that it doesn't
1348 * need to bother. We want to do this because the surface state buffer is
1349 * used by every command buffer so, if the kernel does the relocations, it
1350 * will always be busy and the kernel will always stall. This is also
1351 * probably the fastest mechanism for doing relocations since the kernel would
 1352  * have to make a full copy of all the relocation lists.
1353 */
1354 static bool
1355 relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
1356 struct anv_execbuf *exec)
1357 {
1358 if (!exec->has_relocs)
1359 return true;
1360
1361 static int userspace_relocs = -1;
1362 if (userspace_relocs < 0)
1363 userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
1364 if (!userspace_relocs)
1365 return false;
1366
1367 /* First, we have to check to see whether or not we can even do the
1368 * relocation. New buffers which have never been submitted to the kernel
1369 * don't have a valid offset so we need to let the kernel do relocations so
1370 * that we can get offsets for them. On future execbuf2 calls, those
1371 * buffers will have offsets and we will be able to skip relocating.
1372 * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
1373 */
1374 for (uint32_t i = 0; i < exec->bo_count; i++) {
1375 assert(!exec->bos[i]->is_wrapper);
1376 if (exec->bos[i]->offset == (uint64_t)-1)
1377 return false;
1378 }
1379
1380 /* Since surface states are shared between command buffers and we don't
1381 * know what order they will be submitted to the kernel, we don't know
1382 * what address is actually written in the surface state object at any
1383 * given time. The only option is to always relocate them.
1384 */
1385 struct anv_bo *surface_state_bo =
1386 anv_bo_unwrap(cmd_buffer->device->surface_state_pool.block_pool.bo);
1387 anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
1388 surface_state_bo,
1389 true /* always relocate surface states */);
1390
1391 /* Since we own all of the batch buffers, we know what values are stored
1392 * in the relocated addresses and only have to update them if the offsets
1393 * have changed.
1394 */
1395 struct anv_batch_bo **bbo;
1396 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1397 anv_reloc_list_apply(cmd_buffer->device,
1398 &(*bbo)->relocs, (*bbo)->bo, false);
1399 }
1400
1401 for (uint32_t i = 0; i < exec->bo_count; i++)
1402 exec->objects[i].offset = exec->bos[i]->offset;
1403
1404 return true;
1405 }
1406
1407 static VkResult
1408 setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
1409 struct anv_cmd_buffer *cmd_buffer)
1410 {
1411 struct anv_batch *batch = &cmd_buffer->batch;
1412 struct anv_state_pool *ss_pool =
1413 &cmd_buffer->device->surface_state_pool;
1414
1415 adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
1416 cmd_buffer->last_ss_pool_center);
1417 VkResult result;
1418 if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
1419 anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
1420 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1421 bo, NULL, 0,
1422 &cmd_buffer->device->alloc);
1423 if (result != VK_SUCCESS)
1424 return result;
1425 }
1426 /* Add surface dependencies (BOs) to the execbuf */
1427 anv_execbuf_add_bo_bitset(cmd_buffer->device, execbuf,
1428 cmd_buffer->surface_relocs.dep_words,
1429 cmd_buffer->surface_relocs.deps,
1430 0, &cmd_buffer->device->alloc);
1431
1432 /* Add the BOs for all memory objects */
1433 list_for_each_entry(struct anv_device_memory, mem,
1434 &cmd_buffer->device->memory_objects, link) {
1435 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1436 mem->bo, NULL, 0,
1437 &cmd_buffer->device->alloc);
1438 if (result != VK_SUCCESS)
1439 return result;
1440 }
1441
1442 struct anv_block_pool *pool;
1443 pool = &cmd_buffer->device->dynamic_state_pool.block_pool;
1444 anv_block_pool_foreach_bo(bo, pool) {
1445 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1446 bo, NULL, 0,
1447 &cmd_buffer->device->alloc);
1448 if (result != VK_SUCCESS)
1449 return result;
1450 }
1451
1452 pool = &cmd_buffer->device->instruction_state_pool.block_pool;
1453 anv_block_pool_foreach_bo(bo, pool) {
1454 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1455 bo, NULL, 0,
1456 &cmd_buffer->device->alloc);
1457 if (result != VK_SUCCESS)
1458 return result;
1459 }
1460
1461 pool = &cmd_buffer->device->binding_table_pool.block_pool;
1462 anv_block_pool_foreach_bo(bo, pool) {
1463 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1464 bo, NULL, 0,
1465 &cmd_buffer->device->alloc);
1466 if (result != VK_SUCCESS)
1467 return result;
1468 }
1469 } else {
1470 /* Since we aren't in the softpin case, all of our STATE_BASE_ADDRESS BOs
1471 * will get added automatically by processing relocations on the batch
1472 * buffer. We have to add the surface state BO manually because it has
 1473       * relocations of its own that we need to be sure are processed.
1474 */
1475 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1476 ss_pool->block_pool.bo,
1477 &cmd_buffer->surface_relocs, 0,
1478 &cmd_buffer->device->alloc);
1479 if (result != VK_SUCCESS)
1480 return result;
1481 }
1482
1483 /* First, we walk over all of the bos we've seen and add them and their
1484 * relocations to the validate list.
1485 */
1486 struct anv_batch_bo **bbo;
1487 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1488 adjust_relocations_to_state_pool(ss_pool, (*bbo)->bo, &(*bbo)->relocs,
1489 cmd_buffer->last_ss_pool_center);
1490
1491 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1492 (*bbo)->bo, &(*bbo)->relocs, 0,
1493 &cmd_buffer->device->alloc);
1494 if (result != VK_SUCCESS)
1495 return result;
1496 }
1497
1498 /* Now that we've adjusted all of the surface state relocations, we need to
1499 * record the surface state pool center so future executions of the command
1500 * buffer can adjust correctly.
1501 */
1502 cmd_buffer->last_ss_pool_center = ss_pool->block_pool.center_bo_offset;
1503
1504 struct anv_batch_bo *first_batch_bo =
1505 list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
1506
1507 /* The kernel requires that the last entry in the validation list be the
1508 * batch buffer to execute. We can simply swap the element
1509 * corresponding to the first batch_bo in the chain with the last
1510 * element in the list.
1511 */
1512 if (first_batch_bo->bo->index != execbuf->bo_count - 1) {
1513 uint32_t idx = first_batch_bo->bo->index;
1514 uint32_t last_idx = execbuf->bo_count - 1;
1515
1516 struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
1517 assert(execbuf->bos[idx] == first_batch_bo->bo);
1518
1519 execbuf->objects[idx] = execbuf->objects[last_idx];
1520 execbuf->bos[idx] = execbuf->bos[last_idx];
1521 execbuf->bos[idx]->index = idx;
1522
1523 execbuf->objects[last_idx] = tmp_obj;
1524 execbuf->bos[last_idx] = first_batch_bo->bo;
1525 first_batch_bo->bo->index = last_idx;
1526 }
1527
1528 /* If we are pinning our BOs, we shouldn't have to relocate anything */
1529 if (cmd_buffer->device->instance->physicalDevice.use_softpin)
1530 assert(!execbuf->has_relocs);
1531
1532 /* Now we go through and fixup all of the relocation lists to point to
1533 * the correct indices in the object array. We have to do this after we
1534 * reorder the list above as some of the indices may have changed.
1535 */
1536 if (execbuf->has_relocs) {
1537 u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
1538 anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
1539
1540 anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
1541 }
1542
1543 if (!cmd_buffer->device->info.has_llc) {
1544 __builtin_ia32_mfence();
1545 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1546 for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
1547 __builtin_ia32_clflush((*bbo)->bo->map + i);
1548 }
1549 }
1550
1551 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1552 .buffers_ptr = (uintptr_t) execbuf->objects,
1553 .buffer_count = execbuf->bo_count,
1554 .batch_start_offset = 0,
1555 .batch_len = batch->next - batch->start,
1556 .cliprects_ptr = 0,
1557 .num_cliprects = 0,
1558 .DR1 = 0,
1559 .DR4 = 0,
1560 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
1561 .rsvd1 = cmd_buffer->device->context_id,
1562 .rsvd2 = 0,
1563 };
1564
1565 if (relocate_cmd_buffer(cmd_buffer, execbuf)) {
1566 /* If we were able to successfully relocate everything, tell the kernel
1567 * that it can skip doing relocations. The requirement for using
1568 * NO_RELOC is:
1569 *
1570 * 1) The addresses written in the objects must match the corresponding
1571 * reloc.presumed_offset which in turn must match the corresponding
1572 * execobject.offset.
1573 *
1574 * 2) To avoid stalling, execobject.offset should match the current
1575 * address of that object within the active context.
1576 *
1577 * In order to satisfy all of the invariants that make userspace
1578 * relocations to be safe (see relocate_cmd_buffer()), we need to
1579 * further ensure that the addresses we use match those used by the
1580 * kernel for the most recent execbuf2.
1581 *
1582 * The kernel may still choose to do relocations anyway if something has
1583 * moved in the GTT. In this case, the relocation list still needs to be
1584 * valid. All relocations on the batch buffers are already valid and
1585 * kept up-to-date. For surface state relocations, by applying the
1586 * relocations in relocate_cmd_buffer, we ensured that the address in
1587 * the RENDER_SURFACE_STATE matches presumed_offset, so it should be
1588 * safe for the kernel to relocate them as needed.
1589 */
1590 execbuf->execbuf.flags |= I915_EXEC_NO_RELOC;
1591 } else {
1592 /* In the case where we fall back to doing kernel relocations, we need
1593 * to ensure that the relocation list is valid. All relocations on the
1594 * batch buffers are already valid and kept up-to-date. Since surface
1595 * states are shared between command buffers and we don't know what
1596 * order they will be submitted to the kernel, we don't know what
1597 * address is actually written in the surface state object at any given
1598 * time. The only option is to set a bogus presumed offset and let the
1599 * kernel relocate them.
1600 */
1601 for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
1602 cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
1603 }
1604
1605 return VK_SUCCESS;
1606 }
1607
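/* Set up an execbuf that submits only the device's trivial batch, which
 * contains nothing but MI_BATCH_BUFFER_END followed by a NOOP.  This is
 * used when a queue submission carries no command buffer but still has to
 * go through the kernel, e.g. to wait on or signal semaphores and fences.
 */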
1608 static VkResult
1609 setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_device *device)
1610 {
1611 VkResult result = anv_execbuf_add_bo(device, execbuf,
1612 device->trivial_batch_bo,
1613 NULL, 0, &device->alloc);
1614 if (result != VK_SUCCESS)
1615 return result;
1616
1617 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1618 .buffers_ptr = (uintptr_t) execbuf->objects,
1619 .buffer_count = execbuf->bo_count,
1620 .batch_start_offset = 0,
1621 .batch_len = 8, /* GEN7_MI_BATCH_BUFFER_END and NOOP */
1622 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
1623 .rsvd1 = device->context_id,
1624 .rsvd2 = 0,
1625 };
1626
1627 return VK_SUCCESS;
1628 }
1629
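/* Submit a command buffer (or the trivial empty batch when cmd_buffer is
 * NULL) to the kernel, along with the semaphores to wait on before and to
 * signal after execution and an optional fence to signal on completion.
 */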
1630 VkResult
1631 anv_cmd_buffer_execbuf(struct anv_device *device,
1632 struct anv_cmd_buffer *cmd_buffer,
1633 const VkSemaphore *in_semaphores,
1634 uint32_t num_in_semaphores,
1635 const VkSemaphore *out_semaphores,
1636 uint32_t num_out_semaphores,
1637 VkFence _fence)
1638 {
1639 ANV_FROM_HANDLE(anv_fence, fence, _fence);
1640 UNUSED struct anv_physical_device *pdevice = &device->instance->physicalDevice;
1641
1642 struct anv_execbuf execbuf;
1643 anv_execbuf_init(&execbuf);
1644
1645 int in_fence = -1;
1646 VkResult result = VK_SUCCESS;
1647 for (uint32_t i = 0; i < num_in_semaphores; i++) {
1648 ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
1649 struct anv_semaphore_impl *impl =
1650 semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
1651 &semaphore->temporary : &semaphore->permanent;
1652
1653 switch (impl->type) {
1654 case ANV_SEMAPHORE_TYPE_BO:
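/* BO semaphores rely on the kernel's implicit synchronization: the
 * signaling submission lists the same BO with EXEC_OBJECT_WRITE, so
 * adding it here orders this execbuf after the signal.
 */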
1655 assert(!pdevice->has_syncobj);
1656 result = anv_execbuf_add_bo(device, &execbuf, impl->bo, NULL,
1657 0, &device->alloc);
1658 if (result != VK_SUCCESS)
1659 return result;
1660 break;
1661
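/* execbuffer2 accepts only a single input fence FD, so when several
 * sync-file semaphores are waited on in one submission they are merged
 * into a single sync file first.
 */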
1662 case ANV_SEMAPHORE_TYPE_SYNC_FILE:
1663 assert(!pdevice->has_syncobj);
1664 if (in_fence == -1) {
1665 in_fence = impl->fd;
1666 if (in_fence == -1)
1667 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1668 impl->fd = -1;
1669 } else {
1670 int merge = anv_gem_sync_file_merge(device, in_fence, impl->fd);
1671 if (merge == -1)
1672 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
1673
1674 close(impl->fd);
1675 close(in_fence);
1676 impl->fd = -1;
1677 in_fence = merge;
1678 }
1679 break;
1680
1681 case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
1682 result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
1683 I915_EXEC_FENCE_WAIT,
1684 &device->alloc);
1685 if (result != VK_SUCCESS)
1686 return result;
1687 break;
1688
1689 default:
1690 break;
1691 }
1692 }
1693
1694 bool need_out_fence = false;
1695 for (uint32_t i = 0; i < num_out_semaphores; i++) {
1696 ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
1697
1698 /* Under most circumstances, out fences won't be temporary. However,
1699 * the spec does allow it for opaque_fd. From the Vulkan 1.0.53 spec:
1700 *
1701 * "If the import is temporary, the implementation must restore the
1702 * semaphore to its prior permanent state after submitting the next
1703 * semaphore wait operation."
1704 *
1705 * The spec says nothing whatsoever about signal operations on
1706 * temporarily imported semaphores so it appears they are allowed.
1707 * There are also CTS tests that require this to work.
1708 */
1709 struct anv_semaphore_impl *impl =
1710 semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
1711 &semaphore->temporary : &semaphore->permanent;
1712
1713 switch (impl->type) {
1714 case ANV_SEMAPHORE_TYPE_BO:
1715 assert(!pdevice->has_syncobj);
1716 result = anv_execbuf_add_bo(device, &execbuf, impl->bo, NULL,
1717 EXEC_OBJECT_WRITE, &device->alloc);
1718 if (result != VK_SUCCESS)
1719 return result;
1720 break;
1721
1722 case ANV_SEMAPHORE_TYPE_SYNC_FILE:
1723 assert(!pdevice->has_syncobj);
1724 need_out_fence = true;
1725 break;
1726
1727 case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
1728 result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
1729 I915_EXEC_FENCE_SIGNAL,
1730 &device->alloc);
1731 if (result != VK_SUCCESS)
1732 return result;
1733 break;
1734
1735 default:
1736 break;
1737 }
1738 }
1739
1740 if (fence) {
1741 /* Under most circumstances, fences won't be temporary. However, the spec
1742 * does allow it for opaque_fd. The Vulkan 1.0.53 spec (for semaphores):
1743 *
1744 * "If the import is temporary, the implementation must restore the
1745 * semaphore to its prior permanent state after submitting the next
1746 * semaphore wait operation."
1747 *
1748 * The fence case is analogous. The spec says nothing about signal
1749 * operations on temporarily imported semaphores or fences, so they
1750 * appear to be allowed. There are also CTS tests that require this to work.
1751 */
1752 struct anv_fence_impl *impl =
1753 fence->temporary.type != ANV_FENCE_TYPE_NONE ?
1754 &fence->temporary : &fence->permanent;
1755
1756 switch (impl->type) {
1757 case ANV_FENCE_TYPE_BO:
1758 assert(!pdevice->has_syncobj_wait);
1759 result = anv_execbuf_add_bo(device, &execbuf, impl->bo.bo, NULL,
1760 EXEC_OBJECT_WRITE, &device->alloc);
1761 if (result != VK_SUCCESS)
1762 return result;
1763 break;
1764
1765 case ANV_FENCE_TYPE_SYNCOBJ:
1766 result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
1767 I915_EXEC_FENCE_SIGNAL,
1768 &device->alloc);
1769 if (result != VK_SUCCESS)
1770 return result;
1771 break;
1772
1773 default:
1774 unreachable("Invalid fence type");
1775 }
1776 }
1777
1778 if (cmd_buffer) {
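/* With batch debugging enabled, decode and print the batch before it is
 * submitted.  The tail of seen_bbos is the first batch BO, i.e. where
 * execution of the chain starts.
 */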
1779 if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
1780 struct anv_batch_bo **bo = u_vector_tail(&cmd_buffer->seen_bbos);
1781
1782 device->cmd_buffer_being_decoded = cmd_buffer;
1783 gen_print_batch(&device->decoder_ctx, (*bo)->bo->map,
1784 (*bo)->bo->size, (*bo)->bo->offset, false);
1785 device->cmd_buffer_being_decoded = NULL;
1786 }
1787
1788 result = setup_execbuf_for_cmd_buffer(&execbuf, cmd_buffer);
1789 } else {
1790 result = setup_empty_execbuf(&execbuf, device);
1791 }
1792
1793 if (result != VK_SUCCESS)
1794 return result;
1795
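/* Syncobj waits and signals are passed to the kernel as an array of
 * drm_i915_gem_exec_fence.  With I915_EXEC_FENCE_ARRAY set, the
 * otherwise-unused cliprects fields are repurposed to carry the array
 * pointer and its length.
 */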
1796 if (execbuf.fence_count > 0) {
1797 assert(device->instance->physicalDevice.has_syncobj);
1798 execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
1799 execbuf.execbuf.num_cliprects = execbuf.fence_count;
1800 execbuf.execbuf.cliprects_ptr = (uintptr_t) execbuf.fences;
1801 }
1802
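/* The input and output fence FDs share the rsvd2 field: the sync-file FD
 * to wait on goes in the low 32 bits (I915_EXEC_FENCE_IN) and, when
 * I915_EXEC_FENCE_OUT is set, the kernel returns the new output FD in the
 * high 32 bits.
 */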
1803 if (in_fence != -1) {
1804 execbuf.execbuf.flags |= I915_EXEC_FENCE_IN;
1805 execbuf.execbuf.rsvd2 |= (uint32_t)in_fence;
1806 }
1807
1808 if (need_out_fence)
1809 execbuf.execbuf.flags |= I915_EXEC_FENCE_OUT;
1810
1811 result = anv_device_execbuf(device, &execbuf.execbuf, execbuf.bos);
1812
1813 /* Execbuf does not consume the in_fence. It's our job to close it. */
1814 if (in_fence != -1)
1815 close(in_fence);
1816
1817 for (uint32_t i = 0; i < num_in_semaphores; i++) {
1818 ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
1819 /* From the Vulkan 1.0.53 spec:
1820 *
1821 * "If the import is temporary, the implementation must restore the
1822 * semaphore to its prior permanent state after submitting the next
1823 * semaphore wait operation."
1824 *
1825 * This has to happen after the execbuf in case we close any syncobjs in
1826 * the process.
1827 */
1828 anv_semaphore_reset_temporary(device, semaphore);
1829 }
1830
1831 if (fence && fence->permanent.type == ANV_FENCE_TYPE_BO) {
1832 assert(!pdevice->has_syncobj_wait);
1833 /* BO fences can't be shared, so they can't be temporary. */
1834 assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
1835
1836 /* Once the execbuf has returned, we need to set the fence state to
1837 * SUBMITTED. We can't do this before calling execbuf because
1838 * anv_GetFenceStatus does not take the global device lock before
1839 * checking fence->state.
1840 *
1841 * We set the fence state to SUBMITTED regardless of whether or not the
1842 * execbuf succeeds because we need to ensure that vkWaitForFences() and
1843 * vkGetFenceStatus() return a valid result (VK_ERROR_DEVICE_LOST or
1844 * VK_SUCCESS) in a finite amount of time even if execbuf fails.
1845 */
1846 fence->permanent.bo.state = ANV_BO_FENCE_STATE_SUBMITTED;
1847 }
1848
1849 if (result == VK_SUCCESS && need_out_fence) {
1850 assert(!pdevice->has_syncobj_wait);
1851 int out_fence = execbuf.execbuf.rsvd2 >> 32;
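/* Each sync-file out semaphore gets its own dup() of the FD returned by
 * the kernel; the original is closed below once every semaphore has its
 * copy.
 */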
1852 for (uint32_t i = 0; i < num_out_semaphores; i++) {
1853 ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
1854 /* Out fences can't have temporary state because that would imply
1855 * that we imported a sync file and are trying to signal it.
1856 */
1857 assert(semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE);
1858 struct anv_semaphore_impl *impl = &semaphore->permanent;
1859
1860 if (impl->type == ANV_SEMAPHORE_TYPE_SYNC_FILE) {
1861 assert(impl->fd == -1);
1862 impl->fd = dup(out_fence);
1863 }
1864 }
1865 close(out_fence);
1866 }
1867
1868 anv_execbuf_finish(&execbuf, &device->alloc);
1869
1870 return result;
1871 }