anv: Handle state pool relocations using "wrapper" BOs
mesa.git: src/intel/vulkan/anv_batch_chain.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "genxml/gen8_pack.h"
33
34 #include "util/debug.h"
35
36 /** \file anv_batch_chain.c
37 *
38 * This file contains functions related to anv_cmd_buffer as a data
39 * structure. This involves everything required to create and destroy
40 * the actual batch buffers as well as link them together and handle
41 * relocations and surface state. It specifically does *not* contain any
42 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
43 */
44
45 /*-----------------------------------------------------------------------*
46 * Functions related to anv_reloc_list
47 *-----------------------------------------------------------------------*/
48
49 VkResult
50 anv_reloc_list_init(struct anv_reloc_list *list,
51 const VkAllocationCallbacks *alloc)
52 {
53 memset(list, 0, sizeof(*list));
54 return VK_SUCCESS;
55 }
56
57 static VkResult
58 anv_reloc_list_init_clone(struct anv_reloc_list *list,
59 const VkAllocationCallbacks *alloc,
60 const struct anv_reloc_list *other_list)
61 {
62 list->num_relocs = other_list->num_relocs;
63 list->array_length = other_list->array_length;
64
65 if (list->num_relocs > 0) {
66 list->relocs =
67 vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
68 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
69 if (list->relocs == NULL)
70 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
71
72 list->reloc_bos =
73 vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
74 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
75 if (list->reloc_bos == NULL) {
76 vk_free(alloc, list->relocs);
77 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
78 }
79
80 memcpy(list->relocs, other_list->relocs,
81 list->array_length * sizeof(*list->relocs));
82 memcpy(list->reloc_bos, other_list->reloc_bos,
83 list->array_length * sizeof(*list->reloc_bos));
84 } else {
85 list->relocs = NULL;
86 list->reloc_bos = NULL;
87 }
88
89 if (other_list->deps) {
90 list->deps = _mesa_set_clone(other_list->deps, NULL);
91 if (!list->deps) {
92 vk_free(alloc, list->relocs);
93 vk_free(alloc, list->reloc_bos);
94 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
95 }
96 } else {
97 list->deps = NULL;
98 }
99
100 return VK_SUCCESS;
101 }
102
103 void
104 anv_reloc_list_finish(struct anv_reloc_list *list,
105 const VkAllocationCallbacks *alloc)
106 {
107 vk_free(alloc, list->relocs);
108 vk_free(alloc, list->reloc_bos);
109 if (list->deps != NULL)
110 _mesa_set_destroy(list->deps, NULL);
111 }
112
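/* Make sure the relocation list has room for at least
 * num_additional_relocs more entries, growing the relocs and reloc_bos
 * arrays geometrically (starting at 16 entries) when needed.
 */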
113 static VkResult
114 anv_reloc_list_grow(struct anv_reloc_list *list,
115 const VkAllocationCallbacks *alloc,
116 size_t num_additional_relocs)
117 {
118 if (list->num_relocs + num_additional_relocs <= list->array_length)
119 return VK_SUCCESS;
120
121 size_t new_length = MAX2(16, list->array_length * 2);
122 while (new_length < list->num_relocs + num_additional_relocs)
123 new_length *= 2;
124
125 struct drm_i915_gem_relocation_entry *new_relocs =
126 vk_realloc(alloc, list->relocs,
127 new_length * sizeof(*list->relocs), 8,
128 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
129 if (new_relocs == NULL)
130 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
131 list->relocs = new_relocs;
132
133 struct anv_bo **new_reloc_bos =
134 vk_realloc(alloc, list->reloc_bos,
135 new_length * sizeof(*list->reloc_bos), 8,
136 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
137 if (new_reloc_bos == NULL)
138 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
139 list->reloc_bos = new_reloc_bos;
140
141 list->array_length = new_length;
142
143 return VK_SUCCESS;
144 }
145
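/* Read a value exactly once.  The volatile cast keeps the compiler from
 * re-reading (or caching) bo->offset below, which other threads may update
 * when their submissions complete.
 */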
146 #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
147
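/* Record that the address at byte 'offset' in the buffer owning this list
 * points at target_bo + delta.  For pinned (softpin) BOs no kernel
 * relocation is needed, so the target is only added to the 'deps' set to
 * make sure it lands on the execbuf validation list; otherwise a
 * drm_i915_gem_relocation_entry is appended.
 */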
148 VkResult
149 anv_reloc_list_add(struct anv_reloc_list *list,
150 const VkAllocationCallbacks *alloc,
151 uint32_t offset, struct anv_bo *target_bo, uint32_t delta,
152 uint64_t *address_u64_out)
153 {
154 struct drm_i915_gem_relocation_entry *entry;
155 int index;
156
157 struct anv_bo *unwrapped_target_bo = anv_bo_unwrap(target_bo);
158 uint64_t target_bo_offset = READ_ONCE(unwrapped_target_bo->offset);
159 if (address_u64_out)
160 *address_u64_out = target_bo_offset + delta;
161
162 if (unwrapped_target_bo->flags & EXEC_OBJECT_PINNED) {
163 if (list->deps == NULL) {
164 list->deps = _mesa_pointer_set_create(NULL);
165 if (unlikely(list->deps == NULL))
166 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
167 }
168 _mesa_set_add(list->deps, target_bo);
169 return VK_SUCCESS;
170 }
171
172 VkResult result = anv_reloc_list_grow(list, alloc, 1);
173 if (result != VK_SUCCESS)
174 return result;
175
176 /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
177 index = list->num_relocs++;
178 list->reloc_bos[index] = target_bo;
179 entry = &list->relocs[index];
180 entry->target_handle = -1; /* See also anv_cmd_buffer_process_relocs() */
181 entry->delta = delta;
182 entry->offset = offset;
183 entry->presumed_offset = target_bo_offset;
184 entry->read_domains = 0;
185 entry->write_domain = 0;
186 VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
187
188 return VK_SUCCESS;
189 }
190
191 static VkResult
192 anv_reloc_list_append(struct anv_reloc_list *list,
193 const VkAllocationCallbacks *alloc,
194 struct anv_reloc_list *other, uint32_t offset)
195 {
196 VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
197 if (result != VK_SUCCESS)
198 return result;
199
200 if (other->num_relocs > 0) {
201 memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
202 other->num_relocs * sizeof(other->relocs[0]));
203 memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
204 other->num_relocs * sizeof(other->reloc_bos[0]));
205
206 for (uint32_t i = 0; i < other->num_relocs; i++)
207 list->relocs[i + list->num_relocs].offset += offset;
208
209 list->num_relocs += other->num_relocs;
210 }
211
212 if (other->deps) {
213 if (list->deps == NULL) {
214 list->deps = _mesa_pointer_set_create(NULL);
215 if (unlikely(list->deps == NULL))
216 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
217 }
218 set_foreach(other->deps, entry)
219 _mesa_set_add_pre_hashed(list->deps, entry->hash, entry->key);
220 }
221
222 return VK_SUCCESS;
223 }
224
225 /*-----------------------------------------------------------------------*
226 * Functions related to anv_batch
227 *-----------------------------------------------------------------------*/
228
229 void *
230 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
231 {
232 if (batch->next + num_dwords * 4 > batch->end) {
233 VkResult result = batch->extend_cb(batch, batch->user_data);
234 if (result != VK_SUCCESS) {
235 anv_batch_set_error(batch, result);
236 return NULL;
237 }
238 }
239
240 void *p = batch->next;
241
242 batch->next += num_dwords * 4;
243 assert(batch->next <= batch->end);
244
245 return p;
246 }
247
248 uint64_t
249 anv_batch_emit_reloc(struct anv_batch *batch,
250 void *location, struct anv_bo *bo, uint32_t delta)
251 {
252 uint64_t address_u64 = 0;
253 VkResult result = anv_reloc_list_add(batch->relocs, batch->alloc,
254 location - batch->start, bo, delta,
255 &address_u64);
256 if (result != VK_SUCCESS) {
257 anv_batch_set_error(batch, result);
258 return 0;
259 }
260
261 return address_u64;
262 }
263
264 void
265 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
266 {
267 uint32_t size, offset;
268
269 size = other->next - other->start;
270 assert(size % 4 == 0);
271
272 if (batch->next + size > batch->end) {
273 VkResult result = batch->extend_cb(batch, batch->user_data);
274 if (result != VK_SUCCESS) {
275 anv_batch_set_error(batch, result);
276 return;
277 }
278 }
279
280 assert(batch->next + size <= batch->end);
281
282 VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
283 memcpy(batch->next, other->start, size);
284
285 offset = batch->next - batch->start;
286 VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
287 other->relocs, offset);
288 if (result != VK_SUCCESS) {
289 anv_batch_set_error(batch, result);
290 return;
291 }
292
293 batch->next += size;
294 }
295
296 /*-----------------------------------------------------------------------*
297 * Functions related to anv_batch_bo
298 *-----------------------------------------------------------------------*/
299
300 static VkResult
301 anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
302 struct anv_batch_bo **bbo_out)
303 {
304 VkResult result;
305
306 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
307 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
308 if (bbo == NULL)
309 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
310
311 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
312 ANV_CMD_BUFFER_BATCH_SIZE);
313 if (result != VK_SUCCESS)
314 goto fail_alloc;
315
316 result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
317 if (result != VK_SUCCESS)
318 goto fail_bo_alloc;
319
320 *bbo_out = bbo;
321
322 return VK_SUCCESS;
323
324 fail_bo_alloc:
325 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
326 fail_alloc:
327 vk_free(&cmd_buffer->pool->alloc, bbo);
328
329 return result;
330 }
331
332 static VkResult
333 anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
334 const struct anv_batch_bo *other_bbo,
335 struct anv_batch_bo **bbo_out)
336 {
337 VkResult result;
338
339 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
340 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
341 if (bbo == NULL)
342 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
343
344 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
345 other_bbo->bo.size);
346 if (result != VK_SUCCESS)
347 goto fail_alloc;
348
349 result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
350 &other_bbo->relocs);
351 if (result != VK_SUCCESS)
352 goto fail_bo_alloc;
353
354 bbo->length = other_bbo->length;
355 memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);
356
357 *bbo_out = bbo;
358
359 return VK_SUCCESS;
360
361 fail_bo_alloc:
362 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
363 fail_alloc:
364 vk_free(&cmd_buffer->pool->alloc, bbo);
365
366 return result;
367 }
368
369 static void
370 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
371 size_t batch_padding)
372 {
373 batch->next = batch->start = bbo->bo.map;
374 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
375 batch->relocs = &bbo->relocs;
376 bbo->relocs.num_relocs = 0;
377 _mesa_set_clear(bbo->relocs.deps, NULL);
378 }
379
380 static void
381 anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
382 size_t batch_padding)
383 {
384 batch->start = bbo->bo.map;
385 batch->next = bbo->bo.map + bbo->length;
386 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
387 batch->relocs = &bbo->relocs;
388 }
389
390 static void
391 anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
392 {
393 assert(batch->start == bbo->bo.map);
394 bbo->length = batch->next - batch->start;
395 VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
396 }
397
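/* Grow the current batch BO in place: double its size until it can hold
 * the existing contents plus 'additional' bytes and the end-of-batch
 * padding, copy the old contents into the new BO, and point the batch at
 * it.
 */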
398 static VkResult
399 anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
400 struct anv_batch *batch, size_t additional,
401 size_t batch_padding)
402 {
403 assert(batch->start == bbo->bo.map);
404 bbo->length = batch->next - batch->start;
405
406 size_t new_size = bbo->bo.size;
407 while (new_size <= bbo->length + additional + batch_padding)
408 new_size *= 2;
409
410 if (new_size == bbo->bo.size)
411 return VK_SUCCESS;
412
413 struct anv_bo new_bo;
414 VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
415 &new_bo, new_size);
416 if (result != VK_SUCCESS)
417 return result;
418
419 memcpy(new_bo.map, bbo->bo.map, bbo->length);
420
421 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
422
423 bbo->bo = new_bo;
424 anv_batch_bo_continue(bbo, batch, batch_padding);
425
426 return VK_SUCCESS;
427 }
428
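/* Patch the MI_BATCH_BUFFER_START at the end of prev_bbo so that it jumps
 * to next_bbo at next_bbo_offset.  With softpin we know the final address
 * and write it directly; otherwise we retarget the existing relocation and
 * give it a bogus presumed offset so the kernel is forced to process it.
 */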
429 static void
430 anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
431 struct anv_batch_bo *prev_bbo,
432 struct anv_batch_bo *next_bbo,
433 uint32_t next_bbo_offset)
434 {
435 const uint32_t bb_start_offset =
436 prev_bbo->length - GEN8_MI_BATCH_BUFFER_START_length * 4;
437 ASSERTED const uint32_t *bb_start = prev_bbo->bo.map + bb_start_offset;
438
439 /* Make sure we're looking at a MI_BATCH_BUFFER_START */
440 assert(((*bb_start >> 29) & 0x07) == 0);
441 assert(((*bb_start >> 23) & 0x3f) == 49);
442
443 if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
444 assert(prev_bbo->bo.flags & EXEC_OBJECT_PINNED);
445 assert(next_bbo->bo.flags & EXEC_OBJECT_PINNED);
446
447 write_reloc(cmd_buffer->device,
448 prev_bbo->bo.map + bb_start_offset + 4,
449 next_bbo->bo.offset + next_bbo_offset, true);
450 } else {
451 uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
452 assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);
453
454 prev_bbo->relocs.reloc_bos[reloc_idx] = &next_bbo->bo;
455 prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;
456
457 /* Use a bogus presumed offset to force a relocation */
458 prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
459 }
460 }
461
462 static void
463 anv_batch_bo_destroy(struct anv_batch_bo *bbo,
464 struct anv_cmd_buffer *cmd_buffer)
465 {
466 anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
467 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
468 vk_free(&cmd_buffer->pool->alloc, bbo);
469 }
470
471 static VkResult
472 anv_batch_bo_list_clone(const struct list_head *list,
473 struct anv_cmd_buffer *cmd_buffer,
474 struct list_head *new_list)
475 {
476 VkResult result = VK_SUCCESS;
477
478 list_inithead(new_list);
479
480 struct anv_batch_bo *prev_bbo = NULL;
481 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
482 struct anv_batch_bo *new_bbo = NULL;
483 result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
484 if (result != VK_SUCCESS)
485 break;
486 list_addtail(&new_bbo->link, new_list);
487
488 if (prev_bbo)
489 anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);
490
491 prev_bbo = new_bbo;
492 }
493
494 if (result != VK_SUCCESS) {
495 list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
496 anv_batch_bo_destroy(bbo, cmd_buffer);
497 }
498
499 return result;
500 }
501
502 /*-----------------------------------------------------------------------*
503 * Functions related to anv_cmd_buffer
504 *-----------------------------------------------------------------------*/
505
506 static struct anv_batch_bo *
507 anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
508 {
509 return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
510 }
511
512 struct anv_address
513 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
514 {
515 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
516 return (struct anv_address) {
517 .bo = anv_binding_table_pool(cmd_buffer->device)->block_pool.bo,
518 .offset = bt_block->offset,
519 };
520 }
521
522 static void
523 emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
524 struct anv_bo *bo, uint32_t offset)
525 {
526 * In gen8+ the address field grew to two dwords to accommodate 48 bit
527 * offsets. The high 16 bits are in the last dword, so we can use the gen8
528 * version in either case, as long as we set the instruction length in the
529 * header accordingly. This means that we always emit three dwords here
530 * and all the padding and adjustment we do in this file works for all
531 * gens.
532 */
533
534 #define GEN7_MI_BATCH_BUFFER_START_length 2
535 #define GEN7_MI_BATCH_BUFFER_START_length_bias 2
536
537 const uint32_t gen7_length =
538 GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
539 const uint32_t gen8_length =
540 GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;
541
542 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
543 bbs.DWordLength = cmd_buffer->device->info.gen < 8 ?
544 gen7_length : gen8_length;
545 bbs.SecondLevelBatchBuffer = Firstlevelbatch;
546 bbs.AddressSpaceIndicator = ASI_PPGTT;
547 bbs.BatchBufferStartAddress = (struct anv_address) { bo, offset };
548 }
549 }
550
551 static void
552 cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
553 struct anv_batch_bo *bbo)
554 {
555 struct anv_batch *batch = &cmd_buffer->batch;
556 struct anv_batch_bo *current_bbo =
557 anv_cmd_buffer_current_batch_bo(cmd_buffer);
558
559 /* We set the end of the batch a little short so we would be sure we
560 * have room for the chaining command. Since we're about to emit the
561 * chaining command, let's set it back where it should go.
562 */
563 batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
564 assert(batch->end == current_bbo->bo.map + current_bbo->bo.size);
565
566 emit_batch_buffer_start(cmd_buffer, &bbo->bo, 0);
567
568 anv_batch_bo_finish(current_bbo, batch);
569 }
570
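/* anv_batch extend callback used when batch chaining is enabled: allocate
 * a fresh batch BO, emit an MI_BATCH_BUFFER_START in the current BO that
 * jumps to it, and continue emitting into the new BO.
 */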
571 static VkResult
572 anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
573 {
574 struct anv_cmd_buffer *cmd_buffer = _data;
575 struct anv_batch_bo *new_bbo;
576
577 VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
578 if (result != VK_SUCCESS)
579 return result;
580
581 struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
582 if (seen_bbo == NULL) {
583 anv_batch_bo_destroy(new_bbo, cmd_buffer);
584 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
585 }
586 *seen_bbo = new_bbo;
587
588 cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
589
590 list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
591
592 anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);
593
594 return VK_SUCCESS;
595 }
596
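/* anv_batch extend callback used when batch chaining is disabled: instead
 * of chaining to a new BO, make room for at least another 4k in the
 * current batch BO.
 */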
597 static VkResult
598 anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
599 {
600 struct anv_cmd_buffer *cmd_buffer = _data;
601 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
602
603 anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
604 GEN8_MI_BATCH_BUFFER_START_length * 4);
605
606 return VK_SUCCESS;
607 }
608
609 /** Allocate a binding table
610 *
611 * This function allocates a binding table. This is a bit more complicated
612 * than one would think due to a combination of Vulkan driver design and some
613 * unfortunate hardware restrictions.
614 *
615 * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
616 * the binding table pointer which means that all binding tables need to live
617 * in the bottom 64k of surface state base address. The way the GL driver has
618 * classically dealt with this restriction is to emit all surface states
619 * on-the-fly into the batch and have a batch buffer smaller than 64k. This
620 * isn't really an option in Vulkan for a few reasons:
621 *
622 * 1) In Vulkan, we have growing (or chaining) batches so surface states have
623 * to live in their own buffer and we have to be able to re-emit
624 * STATE_BASE_ADDRESS as needed which requires a full pipeline stall. In
625 * order to avoid emitting STATE_BASE_ADDRESS any more often than needed
626 * (it's not that hard to hit 64k of just binding tables), we allocate
627 * surface state objects up-front when VkImageView is created. In order
628 * for this to work, surface state objects need to be allocated from a
629 * global buffer.
630 *
631 * 2) We tried to design the surface state system in such a way that it's
632 * already ready for bindless texturing. The way bindless texturing works
633 * on our hardware is that you have a big pool of surface state objects
634 * (with its own state base address) and the bindless handles are simply
635 * offsets into that pool. With the architecture we chose, we already
636 * have that pool and it's exactly the same pool that we use for regular
637 * surface states so we should already be ready for bindless.
638 *
639 * 3) For render targets, we need to be able to fill out the surface states
640 * later in vkBeginRenderPass so that we can assign clear colors
641 * correctly. One way to do this would be to just create the surface
642 * state data and then repeatedly copy it into the surface state BO every
643 * time we have to re-emit STATE_BASE_ADDRESS. While this works, it's
644 * rather annoying; it's much easier to just allocate them up-front and
645 * re-use them for the entire render pass.
646 *
647 * While none of these are technically blockers for emitting state on the fly
648 * like we do in GL, the ability to have a single surface state pool
649 * simplifies things greatly. Unfortunately, it comes at a cost...
650 *
651 * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
652 * place the binding tables just anywhere in surface state base address.
653 * Because 64k isn't a whole lot of space, we can't simply restrict the
654 * surface state buffer to 64k; we have to be more clever. The solution we've
655 * chosen is to have a block pool with a maximum size of 2G that starts at
656 * zero and grows in both directions. All surface states are allocated from
657 * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
658 * binding tables from the bottom of the pool (negative offsets). Every time
659 * we allocate a new binding table block, we set surface state base address to
660 * point to the bottom of the binding table block. This way all of the
661 * binding tables in the block are in the bottom 64k of surface state base
662 * address. When we fill out the binding table, we add the distance between
663 * the bottom of our binding table block and zero of the block pool to the
664 * surface state offsets so that they are correct relative to our new surface
665 * state base address at the bottom of the binding table block.
666 *
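 * For example, with made-up (non-softpin) numbers: if the binding table
 * block sits at offset -8192 relative to the block pool's zero point, we
 * point surface state base address at the bottom of that block and return
 * *state_offset = 8192.  A surface state allocated at pool offset +1024 is
 * then written into the binding table as 1024 + 8192 = 9216, which is its
 * correct offset from the new base address.
 *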
667 * \see adjust_relocations_from_state_pool()
668 * \see adjust_relocations_to_state_pool()
669 *
670 * \param[in] entries The number of surface state entries the binding
671 * table should be able to hold.
672 *
673 * \param[out] state_offset The offset from surface state base address
674 * where the surface states live. This must be
675 * added to the surface state offset when it is
676 * written into the binding table entry.
677 *
678 * \return An anv_state representing the binding table
679 */
680 struct anv_state
681 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
682 uint32_t entries, uint32_t *state_offset)
683 {
684 struct anv_device *device = cmd_buffer->device;
685 struct anv_state_pool *state_pool = &device->surface_state_pool;
686 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
687 struct anv_state state;
688
689 state.alloc_size = align_u32(entries * 4, 32);
690
691 if (cmd_buffer->bt_next + state.alloc_size > state_pool->block_size)
692 return (struct anv_state) { 0 };
693
694 state.offset = cmd_buffer->bt_next;
695 state.map = anv_block_pool_map(&anv_binding_table_pool(device)->block_pool,
696 bt_block->offset + state.offset);
697
698 cmd_buffer->bt_next += state.alloc_size;
699
700 if (device->instance->physicalDevice.use_softpin) {
701 assert(bt_block->offset >= 0);
702 *state_offset = device->surface_state_pool.block_pool.start_address -
703 device->binding_table_pool.block_pool.start_address - bt_block->offset;
704 } else {
705 assert(bt_block->offset < 0);
706 *state_offset = -bt_block->offset;
707 }
708
709 return state;
710 }
711
712 struct anv_state
713 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
714 {
715 struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
716 return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
717 isl_dev->ss.size, isl_dev->ss.align);
718 }
719
720 struct anv_state
721 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
722 uint32_t size, uint32_t alignment)
723 {
724 return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
725 size, alignment);
726 }
727
728 VkResult
729 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
730 {
731 struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
732 if (bt_block == NULL) {
733 anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
734 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
735 }
736
737 *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
738 cmd_buffer->bt_next = 0;
739
740 return VK_SUCCESS;
741 }
742
743 VkResult
744 anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
745 {
746 struct anv_batch_bo *batch_bo;
747 VkResult result;
748
749 list_inithead(&cmd_buffer->batch_bos);
750
751 result = anv_batch_bo_create(cmd_buffer, &batch_bo);
752 if (result != VK_SUCCESS)
753 return result;
754
755 list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
756
757 cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
758 cmd_buffer->batch.user_data = cmd_buffer;
759
760 if (cmd_buffer->device->can_chain_batches) {
761 cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
762 } else {
763 cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
764 }
765
766 anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
767 GEN8_MI_BATCH_BUFFER_START_length * 4);
768
769 int success = u_vector_init(&cmd_buffer->seen_bbos,
770 sizeof(struct anv_bo *),
771 8 * sizeof(struct anv_bo *));
772 if (!success)
773 goto fail_batch_bo;
774
775 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
776
777 /* u_vector requires power-of-two size elements */
778 unsigned pow2_state_size = util_next_power_of_two(sizeof(struct anv_state));
779 success = u_vector_init(&cmd_buffer->bt_block_states,
780 pow2_state_size, 8 * pow2_state_size);
781 if (!success)
782 goto fail_seen_bbos;
783
784 result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
785 &cmd_buffer->pool->alloc);
786 if (result != VK_SUCCESS)
787 goto fail_bt_blocks;
788 cmd_buffer->last_ss_pool_center = 0;
789
790 result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
791 if (result != VK_SUCCESS)
792 goto fail_bt_blocks;
793
794 return VK_SUCCESS;
795
796 fail_bt_blocks:
797 u_vector_finish(&cmd_buffer->bt_block_states);
798 fail_seen_bbos:
799 u_vector_finish(&cmd_buffer->seen_bbos);
800 fail_batch_bo:
801 anv_batch_bo_destroy(batch_bo, cmd_buffer);
802
803 return result;
804 }
805
806 void
807 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
808 {
809 struct anv_state *bt_block;
810 u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
811 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
812 u_vector_finish(&cmd_buffer->bt_block_states);
813
814 anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
815
816 u_vector_finish(&cmd_buffer->seen_bbos);
817
818 /* Destroy all of the batch buffers */
819 list_for_each_entry_safe(struct anv_batch_bo, bbo,
820 &cmd_buffer->batch_bos, link) {
821 anv_batch_bo_destroy(bbo, cmd_buffer);
822 }
823 }
824
825 void
826 anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
827 {
828 /* Delete all but the first batch bo */
829 assert(!list_is_empty(&cmd_buffer->batch_bos));
830 while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
831 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
832 list_del(&bbo->link);
833 anv_batch_bo_destroy(bbo, cmd_buffer);
834 }
835 assert(!list_is_empty(&cmd_buffer->batch_bos));
836
837 anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
838 &cmd_buffer->batch,
839 GEN8_MI_BATCH_BUFFER_START_length * 4);
840
841 while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
842 struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
843 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
844 }
845 assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
846 cmd_buffer->bt_next = 0;
847
848 cmd_buffer->surface_relocs.num_relocs = 0;
849 _mesa_set_clear(cmd_buffer->surface_relocs.deps, NULL);
850 cmd_buffer->last_ss_pool_center = 0;
851
852 /* Reset the list of seen buffers */
853 cmd_buffer->seen_bbos.head = 0;
854 cmd_buffer->seen_bbos.tail = 0;
855
856 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) =
857 anv_cmd_buffer_current_batch_bo(cmd_buffer);
858 }
859
860 void
861 anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
862 {
863 struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
864
865 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
866 /* When we start a batch buffer, we subtract a certain amount of
867 * padding from the end to ensure that we always have room to emit a
868 * BATCH_BUFFER_START to chain to the next BO. We need to remove
869 * that padding before we end the batch; otherwise, we may end up
870 * with our BATCH_BUFFER_END in another BO.
871 */
872 cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
873 assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);
874
875 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END, bbe);
876
877 /* Round batch up to an even number of dwords. */
878 if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
879 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);
880
881 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
882 } else {
883 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
884 /* If this is a secondary command buffer, we need to determine the
885 * mode in which it will be executed with vkExecuteCommands. We
886 * determine this statically here so that this stays in sync with the
887 * actual ExecuteCommands implementation.
888 */
889 const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
890 if (!cmd_buffer->device->can_chain_batches) {
891 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
892 } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
893 (length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
894 /* If the secondary has exactly one batch buffer in its list *and*
895 * that batch buffer is less than half of the maximum size, we're
896 * probably better of simply copying it into our batch.
897 */
898 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
899 } else if (!(cmd_buffer->usage_flags &
900 VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
901 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
902
903 /* In order to chain, we need this command buffer to contain an
904 * MI_BATCH_BUFFER_START which will jump back to the calling batch.
905 * It doesn't matter where it points now so long as it has a valid
906 * relocation. We'll adjust it later as part of the chaining
907 * process.
908 *
909 * We set the end of the batch a little short so we would be sure we
910 * have room for the chaining command. Since we're about to emit the
911 * chaining command, let's set it back where it should go.
912 */
913 cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
914 assert(cmd_buffer->batch.start == batch_bo->bo.map);
915 assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);
916
917 emit_batch_buffer_start(cmd_buffer, &batch_bo->bo, 0);
918 assert(cmd_buffer->batch.start == batch_bo->bo.map);
919 } else {
920 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
921 }
922 }
923
924 anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
925 }
926
927 static VkResult
928 anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
929 struct list_head *list)
930 {
931 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
932 struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
933 if (bbo_ptr == NULL)
934 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
935
936 *bbo_ptr = bbo;
937 }
938
939 return VK_SUCCESS;
940 }
941
942 void
943 anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
944 struct anv_cmd_buffer *secondary)
945 {
946 switch (secondary->exec_mode) {
947 case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
948 anv_batch_emit_batch(&primary->batch, &secondary->batch);
949 break;
950 case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
951 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
952 unsigned length = secondary->batch.end - secondary->batch.start;
953 anv_batch_bo_grow(primary, bbo, &primary->batch, length,
954 GEN8_MI_BATCH_BUFFER_START_length * 4);
955 anv_batch_emit_batch(&primary->batch, &secondary->batch);
956 break;
957 }
958 case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
959 struct anv_batch_bo *first_bbo =
960 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
961 struct anv_batch_bo *last_bbo =
962 list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
963
964 emit_batch_buffer_start(primary, &first_bbo->bo, 0);
965
966 struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
967 assert(primary->batch.start == this_bbo->bo.map);
968 uint32_t offset = primary->batch.next - primary->batch.start;
969
970 /* Make the tail of the secondary point back to right after the
971 * MI_BATCH_BUFFER_START in the primary batch.
972 */
973 anv_batch_bo_link(primary, last_bbo, this_bbo, offset);
974
975 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
976 break;
977 }
978 case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
979 struct list_head copy_list;
980 VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
981 secondary,
982 &copy_list);
983 if (result != VK_SUCCESS)
984 return; /* FIXME */
985
986 anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
987
988 struct anv_batch_bo *first_bbo =
989 list_first_entry(&copy_list, struct anv_batch_bo, link);
990 struct anv_batch_bo *last_bbo =
991 list_last_entry(&copy_list, struct anv_batch_bo, link);
992
993 cmd_buffer_chain_to_batch_bo(primary, first_bbo);
994
995 list_splicetail(&copy_list, &primary->batch_bos);
996
997 anv_batch_bo_continue(last_bbo, &primary->batch,
998 GEN8_MI_BATCH_BUFFER_START_length * 4);
999 break;
1000 }
1001 default:
1002 assert(!"Invalid execution mode");
1003 }
1004
1005 anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
1006 &secondary->surface_relocs, 0);
1007 }
1008
1009 struct anv_execbuf {
1010 struct drm_i915_gem_execbuffer2 execbuf;
1011
1012 struct drm_i915_gem_exec_object2 * objects;
1013 uint32_t bo_count;
1014 struct anv_bo ** bos;
1015
1016 /* Allocated length of the 'objects' and 'bos' arrays */
1017 uint32_t array_length;
1018
1019 bool has_relocs;
1020
1021 uint32_t fence_count;
1022 uint32_t fence_array_length;
1023 struct drm_i915_gem_exec_fence * fences;
1024 struct anv_syncobj ** syncobjs;
1025 };
1026
1027 static void
1028 anv_execbuf_init(struct anv_execbuf *exec)
1029 {
1030 memset(exec, 0, sizeof(*exec));
1031 }
1032
1033 static void
1034 anv_execbuf_finish(struct anv_execbuf *exec,
1035 const VkAllocationCallbacks *alloc)
1036 {
1037 vk_free(alloc, exec->objects);
1038 vk_free(alloc, exec->bos);
1039 vk_free(alloc, exec->fences);
1040 vk_free(alloc, exec->syncobjs);
1041 }
1042
1043 static int
1044 _compare_bo_handles(const void *_bo1, const void *_bo2)
1045 {
1046 struct anv_bo * const *bo1 = _bo1;
1047 struct anv_bo * const *bo2 = _bo2;
1048
1049 return (*bo1)->gem_handle - (*bo2)->gem_handle;
1050 }
1051
1052 static VkResult
1053 anv_execbuf_add_bo_set(struct anv_execbuf *exec,
1054 struct set *deps,
1055 uint32_t extra_flags,
1056 const VkAllocationCallbacks *alloc);
1057
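/* Add a BO to the execbuf validation list, growing the objects/bos arrays
 * as needed and assigning the BO an index that relocations can refer to.
 * If a relocation list is given, every BO it targets and every BO in its
 * 'deps' set is added as well.
 */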
1058 static VkResult
1059 anv_execbuf_add_bo(struct anv_execbuf *exec,
1060 struct anv_bo *bo,
1061 struct anv_reloc_list *relocs,
1062 uint32_t extra_flags,
1063 const VkAllocationCallbacks *alloc)
1064 {
1065 struct drm_i915_gem_exec_object2 *obj = NULL;
1066
1067 bo = anv_bo_unwrap(bo);
1068
1069 if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
1070 obj = &exec->objects[bo->index];
1071
1072 if (obj == NULL) {
1073 /* We've never seen this one before. Add it to the list and assign
1074 * an id that we can use later.
1075 */
1076 if (exec->bo_count >= exec->array_length) {
1077 uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
1078
1079 struct drm_i915_gem_exec_object2 *new_objects =
1080 vk_alloc(alloc, new_len * sizeof(*new_objects),
1081 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1082 if (new_objects == NULL)
1083 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1084
1085 struct anv_bo **new_bos =
1086 vk_alloc(alloc, new_len * sizeof(*new_bos),
1087 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1088 if (new_bos == NULL) {
1089 vk_free(alloc, new_objects);
1090 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1091 }
1092
1093 if (exec->objects) {
1094 memcpy(new_objects, exec->objects,
1095 exec->bo_count * sizeof(*new_objects));
1096 memcpy(new_bos, exec->bos,
1097 exec->bo_count * sizeof(*new_bos));
1098 }
1099
1100 vk_free(alloc, exec->objects);
1101 vk_free(alloc, exec->bos);
1102
1103 exec->objects = new_objects;
1104 exec->bos = new_bos;
1105 exec->array_length = new_len;
1106 }
1107
1108 assert(exec->bo_count < exec->array_length);
1109
1110 bo->index = exec->bo_count++;
1111 obj = &exec->objects[bo->index];
1112 exec->bos[bo->index] = bo;
1113
1114 obj->handle = bo->gem_handle;
1115 obj->relocation_count = 0;
1116 obj->relocs_ptr = 0;
1117 obj->alignment = 0;
1118 obj->offset = bo->offset;
1119 obj->flags = bo->flags | extra_flags;
1120 obj->rsvd1 = 0;
1121 obj->rsvd2 = 0;
1122 }
1123
1124 if (relocs != NULL) {
1125 assert(obj->relocation_count == 0);
1126
1127 if (relocs->num_relocs > 0) {
1128 /* This is the first time we've ever seen a list of relocations for
1129 * this BO. Go ahead and set the relocations and then walk the list
1130 * of relocations and add them all.
1131 */
1132 exec->has_relocs = true;
1133 obj->relocation_count = relocs->num_relocs;
1134 obj->relocs_ptr = (uintptr_t) relocs->relocs;
1135
1136 for (size_t i = 0; i < relocs->num_relocs; i++) {
1137 VkResult result;
1138
1139 /* A quick sanity check on relocations */
1140 assert(relocs->relocs[i].offset < bo->size);
1141 result = anv_execbuf_add_bo(exec, relocs->reloc_bos[i], NULL,
1142 extra_flags, alloc);
1143
1144 if (result != VK_SUCCESS)
1145 return result;
1146 }
1147 }
1148
1149 return anv_execbuf_add_bo_set(exec, relocs->deps, extra_flags, alloc);
1150 }
1151
1152 return VK_SUCCESS;
1153 }
1154
1155 /* Add BO dependencies to execbuf */
1156 static VkResult
1157 anv_execbuf_add_bo_set(struct anv_execbuf *exec,
1158 struct set *deps,
1159 uint32_t extra_flags,
1160 const VkAllocationCallbacks *alloc)
1161 {
1162 if (!deps || deps->entries <= 0)
1163 return VK_SUCCESS;
1164
1165 const uint32_t entries = deps->entries;
1166 struct anv_bo **bos =
1167 vk_alloc(alloc, entries * sizeof(*bos),
1168 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1169 if (bos == NULL)
1170 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1171
1172 struct anv_bo **bo = bos;
1173 set_foreach(deps, entry) {
1174 *bo++ = (void *)entry->key;
1175 }
1176
1177 qsort(bos, entries, sizeof(struct anv_bo*), _compare_bo_handles);
1178
1179 VkResult result = VK_SUCCESS;
1180 for (bo = bos; bo < bos + entries; bo++) {
1181 result = anv_execbuf_add_bo(exec, *bo, NULL, extra_flags, alloc);
1182 if (result != VK_SUCCESS)
1183 break;
1184 }
1185
1186 vk_free(alloc, bos);
1187
1188 return result;
1189 }
1190
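/* Queue a DRM syncobj handle to be waited on and/or signalled by this
 * execbuf (flags is some combination of I915_EXEC_FENCE_WAIT and
 * I915_EXEC_FENCE_SIGNAL).
 */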
1191 static VkResult
1192 anv_execbuf_add_syncobj(struct anv_execbuf *exec,
1193 uint32_t handle, uint32_t flags,
1194 const VkAllocationCallbacks *alloc)
1195 {
1196 assert(flags != 0);
1197
1198 if (exec->fence_count >= exec->fence_array_length) {
1199 uint32_t new_len = MAX2(exec->fence_array_length * 2, 64);
1200
1201 exec->fences = vk_realloc(alloc, exec->fences,
1202 new_len * sizeof(*exec->fences),
1203 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1204 if (exec->fences == NULL)
1205 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1206
1207 exec->fence_array_length = new_len;
1208 }
1209
1210 exec->fences[exec->fence_count] = (struct drm_i915_gem_exec_fence) {
1211 .handle = handle,
1212 .flags = flags,
1213 };
1214
1215 exec->fence_count++;
1216
1217 return VK_SUCCESS;
1218 }
1219
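/* Now that every target BO has its final index in the validation list,
 * replace the placeholder target_handle (-1) in each relocation with that
 * index, as the kernel expects when I915_EXEC_HANDLE_LUT is set.
 */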
1220 static void
1221 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
1222 struct anv_reloc_list *list)
1223 {
1224 for (size_t i = 0; i < list->num_relocs; i++)
1225 list->relocs[i].target_handle = anv_bo_unwrap(list->reloc_bos[i])->index;
1226 }
1227
1228 static void
1229 adjust_relocations_from_state_pool(struct anv_state_pool *pool,
1230 struct anv_reloc_list *relocs,
1231 uint32_t last_pool_center_bo_offset)
1232 {
1233 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1234 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1235
1236 for (size_t i = 0; i < relocs->num_relocs; i++) {
1237 /* All of the relocations from this block pool to other BOs should
1238 * have been emitted relative to the surface block pool center. We
1239 * need to add the center offset to make them relative to the
1240 * beginning of the actual GEM bo.
1241 */
1242 relocs->relocs[i].offset += delta;
1243 }
1244 }
1245
1246 static void
1247 adjust_relocations_to_state_pool(struct anv_state_pool *pool,
1248 struct anv_bo *from_bo,
1249 struct anv_reloc_list *relocs,
1250 uint32_t last_pool_center_bo_offset)
1251 {
1252 assert(!from_bo->is_wrapper);
1253 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1254 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1255
1256 /* When we initially emit relocations into a block pool, we don't
1257 * actually know what the final center_bo_offset will be so we just emit
1258 * it as if center_bo_offset == 0. Now that we know what the center
1259 * offset is, we need to walk the list of relocations and adjust any
1260 * relocations that point to the pool bo with the correct offset.
1261 */
1262 for (size_t i = 0; i < relocs->num_relocs; i++) {
1263 if (relocs->reloc_bos[i] == pool->block_pool.bo) {
1264 /* Adjust the delta value in the relocation to correctly
1265 * correspond to the new delta. Initially, this value may have
1266 * been negative (if treated as unsigned), but we trust in
1267 * uint32_t roll-over to fix that for us at this point.
1268 */
1269 relocs->relocs[i].delta += delta;
1270
1271 /* Since the delta has changed, we need to update the actual
1272 * relocated value with the new presumed value. This function
1273 * should only be called on batch buffers, so we know it isn't in
1274 * use by the GPU at the moment.
1275 */
1276 assert(relocs->relocs[i].offset < from_bo->size);
1277 write_reloc(pool->block_pool.device,
1278 from_bo->map + relocs->relocs[i].offset,
1279 relocs->relocs[i].presumed_offset +
1280 relocs->relocs[i].delta, false);
1281 }
1282 }
1283 }
1284
1285 static void
1286 anv_reloc_list_apply(struct anv_device *device,
1287 struct anv_reloc_list *list,
1288 struct anv_bo *bo,
1289 bool always_relocate)
1290 {
1291 bo = anv_bo_unwrap(bo);
1292
1293 for (size_t i = 0; i < list->num_relocs; i++) {
1294 struct anv_bo *target_bo = anv_bo_unwrap(list->reloc_bos[i]);
1295 if (list->relocs[i].presumed_offset == target_bo->offset &&
1296 !always_relocate)
1297 continue;
1298
1299 void *p = bo->map + list->relocs[i].offset;
1300 write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
1301 list->relocs[i].presumed_offset = target_bo->offset;
1302 }
1303 }
1304
1305 /**
1306 * This function applies the relocations for a command buffer and writes the
1307 * actual addresses into the buffers as per what we were told by the kernel on
1308 * the previous execbuf2 call. This should be safe to do because, for each
1309 * relocated address, we have two cases:
1310 *
1311 * 1) The target BO is inactive (as seen by the kernel). In this case, it is
1312 * not in use by the GPU so updating the address is 100% ok. It won't be
1313 * in-use by the GPU (from our context) again until the next execbuf2
1314 * happens. If the kernel decides to move it in the next execbuf2, it
1315 * will have to do the relocations itself, but that's ok because it should
1316 * have all of the information needed to do so.
1317 *
1318 * 2) The target BO is active (as seen by the kernel). In this case, it
1319 * hasn't moved since the last execbuffer2 call because GTT shuffling
1320 * *only* happens when the BO is idle. (From our perspective, it only
1321 * happens inside the execbuffer2 ioctl, but the shuffling may be
1322 * triggered by another ioctl; with full-ppgtt this is limited to only
1323 * execbuffer2 ioctls on the same context, or memory pressure.) Since the
1324 * target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
1325 * address and the relocated value we are writing into the BO will be the
1326 * same as the value that is already there.
1327 *
1328 * There is also a possibility that the target BO is active but the exact
1329 * RENDER_SURFACE_STATE object we are writing the relocation into isn't in
1330 * use. In this case, the address currently in the RENDER_SURFACE_STATE
1331 * may be stale but it's still safe to write the relocation because that
1332 * particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
1333 * won't be until the next execbuf2 call.
1334 *
1335 * By doing relocations on the CPU, we can tell the kernel that it doesn't
1336 * need to bother. We want to do this because the surface state buffer is
1337 * used by every command buffer so, if the kernel does the relocations, it
1338 * will always be busy and the kernel will always stall. This is also
1339 * probably the fastest mechanism for doing relocations since the kernel would
1340 * have to make a full copy of all the relocations lists.
1341 */
1342 static bool
1343 relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
1344 struct anv_execbuf *exec)
1345 {
1346 if (!exec->has_relocs)
1347 return true;
1348
1349 static int userspace_relocs = -1;
1350 if (userspace_relocs < 0)
1351 userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
1352 if (!userspace_relocs)
1353 return false;
1354
1355 /* First, we have to check to see whether or not we can even do the
1356 * relocation. New buffers which have never been submitted to the kernel
1357 * don't have a valid offset so we need to let the kernel do relocations so
1358 * that we can get offsets for them. On future execbuf2 calls, those
1359 * buffers will have offsets and we will be able to skip relocating.
1360 * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
1361 */
1362 for (uint32_t i = 0; i < exec->bo_count; i++) {
1363 assert(!exec->bos[i]->is_wrapper);
1364 if (exec->bos[i]->offset == (uint64_t)-1)
1365 return false;
1366 }
1367
1368 /* Since surface states are shared between command buffers and we don't
1369 * know what order they will be submitted to the kernel, we don't know
1370 * what address is actually written in the surface state object at any
1371 * given time. The only option is to always relocate them.
1372 */
1373 struct anv_bo *surface_state_bo =
1374 anv_bo_unwrap(cmd_buffer->device->surface_state_pool.block_pool.bo);
1375 anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
1376 surface_state_bo,
1377 true /* always relocate surface states */);
1378
1379 /* Since we own all of the batch buffers, we know what values are stored
1380 * in the relocated addresses and only have to update them if the offsets
1381 * have changed.
1382 */
1383 struct anv_batch_bo **bbo;
1384 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1385 anv_reloc_list_apply(cmd_buffer->device,
1386 &(*bbo)->relocs, &(*bbo)->bo, false);
1387 }
1388
1389 for (uint32_t i = 0; i < exec->bo_count; i++)
1390 exec->objects[i].offset = exec->bos[i]->offset;
1391
1392 return true;
1393 }
1394
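/* Build the execbuf for a command buffer: add every BO it depends on to
 * the validation list (in the softpin case the state pool BOs, surface
 * dependencies and memory objects; otherwise the surface state BO and
 * whatever the relocation lists reference), move the first batch BO to the
 * end of the list as the kernel requires, and decide whether relocations
 * can be handled in userspace (I915_EXEC_NO_RELOC) or must be left to the
 * kernel.
 */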
1395 static VkResult
1396 setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
1397 struct anv_cmd_buffer *cmd_buffer)
1398 {
1399 struct anv_batch *batch = &cmd_buffer->batch;
1400 struct anv_state_pool *ss_pool =
1401 &cmd_buffer->device->surface_state_pool;
1402
1403 adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
1404 cmd_buffer->last_ss_pool_center);
1405 VkResult result;
1406 if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
1407 anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
1408 result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
1409 &cmd_buffer->device->alloc);
1410 if (result != VK_SUCCESS)
1411 return result;
1412 }
1413 /* Add surface dependencies (BOs) to the execbuf */
1414 anv_execbuf_add_bo_set(execbuf, cmd_buffer->surface_relocs.deps, 0,
1415 &cmd_buffer->device->alloc);
1416
1417 /* Add the BOs for all memory objects */
1418 list_for_each_entry(struct anv_device_memory, mem,
1419 &cmd_buffer->device->memory_objects, link) {
1420 result = anv_execbuf_add_bo(execbuf, mem->bo, NULL, 0,
1421 &cmd_buffer->device->alloc);
1422 if (result != VK_SUCCESS)
1423 return result;
1424 }
1425
1426 struct anv_block_pool *pool;
1427 pool = &cmd_buffer->device->dynamic_state_pool.block_pool;
1428 anv_block_pool_foreach_bo(bo, pool) {
1429 result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
1430 &cmd_buffer->device->alloc);
1431 if (result != VK_SUCCESS)
1432 return result;
1433 }
1434
1435 pool = &cmd_buffer->device->instruction_state_pool.block_pool;
1436 anv_block_pool_foreach_bo(bo, pool) {
1437 result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
1438 &cmd_buffer->device->alloc);
1439 if (result != VK_SUCCESS)
1440 return result;
1441 }
1442
1443 pool = &cmd_buffer->device->binding_table_pool.block_pool;
1444 anv_block_pool_foreach_bo(bo, pool) {
1445 result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
1446 &cmd_buffer->device->alloc);
1447 if (result != VK_SUCCESS)
1448 return result;
1449 }
1450 } else {
1451 /* Since we aren't in the softpin case, all of our STATE_BASE_ADDRESS BOs
1452 * will get added automatically by processing relocations on the batch
1453 * buffer. We have to add the surface state BO manually because it has
1454 * relocations of its own that we need to be sure are processed.
1455 */
1456 result = anv_execbuf_add_bo(execbuf, ss_pool->block_pool.bo,
1457 &cmd_buffer->surface_relocs, 0,
1458 &cmd_buffer->device->alloc);
1459 if (result != VK_SUCCESS)
1460 return result;
1461 }
1462
1463 /* First, we walk over all of the bos we've seen and add them and their
1464 * relocations to the validate list.
1465 */
1466 struct anv_batch_bo **bbo;
1467 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1468 adjust_relocations_to_state_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
1469 cmd_buffer->last_ss_pool_center);
1470
1471 result = anv_execbuf_add_bo(execbuf, &(*bbo)->bo, &(*bbo)->relocs, 0,
1472 &cmd_buffer->device->alloc);
1473 if (result != VK_SUCCESS)
1474 return result;
1475 }
1476
1477 /* Now that we've adjusted all of the surface state relocations, we need to
1478 * record the surface state pool center so future executions of the command
1479 * buffer can adjust correctly.
1480 */
1481 cmd_buffer->last_ss_pool_center = ss_pool->block_pool.center_bo_offset;
1482
1483 struct anv_batch_bo *first_batch_bo =
1484 list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
1485
1486 /* The kernel requires that the last entry in the validation list be the
1487 * batch buffer to execute. We can simply swap the element
1488 * corresponding to the first batch_bo in the chain with the last
1489 * element in the list.
1490 */
1491 if (first_batch_bo->bo.index != execbuf->bo_count - 1) {
1492 uint32_t idx = first_batch_bo->bo.index;
1493 uint32_t last_idx = execbuf->bo_count - 1;
1494
1495 struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
1496 assert(execbuf->bos[idx] == &first_batch_bo->bo);
1497
1498 execbuf->objects[idx] = execbuf->objects[last_idx];
1499 execbuf->bos[idx] = execbuf->bos[last_idx];
1500 execbuf->bos[idx]->index = idx;
1501
1502 execbuf->objects[last_idx] = tmp_obj;
1503 execbuf->bos[last_idx] = &first_batch_bo->bo;
1504 first_batch_bo->bo.index = last_idx;
1505 }
1506
1507 /* If we are pinning our BOs, we shouldn't have to relocate anything */
1508 if (cmd_buffer->device->instance->physicalDevice.use_softpin)
1509 assert(!execbuf->has_relocs);
1510
1511 /* Now we go through and fixup all of the relocation lists to point to
1512 * the correct indices in the object array. We have to do this after we
1513 * reorder the list above as some of the indices may have changed.
1514 */
1515 if (execbuf->has_relocs) {
1516 u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
1517 anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
1518
1519 anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
1520 }
1521
1522 if (!cmd_buffer->device->info.has_llc) {
1523 __builtin_ia32_mfence();
1524 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1525 for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
1526 __builtin_ia32_clflush((*bbo)->bo.map + i);
1527 }
1528 }
1529
1530 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1531 .buffers_ptr = (uintptr_t) execbuf->objects,
1532 .buffer_count = execbuf->bo_count,
1533 .batch_start_offset = 0,
1534 .batch_len = batch->next - batch->start,
1535 .cliprects_ptr = 0,
1536 .num_cliprects = 0,
1537 .DR1 = 0,
1538 .DR4 = 0,
1539 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
1540 .rsvd1 = cmd_buffer->device->context_id,
1541 .rsvd2 = 0,
1542 };
1543
1544 if (relocate_cmd_buffer(cmd_buffer, execbuf)) {
1545 /* If we were able to successfully relocate everything, tell the kernel
1546 * that it can skip doing relocations. The requirement for using
1547 * NO_RELOC is:
1548 *
1549 * 1) The addresses written in the objects must match the corresponding
1550 * reloc.presumed_offset which in turn must match the corresponding
1551 * execobject.offset.
1552 *
1553 * 2) To avoid stalling, execobject.offset should match the current
1554 * address of that object within the active context.
1555 *
1556 * In order to satisfy all of the invariants that make userspace
1557 * relocations safe (see relocate_cmd_buffer()), we need to
1558 * further ensure that the addresses we use match those used by the
1559 * kernel for the most recent execbuf2.
1560 *
1561 * The kernel may still choose to do relocations anyway if something has
1562 * moved in the GTT. In this case, the relocation list still needs to be
1563 * valid. All relocations on the batch buffers are already valid and
1564 * kept up-to-date. For surface state relocations, by applying the
1565 * relocations in relocate_cmd_buffer, we ensured that the address in
1566 * the RENDER_SURFACE_STATE matches presumed_offset, so it should be
1567 * safe for the kernel to relocate them as needed.
1568 */
1569 execbuf->execbuf.flags |= I915_EXEC_NO_RELOC;
1570 } else {
1571 /* In the case where we fall back to doing kernel relocations, we need
1572 * to ensure that the relocation list is valid. All relocations on the
1573 * batch buffers are already valid and kept up-to-date. Since surface
1574 * states are shared between command buffers and we don't know what
1575 * order they will be submitted to the kernel, we don't know what
1576 * address is actually written in the surface state object at any given
1577 * time. The only option is to set a bogus presumed offset and let the
1578 * kernel relocate them.
1579 */
1580 for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
1581 cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
1582 }
1583
1584 return VK_SUCCESS;
1585 }
1586
1587 static VkResult
1588 setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_device *device)
1589 {
1590 VkResult result = anv_execbuf_add_bo(execbuf, &device->trivial_batch_bo,
1591 NULL, 0, &device->alloc);
1592 if (result != VK_SUCCESS)
1593 return result;
1594
1595 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1596 .buffers_ptr = (uintptr_t) execbuf->objects,
1597 .buffer_count = execbuf->bo_count,
1598 .batch_start_offset = 0,
1599 .batch_len = 8, /* GEN7_MI_BATCH_BUFFER_END and NOOP */
1600 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
1601 .rsvd1 = device->context_id,
1602 .rsvd2 = 0,
1603 };
1604
1605 return VK_SUCCESS;
1606 }
1607
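/* Build and submit a single execbuf for one command buffer (or for the
 * trivial batch when cmd_buffer is NULL): fold in the wait semaphores, then
 * the signal semaphores and the fence, then the batch itself, and flush the
 * whole thing to the kernel in one EXECBUFFER2 call.  Temporary semaphore
 * payloads are restored once the ioctl has returned.
 */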
1608 VkResult
1609 anv_cmd_buffer_execbuf(struct anv_device *device,
1610 struct anv_cmd_buffer *cmd_buffer,
1611 const VkSemaphore *in_semaphores,
1612 uint32_t num_in_semaphores,
1613 const VkSemaphore *out_semaphores,
1614 uint32_t num_out_semaphores,
1615 VkFence _fence)
1616 {
1617 ANV_FROM_HANDLE(anv_fence, fence, _fence);
1618 UNUSED struct anv_physical_device *pdevice = &device->instance->physicalDevice;
1619
1620 struct anv_execbuf execbuf;
1621 anv_execbuf_init(&execbuf);
1622
1623 int in_fence = -1;
1624 VkResult result = VK_SUCCESS;
1625 for (uint32_t i = 0; i < num_in_semaphores; i++) {
1626 ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
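      /* A temporarily imported payload, if any, takes precedence over the
       * permanent one and is consumed by this wait (see the reset loop after
       * the execbuf below).
       */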
1627 struct anv_semaphore_impl *impl =
1628 semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
1629 &semaphore->temporary : &semaphore->permanent;
1630
1631 switch (impl->type) {
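      /* BO-backed semaphores lean on the kernel's implicit dependency
       * tracking: just listing the BO here orders this submit after the
       * earlier one that signalled (wrote) it.
       */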
1632 case ANV_SEMAPHORE_TYPE_BO:
1633 assert(!pdevice->has_syncobj);
1634 result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
1635 0, &device->alloc);
1636 if (result != VK_SUCCESS)
1637 return result;
1638 break;
1639
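      /* The execbuf ABI only takes a single input sync_file, so multiple
       * sync-file waits are collapsed into one fd via anv_gem_sync_file_merge().
       * A minimal sketch of such a merge at the ioctl level (assuming
       * <linux/sync_file.h>; the driver's helper may differ in detail):
       *
       *    struct sync_merge_data args = { .fd2 = impl->fd };
       *    if (ioctl(in_fence, SYNC_IOC_MERGE, &args) == 0)
       *       merged_fd = args.fence;   // new fence that waits on both
       */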
1640 case ANV_SEMAPHORE_TYPE_SYNC_FILE:
1641 assert(!pdevice->has_syncobj);
1642 if (in_fence == -1) {
1643 in_fence = impl->fd;
1644 } else {
1645 int merge = anv_gem_sync_file_merge(device, in_fence, impl->fd);
1646 if (merge == -1)
1647 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
1648
1649 close(impl->fd);
1650 close(in_fence);
1651 in_fence = merge;
1652 }
1653
1654 impl->fd = -1;
1655 break;
1656
1657 case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
1658 result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
1659 I915_EXEC_FENCE_WAIT,
1660 &device->alloc);
1661 if (result != VK_SUCCESS)
1662 return result;
1663 break;
1664
1665 default:
1666 break;
1667 }
1668 }
1669
1670 bool need_out_fence = false;
1671 for (uint32_t i = 0; i < num_out_semaphores; i++) {
1672 ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
1673
1674 /* Under most circumstances, out fences won't be temporary. However,
1675 * the spec does allow it for opaque_fd. From the Vulkan 1.0.53 spec:
1676 *
1677 * "If the import is temporary, the implementation must restore the
1678 * semaphore to its prior permanent state after submitting the next
1679 * semaphore wait operation."
1680 *
1681 * The spec says nothing whatsoever about signal operations on
1682 * temporarily imported semaphores, so it appears they are allowed.
1683 * There are also CTS tests that require this to work.
1684 */
1685 struct anv_semaphore_impl *impl =
1686 semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
1687 &semaphore->temporary : &semaphore->permanent;
1688
1689 switch (impl->type) {
1690 case ANV_SEMAPHORE_TYPE_BO:
1691 assert(!pdevice->has_syncobj);
1692 result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
1693 EXEC_OBJECT_WRITE, &device->alloc);
1694 if (result != VK_SUCCESS)
1695 return result;
1696 break;
1697
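      /* A sync-file semaphore can't be signalled until the kernel hands back
       * a fence fd, so just note that we need I915_EXEC_FENCE_OUT and import
       * the resulting fd into the semaphore once the execbuf has returned.
       */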
1698 case ANV_SEMAPHORE_TYPE_SYNC_FILE:
1699 assert(!pdevice->has_syncobj);
1700 need_out_fence = true;
1701 break;
1702
1703 case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
1704 result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
1705 I915_EXEC_FENCE_SIGNAL,
1706 &device->alloc);
1707 if (result != VK_SUCCESS)
1708 return result;
1709 break;
1710
1711 default:
1712 break;
1713 }
1714 }
1715
1716 if (fence) {
1717 /* Under most circumstances, out fences won't be temporary. However,
1718 * the spec does allow it for opaque_fd. From the Vulkan 1.0.53 spec:
1719 *
1720 * "If the import is temporary, the implementation must restore the
1721 * semaphore to its prior permanent state after submitting the next
1722 * semaphore wait operation."
1723 *
1724 * The spec says nothing whatsoever about signal operations on
1725 * temporarily imported semaphores, so it appears they are allowed.
1726 * There are also CTS tests that require this to work.
1727 */
1728 struct anv_fence_impl *impl =
1729 fence->temporary.type != ANV_FENCE_TYPE_NONE ?
1730 &fence->temporary : &fence->permanent;
1731
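      /* A BO fence is a dummy buffer listed with EXEC_OBJECT_WRITE so the
       * kernel considers it busy until this batch completes; the driver can
       * then poll or wait on the BO itself.  A syncobj fence is simply handed
       * to the kernel to signal on completion.
       */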
1732 switch (impl->type) {
1733 case ANV_FENCE_TYPE_BO:
1734 assert(!pdevice->has_syncobj_wait);
1735 result = anv_execbuf_add_bo(&execbuf, &impl->bo.bo, NULL,
1736 EXEC_OBJECT_WRITE, &device->alloc);
1737 if (result != VK_SUCCESS)
1738 return result;
1739 break;
1740
1741 case ANV_FENCE_TYPE_SYNCOBJ:
1742 result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
1743 I915_EXEC_FENCE_SIGNAL,
1744 &device->alloc);
1745 if (result != VK_SUCCESS)
1746 return result;
1747 break;
1748
1749 default:
1750 unreachable("Invalid fence type");
1751 }
1752 }
1753
1754 if (cmd_buffer) {
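      /* With batch decoding enabled in INTEL_DEBUG, dump the batch we are
       * about to submit.  cmd_buffer_being_decoded lets the decoder's BO
       * lookup resolve addresses that live in this command buffer's buffers.
       */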
1755 if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
1756 struct anv_batch_bo **bo = u_vector_tail(&cmd_buffer->seen_bbos);
1757
1758 device->cmd_buffer_being_decoded = cmd_buffer;
1759 gen_print_batch(&device->decoder_ctx, (*bo)->bo.map,
1760 (*bo)->bo.size, (*bo)->bo.offset, false);
1761 device->cmd_buffer_being_decoded = NULL;
1762 }
1763
1764 result = setup_execbuf_for_cmd_buffer(&execbuf, cmd_buffer);
1765 } else {
1766 result = setup_empty_execbuf(&execbuf, device);
1767 }
1768
1769 if (result != VK_SUCCESS)
1770 return result;
1771
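   /* Syncobj waits and signals travel as an I915_EXEC_FENCE_ARRAY.  The
    * array reuses the otherwise-unused cliprects fields and each entry is a
    *
    *    struct drm_i915_gem_exec_fence { __u32 handle; __u32 flags; };
    *
    * with flags containing I915_EXEC_FENCE_WAIT and/or I915_EXEC_FENCE_SIGNAL.
    */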
1772 if (execbuf.fence_count > 0) {
1773 assert(device->instance->physicalDevice.has_syncobj);
1774 execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
1775 execbuf.execbuf.num_cliprects = execbuf.fence_count;
1776 execbuf.execbuf.cliprects_ptr = (uintptr_t) execbuf.fences;
1777 }
1778
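   /* rsvd2 does double duty for sync files: the low 32 bits carry the input
    * fd for I915_EXEC_FENCE_IN, and with I915_EXEC_FENCE_OUT set the kernel
    * returns a new sync_file fd in the high 32 bits (read back further down).
    */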
1779 if (in_fence != -1) {
1780 execbuf.execbuf.flags |= I915_EXEC_FENCE_IN;
1781 execbuf.execbuf.rsvd2 |= (uint32_t)in_fence;
1782 }
1783
1784 if (need_out_fence)
1785 execbuf.execbuf.flags |= I915_EXEC_FENCE_OUT;
1786
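   /* anv_device_execbuf() is expected to amount to the EXECBUFFER2 ioctl
    * plus copying the kernel-chosen offsets back into our BOs; roughly
    * (a sketch, not the exact helper):
    *
    *    if (anv_gem_execbuffer(device, &execbuf.execbuf) != 0)
    *       return vk_error(VK_ERROR_DEVICE_LOST);   // treat as device lost
    *    for (uint32_t i = 0; i < execbuf.bo_count; i++)
    *       execbuf.bos[i]->offset = execbuf.objects[i].offset;
    */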
1787 result = anv_device_execbuf(device, &execbuf.execbuf, execbuf.bos);
1788
1789 /* Execbuf does not consume the in_fence. It's our job to close it. */
1790 if (in_fence != -1)
1791 close(in_fence);
1792
1793 for (uint32_t i = 0; i < num_in_semaphores; i++) {
1794 ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
1795 /* From the Vulkan 1.0.53 spec:
1796 *
1797 * "If the import is temporary, the implementation must restore the
1798 * semaphore to its prior permanent state after submitting the next
1799 * semaphore wait operation."
1800 *
1801 * This has to happen after the execbuf in case we close any syncobjs in
1802 * the process.
1803 */
1804 anv_semaphore_reset_temporary(device, semaphore);
1805 }
1806
1807 if (fence && fence->permanent.type == ANV_FENCE_TYPE_BO) {
1808 assert(!pdevice->has_syncobj_wait);
1809 /* BO fences can't be shared, so they can't be temporary. */
1810 assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
1811
1812 /* Once the execbuf has returned, we need to set the fence state to
1813 * SUBMITTED. We can't do this before calling execbuf because
1814 * anv_GetFenceStatus does take the global device lock before checking
1815 * fence->state.
1816 *
1817 * We set the fence state to SUBMITTED regardless of whether or not the
1818 * execbuf succeeds because we need to ensure that vkWaitForFences() and
1819 * vkGetFenceStatus() return a valid result (VK_ERROR_DEVICE_LOST or
1820 * VK_SUCCESS) in a finite amount of time even if execbuf fails.
1821 */
1822 fence->permanent.bo.state = ANV_BO_FENCE_STATE_SUBMITTED;
1823 }
1824
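   /* Each sync-file semaphore gets its own dup() of the kernel's output
    * fence so later waits on different semaphores don't share (and race to
    * close) a single fd; the original is closed once everyone has a copy.
    */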
1825 if (result == VK_SUCCESS && need_out_fence) {
1826 assert(!pdevice->has_syncobj_wait);
1827 int out_fence = execbuf.execbuf.rsvd2 >> 32;
1828 for (uint32_t i = 0; i < num_out_semaphores; i++) {
1829 ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
1830 /* Out fences can't have temporary state because that would imply
1831 * that we imported a sync file and are trying to signal it.
1832 */
1833 assert(semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE);
1834 struct anv_semaphore_impl *impl = &semaphore->permanent;
1835
1836 if (impl->type == ANV_SEMAPHORE_TYPE_SYNC_FILE) {
1837 assert(impl->fd == -1);
1838 impl->fd = dup(out_fence);
1839 }
1840 }
1841 close(out_fence);
1842 }
1843
1844 anv_execbuf_finish(&execbuf, &device->alloc);
1845
1846 return result;
1847 }