/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

#include "common/gen_decoder.h"

#include "genxml/gen8_pack.h"

#include "util/debug.h"

/** \file anv_batch_chain.c
 *
 * This file contains functions related to anv_cmd_buffer as a data
 * structure.  This involves everything required to create and destroy
 * the actual batch buffers as well as link them together and handle
 * relocations and surface state.  It specifically does *not* contain any
 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
 */

/*-----------------------------------------------------------------------*
 * Functions related to anv_reloc_list
 *-----------------------------------------------------------------------*/

static VkResult
anv_reloc_list_init_clone(struct anv_reloc_list *list,
                          const VkAllocationCallbacks *alloc,
                          const struct anv_reloc_list *other_list)
{
   if (other_list) {
      list->num_relocs = other_list->num_relocs;
      list->array_length = other_list->array_length;
   } else {
      list->num_relocs = 0;
      list->array_length = 256;
   }

   list->relocs =
      vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (list->relocs == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   list->reloc_bos =
      vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (list->reloc_bos == NULL) {
      vk_free(alloc, list->relocs);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   list->deps = _mesa_pointer_set_create(NULL);

   if (!list->deps) {
      vk_free(alloc, list->relocs);
      vk_free(alloc, list->reloc_bos);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   if (other_list) {
      memcpy(list->relocs, other_list->relocs,
             list->array_length * sizeof(*list->relocs));
      memcpy(list->reloc_bos, other_list->reloc_bos,
             list->array_length * sizeof(*list->reloc_bos));
      set_foreach(other_list->deps, entry) {
         _mesa_set_add_pre_hashed(list->deps, entry->hash, entry->key);
      }
   }

   return VK_SUCCESS;
}

VkResult
anv_reloc_list_init(struct anv_reloc_list *list,
                    const VkAllocationCallbacks *alloc)
{
   return anv_reloc_list_init_clone(list, alloc, NULL);
}

void
anv_reloc_list_finish(struct anv_reloc_list *list,
                      const VkAllocationCallbacks *alloc)
{
   vk_free(alloc, list->relocs);
   vk_free(alloc, list->reloc_bos);
   _mesa_set_destroy(list->deps, NULL);
}

static VkResult
anv_reloc_list_grow(struct anv_reloc_list *list,
                    const VkAllocationCallbacks *alloc,
                    size_t num_additional_relocs)
{
   if (list->num_relocs + num_additional_relocs <= list->array_length)
      return VK_SUCCESS;

   size_t new_length = list->array_length * 2;
   while (new_length < list->num_relocs + num_additional_relocs)
      new_length *= 2;

   struct drm_i915_gem_relocation_entry *new_relocs =
      vk_alloc(alloc, new_length * sizeof(*list->relocs), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_relocs == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   struct anv_bo **new_reloc_bos =
      vk_alloc(alloc, new_length * sizeof(*list->reloc_bos), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_reloc_bos == NULL) {
      vk_free(alloc, new_relocs);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
   memcpy(new_reloc_bos, list->reloc_bos,
          list->num_relocs * sizeof(*list->reloc_bos));

   vk_free(alloc, list->relocs);
   vk_free(alloc, list->reloc_bos);

   list->array_length = new_length;
   list->relocs = new_relocs;
   list->reloc_bos = new_reloc_bos;

   return VK_SUCCESS;
}

VkResult
anv_reloc_list_add(struct anv_reloc_list *list,
                   const VkAllocationCallbacks *alloc,
                   uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
{
   struct drm_i915_gem_relocation_entry *entry;
   int index;

   if (target_bo->flags & EXEC_OBJECT_PINNED) {
      _mesa_set_add(list->deps, target_bo);
      return VK_SUCCESS;
   }

   VkResult result = anv_reloc_list_grow(list, alloc, 1);
   if (result != VK_SUCCESS)
      return result;

   /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
   index = list->num_relocs++;
   list->reloc_bos[index] = target_bo;
   entry = &list->relocs[index];
   entry->target_handle = target_bo->gem_handle;
   entry->delta = delta;
   entry->offset = offset;
   entry->presumed_offset = target_bo->offset;
   entry->read_domains = 0;
   entry->write_domain = 0;
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));

   return VK_SUCCESS;
}

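/* A minimal sketch of how the reloc-list API above fits together (error
 * handling elided; "alloc", "some_bo", "batch_offset", and "delta" are
 * hypothetical stand-ins rather than values taken from this driver):
 *
 *    struct anv_reloc_list list;
 *    anv_reloc_list_init(&list, alloc);
 *    anv_reloc_list_add(&list, alloc, batch_offset, some_bo, delta);
 *    anv_reloc_list_finish(&list, alloc);
 */
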
static VkResult
anv_reloc_list_append(struct anv_reloc_list *list,
                      const VkAllocationCallbacks *alloc,
                      struct anv_reloc_list *other, uint32_t offset)
{
   VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
   if (result != VK_SUCCESS)
      return result;

   memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
          other->num_relocs * sizeof(other->relocs[0]));
   memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
          other->num_relocs * sizeof(other->reloc_bos[0]));

   for (uint32_t i = 0; i < other->num_relocs; i++)
      list->relocs[i + list->num_relocs].offset += offset;

   list->num_relocs += other->num_relocs;

   set_foreach(other->deps, entry) {
      _mesa_set_add_pre_hashed(list->deps, entry->hash, entry->key);
   }

   return VK_SUCCESS;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_batch
 *-----------------------------------------------------------------------*/

void *
anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
{
   if (batch->next + num_dwords * 4 > batch->end) {
      VkResult result = batch->extend_cb(batch, batch->user_data);
      if (result != VK_SUCCESS) {
         anv_batch_set_error(batch, result);
         return NULL;
      }
   }

   void *p = batch->next;

   batch->next += num_dwords * 4;
   assert(batch->next <= batch->end);

   return p;
}

uint64_t
anv_batch_emit_reloc(struct anv_batch *batch,
                     void *location, struct anv_bo *bo, uint32_t delta)
{
   VkResult result = anv_reloc_list_add(batch->relocs, batch->alloc,
                                        location - batch->start, bo, delta);
   if (result != VK_SUCCESS) {
      anv_batch_set_error(batch, result);
      return 0;
   }

   return bo->offset + delta;
}

void
anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
{
   uint32_t size, offset;

   size = other->next - other->start;
   assert(size % 4 == 0);

   if (batch->next + size > batch->end) {
      VkResult result = batch->extend_cb(batch, batch->user_data);
      if (result != VK_SUCCESS) {
         anv_batch_set_error(batch, result);
         return;
      }
   }

   assert(batch->next + size <= batch->end);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
   memcpy(batch->next, other->start, size);

   offset = batch->next - batch->start;
   VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
                                           other->relocs, offset);
   if (result != VK_SUCCESS) {
      anv_batch_set_error(batch, result);
      return;
   }

   batch->next += size;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_batch_bo
 *-----------------------------------------------------------------------*/

static VkResult
anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
                    struct anv_batch_bo **bbo_out)
{
   VkResult result;

   struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
                                       8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (bbo == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
                              ANV_CMD_BUFFER_BATCH_SIZE);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
   if (result != VK_SUCCESS)
      goto fail_bo_alloc;

   *bbo_out = bbo;

   return VK_SUCCESS;

 fail_bo_alloc:
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
 fail_alloc:
   vk_free(&cmd_buffer->pool->alloc, bbo);

   return result;
}

static VkResult
anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
                   const struct anv_batch_bo *other_bbo,
                   struct anv_batch_bo **bbo_out)
{
   VkResult result;

   struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
                                       8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (bbo == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
                              other_bbo->bo.size);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
                                      &other_bbo->relocs);
   if (result != VK_SUCCESS)
      goto fail_bo_alloc;

   bbo->length = other_bbo->length;
   memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);

   *bbo_out = bbo;

   return VK_SUCCESS;

 fail_bo_alloc:
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
 fail_alloc:
   vk_free(&cmd_buffer->pool->alloc, bbo);

   return result;
}

static void
anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
                   size_t batch_padding)
{
   batch->next = batch->start = bbo->bo.map;
   batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
   batch->relocs = &bbo->relocs;
   bbo->relocs.num_relocs = 0;
   _mesa_set_clear(bbo->relocs.deps, NULL);
}

static void
anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
                      size_t batch_padding)
{
   batch->start = bbo->bo.map;
   batch->next = bbo->bo.map + bbo->length;
   batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
   batch->relocs = &bbo->relocs;
}

static void
anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
{
   assert(batch->start == bbo->bo.map);
   bbo->length = batch->next - batch->start;
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
}

static VkResult
anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
                  struct anv_batch *batch, size_t additional,
                  size_t batch_padding)
{
   assert(batch->start == bbo->bo.map);
   bbo->length = batch->next - batch->start;

   size_t new_size = bbo->bo.size;
   while (new_size <= bbo->length + additional + batch_padding)
      new_size *= 2;

   if (new_size == bbo->bo.size)
      return VK_SUCCESS;

   struct anv_bo new_bo;
   VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
                                       &new_bo, new_size);
   if (result != VK_SUCCESS)
      return result;

   memcpy(new_bo.map, bbo->bo.map, bbo->length);

   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);

   bbo->bo = new_bo;
   anv_batch_bo_continue(bbo, batch, batch_padding);

   return VK_SUCCESS;
}

static void
anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
                  struct anv_batch_bo *prev_bbo,
                  struct anv_batch_bo *next_bbo,
                  uint32_t next_bbo_offset)
{
   MAYBE_UNUSED const uint32_t bb_start_offset =
      prev_bbo->length - GEN8_MI_BATCH_BUFFER_START_length * 4;
   MAYBE_UNUSED const uint32_t *bb_start = prev_bbo->bo.map + bb_start_offset;

   /* Make sure we're looking at a MI_BATCH_BUFFER_START */
   assert(((*bb_start >> 29) & 0x07) == 0);
   assert(((*bb_start >> 23) & 0x3f) == 49);

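   /* A note on the two asserts above: bits 31:29 of a command dword select
    * the command type (0 is MI) and bits 28:23 hold the MI opcode, where
    * opcode 49 (0x31) is MI_BATCH_BUFFER_START, so these checks verify that
    * prev_bbo really does end in such an instruction.
    */
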
   if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
      assert(prev_bbo->bo.flags & EXEC_OBJECT_PINNED);
      assert(next_bbo->bo.flags & EXEC_OBJECT_PINNED);

      write_reloc(cmd_buffer->device,
                  prev_bbo->bo.map + bb_start_offset + 4,
                  next_bbo->bo.offset + next_bbo_offset, true);
   } else {
      uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
      assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);

      prev_bbo->relocs.reloc_bos[reloc_idx] = &next_bbo->bo;
      prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;

      /* Use a bogus presumed offset to force a relocation */
      prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
   }
}

static void
anv_batch_bo_destroy(struct anv_batch_bo *bbo,
                     struct anv_cmd_buffer *cmd_buffer)
{
   anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
   vk_free(&cmd_buffer->pool->alloc, bbo);
}

static VkResult
anv_batch_bo_list_clone(const struct list_head *list,
                        struct anv_cmd_buffer *cmd_buffer,
                        struct list_head *new_list)
{
   VkResult result = VK_SUCCESS;

   list_inithead(new_list);

   struct anv_batch_bo *prev_bbo = NULL;
   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
      struct anv_batch_bo *new_bbo = NULL;
      result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
      if (result != VK_SUCCESS)
         break;
      list_addtail(&new_bbo->link, new_list);

      if (prev_bbo)
         anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);

      prev_bbo = new_bbo;
   }

   if (result != VK_SUCCESS) {
      list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
         anv_batch_bo_destroy(bbo, cmd_buffer);
   }

   return result;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_cmd_buffer
 *-----------------------------------------------------------------------*/

static struct anv_batch_bo *
anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
{
   return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
}

struct anv_address
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
   return (struct anv_address) {
      .bo = anv_binding_table_pool(cmd_buffer->device)->block_pool.bo,
      .offset = bt_block->offset,
   };
}

static void
emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_bo *bo, uint32_t offset)
{
   /* In gen8+ the address field grew to two dwords to accommodate 48 bit
    * offsets.  The high 16 bits are in the last dword, so we can use the gen8
    * version in either case, as long as we set the instruction length in the
    * header accordingly.  This means that we always emit three dwords here
    * and all the padding and adjustment we do in this file works for all
    * gen versions.
    */

#define GEN7_MI_BATCH_BUFFER_START_length      2
#define GEN7_MI_BATCH_BUFFER_START_length_bias      2

   const uint32_t gen7_length =
      GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
   const uint32_t gen8_length =
      GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;

   anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
      bbs.DWordLength               = cmd_buffer->device->info.gen < 8 ?
                                      gen7_length : gen8_length;
      bbs.SecondLevelBatchBuffer    = Firstlevelbatch;
      bbs.AddressSpaceIndicator     = ASI_PPGTT;
      bbs.BatchBufferStartAddress   = (struct anv_address) { bo, offset };
   }
}

static void
cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_batch_bo *bbo)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_batch_bo *current_bbo =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);

   /* We set the end of the batch a little short so we would be sure we
    * have room for the chaining command.  Since we're about to emit the
    * chaining command, let's set it back where it should go.
    */
   batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
   assert(batch->end == current_bbo->bo.map + current_bbo->bo.size);

   emit_batch_buffer_start(cmd_buffer, &bbo->bo, 0);

   anv_batch_bo_finish(current_bbo, batch);
}

static VkResult
anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
{
   struct anv_cmd_buffer *cmd_buffer = _data;
   struct anv_batch_bo *new_bbo;

   VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
   if (result != VK_SUCCESS)
      return result;

   struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
   if (seen_bbo == NULL) {
      anv_batch_bo_destroy(new_bbo, cmd_buffer);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   *seen_bbo = new_bbo;

   cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);

   list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);

   anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);

   return VK_SUCCESS;
}

static VkResult
anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
{
   struct anv_cmd_buffer *cmd_buffer = _data;
   struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);

   anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
                     GEN8_MI_BATCH_BUFFER_START_length * 4);

   return VK_SUCCESS;
}

/** Allocate a binding table
 *
 * This function allocates a binding table.  This is a bit more complicated
 * than one would think due to a combination of Vulkan driver design and some
 * unfortunate hardware restrictions.
 *
 * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
 * the binding table pointer which means that all binding tables need to live
 * in the bottom 64k of surface state base address.  The way the GL driver has
 * classically dealt with this restriction is to emit all surface states
 * on-the-fly into the batch and have a batch buffer smaller than 64k.  This
 * isn't really an option in Vulkan for a couple of reasons:
 *
 *  1) In Vulkan, we have growing (or chaining) batches so surface states have
 *     to live in their own buffer and we have to be able to re-emit
 *     STATE_BASE_ADDRESS as needed which requires a full pipeline stall.  In
 *     order to avoid emitting STATE_BASE_ADDRESS any more often than needed
 *     (it's not that hard to hit 64k of just binding tables), we allocate
 *     surface state objects up-front when VkImageView is created.  In order
 *     for this to work, surface state objects need to be allocated from a
 *     global buffer.
 *
 *  2) We tried to design the surface state system in such a way that it's
 *     already ready for bindless texturing.  The way bindless texturing works
 *     on our hardware is that you have a big pool of surface state objects
 *     (with its own state base address) and the bindless handles are simply
 *     offsets into that pool.  With the architecture we chose, we already
 *     have that pool and it's exactly the same pool that we use for regular
 *     surface states so we should already be ready for bindless.
 *
 *  3) For render targets, we need to be able to fill out the surface states
 *     later in vkBeginRenderPass so that we can assign clear colors
 *     correctly.  One way to do this would be to just create the surface
 *     state data and then repeatedly copy it into the surface state BO every
 *     time we have to re-emit STATE_BASE_ADDRESS.  While this works, it's
 *     rather annoying; it's much simpler to allocate the surface states
 *     up-front and re-use them for the entire render pass.
 *
 * While none of these are technically blockers for emitting state on the fly
 * like we do in GL, the ability to have a single surface state pool
 * simplifies things greatly.  Unfortunately, it comes at a cost...
 *
 * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
 * place the binding tables just anywhere in surface state base address.
 * Because 64k isn't a whole lot of space, we can't simply restrict the
 * surface state buffer to 64k, we have to be more clever.  The solution we've
 * chosen is to have a block pool with a maximum size of 2G that starts at
 * zero and grows in both directions.  All surface states are allocated from
 * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
 * binding tables from the bottom of the pool (negative offsets).  Every time
 * we allocate a new binding table block, we set surface state base address to
 * point to the bottom of the binding table block.  This way all of the
 * binding tables in the block are in the bottom 64k of surface state base
 * address.  When we fill out the binding table, we add the distance between
 * the bottom of our binding table block and zero of the block pool to the
 * surface state offsets so that they are correct relative to our new surface
 * state base address at the bottom of the binding table block.
 *
 * \see adjust_relocations_from_state_pool()
 * \see adjust_relocations_to_state_pool()
 *
 * \param[in]  entries        The number of surface state entries the binding
 *                            table should be able to hold.
 *
 * \param[out] state_offset   The offset from surface state base address
 *                            where the surface states live.  This must be
 *                            added to the surface state offset when it is
 *                            written into the binding table entry.
 *
 * \return An anv_state representing the binding table
 */
struct anv_state
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t entries, uint32_t *state_offset)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_state_pool *state_pool = &device->surface_state_pool;
   struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
   struct anv_state state;

   state.alloc_size = align_u32(entries * 4, 32);

   if (cmd_buffer->bt_next + state.alloc_size > state_pool->block_size)
      return (struct anv_state) { 0 };

   state.offset = cmd_buffer->bt_next;
   state.map = anv_block_pool_map(&anv_binding_table_pool(device)->block_pool,
                                  bt_block->offset + state.offset);

   cmd_buffer->bt_next += state.alloc_size;

   if (device->instance->physicalDevice.use_softpin) {
      assert(bt_block->offset >= 0);
      *state_offset = device->surface_state_pool.block_pool.start_address -
         device->binding_table_pool.block_pool.start_address - bt_block->offset;
   } else {
      assert(bt_block->offset < 0);
      *state_offset = -bt_block->offset;
   }

   return state;
}

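/* A worked example of the offset math above, with illustrative numbers (not
 * taken from any real hardware state): in the relocation-based (non-softpin)
 * path, suppose the current binding table block was allocated at
 * bt_block->offset == -4096 in the block pool.  Surface state base address
 * then points 4096 bytes below the pool's zero point, so *state_offset ==
 * 4096 and a surface state living at pool offset 8192 gets written into a
 * binding table entry as 8192 + 4096 == 12288 bytes from surface state base
 * address.
 */
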
struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
{
   struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
   return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
                                 isl_dev->ss.size, isl_dev->ss.align);
}

struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment)
{
   return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
                                 size, alignment);
}

VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
   if (bt_block == NULL) {
      anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
   cmd_buffer->bt_next = 0;

   return VK_SUCCESS;
}

VkResult
anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo;
   VkResult result;

   list_inithead(&cmd_buffer->batch_bos);

   result = anv_batch_bo_create(cmd_buffer, &batch_bo);
   if (result != VK_SUCCESS)
      return result;

   list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);

   cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
   cmd_buffer->batch.user_data = cmd_buffer;

   if (cmd_buffer->device->can_chain_batches) {
      cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
   } else {
      cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
   }

   anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);

   int success = u_vector_init(&cmd_buffer->seen_bbos,
                               sizeof(struct anv_bo *),
                               8 * sizeof(struct anv_bo *));
   if (!success)
      goto fail_batch_bo;

   *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;

   /* u_vector requires power-of-two size elements */
   unsigned pow2_state_size = util_next_power_of_two(sizeof(struct anv_state));
   success = u_vector_init(&cmd_buffer->bt_block_states,
                           pow2_state_size, 8 * pow2_state_size);
   if (!success)
      goto fail_seen_bbos;

   result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
                                &cmd_buffer->pool->alloc);
   if (result != VK_SUCCESS)
      goto fail_bt_blocks;
   cmd_buffer->last_ss_pool_center = 0;

   result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail_bt_blocks;

   return VK_SUCCESS;

 fail_bt_blocks:
   u_vector_finish(&cmd_buffer->bt_block_states);
 fail_seen_bbos:
   u_vector_finish(&cmd_buffer->seen_bbos);
 fail_batch_bo:
   anv_batch_bo_destroy(batch_bo, cmd_buffer);

   return result;
}

void
anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_state *bt_block;
   u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
      anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
   u_vector_finish(&cmd_buffer->bt_block_states);

   anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);

   u_vector_finish(&cmd_buffer->seen_bbos);

   /* Destroy all of the batch buffers */
   list_for_each_entry_safe(struct anv_batch_bo, bbo,
                            &cmd_buffer->batch_bos, link) {
      anv_batch_bo_destroy(bbo, cmd_buffer);
   }
}

void
anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   /* Delete all but the first batch bo */
   assert(!list_empty(&cmd_buffer->batch_bos));
   while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
      list_del(&bbo->link);
      anv_batch_bo_destroy(bbo, cmd_buffer);
   }
   assert(!list_empty(&cmd_buffer->batch_bos));

   anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
                      &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);

   while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
      struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
      anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
   }
   assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
   cmd_buffer->bt_next = 0;

   cmd_buffer->surface_relocs.num_relocs = 0;
   _mesa_set_clear(cmd_buffer->surface_relocs.deps, NULL);
   cmd_buffer->last_ss_pool_center = 0;

   /* Reset the list of seen buffers */
   cmd_buffer->seen_bbos.head = 0;
   cmd_buffer->seen_bbos.tail = 0;

   *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);
}

void
anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      /* When we start a batch buffer, we subtract a certain amount of
       * padding from the end to ensure that we always have room to emit a
       * BATCH_BUFFER_START to chain to the next BO.  We need to remove
       * that padding before we end the batch; otherwise, we may end up
       * with our BATCH_BUFFER_END in another BO.
       */
      cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
      assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);

      anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END, bbe);

      /* Round batch up to an even number of dwords. */
      if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
         anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);

      cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
   } else {
      assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
      /* If this is a secondary command buffer, we need to determine the
       * mode in which it will be executed with vkExecuteCommands.  We
       * determine this statically here so that this stays in sync with the
       * actual ExecuteCommands implementation.
       */
      const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
      if (!cmd_buffer->device->can_chain_batches) {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
      } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
                 (length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
         /* If the secondary has exactly one batch buffer in its list *and*
          * that batch buffer is less than half of the maximum size, we're
          * probably better off simply copying it into our batch.
          */
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
      } else if (!(cmd_buffer->usage_flags &
                   VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;

         /* In order to chain, we need this command buffer to contain an
          * MI_BATCH_BUFFER_START which will jump back to the calling batch.
          * It doesn't matter where it points now so long as it has a valid
          * relocation.  We'll adjust it later as part of the chaining
          * process.
          *
          * We set the end of the batch a little short so we would be sure we
          * have room for the chaining command.  Since we're about to emit the
          * chaining command, let's set it back where it should go.
          */
         cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
         assert(cmd_buffer->batch.start == batch_bo->bo.map);
         assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);

         emit_batch_buffer_start(cmd_buffer, &batch_bo->bo, 0);
         assert(cmd_buffer->batch.start == batch_bo->bo.map);
      } else {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
      }
   }

   anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
}

static VkResult
anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
                             struct list_head *list)
{
   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
      struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
      if (bbo_ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      *bbo_ptr = bbo;
   }

   return VK_SUCCESS;
}

void
anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                             struct anv_cmd_buffer *secondary)
{
   switch (secondary->exec_mode) {
   case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
      anv_batch_emit_batch(&primary->batch, &secondary->batch);
      break;
   case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
      unsigned length = secondary->batch.end - secondary->batch.start;
      anv_batch_bo_grow(primary, bbo, &primary->batch, length,
                        GEN8_MI_BATCH_BUFFER_START_length * 4);
      anv_batch_emit_batch(&primary->batch, &secondary->batch);
      break;
   }
   case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
      struct anv_batch_bo *first_bbo =
         list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
      struct anv_batch_bo *last_bbo =
         list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);

      emit_batch_buffer_start(primary, &first_bbo->bo, 0);

      struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
      assert(primary->batch.start == this_bbo->bo.map);
      uint32_t offset = primary->batch.next - primary->batch.start;

      /* Make the tail of the secondary point back to right after the
       * MI_BATCH_BUFFER_START in the primary batch.
       */
      anv_batch_bo_link(primary, last_bbo, this_bbo, offset);

      anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
      break;
   }
   case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
      struct list_head copy_list;
      VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
                                                primary,
                                                &copy_list);
      if (result != VK_SUCCESS)
         return;

      anv_cmd_buffer_add_seen_bbos(primary, &copy_list);

      struct anv_batch_bo *first_bbo =
         list_first_entry(&copy_list, struct anv_batch_bo, link);
      struct anv_batch_bo *last_bbo =
         list_last_entry(&copy_list, struct anv_batch_bo, link);

      cmd_buffer_chain_to_batch_bo(primary, first_bbo);

      list_splicetail(&copy_list, &primary->batch_bos);

      anv_batch_bo_continue(last_bbo, &primary->batch,
                            GEN8_MI_BATCH_BUFFER_START_length * 4);
      break;
   }
   default:
      assert(!"Invalid execution mode");
   }

   anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
                         &secondary->surface_relocs, 0);
}

struct anv_execbuf {
   struct drm_i915_gem_execbuffer2           execbuf;

   struct drm_i915_gem_exec_object2 *        objects;
   uint32_t                                  bo_count;
   struct anv_bo **                          bos;

   /* Allocated length of the 'objects' and 'bos' arrays */
   uint32_t                                  array_length;

   bool                                      has_relocs;

   uint32_t                                  fence_count;
   uint32_t                                  fence_array_length;
   struct drm_i915_gem_exec_fence *          fences;
   struct anv_syncobj **                     syncobjs;
};

static void
anv_execbuf_init(struct anv_execbuf *exec)
{
   memset(exec, 0, sizeof(*exec));
}

static void
anv_execbuf_finish(struct anv_execbuf *exec,
                   const VkAllocationCallbacks *alloc)
{
   vk_free(alloc, exec->objects);
   vk_free(alloc, exec->bos);
   vk_free(alloc, exec->fences);
   vk_free(alloc, exec->syncobjs);
}

static int
_compare_bo_handles(const void *_bo1, const void *_bo2)
{
   struct anv_bo * const *bo1 = _bo1;
   struct anv_bo * const *bo2 = _bo2;

   return (*bo1)->gem_handle - (*bo2)->gem_handle;
}

static VkResult
anv_execbuf_add_bo_set(struct anv_execbuf *exec,
                       struct set *deps,
                       uint32_t extra_flags,
                       const VkAllocationCallbacks *alloc);

static VkResult
anv_execbuf_add_bo(struct anv_execbuf *exec,
                   struct anv_bo *bo,
                   struct anv_reloc_list *relocs,
                   uint32_t extra_flags,
                   const VkAllocationCallbacks *alloc)
{
   struct drm_i915_gem_exec_object2 *obj = NULL;

   if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
      obj = &exec->objects[bo->index];

   if (obj == NULL) {
      /* We've never seen this one before.  Add it to the list and assign
       * an id that we can use later.
       */
      if (exec->bo_count >= exec->array_length) {
         uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;

         struct drm_i915_gem_exec_object2 *new_objects =
            vk_alloc(alloc, new_len * sizeof(*new_objects),
                     8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
         if (new_objects == NULL)
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

         struct anv_bo **new_bos =
            vk_alloc(alloc, new_len * sizeof(*new_bos),
                     8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
         if (new_bos == NULL) {
            vk_free(alloc, new_objects);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }

         if (exec->objects) {
            memcpy(new_objects, exec->objects,
                   exec->bo_count * sizeof(*new_objects));
            memcpy(new_bos, exec->bos,
                   exec->bo_count * sizeof(*new_bos));
         }

         vk_free(alloc, exec->objects);
         vk_free(alloc, exec->bos);

         exec->objects = new_objects;
         exec->bos = new_bos;
         exec->array_length = new_len;
      }

      assert(exec->bo_count < exec->array_length);

      bo->index = exec->bo_count++;
      obj = &exec->objects[bo->index];
      exec->bos[bo->index] = bo;

      obj->handle = bo->gem_handle;
      obj->relocation_count = 0;
      obj->relocs_ptr = 0;
      obj->offset = bo->offset;
      obj->flags = (bo->flags & ~ANV_BO_FLAG_MASK) | extra_flags;
   }

   if (relocs != NULL) {
      assert(obj->relocation_count == 0);

      if (relocs->num_relocs > 0) {
         /* This is the first time we've ever seen a list of relocations for
          * this BO.  Go ahead and set the relocations and then walk the list
          * of relocations and add them all.
          */
         exec->has_relocs = true;
         obj->relocation_count = relocs->num_relocs;
         obj->relocs_ptr = (uintptr_t) relocs->relocs;

         for (size_t i = 0; i < relocs->num_relocs; i++) {
            VkResult result;

            /* A quick sanity check on relocations */
            assert(relocs->relocs[i].offset < bo->size);
            result = anv_execbuf_add_bo(exec, relocs->reloc_bos[i], NULL,
                                        extra_flags, alloc);
            if (result != VK_SUCCESS)
               return result;
         }
      }

      return anv_execbuf_add_bo_set(exec, relocs->deps, extra_flags, alloc);
   }

   return VK_SUCCESS;
}

/* Add BO dependencies to execbuf */
static VkResult
anv_execbuf_add_bo_set(struct anv_execbuf *exec,
                       struct set *deps,
                       uint32_t extra_flags,
                       const VkAllocationCallbacks *alloc)
{
   if (!deps || deps->entries <= 0)
      return VK_SUCCESS;

   const uint32_t entries = deps->entries;
   struct anv_bo **bos =
      vk_alloc(alloc, entries * sizeof(*bos),
               8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
   if (bos == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   struct anv_bo **bo = bos;
   set_foreach(deps, entry) {
      *bo++ = (void *)entry->key;
   }

   qsort(bos, entries, sizeof(struct anv_bo*), _compare_bo_handles);

   VkResult result = VK_SUCCESS;
   for (bo = bos; bo < bos + entries; bo++) {
      result = anv_execbuf_add_bo(exec, *bo, NULL, extra_flags, alloc);
      if (result != VK_SUCCESS)
         break;
   }

   vk_free(alloc, bos);

   return result;
}

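/* Note that the dependency set is an unordered hash set; sorting the BOs by
 * GEM handle before adding them is presumably there to keep the execbuf
 * object list in a deterministic order from run to run, which makes
 * submissions easier to compare when debugging.
 */
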
static VkResult
anv_execbuf_add_syncobj(struct anv_execbuf *exec,
                        uint32_t handle, uint32_t flags,
                        const VkAllocationCallbacks *alloc)
{
   assert(flags != 0);

   if (exec->fence_count >= exec->fence_array_length) {
      uint32_t new_len = MAX2(exec->fence_array_length * 2, 64);

      exec->fences = vk_realloc(alloc, exec->fences,
                                new_len * sizeof(*exec->fences),
                                8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
      if (exec->fences == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      exec->fence_array_length = new_len;
   }

   exec->fences[exec->fence_count] = (struct drm_i915_gem_exec_fence) {
      .handle = handle,
      .flags = flags,
   };

   exec->fence_count++;

   return VK_SUCCESS;
}

static void
anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_reloc_list *list)
{
   for (size_t i = 0; i < list->num_relocs; i++)
      list->relocs[i].target_handle = list->reloc_bos[i]->index;
}

static void
adjust_relocations_from_state_pool(struct anv_state_pool *pool,
                                   struct anv_reloc_list *relocs,
                                   uint32_t last_pool_center_bo_offset)
{
   assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
   uint32_t delta = pool->block_pool.center_bo_offset -
                    last_pool_center_bo_offset;

   for (size_t i = 0; i < relocs->num_relocs; i++) {
      /* All of the relocations from this block pool to other BO's should
       * have been emitted relative to the surface block pool center.  We
       * need to add the center offset to make them relative to the
       * beginning of the actual GEM bo.
       */
      relocs->relocs[i].offset += delta;
   }
}

static void
adjust_relocations_to_state_pool(struct anv_state_pool *pool,
                                 struct anv_bo *from_bo,
                                 struct anv_reloc_list *relocs,
                                 uint32_t last_pool_center_bo_offset)
{
   assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
   uint32_t delta = pool->block_pool.center_bo_offset -
                    last_pool_center_bo_offset;

   /* When we initially emit relocations into a block pool, we don't
    * actually know what the final center_bo_offset will be so we just emit
    * it as if center_bo_offset == 0.  Now that we know what the center
    * offset is, we need to walk the list of relocations and adjust any
    * relocations that point to the pool bo with the correct offset.
    */
   for (size_t i = 0; i < relocs->num_relocs; i++) {
      if (relocs->reloc_bos[i] == pool->block_pool.bo) {
         /* Adjust the delta value in the relocation to correctly
          * correspond to the new delta.  Initially, this value may have
          * been negative (if treated as unsigned), but we trust in
          * uint32_t roll-over to fix that for us at this point.
          */
         relocs->relocs[i].delta += delta;

         /* Since the delta has changed, we need to update the actual
          * relocated value with the new presumed value.  This function
          * should only be called on batch buffers, so we know it isn't in
          * use by the GPU at the moment.
          */
         assert(relocs->relocs[i].offset < from_bo->size);
         write_reloc(pool->block_pool.device,
                     from_bo->map + relocs->relocs[i].offset,
                     relocs->relocs[i].presumed_offset +
                     relocs->relocs[i].delta, false);
      }
   }
}

static void
anv_reloc_list_apply(struct anv_device *device,
                     struct anv_reloc_list *list,
                     struct anv_bo *bo,
                     bool always_relocate)
{
   for (size_t i = 0; i < list->num_relocs; i++) {
      struct anv_bo *target_bo = list->reloc_bos[i];
      if (list->relocs[i].presumed_offset == target_bo->offset &&
          !always_relocate)
         continue;

      void *p = bo->map + list->relocs[i].offset;
      write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
      list->relocs[i].presumed_offset = target_bo->offset;
   }
}

/**
 * This function applies the relocations for a command buffer and writes the
 * actual addresses into the buffers as per what we were told by the kernel on
 * the previous execbuf2 call.  This should be safe to do because, for each
 * relocated address, we have two cases:
 *
 *  1) The target BO is inactive (as seen by the kernel).  In this case, it is
 *     not in use by the GPU so updating the address is 100% ok.  It won't be
 *     in-use by the GPU (from our context) again until the next execbuf2
 *     happens.  If the kernel decides to move it in the next execbuf2, it
 *     will have to do the relocations itself, but that's ok because it should
 *     have all of the information needed to do so.
 *
 *  2) The target BO is active (as seen by the kernel).  In this case, it
 *     hasn't moved since the last execbuffer2 call because GTT shuffling
 *     *only* happens when the BO is idle.  (From our perspective, it only
 *     happens inside the execbuffer2 ioctl, but the shuffling may be
 *     triggered by another ioctl, with full-ppgtt this is limited to only
 *     execbuffer2 ioctls on the same context, or memory pressure.)  Since the
 *     target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
 *     address and the relocated value we are writing into the BO will be the
 *     same as the value that is already there.
 *
 *     There is also a possibility that the target BO is active but the exact
 *     RENDER_SURFACE_STATE object we are writing the relocation into isn't in
 *     use.  In this case, the address currently in the RENDER_SURFACE_STATE
 *     may be stale but it's still safe to write the relocation because that
 *     particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
 *     won't be until the next execbuf2 call.
 *
 * By doing relocations on the CPU, we can tell the kernel that it doesn't
 * need to bother.  We want to do this because the surface state buffer is
 * used by every command buffer so, if the kernel does the relocations, it
 * will always be busy and the kernel will always stall.  This is also
 * probably the fastest mechanism for doing relocations since the kernel would
 * have to make a full copy of all the relocation lists.
 */
static bool
relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
                    struct anv_execbuf *exec)
{
   if (!exec->has_relocs)
      return true;

   static int userspace_relocs = -1;
   if (userspace_relocs < 0)
      userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
   if (!userspace_relocs)
      return false;

   /* First, we have to check to see whether or not we can even do the
    * relocation.  New buffers which have never been submitted to the kernel
    * don't have a valid offset so we need to let the kernel do relocations so
    * that we can get offsets for them.  On future execbuf2 calls, those
    * buffers will have offsets and we will be able to skip relocating.
    * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
    */
   for (uint32_t i = 0; i < exec->bo_count; i++) {
      if (exec->bos[i]->offset == (uint64_t)-1)
         return false;
   }

   /* Since surface states are shared between command buffers and we don't
    * know what order they will be submitted to the kernel, we don't know
    * what address is actually written in the surface state object at any
    * given time.  The only option is to always relocate them.
    */
   anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
                        cmd_buffer->device->surface_state_pool.block_pool.bo,
                        true /* always relocate surface states */);

   /* Since we own all of the batch buffers, we know what values are stored
    * in the relocated addresses and only have to update them if the offsets
    * have changed.
    */
   struct anv_batch_bo **bbo;
   u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
      anv_reloc_list_apply(cmd_buffer->device,
                           &(*bbo)->relocs, &(*bbo)->bo, false);
   }

   for (uint32_t i = 0; i < exec->bo_count; i++)
      exec->objects[i].offset = exec->bos[i]->offset;

   return true;
}

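/* Illustrative usage note: the ANV_USERSPACE_RELOCS environment variable
 * read above defaults to true; setting it to false makes
 * relocate_cmd_buffer() bail out early so that the kernel performs all
 * relocations instead, which can be handy when debugging relocation issues.
 */
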
static VkResult
setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
                             struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_state_pool *ss_pool =
      &cmd_buffer->device->surface_state_pool;

   adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
                                      cmd_buffer->last_ss_pool_center);
   VkResult result;
   struct anv_bo *bo;
   if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
      anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
         result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
                                     &cmd_buffer->device->alloc);
         if (result != VK_SUCCESS)
            return result;
      }
      /* Add surface dependencies (BOs) to the execbuf */
      anv_execbuf_add_bo_set(execbuf, cmd_buffer->surface_relocs.deps, 0,
                             &cmd_buffer->device->alloc);

      /* Add the BOs for all the pinned buffers */
      if (cmd_buffer->device->pinned_buffers->entries) {
         struct set *pinned_bos = _mesa_pointer_set_create(NULL);
         if (pinned_bos == NULL)
            return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
         set_foreach(cmd_buffer->device->pinned_buffers, entry) {
            const struct anv_buffer *buffer = entry->key;
            _mesa_set_add(pinned_bos, buffer->address.bo);
         }
         anv_execbuf_add_bo_set(execbuf, pinned_bos, 0,
                                &cmd_buffer->device->alloc);
         _mesa_set_destroy(pinned_bos, NULL);
      }

      struct anv_block_pool *pool;
      pool = &cmd_buffer->device->dynamic_state_pool.block_pool;
      anv_block_pool_foreach_bo(bo, pool) {
         result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
                                     &cmd_buffer->device->alloc);
         if (result != VK_SUCCESS)
            return result;
      }

      pool = &cmd_buffer->device->instruction_state_pool.block_pool;
      anv_block_pool_foreach_bo(bo, pool) {
         result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
                                     &cmd_buffer->device->alloc);
         if (result != VK_SUCCESS)
            return result;
      }

      pool = &cmd_buffer->device->binding_table_pool.block_pool;
      anv_block_pool_foreach_bo(bo, pool) {
         result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
                                     &cmd_buffer->device->alloc);
         if (result != VK_SUCCESS)
            return result;
      }
   } else {
      /* Since we aren't in the softpin case, all of our STATE_BASE_ADDRESS BOs
       * will get added automatically by processing relocations on the batch
       * buffer.  We have to add the surface state BO manually because it has
       * relocations of its own that we need to be sure are processed.
       */
      result = anv_execbuf_add_bo(execbuf, ss_pool->block_pool.bo,
                                  &cmd_buffer->surface_relocs, 0,
                                  &cmd_buffer->device->alloc);
      if (result != VK_SUCCESS)
         return result;
   }

   /* First, we walk over all of the bos we've seen and add them and their
    * relocations to the validate list.
    */
   struct anv_batch_bo **bbo;
   u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
      adjust_relocations_to_state_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
                                       cmd_buffer->last_ss_pool_center);

      result = anv_execbuf_add_bo(execbuf, &(*bbo)->bo, &(*bbo)->relocs, 0,
                                  &cmd_buffer->device->alloc);
      if (result != VK_SUCCESS)
         return result;
   }

   /* Now that we've adjusted all of the surface state relocations, we need to
    * record the surface state pool center so future executions of the command
    * buffer can adjust correctly.
    */
   cmd_buffer->last_ss_pool_center = ss_pool->block_pool.center_bo_offset;

   struct anv_batch_bo *first_batch_bo =
      list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);

   /* The kernel requires that the last entry in the validation list be the
    * batch buffer to execute.  We can simply swap the element
    * corresponding to the first batch_bo in the chain with the last
    * element in the list.
    */
   if (first_batch_bo->bo.index != execbuf->bo_count - 1) {
      uint32_t idx = first_batch_bo->bo.index;
      uint32_t last_idx = execbuf->bo_count - 1;

      struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
      assert(execbuf->bos[idx] == &first_batch_bo->bo);

      execbuf->objects[idx] = execbuf->objects[last_idx];
      execbuf->bos[idx] = execbuf->bos[last_idx];
      execbuf->bos[idx]->index = idx;

      execbuf->objects[last_idx] = tmp_obj;
      execbuf->bos[last_idx] = &first_batch_bo->bo;
      first_batch_bo->bo.index = last_idx;
   }

   /* If we are pinning our BOs, we shouldn't have to relocate anything */
   if (cmd_buffer->device->instance->physicalDevice.use_softpin)
      assert(!execbuf->has_relocs);

   /* Now we go through and fixup all of the relocation lists to point to
    * the correct indices in the object array.  We have to do this after we
    * reorder the list above as some of the indices may have changed.
    */
   if (execbuf->has_relocs) {
      u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
         anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);

      anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
   }

   if (!cmd_buffer->device->info.has_llc) {
      __builtin_ia32_mfence();
      u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
         for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
            __builtin_ia32_clflush((*bbo)->bo.map + i);
      }
   }

   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) execbuf->objects,
      .buffer_count = execbuf->bo_count,
      .batch_start_offset = 0,
      .batch_len = batch->next - batch->start,
      .cliprects_ptr = 0,
      .num_cliprects = 0,
      .DR1 = 0,
      .DR4 = 0,
      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
      .rsvd1 = cmd_buffer->device->context_id,
      .rsvd2 = 0,
   };

   if (relocate_cmd_buffer(cmd_buffer, execbuf)) {
      /* If we were able to successfully relocate everything, tell the kernel
       * that it can skip doing relocations. The requirement for using
       * NO_RELOC is:
       *
       *  1) The addresses written in the objects must match the corresponding
       *     reloc.presumed_offset which in turn must match the corresponding
       *     execobject.offset.
       *
       *  2) To avoid stalling, execobject.offset should match the current
       *     address of that object within the active context.
       *
       * In order to satisfy all of the invariants that make userspace
       * relocations safe (see relocate_cmd_buffer()), we need to further
       * ensure that the addresses we use match those used by the kernel for
       * the most recent execbuf2.
       *
       * The kernel may still choose to do relocations anyway if something has
       * moved in the GTT. In this case, the relocation list still needs to be
       * valid.  All relocations on the batch buffers are already valid and
       * kept up-to-date.  For surface state relocations, by applying the
       * relocations in relocate_cmd_buffer, we ensured that the address in
       * the RENDER_SURFACE_STATE matches presumed_offset, so it should be
       * safe for the kernel to relocate them as needed.
       */
      execbuf->execbuf.flags |= I915_EXEC_NO_RELOC;
   } else {
      /* In the case where we fall back to doing kernel relocations, we need
       * to ensure that the relocation list is valid.  All relocations on the
       * batch buffers are already valid and kept up-to-date.  Since surface
       * states are shared between command buffers and we don't know what
       * order they will be submitted to the kernel, we don't know what
       * address is actually written in the surface state object at any given
       * time.  The only option is to set a bogus presumed offset and let the
       * kernel relocate them.
       */
      for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
         cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
   }

   return VK_SUCCESS;
}

static VkResult
setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_device *device)
{
   VkResult result = anv_execbuf_add_bo(execbuf, &device->trivial_batch_bo,
                                        NULL, 0, &device->alloc);
   if (result != VK_SUCCESS)
      return result;

   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) execbuf->objects,
      .buffer_count = execbuf->bo_count,
      .batch_start_offset = 0,
      .batch_len = 8, /* GEN7_MI_BATCH_BUFFER_END and NOOP */
      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
      .rsvd1 = device->context_id,
      .rsvd2 = 0,
   };

   return VK_SUCCESS;
}

/* Finding a buffer for batch decoding */
static struct gen_batch_decode_bo
decode_get_bo(void *v_batch, uint64_t address)
{
   struct anv_cmd_buffer *cmd_buffer = v_batch;
   struct anv_batch_bo **bo;

   u_vector_foreach(bo, &cmd_buffer->seen_bbos) {
      /* The decoder zeroes out the top 16 bits, so we need to as well */
      uint64_t bo_address = (*bo)->bo.offset & (~0ull >> 16);

      if (address >= bo_address && address < bo_address + (*bo)->bo.size) {
         return (struct gen_batch_decode_bo) {
            .addr = bo_address,
            .size = (*bo)->bo.size,
            .map = (*bo)->bo.map,
         };
      }
   }

   return (struct gen_batch_decode_bo) { };
}

static void
decode_batch(struct anv_cmd_buffer *cmd_buffer)
{
   struct gen_batch_decode_ctx ctx;
   struct anv_batch_bo **bo = u_vector_head(&cmd_buffer->seen_bbos);
   const unsigned decode_flags =
      GEN_BATCH_DECODE_FULL |
      ((INTEL_DEBUG & DEBUG_COLOR) ? GEN_BATCH_DECODE_IN_COLOR : 0) |
      GEN_BATCH_DECODE_OFFSETS |
      GEN_BATCH_DECODE_FLOATS;

   gen_batch_decode_ctx_init(&ctx,
                             &cmd_buffer->device->instance->physicalDevice.info,
                             stderr, decode_flags, NULL,
                             decode_get_bo, NULL, cmd_buffer);

   gen_print_batch(&ctx, (*bo)->bo.map, (*bo)->bo.size, (*bo)->bo.offset);

   gen_batch_decode_ctx_finish(&ctx);
}

VkResult
anv_cmd_buffer_execbuf(struct anv_device *device,
                       struct anv_cmd_buffer *cmd_buffer,
                       const VkSemaphore *in_semaphores,
                       uint32_t num_in_semaphores,
                       const VkSemaphore *out_semaphores,
                       uint32_t num_out_semaphores,
                       VkFence _fence)
{
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   struct anv_execbuf execbuf;
   anv_execbuf_init(&execbuf);

   int in_fence = -1;
   VkResult result = VK_SUCCESS;
   for (uint32_t i = 0; i < num_in_semaphores; i++) {
      ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
      struct anv_semaphore_impl *impl =
         semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
         &semaphore->temporary : &semaphore->permanent;

      switch (impl->type) {
      case ANV_SEMAPHORE_TYPE_BO:
         result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
                                     0, &device->alloc);
         if (result != VK_SUCCESS)
            return result;
         break;

      case ANV_SEMAPHORE_TYPE_SYNC_FILE:
         if (in_fence == -1) {
            in_fence = impl->fd;
         } else {
            int merge = anv_gem_sync_file_merge(device, in_fence, impl->fd);
            if (merge == -1)
               return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);

            close(impl->fd);
            close(in_fence);
            in_fence = merge;
         }

         impl->fd = -1;
         break;

      case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
         result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
                                          I915_EXEC_FENCE_WAIT,
                                          &device->alloc);
         if (result != VK_SUCCESS)
            return result;
         break;

      default:
         break;
      }
   }

   bool need_out_fence = false;
   for (uint32_t i = 0; i < num_out_semaphores; i++) {
      ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);

      /* Under most circumstances, out fences won't be temporary.  However,
       * the spec does allow it for opaque_fd.  From the Vulkan 1.0.53 spec:
       *
       *    "If the import is temporary, the implementation must restore the
       *    semaphore to its prior permanent state after submitting the next
       *    semaphore wait operation."
       *
       * The spec says nothing whatsoever about signal operations on
       * temporarily imported semaphores so it appears they are allowed.
       * There are also CTS tests that require this to work.
       */
      struct anv_semaphore_impl *impl =
         semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
         &semaphore->temporary : &semaphore->permanent;

      switch (impl->type) {
      case ANV_SEMAPHORE_TYPE_BO:
         result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
                                     EXEC_OBJECT_WRITE, &device->alloc);
         if (result != VK_SUCCESS)
            return result;
         break;

      case ANV_SEMAPHORE_TYPE_SYNC_FILE:
         need_out_fence = true;
         break;

      case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
         result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
                                          I915_EXEC_FENCE_SIGNAL,
                                          &device->alloc);
         if (result != VK_SUCCESS)
            return result;
         break;

      default:
         break;
      }
   }

   if (fence) {
      /* Under most circumstances, out fences won't be temporary.  However,
       * the spec does allow it for opaque_fd.  From the Vulkan 1.0.53 spec:
       *
       *    "If the import is temporary, the implementation must restore the
       *    semaphore to its prior permanent state after submitting the next
       *    semaphore wait operation."
       *
       * The spec says nothing whatsoever about signal operations on
       * temporarily imported semaphores so it appears they are allowed.
       * There are also CTS tests that require this to work.
       */
      struct anv_fence_impl *impl =
         fence->temporary.type != ANV_FENCE_TYPE_NONE ?
         &fence->temporary : &fence->permanent;

      switch (impl->type) {
      case ANV_FENCE_TYPE_BO:
         result = anv_execbuf_add_bo(&execbuf, &impl->bo.bo, NULL,
                                     EXEC_OBJECT_WRITE, &device->alloc);
         if (result != VK_SUCCESS)
            return result;
         break;

      case ANV_FENCE_TYPE_SYNCOBJ:
         result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
                                          I915_EXEC_FENCE_SIGNAL,
                                          &device->alloc);
         if (result != VK_SUCCESS)
            return result;
         break;

      default:
         unreachable("Invalid fence type");
      }
   }

   if (cmd_buffer)
      result = setup_execbuf_for_cmd_buffer(&execbuf, cmd_buffer);
   else
      result = setup_empty_execbuf(&execbuf, device);

   if (result != VK_SUCCESS)
      return result;

   if (execbuf.fence_count > 0) {
      assert(device->instance->physicalDevice.has_syncobj);
      execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
      execbuf.execbuf.num_cliprects = execbuf.fence_count;
      execbuf.execbuf.cliprects_ptr = (uintptr_t) execbuf.fences;
   }

   if (in_fence != -1) {
      execbuf.execbuf.flags |= I915_EXEC_FENCE_IN;
      execbuf.execbuf.rsvd2 |= (uint32_t)in_fence;
   }

   if (need_out_fence)
      execbuf.execbuf.flags |= I915_EXEC_FENCE_OUT;

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
      decode_batch(cmd_buffer);

   result = anv_device_execbuf(device, &execbuf.execbuf, execbuf.bos);

   /* Execbuf does not consume the in_fence.  It's our job to close it. */
   if (in_fence != -1)
      close(in_fence);

   for (uint32_t i = 0; i < num_in_semaphores; i++) {
      ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
      /* From the Vulkan 1.0.53 spec:
       *
       *    "If the import is temporary, the implementation must restore the
       *    semaphore to its prior permanent state after submitting the next
       *    semaphore wait operation."
       *
       * This has to happen after the execbuf in case we close any syncobjs in
       * the process.
       */
      anv_semaphore_reset_temporary(device, semaphore);
   }

   if (fence && fence->permanent.type == ANV_FENCE_TYPE_BO) {
      /* BO fences can't be shared, so they can't be temporary. */
      assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);

      /* Once the execbuf has returned, we need to set the fence state to
       * SUBMITTED.  We can't do this before calling execbuf because
       * anv_GetFenceStatus does take the global device lock before checking
       * fence->state.
       *
       * We set the fence state to SUBMITTED regardless of whether or not the
       * execbuf succeeds because we need to ensure that vkWaitForFences() and
       * vkGetFenceStatus() return a valid result (VK_ERROR_DEVICE_LOST or
       * VK_SUCCESS) in a finite amount of time even if execbuf fails.
       */
      fence->permanent.bo.state = ANV_BO_FENCE_STATE_SUBMITTED;
   }

   if (result == VK_SUCCESS && need_out_fence) {
      int out_fence = execbuf.execbuf.rsvd2 >> 32;
      for (uint32_t i = 0; i < num_out_semaphores; i++) {
         ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
         /* Out fences can't have temporary state because that would imply
          * that we imported a sync file and are trying to signal it.
          */
         assert(semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE);
         struct anv_semaphore_impl *impl = &semaphore->permanent;

         if (impl->type == ANV_SEMAPHORE_TYPE_SYNC_FILE) {
            assert(impl->fd == -1);
            impl->fd = dup(out_fence);
         }
      }
      close(out_fence);
   }

   anv_execbuf_finish(&execbuf, &device->alloc);

   return result;
}