/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>

#include "anv_private.h"

#include "genxml/gen8_pack.h"
#include "genxml/genX_bits.h"
#include "perf/gen_perf.h"

#include "util/debug.h"
/** \file anv_batch_chain.c
 *
 * This file contains functions related to anv_cmd_buffer as a data
 * structure.  This involves everything required to create and destroy
 * the actual batch buffers as well as link them together and handle
 * relocations and surface state.  It specifically does *not* contain any
 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
 */
/*-----------------------------------------------------------------------*
 * Functions related to anv_reloc_list
 *-----------------------------------------------------------------------*/
VkResult
anv_reloc_list_init(struct anv_reloc_list *list,
                    const VkAllocationCallbacks *alloc)
{
   memset(list, 0, sizeof(*list));
   return VK_SUCCESS;
}
static VkResult
anv_reloc_list_init_clone(struct anv_reloc_list *list,
                          const VkAllocationCallbacks *alloc,
                          const struct anv_reloc_list *other_list)
{
   list->num_relocs = other_list->num_relocs;
   list->array_length = other_list->array_length;

   if (list->num_relocs > 0) {
      list->relocs =
         vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (list->relocs == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      list->reloc_bos =
         vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (list->reloc_bos == NULL) {
         vk_free(alloc, list->relocs);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      memcpy(list->relocs, other_list->relocs,
             list->array_length * sizeof(*list->relocs));
      memcpy(list->reloc_bos, other_list->reloc_bos,
             list->array_length * sizeof(*list->reloc_bos));
   } else {
      list->relocs = NULL;
      list->reloc_bos = NULL;
   }

   list->dep_words = other_list->dep_words;

   if (list->dep_words > 0) {
      list->deps =
         vk_alloc(alloc, list->dep_words * sizeof(BITSET_WORD), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (list->deps == NULL) {
         vk_free(alloc, list->relocs);
         vk_free(alloc, list->reloc_bos);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }
      memcpy(list->deps, other_list->deps,
             list->dep_words * sizeof(BITSET_WORD));
   } else {
      list->deps = NULL;
   }

   return VK_SUCCESS;
}
void
anv_reloc_list_finish(struct anv_reloc_list *list,
                      const VkAllocationCallbacks *alloc)
{
   vk_free(alloc, list->relocs);
   vk_free(alloc, list->reloc_bos);
   vk_free(alloc, list->deps);
}
static VkResult
anv_reloc_list_grow(struct anv_reloc_list *list,
                    const VkAllocationCallbacks *alloc,
                    size_t num_additional_relocs)
{
   if (list->num_relocs + num_additional_relocs <= list->array_length)
      return VK_SUCCESS;

   size_t new_length = MAX2(16, list->array_length * 2);
   while (new_length < list->num_relocs + num_additional_relocs)
      new_length *= 2;

   struct drm_i915_gem_relocation_entry *new_relocs =
      vk_realloc(alloc, list->relocs,
                 new_length * sizeof(*list->relocs), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_relocs == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   list->relocs = new_relocs;

   struct anv_bo **new_reloc_bos =
      vk_realloc(alloc, list->reloc_bos,
                 new_length * sizeof(*list->reloc_bos), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_reloc_bos == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   list->reloc_bos = new_reloc_bos;

   list->array_length = new_length;

   return VK_SUCCESS;
}
static VkResult
anv_reloc_list_grow_deps(struct anv_reloc_list *list,
                         const VkAllocationCallbacks *alloc,
                         uint32_t min_num_words)
{
   if (min_num_words <= list->dep_words)
      return VK_SUCCESS;

   uint32_t new_length = MAX2(32, list->dep_words * 2);
   while (new_length < min_num_words)
      new_length *= 2;

   BITSET_WORD *new_deps =
      vk_realloc(alloc, list->deps, new_length * sizeof(BITSET_WORD), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_deps == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   list->deps = new_deps;

   /* Zero out the new data */
   memset(list->deps + list->dep_words, 0,
          (new_length - list->dep_words) * sizeof(BITSET_WORD));
   list->dep_words = new_length;

   return VK_SUCCESS;
}
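/* Forces a single, untorn load through a volatile pointer.  anv_bo::offset
 * may be updated by another thread once a previous execbuf completes, so
 * readers such as anv_reloc_list_add() use this to take one consistent
 * snapshot of the offset when recording presumed_offset.
 */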
175 #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
VkResult
anv_reloc_list_add(struct anv_reloc_list *list,
                   const VkAllocationCallbacks *alloc,
                   uint32_t offset, struct anv_bo *target_bo, uint32_t delta,
                   uint64_t *address_u64_out)
{
   struct drm_i915_gem_relocation_entry *entry;
   int index;

   struct anv_bo *unwrapped_target_bo = anv_bo_unwrap(target_bo);
   uint64_t target_bo_offset = READ_ONCE(unwrapped_target_bo->offset);
   if (address_u64_out)
      *address_u64_out = target_bo_offset + delta;

   if (unwrapped_target_bo->flags & EXEC_OBJECT_PINNED) {
      assert(!target_bo->is_wrapper);
      uint32_t idx = unwrapped_target_bo->gem_handle;
      anv_reloc_list_grow_deps(list, alloc, (idx / BITSET_WORDBITS) + 1);
      BITSET_SET(list->deps, unwrapped_target_bo->gem_handle);
      return VK_SUCCESS;
   }

   VkResult result = anv_reloc_list_grow(list, alloc, 1);
   if (result != VK_SUCCESS)
      return result;

   /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
   index = list->num_relocs++;
   list->reloc_bos[index] = target_bo;
   entry = &list->relocs[index];
   entry->target_handle = -1; /* See also anv_cmd_buffer_process_relocs() */
   entry->delta = delta;
   entry->offset = offset;
   entry->presumed_offset = target_bo_offset;
   entry->read_domains = 0;
   entry->write_domain = 0;
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));

   return VK_SUCCESS;
}
static void
anv_reloc_list_clear(struct anv_reloc_list *list)
{
   list->num_relocs = 0;
   if (list->dep_words > 0)
      memset(list->deps, 0, list->dep_words * sizeof(BITSET_WORD));
}
static VkResult
anv_reloc_list_append(struct anv_reloc_list *list,
                      const VkAllocationCallbacks *alloc,
                      struct anv_reloc_list *other, uint32_t offset)
{
   VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
   if (result != VK_SUCCESS)
      return result;

   if (other->num_relocs > 0) {
      memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
             other->num_relocs * sizeof(other->relocs[0]));
      memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
             other->num_relocs * sizeof(other->reloc_bos[0]));

      for (uint32_t i = 0; i < other->num_relocs; i++)
         list->relocs[i + list->num_relocs].offset += offset;

      list->num_relocs += other->num_relocs;
   }

   anv_reloc_list_grow_deps(list, alloc, other->dep_words);
   for (uint32_t w = 0; w < other->dep_words; w++)
      list->deps[w] |= other->deps[w];

   return VK_SUCCESS;
}
/*-----------------------------------------------------------------------*
 * Functions related to anv_batch
 *-----------------------------------------------------------------------*/
void *
anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
{
   if (batch->next + num_dwords * 4 > batch->end) {
      VkResult result = batch->extend_cb(batch, batch->user_data);
      if (result != VK_SUCCESS) {
         anv_batch_set_error(batch, result);
         return NULL;
      }
   }

   void *p = batch->next;

   batch->next += num_dwords * 4;
   assert(batch->next <= batch->end);

   return p;
}
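/* A minimal usage sketch (illustrative, not code from this file):
 * anv_batch_emit_dwords() is the low-level allocator that the
 * anv_batch_emit() packing macros sit on top of.  Emitting one raw MI_NOOP
 * by hand would look like:
 *
 *    uint32_t *dw = anv_batch_emit_dwords(batch, 1);
 *    if (dw != NULL)
 *       dw[0] = 0;   (MI_NOOP is an all-zero dword)
 */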
uint64_t
anv_batch_emit_reloc(struct anv_batch *batch,
                     void *location, struct anv_bo *bo, uint32_t delta)
{
   uint64_t address_u64 = 0;
   VkResult result = anv_reloc_list_add(batch->relocs, batch->alloc,
                                        location - batch->start, bo, delta,
                                        &address_u64);
   if (result != VK_SUCCESS) {
      anv_batch_set_error(batch, result);
      return 0;
   }

   return address_u64;
}
struct anv_address
anv_batch_address(struct anv_batch *batch, void *batch_location)
{
   assert(batch->start < batch_location);

   /* Allow a jump at the current location of the batch. */
   assert(batch->next >= batch_location);

   return anv_address_add(batch->start_addr, batch_location - batch->start);
}
void
anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
{
   uint32_t size, offset;

   size = other->next - other->start;
   assert(size % 4 == 0);

   if (batch->next + size > batch->end) {
      VkResult result = batch->extend_cb(batch, batch->user_data);
      if (result != VK_SUCCESS) {
         anv_batch_set_error(batch, result);
         return;
      }
   }

   assert(batch->next + size <= batch->end);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
   memcpy(batch->next, other->start, size);

   offset = batch->next - batch->start;
   VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
                                           other->relocs, offset);
   if (result != VK_SUCCESS) {
      anv_batch_set_error(batch, result);
      return;
   }

   batch->next += size;
}
/*-----------------------------------------------------------------------*
 * Functions related to anv_batch_bo
 *-----------------------------------------------------------------------*/
static VkResult
anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
                    struct anv_batch_bo **bbo_out)
{
   VkResult result;

   struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
                                       8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (bbo == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
                              ANV_CMD_BUFFER_BATCH_SIZE, &bbo->bo);
   if (result != VK_SUCCESS)
      goto fail_alloc_bo;

   result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
   if (result != VK_SUCCESS)
      goto fail_bo_alloc;

   *bbo_out = bbo;

   return VK_SUCCESS;

 fail_bo_alloc:
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
 fail_alloc_bo:
   vk_free(&cmd_buffer->pool->alloc, bbo);

   return result;
}
static VkResult
anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
                   const struct anv_batch_bo *other_bbo,
                   struct anv_batch_bo **bbo_out)
{
   VkResult result;

   struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
                                       8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (bbo == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
                              other_bbo->bo->size, &bbo->bo);
   if (result != VK_SUCCESS)
      goto fail_alloc_bo;

   result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
                                      &other_bbo->relocs);
   if (result != VK_SUCCESS)
      goto fail_bo_alloc;

   bbo->length = other_bbo->length;
   memcpy(bbo->bo->map, other_bbo->bo->map, other_bbo->length);
   *bbo_out = bbo;

   return VK_SUCCESS;

 fail_bo_alloc:
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
 fail_alloc_bo:
   vk_free(&cmd_buffer->pool->alloc, bbo);

   return result;
}
static void
anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
                   size_t batch_padding)
{
   anv_batch_set_storage(batch, (struct anv_address) { .bo = bbo->bo, },
                         bbo->bo->map, bbo->bo->size - batch_padding);
   batch->relocs = &bbo->relocs;
   anv_reloc_list_clear(&bbo->relocs);
}
static void
anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
                      size_t batch_padding)
{
   batch->start_addr = (struct anv_address) { .bo = bbo->bo, };
   batch->start = bbo->bo->map;
   batch->next = bbo->bo->map + bbo->length;
   batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
   batch->relocs = &bbo->relocs;
}
static void
anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
{
   assert(batch->start == bbo->bo->map);
   bbo->length = batch->next - batch->start;
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
}
static VkResult
anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
                  struct anv_batch *batch, size_t additional,
                  size_t batch_padding)
{
   assert(batch->start == bbo->bo->map);
   bbo->length = batch->next - batch->start;

   size_t new_size = bbo->bo->size;
   while (new_size <= bbo->length + additional + batch_padding)
      new_size *= 2;

   if (new_size == bbo->bo->size)
      return VK_SUCCESS;

   struct anv_bo *new_bo;
   VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
                                       new_size, &new_bo);
   if (result != VK_SUCCESS)
      return result;

   memcpy(new_bo->map, bbo->bo->map, bbo->length);

   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);

   bbo->bo = new_bo;
   anv_batch_bo_continue(bbo, batch, batch_padding);

   return VK_SUCCESS;
}
static void
anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
                  struct anv_batch_bo *prev_bbo,
                  struct anv_batch_bo *next_bbo,
                  uint32_t next_bbo_offset)
{
   const uint32_t bb_start_offset =
      prev_bbo->length - GEN8_MI_BATCH_BUFFER_START_length * 4;
   ASSERTED const uint32_t *bb_start = prev_bbo->bo->map + bb_start_offset;

   /* Make sure we're looking at a MI_BATCH_BUFFER_START */
   assert(((*bb_start >> 29) & 0x07) == 0);
   assert(((*bb_start >> 23) & 0x3f) == 49);

   if (cmd_buffer->device->physical->use_softpin) {
      assert(prev_bbo->bo->flags & EXEC_OBJECT_PINNED);
      assert(next_bbo->bo->flags & EXEC_OBJECT_PINNED);

      write_reloc(cmd_buffer->device,
                  prev_bbo->bo->map + bb_start_offset + 4,
                  next_bbo->bo->offset + next_bbo_offset, true);
   } else {
      uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
      assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);

      prev_bbo->relocs.reloc_bos[reloc_idx] = next_bbo->bo;
      prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;

      /* Use a bogus presumed offset to force a relocation */
      prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
   }
}
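/* Worked decode of the asserts above (assuming the gen8 command encoding):
 * bits 31:29 of the header dword are the command type, 0 = MI; bits 28:23
 * are the MI opcode, and 49 (0x31) is MI_BATCH_BUFFER_START.  The start
 * address begins in dword 1 (with the high bits in dword 2 on gen8+),
 * which is why the link above patches the dword at bb_start_offset + 4.
 */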
static void
anv_batch_bo_destroy(struct anv_batch_bo *bbo,
                     struct anv_cmd_buffer *cmd_buffer)
{
   anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
   vk_free(&cmd_buffer->pool->alloc, bbo);
}
static VkResult
anv_batch_bo_list_clone(const struct list_head *list,
                        struct anv_cmd_buffer *cmd_buffer,
                        struct list_head *new_list)
{
   VkResult result = VK_SUCCESS;

   list_inithead(new_list);

   struct anv_batch_bo *prev_bbo = NULL;
   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
      struct anv_batch_bo *new_bbo = NULL;
      result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
      if (result != VK_SUCCESS)
         break;
      list_addtail(&new_bbo->link, new_list);

      if (prev_bbo)
         anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);

      prev_bbo = new_bbo;
   }

   if (result != VK_SUCCESS) {
      list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link) {
         list_del(&bbo->link);
         anv_batch_bo_destroy(bbo, cmd_buffer);
      }
   }

   return result;
}
/*-----------------------------------------------------------------------*
 * Functions related to anv_cmd_buffer
 *-----------------------------------------------------------------------*/
static struct anv_batch_bo *
anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
{
   return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
}
struct anv_address
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_state_pool *pool = anv_binding_table_pool(cmd_buffer->device);
   struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
   return (struct anv_address) {
      .bo = pool->block_pool.bo,
      .offset = bt_block->offset - pool->start_offset,
   };
}
static void
emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_bo *bo, uint32_t offset)
{
   /* In gen8+ the address field grew to two dwords to accommodate 48 bit
    * offsets.  The high 16 bits are in the last dword, so we can use the gen8
    * version in either case, as long as we set the instruction length in the
    * header accordingly.  This means that we always emit three dwords here
    * and all the padding and adjustment we do in this file works for all
    * gen versions.
    */

#define GEN7_MI_BATCH_BUFFER_START_length      2
#define GEN7_MI_BATCH_BUFFER_START_length_bias 2

   const uint32_t gen7_length =
      GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
   const uint32_t gen8_length =
      GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;

   anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
      bbs.DWordLength               = cmd_buffer->device->info.gen < 8 ?
                                      gen7_length : gen8_length;
      bbs.SecondLevelBatchBuffer    = Firstlevelbatch;
      bbs.AddressSpaceIndicator     = ASI_PPGTT;
      bbs.BatchBufferStartAddress   = (struct anv_address) { bo, offset };
   }
}
static void
cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_batch_bo *bbo)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_batch_bo *current_bbo =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);

   /* We set the end of the batch a little short so we would be sure we
    * have room for the chaining command.  Since we're about to emit the
    * chaining command, let's set it back where it should go.
    */
   batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
   assert(batch->end == current_bbo->bo->map + current_bbo->bo->size);

   emit_batch_buffer_start(cmd_buffer, bbo->bo, 0);

   anv_batch_bo_finish(current_bbo, batch);
}
static VkResult
anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
{
   struct anv_cmd_buffer *cmd_buffer = _data;
   struct anv_batch_bo *new_bbo;

   VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
   if (result != VK_SUCCESS)
      return result;

   struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
   if (seen_bbo == NULL) {
      anv_batch_bo_destroy(new_bbo, cmd_buffer);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   *seen_bbo = new_bbo;

   cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);

   list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);

   anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);

   return VK_SUCCESS;
}
static VkResult
anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
{
   struct anv_cmd_buffer *cmd_buffer = _data;
   struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);

   anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
                     GEN8_MI_BATCH_BUFFER_START_length * 4);

   return VK_SUCCESS;
}
/** Allocate a binding table
 *
 * This function allocates a binding table.  This is a bit more complicated
 * than one would think due to a combination of Vulkan driver design and some
 * unfortunate hardware restrictions.
 *
 * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
 * the binding table pointer which means that all binding tables need to live
 * in the bottom 64k of surface state base address.  The way the GL driver has
 * classically dealt with this restriction is to emit all surface states
 * on-the-fly into the batch and have a batch buffer smaller than 64k.  This
 * isn't really an option in Vulkan for a couple of reasons:
 *
 *  1) In Vulkan, we have growing (or chaining) batches so surface states have
 *     to live in their own buffer and we have to be able to re-emit
 *     STATE_BASE_ADDRESS as needed which requires a full pipeline stall.  In
 *     order to avoid emitting STATE_BASE_ADDRESS any more often than needed
 *     (it's not that hard to hit 64k of just binding tables), we allocate
 *     surface state objects up-front when VkImageView is created.  In order
 *     for this to work, surface state objects need to be allocated from a
 *     global buffer.
 *
 *  2) We tried to design the surface state system in such a way that it's
 *     already ready for bindless texturing.  The way bindless texturing works
 *     on our hardware is that you have a big pool of surface state objects
 *     (with its own state base address) and the bindless handles are simply
 *     offsets into that pool.  With the architecture we chose, we already
 *     have that pool and it's exactly the same pool that we use for regular
 *     surface states, so we should already be ready for bindless.
 *
 *  3) For render targets, we need to be able to fill out the surface states
 *     later in vkBeginRenderPass so that we can assign clear colors
 *     correctly.  One way to do this would be to just create the surface
 *     state data and then repeatedly copy it into the surface state BO every
 *     time we have to re-emit STATE_BASE_ADDRESS.  While this works, it's
 *     rather annoying; simply allocating the surface states up-front and
 *     re-using them for the entire render pass is much cleaner.
 *
 * While none of these are technically blockers for emitting state on the fly
 * like we do in GL, the ability to have a single surface state pool
 * simplifies things greatly.  Unfortunately, it comes at a cost...
 *
 * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
 * place the binding tables just anywhere in surface state base address.
 * Because 64k isn't a whole lot of space, we can't simply restrict the
 * surface state buffer to 64k; we have to be more clever.  The solution we've
 * chosen is to have a block pool with a maximum size of 2G that starts at
 * zero and grows in both directions.  All surface states are allocated from
 * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
 * binding tables from the bottom of the pool (negative offsets).  Every time
 * we allocate a new binding table block, we set surface state base address to
 * point to the bottom of the binding table block.  This way all of the
 * binding tables in the block are in the bottom 64k of surface state base
 * address.  When we fill out the binding table, we add the distance between
 * the bottom of our binding table block and zero of the block pool to the
 * surface state offsets so that they are correct relative to our new surface
 * state base address at the bottom of the binding table block.
 *
 * \see adjust_relocations_from_block_pool()
 * \see adjust_relocations_to_block_pool()
 *
 * \param[in]  entries        The number of surface state entries the binding
 *                            table should be able to hold.
 *
 * \param[out] state_offset   The offset from surface state base address
 *                            where the surface states live.  This must be
 *                            added to the surface state offset when it is
 *                            written into the binding table entry.
 *
 * \return An anv_state representing the binding table
 */
struct anv_state
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t entries, uint32_t *state_offset)
{
   struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);

   uint32_t bt_size = align_u32(entries * 4, 32);

   struct anv_state state = cmd_buffer->bt_next;
   if (bt_size > state.alloc_size)
      return (struct anv_state) { 0 };

   state.alloc_size = bt_size;
   cmd_buffer->bt_next.offset += bt_size;
   cmd_buffer->bt_next.map += bt_size;
   cmd_buffer->bt_next.alloc_size -= bt_size;

   assert(bt_block->offset < 0);
   *state_offset = -bt_block->offset;

   return state;
}
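/* A worked example of the offset math above (the values are hypothetical):
 * if the current binding table block was allocated at
 * bt_block->offset = -4096, surface state base address points 4096 bytes
 * below block-pool zero, so *state_offset = 4096.  A surface state living
 * at +8192 in the state pool is then written into a binding table entry as
 * 8192 + 4096 = 12288, its correct offset from the new surface state base
 * address.
 */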
struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
{
   struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
   return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
                                 isl_dev->ss.size, isl_dev->ss.align);
}
struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment)
{
   return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
                                 size, alignment);
}
VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
   if (bt_block == NULL) {
      anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);

   /* The bt_next state is a rolling state (we update it as we suballocate
    * from it) which is relative to the start of the binding table block.
    */
   cmd_buffer->bt_next = *bt_block;
   cmd_buffer->bt_next.offset = 0;

   return VK_SUCCESS;
}
VkResult
anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo;
   VkResult result;

   list_inithead(&cmd_buffer->batch_bos);

   result = anv_batch_bo_create(cmd_buffer, &batch_bo);
   if (result != VK_SUCCESS)
      return result;

   list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);

   cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
   cmd_buffer->batch.user_data = cmd_buffer;

   if (cmd_buffer->device->can_chain_batches) {
      cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
   } else {
      cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
   }

   anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);

   int success = u_vector_init(&cmd_buffer->seen_bbos,
                               sizeof(struct anv_bo *),
                               8 * sizeof(struct anv_bo *));
   if (!success)
      goto fail_batch_bo;

   *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;

   /* u_vector requires power-of-two size elements */
   unsigned pow2_state_size = util_next_power_of_two(sizeof(struct anv_state));
   success = u_vector_init(&cmd_buffer->bt_block_states,
                           pow2_state_size, 8 * pow2_state_size);
   if (!success)
      goto fail_seen_bbos;

   result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
                                &cmd_buffer->pool->alloc);
   if (result != VK_SUCCESS)
      goto fail_bt_blocks;
   cmd_buffer->last_ss_pool_center = 0;

   result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail_bt_blocks;

   return VK_SUCCESS;

 fail_bt_blocks:
   u_vector_finish(&cmd_buffer->bt_block_states);
 fail_seen_bbos:
   u_vector_finish(&cmd_buffer->seen_bbos);
 fail_batch_bo:
   anv_batch_bo_destroy(batch_bo, cmd_buffer);

   return result;
}
void
anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_state *bt_block;
   u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
      anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
   u_vector_finish(&cmd_buffer->bt_block_states);

   anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);

   u_vector_finish(&cmd_buffer->seen_bbos);

   /* Destroy all of the batch buffers */
   list_for_each_entry_safe(struct anv_batch_bo, bbo,
                            &cmd_buffer->batch_bos, link) {
      list_del(&bbo->link);
      anv_batch_bo_destroy(bbo, cmd_buffer);
   }
}
void
anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   /* Delete all but the first batch bo */
   assert(!list_is_empty(&cmd_buffer->batch_bos));
   while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
      list_del(&bbo->link);
      anv_batch_bo_destroy(bbo, cmd_buffer);
   }
   assert(!list_is_empty(&cmd_buffer->batch_bos));

   anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
                      &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);

   while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
      struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
      anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
   }
   assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
   cmd_buffer->bt_next = *(struct anv_state *)u_vector_head(&cmd_buffer->bt_block_states);
   cmd_buffer->bt_next.offset = 0;

   anv_reloc_list_clear(&cmd_buffer->surface_relocs);
   cmd_buffer->last_ss_pool_center = 0;

   /* Reset the list of seen buffers */
   cmd_buffer->seen_bbos.head = 0;
   cmd_buffer->seen_bbos.tail = 0;

   *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);
}
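/* Summary of the execution modes chosen below (derived from the logic in
 * anv_cmd_buffer_end_batch_buffer and consumed by
 * anv_cmd_buffer_add_secondary):
 *
 *  - PRIMARY:         primary command buffer; ends with MI_BATCH_BUFFER_END.
 *  - GROW_AND_EMIT:   no batch chaining available; the primary grows its
 *                     batch and memcpys the secondary in.
 *  - EMIT:            small single-BO secondary; simply copied into the
 *                     primary batch.
 *  - CHAIN:           the secondary jumps back into the primary via
 *                     MI_BATCH_BUFFER_START; only chosen when the secondary
 *                     isn't SIMULTANEOUS_USE.
 *  - COPY_AND_CHAIN:  like CHAIN but the BO list is cloned first so the
 *                     secondary can be executed more than once.
 *  - CALL_AND_RETURN: uses MI_STORE_DATA_IMM plus MI_BATCH_BUFFER_START to
 *                     emulate a call into and return from the secondary.
 */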
void
anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      /* When we start a batch buffer, we subtract a certain amount of
       * padding from the end to ensure that we always have room to emit a
       * BATCH_BUFFER_START to chain to the next BO.  We need to remove
       * that padding before we end the batch; otherwise, we may end up
       * with our BATCH_BUFFER_END in another BO.
       */
      cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
      assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);

      anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END, bbe);

      /* Round batch up to an even number of dwords. */
      if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
         anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);

      cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
   } else {
      assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
      /* If this is a secondary command buffer, we need to determine the
       * mode in which it will be executed with vkExecuteCommands.  We
       * determine this statically here so that this stays in sync with the
       * actual ExecuteCommands implementation.
       */
      const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
      if (!cmd_buffer->device->can_chain_batches) {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
      } else if (cmd_buffer->device->physical->use_call_secondary) {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN;

         /* If the secondary command buffer begins & ends in the same BO and
          * its length is less than the length of CS prefetch, add some NOOP
          * instructions so the last MI_BATCH_BUFFER_START is outside the CS
          * prefetch.
          */
         if (cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) {
            const int32_t batch_len =
               cmd_buffer->batch.next - cmd_buffer->batch.start;

            for (int32_t i = 0; i < (512 - batch_len); i += 4)
               anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);
         }

         void *jump_addr =
            anv_batch_emitn(&cmd_buffer->batch,
                            GEN8_MI_BATCH_BUFFER_START_length,
                            GEN8_MI_BATCH_BUFFER_START,
                            .AddressSpaceIndicator = ASI_PPGTT,
                            .SecondLevelBatchBuffer = Firstlevelbatch) +
            (GEN8_MI_BATCH_BUFFER_START_BatchBufferStartAddress_start / 8);
         cmd_buffer->return_addr = anv_batch_address(&cmd_buffer->batch, jump_addr);
      } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
                 (length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
         /* If the secondary has exactly one batch buffer in its list *and*
          * that batch buffer is less than half of the maximum size, we're
          * probably better off simply copying it into our batch.
          */
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
      } else if (!(cmd_buffer->usage_flags &
                   VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;

         /* In order to chain, we need this command buffer to contain an
          * MI_BATCH_BUFFER_START which will jump back to the calling batch.
          * It doesn't matter where it points now so long as it has a valid
          * relocation.  We'll adjust it later as part of the chaining
          * process.
          *
          * We set the end of the batch a little short so we would be sure we
          * have room for the chaining command.  Since we're about to emit the
          * chaining command, let's set it back where it should go.
          */
         cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
         assert(cmd_buffer->batch.start == batch_bo->bo->map);
         assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);

         emit_batch_buffer_start(cmd_buffer, batch_bo->bo, 0);
         assert(cmd_buffer->batch.start == batch_bo->bo->map);
      } else {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
      }
   }

   anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
}
static VkResult
anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
                             struct list_head *list)
{
   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
      struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
      if (bbo_ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      *bbo_ptr = bbo;
   }

   return VK_SUCCESS;
}
void
anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                             struct anv_cmd_buffer *secondary)
{
   switch (secondary->exec_mode) {
   case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
      anv_batch_emit_batch(&primary->batch, &secondary->batch);
      break;
   case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
      unsigned length = secondary->batch.end - secondary->batch.start;
      anv_batch_bo_grow(primary, bbo, &primary->batch, length,
                        GEN8_MI_BATCH_BUFFER_START_length * 4);
      anv_batch_emit_batch(&primary->batch, &secondary->batch);
      break;
   }
   case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
      struct anv_batch_bo *first_bbo =
         list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
      struct anv_batch_bo *last_bbo =
         list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);

      emit_batch_buffer_start(primary, first_bbo->bo, 0);

      struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
      assert(primary->batch.start == this_bbo->bo->map);
      uint32_t offset = primary->batch.next - primary->batch.start;

      /* Make the tail of the secondary point back to right after the
       * MI_BATCH_BUFFER_START in the primary batch.
       */
      anv_batch_bo_link(primary, last_bbo, this_bbo, offset);

      anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
      break;
   }
   case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
      struct list_head copy_list;
      VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
                                                primary,
                                                &copy_list);
      if (result != VK_SUCCESS)
         return;

      anv_cmd_buffer_add_seen_bbos(primary, &copy_list);

      struct anv_batch_bo *first_bbo =
         list_first_entry(&copy_list, struct anv_batch_bo, link);
      struct anv_batch_bo *last_bbo =
         list_last_entry(&copy_list, struct anv_batch_bo, link);

      cmd_buffer_chain_to_batch_bo(primary, first_bbo);

      list_splicetail(&copy_list, &primary->batch_bos);

      anv_batch_bo_continue(last_bbo, &primary->batch,
                            GEN8_MI_BATCH_BUFFER_START_length * 4);
      break;
   }
   case ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN: {
      struct anv_batch_bo *first_bbo =
         list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);

      uint64_t *write_return_addr =
         anv_batch_emitn(&primary->batch,
                         GEN8_MI_STORE_DATA_IMM_length + 1 /* QWord write */,
                         GEN8_MI_STORE_DATA_IMM,
                         .Address = secondary->return_addr)
         + (GEN8_MI_STORE_DATA_IMM_ImmediateData_start / 8);

      emit_batch_buffer_start(primary, first_bbo->bo, 0);

      *write_return_addr =
         anv_address_physical(anv_batch_address(&primary->batch,
                                                primary->batch.next));

      anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
      break;
   }
   default:
      assert(!"Invalid execution mode");
   }

   anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
                         &secondary->surface_relocs, 0);
}
struct anv_execbuf {
   struct drm_i915_gem_execbuffer2           execbuf;

   struct drm_i915_gem_exec_object2 *        objects;
   uint32_t                                  bo_count;
   struct anv_bo **                          bos;

   /* Allocated length of the 'objects' and 'bos' arrays */
   uint32_t                                  array_length;

   bool                                      has_relocs;

   const VkAllocationCallbacks *             alloc;
   VkSystemAllocationScope                   alloc_scope;

   int                                       perf_query_pass;
};
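/* Invariant worth noting: 'objects' and 'bos' above are parallel arrays,
 * and anv_bo::index caches a BO's position in them.  anv_execbuf_add_bo()
 * relies on this to deduplicate in O(1): a BO is already present iff
 * bo->index < bo_count && bos[bo->index] == bo.
 */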
static void
anv_execbuf_init(struct anv_execbuf *exec)
{
   memset(exec, 0, sizeof(*exec));
}
static void
anv_execbuf_finish(struct anv_execbuf *exec)
{
   vk_free(exec->alloc, exec->objects);
   vk_free(exec->alloc, exec->bos);
}
static VkResult
anv_execbuf_add_bo_bitset(struct anv_device *device,
                          struct anv_execbuf *exec,
                          uint32_t dep_words,
                          BITSET_WORD *deps,
                          uint32_t extra_flags);
static VkResult
anv_execbuf_add_bo(struct anv_device *device,
                   struct anv_execbuf *exec,
                   struct anv_bo *bo,
                   struct anv_reloc_list *relocs,
                   uint32_t extra_flags)
{
   struct drm_i915_gem_exec_object2 *obj = NULL;

   bo = anv_bo_unwrap(bo);

   if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
      obj = &exec->objects[bo->index];

   if (obj == NULL) {
      /* We've never seen this one before.  Add it to the list and assign
       * an id that we can use later.
       */
      if (exec->bo_count >= exec->array_length) {
         uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;

         struct drm_i915_gem_exec_object2 *new_objects =
            vk_alloc(exec->alloc, new_len * sizeof(*new_objects), 8, exec->alloc_scope);
         if (new_objects == NULL)
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

         struct anv_bo **new_bos =
            vk_alloc(exec->alloc, new_len * sizeof(*new_bos), 8, exec->alloc_scope);
         if (new_bos == NULL) {
            vk_free(exec->alloc, new_objects);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }

         if (exec->objects) {
            memcpy(new_objects, exec->objects,
                   exec->bo_count * sizeof(*new_objects));
            memcpy(new_bos, exec->bos,
                   exec->bo_count * sizeof(*new_bos));
         }

         vk_free(exec->alloc, exec->objects);
         vk_free(exec->alloc, exec->bos);

         exec->objects = new_objects;
         exec->bos = new_bos;
         exec->array_length = new_len;
      }

      assert(exec->bo_count < exec->array_length);

      bo->index = exec->bo_count++;
      obj = &exec->objects[bo->index];
      exec->bos[bo->index] = bo;

      obj->handle = bo->gem_handle;
      obj->relocation_count = 0;
      obj->relocs_ptr = 0;
      obj->alignment = 0;
      obj->offset = bo->offset;
      obj->flags = bo->flags | extra_flags;
      obj->rsvd1 = 0;
      obj->rsvd2 = 0;
   }

   if (extra_flags & EXEC_OBJECT_WRITE) {
      obj->flags |= EXEC_OBJECT_WRITE;
      obj->flags &= ~EXEC_OBJECT_ASYNC;
   }

   if (relocs != NULL) {
      assert(obj->relocation_count == 0);

      if (relocs->num_relocs > 0) {
         /* This is the first time we've ever seen a list of relocations for
          * this BO.  Go ahead and set the relocations and then walk the list
          * of relocations and add them all.
          */
         exec->has_relocs = true;
         obj->relocation_count = relocs->num_relocs;
         obj->relocs_ptr = (uintptr_t) relocs->relocs;

         for (size_t i = 0; i < relocs->num_relocs; i++) {
            VkResult result;

            /* A quick sanity check on relocations */
            assert(relocs->relocs[i].offset < bo->size);
            result = anv_execbuf_add_bo(device, exec, relocs->reloc_bos[i],
                                        NULL, extra_flags);
            if (result != VK_SUCCESS)
               return result;
         }
      }

      return anv_execbuf_add_bo_bitset(device, exec, relocs->dep_words,
                                       relocs->deps, extra_flags);
   }

   return VK_SUCCESS;
}
/* Add BO dependencies to execbuf */
static VkResult
anv_execbuf_add_bo_bitset(struct anv_device *device,
                          struct anv_execbuf *exec,
                          uint32_t dep_words,
                          BITSET_WORD *deps,
                          uint32_t extra_flags)
{
   for (uint32_t w = 0; w < dep_words; w++) {
      BITSET_WORD mask = deps[w];
      while (mask) {
         int i = u_bit_scan(&mask);
         uint32_t gem_handle = w * BITSET_WORDBITS + i;
         struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
         assert(bo->refcount > 0);
         VkResult result =
            anv_execbuf_add_bo(device, exec, bo, NULL, extra_flags);
         if (result != VK_SUCCESS)
            return result;
      }
   }

   return VK_SUCCESS;
}
static void
anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_reloc_list *list)
{
   for (size_t i = 0; i < list->num_relocs; i++)
      list->relocs[i].target_handle = anv_bo_unwrap(list->reloc_bos[i])->index;
}
static void
adjust_relocations_from_state_pool(struct anv_state_pool *pool,
                                   struct anv_reloc_list *relocs,
                                   uint32_t last_pool_center_bo_offset)
{
   assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
   uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;

   for (size_t i = 0; i < relocs->num_relocs; i++) {
      /* All of the relocations from this block pool to other BO's should
       * have been emitted relative to the surface block pool center.  We
       * need to add the center offset to make them relative to the
       * beginning of the actual GEM bo.
       */
      relocs->relocs[i].offset += delta;
   }
}
static void
adjust_relocations_to_state_pool(struct anv_state_pool *pool,
                                 struct anv_bo *from_bo,
                                 struct anv_reloc_list *relocs,
                                 uint32_t last_pool_center_bo_offset)
{
   assert(!from_bo->is_wrapper);
   assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
   uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;

   /* When we initially emit relocations into a block pool, we don't
    * actually know what the final center_bo_offset will be so we just emit
    * it as if center_bo_offset == 0.  Now that we know what the center
    * offset is, we need to walk the list of relocations and adjust any
    * relocations that point to the pool bo with the correct offset.
    */
   for (size_t i = 0; i < relocs->num_relocs; i++) {
      if (relocs->reloc_bos[i] == pool->block_pool.bo) {
         /* Adjust the delta value in the relocation to correctly
          * correspond to the new delta.  Initially, this value may have
          * been negative (if treated as unsigned), but we trust in
          * uint32_t roll-over to fix that for us at this point.
          */
         relocs->relocs[i].delta += delta;

         /* Since the delta has changed, we need to update the actual
          * relocated value with the new presumed value.  This function
          * should only be called on batch buffers, so we know it isn't in
          * use by the GPU at the moment.
          */
         assert(relocs->relocs[i].offset < from_bo->size);
         write_reloc(pool->block_pool.device,
                     from_bo->map + relocs->relocs[i].offset,
                     relocs->relocs[i].presumed_offset +
                     relocs->relocs[i].delta, false);
      }
   }
}
static void
anv_reloc_list_apply(struct anv_device *device,
                     struct anv_reloc_list *list,
                     struct anv_bo *bo,
                     bool always_relocate)
{
   bo = anv_bo_unwrap(bo);

   for (size_t i = 0; i < list->num_relocs; i++) {
      struct anv_bo *target_bo = anv_bo_unwrap(list->reloc_bos[i]);
      if (list->relocs[i].presumed_offset == target_bo->offset &&
          !always_relocate)
         continue;

      void *p = bo->map + list->relocs[i].offset;
      write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
      list->relocs[i].presumed_offset = target_bo->offset;
   }
}
/**
 * This function applies the relocations for a command buffer and writes the
 * actual addresses into the buffers as per what we were told by the kernel on
 * the previous execbuf2 call.  This should be safe to do because, for each
 * relocated address, we have two cases:
 *
 *  1) The target BO is inactive (as seen by the kernel).  In this case, it is
 *     not in use by the GPU so updating the address is 100% ok.  It won't be
 *     in-use by the GPU (from our context) again until the next execbuf2
 *     happens.  If the kernel decides to move it in the next execbuf2, it
 *     will have to do the relocations itself, but that's ok because it should
 *     have all of the information needed to do so.
 *
 *  2) The target BO is active (as seen by the kernel).  In this case, it
 *     hasn't moved since the last execbuffer2 call because GTT shuffling
 *     *only* happens when the BO is idle.  (From our perspective, it only
 *     happens inside the execbuffer2 ioctl, but the shuffling may be
 *     triggered by another ioctl, with full-ppgtt this is limited to only
 *     execbuffer2 ioctls on the same context, or memory pressure.)  Since the
 *     target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
 *     address and the relocated value we are writing into the BO will be the
 *     same as the value that is already there.
 *
 *     There is also a possibility that the target BO is active but the exact
 *     RENDER_SURFACE_STATE object we are writing the relocation into isn't in
 *     use.  In this case, the address currently in the RENDER_SURFACE_STATE
 *     may be stale but it's still safe to write the relocation because that
 *     particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
 *     won't be until the next execbuf2 call.
 *
 * By doing relocations on the CPU, we can tell the kernel that it doesn't
 * need to bother.  We want to do this because the surface state buffer is
 * used by every command buffer so, if the kernel does the relocations, it
 * will always be busy and the kernel will always stall.  This is also
 * probably the fastest mechanism for doing relocations since the kernel
 * would otherwise have to make a full copy of all the relocation lists.
 */
static bool
relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
                    struct anv_execbuf *exec)
{
   if (cmd_buffer->perf_query_pool)
      return false;

   if (!exec->has_relocs)
      return true;

   static int userspace_relocs = -1;
   if (userspace_relocs < 0)
      userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
   if (!userspace_relocs)
      return false;

   /* First, we have to check to see whether or not we can even do the
    * relocation.  New buffers which have never been submitted to the kernel
    * don't have a valid offset so we need to let the kernel do relocations so
    * that we can get offsets for them.  On future execbuf2 calls, those
    * buffers will have offsets and we will be able to skip relocating.
    * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
    */
   for (uint32_t i = 0; i < exec->bo_count; i++) {
      assert(!exec->bos[i]->is_wrapper);
      if (exec->bos[i]->offset == (uint64_t)-1)
         return false;
   }

   /* Since surface states are shared between command buffers and we don't
    * know what order they will be submitted to the kernel, we don't know
    * what address is actually written in the surface state object at any
    * given time.  The only option is to always relocate them.
    */
   struct anv_bo *surface_state_bo =
      anv_bo_unwrap(cmd_buffer->device->surface_state_pool.block_pool.bo);
   anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
                        surface_state_bo,
                        true /* always relocate surface states */);

   /* Since we own all of the batch buffers, we know what values are stored
    * in the relocated addresses and only have to update them if the offsets
    * have changed.
    */
   struct anv_batch_bo **bbo;
   u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
      anv_reloc_list_apply(cmd_buffer->device,
                           &(*bbo)->relocs, (*bbo)->bo, false);
   }

   for (uint32_t i = 0; i < exec->bo_count; i++)
      exec->objects[i].offset = exec->bos[i]->offset;

   return true;
}
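/* For debugging, userspace relocation handling can be disabled without
 * rebuilding the driver, e.g.:
 *
 *    ANV_USERSPACE_RELOCS=false vulkan_app
 *
 * which makes relocate_cmd_buffer() return false above and leaves all
 * relocations to the kernel.
 */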
static VkResult
setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
                             struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_state_pool *ss_pool =
      &cmd_buffer->device->surface_state_pool;

   adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
                                      cmd_buffer->last_ss_pool_center);
   VkResult result;
   if (cmd_buffer->device->physical->use_softpin) {
      anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
         result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                     bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }
      /* Add surface dependencies (BOs) to the execbuf */
      anv_execbuf_add_bo_bitset(cmd_buffer->device, execbuf,
                                cmd_buffer->surface_relocs.dep_words,
                                cmd_buffer->surface_relocs.deps, 0);

      /* Add the BOs for all memory objects */
      list_for_each_entry(struct anv_device_memory, mem,
                          &cmd_buffer->device->memory_objects, link) {
         result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                     mem->bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }

      struct anv_block_pool *pool;
      pool = &cmd_buffer->device->dynamic_state_pool.block_pool;
      anv_block_pool_foreach_bo(bo, pool) {
         result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                     bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }

      pool = &cmd_buffer->device->instruction_state_pool.block_pool;
      anv_block_pool_foreach_bo(bo, pool) {
         result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                     bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }

      pool = &cmd_buffer->device->binding_table_pool.block_pool;
      anv_block_pool_foreach_bo(bo, pool) {
         result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                     bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }
   } else {
      /* Since we aren't in the softpin case, all of our STATE_BASE_ADDRESS BOs
       * will get added automatically by processing relocations on the batch
       * buffer.  We have to add the surface state BO manually because it has
       * relocations of its own that we need to be sure are processed.
       */
      result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                  ss_pool->block_pool.bo,
                                  &cmd_buffer->surface_relocs, 0);
      if (result != VK_SUCCESS)
         return result;
   }

   /* First, we walk over all of the bos we've seen and add them and their
    * relocations to the validate list.
    */
   struct anv_batch_bo **bbo;
   u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
      adjust_relocations_to_state_pool(ss_pool, (*bbo)->bo, &(*bbo)->relocs,
                                       cmd_buffer->last_ss_pool_center);

      result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                  (*bbo)->bo, &(*bbo)->relocs, 0);
      if (result != VK_SUCCESS)
         return result;
   }

   /* Now that we've adjusted all of the surface state relocations, we need to
    * record the surface state pool center so future executions of the command
    * buffer can adjust correctly.
    */
   cmd_buffer->last_ss_pool_center = ss_pool->block_pool.center_bo_offset;

   struct anv_batch_bo *first_batch_bo =
      list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);

   /* The kernel requires that the last entry in the validation list be the
    * batch buffer to execute.  We can simply swap the element
    * corresponding to the first batch_bo in the chain with the last
    * element in the list.
    */
   if (first_batch_bo->bo->index != execbuf->bo_count - 1) {
      uint32_t idx = first_batch_bo->bo->index;
      uint32_t last_idx = execbuf->bo_count - 1;

      struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
      assert(execbuf->bos[idx] == first_batch_bo->bo);

      execbuf->objects[idx] = execbuf->objects[last_idx];
      execbuf->bos[idx] = execbuf->bos[last_idx];
      execbuf->bos[idx]->index = idx;

      execbuf->objects[last_idx] = tmp_obj;
      execbuf->bos[last_idx] = first_batch_bo->bo;
      first_batch_bo->bo->index = last_idx;
   }

   /* If we are pinning our BOs, we shouldn't have to relocate anything */
   if (cmd_buffer->device->physical->use_softpin)
      assert(!execbuf->has_relocs);

   /* Now we go through and fixup all of the relocation lists to point to
    * the correct indices in the object array.  We have to do this after we
    * reorder the list above as some of the indices may have changed.
    */
   if (execbuf->has_relocs) {
      u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
         anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);

      anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
   }

   if (!cmd_buffer->device->info.has_llc) {
      __builtin_ia32_mfence();
      u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
         for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
            __builtin_ia32_clflush((*bbo)->bo->map + i);
      }
   }

   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) execbuf->objects,
      .buffer_count = execbuf->bo_count,
      .batch_start_offset = 0,
      .batch_len = batch->next - batch->start,
      .cliprects_ptr = 0,
      .num_cliprects = 0,
      .DR1 = 0,
      .DR4 = 0,
      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
      .rsvd1 = cmd_buffer->device->context_id,
      .rsvd2 = 0,
   };

   if (relocate_cmd_buffer(cmd_buffer, execbuf)) {
      /* If we were able to successfully relocate everything, tell the kernel
       * that it can skip doing relocations.  The requirements for using
       * NO_RELOC are:
       *
       *  1) The addresses written in the objects must match the corresponding
       *     reloc.presumed_offset which in turn must match the corresponding
       *     execobject.offset.
       *
       *  2) To avoid stalling, execobject.offset should match the current
       *     address of that object within the active context.
       *
       * In order to satisfy all of the invariants that make userspace
       * relocations safe (see relocate_cmd_buffer()), we need to further
       * ensure that the addresses we use match those used by the kernel for
       * the most recent execbuf2.
       *
       * The kernel may still choose to do relocations anyway if something has
       * moved in the GTT.  In this case, the relocation list still needs to
       * be valid.  All relocations on the batch buffers are already valid and
       * kept up-to-date.  For surface state relocations, by applying the
       * relocations in relocate_cmd_buffer, we ensured that the address in
       * the RENDER_SURFACE_STATE matches presumed_offset, so it should be
       * safe for the kernel to relocate them as needed.
       */
      execbuf->execbuf.flags |= I915_EXEC_NO_RELOC;
   } else {
      /* In the case where we fall back to doing kernel relocations, we need
       * to ensure that the relocation list is valid.  All relocations on the
       * batch buffers are already valid and kept up-to-date.  Since surface
       * states are shared between command buffers and we don't know what
       * order they will be submitted to the kernel, we don't know what
       * address is actually written in the surface state object at any given
       * time.  The only option is to set a bogus presumed offset and let the
       * kernel relocate them.
       */
      for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
         cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
   }

   return VK_SUCCESS;
}
static VkResult
setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_device *device)
{
   VkResult result = anv_execbuf_add_bo(device, execbuf,
                                        device->trivial_batch_bo,
                                        NULL, 0);
   if (result != VK_SUCCESS)
      return result;

   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) execbuf->objects,
      .buffer_count = execbuf->bo_count,
      .batch_start_offset = 0,
      .batch_len = 8, /* GEN7_MI_BATCH_BUFFER_END and NOOP */
      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
      .rsvd1 = device->context_id,
      .rsvd2 = 0,
   };

   return VK_SUCCESS;
}
/* We lock around execbuf for three main reasons:
 *
 * 1) When a block pool is resized, we create a new gem handle with a
 *    different size and, in the case of surface states, possibly a
 *    different center offset but we re-use the same anv_bo struct when
 *    we do so.  If this happens in the middle of setting up an execbuf,
 *    we could end up with our list of BOs out of sync with our list of
 *    gem handles.
 *
 * 2) The algorithm we use for building the list of unique buffers isn't
 *    thread-safe.  While the client is supposed to synchronize around
 *    QueueSubmit, this would be extremely difficult to debug if it ever
 *    came up in the wild due to a broken app.  It's better to play it
 *    safe and just lock around QueueSubmit.
 *
 * 3) The anv_cmd_buffer_execbuf function may perform relocations in
 *    userspace.  Due to the fact that the surface state buffer is shared
 *    between batches, we can't afford to have that happen from multiple
 *    threads at the same time.  Even though the user is supposed to
 *    ensure this doesn't happen, we play it safe as in (2) above.
 *
 * Since the only other things that ever take the device lock, such as
 * block pool resizes, happen rarely, the lock is almost never contended
 * and taking it isn't an expensive operation in this case.
 */
VkResult
anv_queue_execbuf_locked(struct anv_queue *queue,
                         struct anv_queue_submit *submit)
{
   struct anv_device *device = queue->device;
   struct anv_execbuf execbuf;
   anv_execbuf_init(&execbuf);
   execbuf.alloc = submit->alloc;
   execbuf.alloc_scope = submit->alloc_scope;
   execbuf.perf_query_pass = submit->perf_query_pass;

   /* Always add the workaround BO as it includes a driver identifier for the
    * error_state.
    */
   VkResult result =
      anv_execbuf_add_bo(device, &execbuf, device->workaround_bo, NULL, 0);
   if (result != VK_SUCCESS)
      goto error;

   for (uint32_t i = 0; i < submit->fence_bo_count; i++) {
      int signaled;
      struct anv_bo *bo = anv_unpack_ptr(submit->fence_bos[i], 1, &signaled);

      result = anv_execbuf_add_bo(device, &execbuf, bo, NULL,
                                  signaled ? EXEC_OBJECT_WRITE : 0);
      if (result != VK_SUCCESS)
         goto error;
   }

   if (submit->cmd_buffer) {
      result = setup_execbuf_for_cmd_buffer(&execbuf, submit->cmd_buffer);
   } else if (submit->simple_bo) {
      result = anv_execbuf_add_bo(device, &execbuf, submit->simple_bo, NULL, 0);
      if (result != VK_SUCCESS)
         goto error;

      execbuf.execbuf = (struct drm_i915_gem_execbuffer2) {
         .buffers_ptr = (uintptr_t) execbuf.objects,
         .buffer_count = execbuf.bo_count,
         .batch_start_offset = 0,
         .batch_len = submit->simple_bo_size,
         .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
         .rsvd1 = device->context_id,
         .rsvd2 = 0,
      };
   } else {
      result = setup_empty_execbuf(&execbuf, queue->device);
   }

   if (result != VK_SUCCESS)
      goto error;

   const bool has_perf_query =
      submit->perf_query_pass >= 0 &&
      submit->cmd_buffer &&
      submit->cmd_buffer->perf_query_pool;

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
      if (submit->cmd_buffer) {
         if (has_perf_query) {
            struct anv_query_pool *query_pool = submit->cmd_buffer->perf_query_pool;
            struct anv_bo *pass_batch_bo = query_pool->bo;
            uint64_t pass_batch_offset =
               khr_perf_query_preamble_offset(query_pool,
                                              submit->perf_query_pass);

            gen_print_batch(&device->decoder_ctx,
                            pass_batch_bo->map + pass_batch_offset, 64,
                            pass_batch_bo->offset + pass_batch_offset, false);
         }

         struct anv_batch_bo **bo = u_vector_tail(&submit->cmd_buffer->seen_bbos);
         device->cmd_buffer_being_decoded = submit->cmd_buffer;
         gen_print_batch(&device->decoder_ctx, (*bo)->bo->map,
                         (*bo)->bo->size, (*bo)->bo->offset, false);
         device->cmd_buffer_being_decoded = NULL;
      } else if (submit->simple_bo) {
         gen_print_batch(&device->decoder_ctx, submit->simple_bo->map,
                         submit->simple_bo->size, submit->simple_bo->offset, false);
      } else {
         gen_print_batch(&device->decoder_ctx,
                         device->trivial_batch_bo->map,
                         device->trivial_batch_bo->size,
                         device->trivial_batch_bo->offset, false);
      }
   }

   if (submit->fence_count > 0) {
      assert(device->physical->has_syncobj);
      execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
      execbuf.execbuf.num_cliprects = submit->fence_count;
      execbuf.execbuf.cliprects_ptr = (uintptr_t)submit->fences;
   }

   if (submit->in_fence != -1) {
      execbuf.execbuf.flags |= I915_EXEC_FENCE_IN;
      execbuf.execbuf.rsvd2 |= (uint32_t)submit->in_fence;
   }

   if (submit->need_out_fence)
      execbuf.execbuf.flags |= I915_EXEC_FENCE_OUT;

   if (has_perf_query) {
      struct anv_query_pool *query_pool = submit->cmd_buffer->perf_query_pool;
      assert(submit->perf_query_pass < query_pool->n_passes);
      struct gen_perf_query_info *query_info =
         query_pool->pass_query[submit->perf_query_pass];

      /* Some performance queries just use the pipeline statistics HW and no
       * OA in that case, so there is no need to reconfigure.
       */
      if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
          (query_info->kind == GEN_PERF_QUERY_TYPE_OA ||
           query_info->kind == GEN_PERF_QUERY_TYPE_RAW)) {
         int ret = gen_ioctl(device->perf_fd, I915_PERF_IOCTL_CONFIG,
                             (void *)(uintptr_t) query_info->oa_metrics_set_id);
         if (ret < 0) {
            result = anv_device_set_lost(device,
                                         "i915-perf config failed: %s",
                                         strerror(errno));
         }
      }

      struct anv_bo *pass_batch_bo = query_pool->bo;

      struct drm_i915_gem_exec_object2 query_pass_object = {
         .handle = pass_batch_bo->gem_handle,
         .offset = pass_batch_bo->offset,
         .flags = pass_batch_bo->flags,
      };
      struct drm_i915_gem_execbuffer2 query_pass_execbuf = {
         .buffers_ptr = (uintptr_t) &query_pass_object,
         .buffer_count = 1,
         .batch_start_offset = khr_perf_query_preamble_offset(query_pool,
                                                              submit->perf_query_pass),
         .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
         .rsvd1 = device->context_id,
      };

      int ret = queue->device->no_hw ? 0 :
         anv_gem_execbuffer(queue->device, &query_pass_execbuf);
      if (ret)
         result = anv_queue_set_lost(queue, "execbuf2 failed: %m");
   }

   int ret = queue->device->no_hw ? 0 :
      anv_gem_execbuffer(queue->device, &execbuf.execbuf);
   if (ret)
      result = anv_queue_set_lost(queue, "execbuf2 failed: %m");

   struct drm_i915_gem_exec_object2 *objects = execbuf.objects;
   for (uint32_t k = 0; k < execbuf.bo_count; k++) {
      if (execbuf.bos[k]->flags & EXEC_OBJECT_PINNED)
         assert(execbuf.bos[k]->offset == objects[k].offset);
      execbuf.bos[k]->offset = objects[k].offset;
   }

   if (result == VK_SUCCESS && submit->need_out_fence)
      submit->out_fence = execbuf.execbuf.rsvd2 >> 32;

 error:
   pthread_cond_broadcast(&device->queue_submit);

   anv_execbuf_finish(&execbuf);

   return result;
}