/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include "anv_private.h"

#include "genxml/gen7_pack.h"
#include "genxml/gen8_pack.h"

#include "util/debug.h"

/** \file anv_batch_chain.c
 *
 * This file contains functions related to anv_cmd_buffer as a data
 * structure.  This involves everything required to create and destroy
 * the actual batch buffers as well as link them together and handle
 * relocations and surface state.  It specifically does *not* contain any
 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
 */

/*-----------------------------------------------------------------------*
 * Functions related to anv_reloc_list
 *-----------------------------------------------------------------------*/

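/* Initialize a relocation list.  If other_list is non-NULL, the new list
 * starts as a copy of it (same capacity and contents); otherwise it starts
 * empty with a default capacity of 256 entries.
 */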
static VkResult
anv_reloc_list_init_clone(struct anv_reloc_list *list,
                          const VkAllocationCallbacks *alloc,
                          const struct anv_reloc_list *other_list)
{
   if (other_list) {
      list->num_relocs = other_list->num_relocs;
      list->array_length = other_list->array_length;
   } else {
      list->num_relocs = 0;
      list->array_length = 256;
   }

   list->relocs =
      vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (list->relocs == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   list->reloc_bos =
      vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (list->reloc_bos == NULL) {
      vk_free(alloc, list->relocs);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   if (other_list) {
      memcpy(list->relocs, other_list->relocs,
             list->array_length * sizeof(*list->relocs));
      memcpy(list->reloc_bos, other_list->reloc_bos,
             list->array_length * sizeof(*list->reloc_bos));
   }

   return VK_SUCCESS;
}

VkResult
anv_reloc_list_init(struct anv_reloc_list *list,
                    const VkAllocationCallbacks *alloc)
{
   return anv_reloc_list_init_clone(list, alloc, NULL);
}

void
anv_reloc_list_finish(struct anv_reloc_list *list,
                      const VkAllocationCallbacks *alloc)
{
   vk_free(alloc, list->relocs);
   vk_free(alloc, list->reloc_bos);
}

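/* Grow the list so it can hold at least num_additional_relocs more entries,
 * doubling the array length until both the relocation array and the parallel
 * reloc_bos array fit.
 */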
static VkResult
anv_reloc_list_grow(struct anv_reloc_list *list,
                    const VkAllocationCallbacks *alloc,
                    size_t num_additional_relocs)
{
   if (list->num_relocs + num_additional_relocs <= list->array_length)
      return VK_SUCCESS;

   size_t new_length = list->array_length * 2;
   while (new_length < list->num_relocs + num_additional_relocs)
      new_length *= 2;

   struct drm_i915_gem_relocation_entry *new_relocs =
      vk_alloc(alloc, new_length * sizeof(*list->relocs), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_relocs == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   struct anv_bo **new_reloc_bos =
      vk_alloc(alloc, new_length * sizeof(*list->reloc_bos), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_reloc_bos == NULL) {
      vk_free(alloc, new_relocs);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
   memcpy(new_reloc_bos, list->reloc_bos,
          list->num_relocs * sizeof(*list->reloc_bos));

   vk_free(alloc, list->relocs);
   vk_free(alloc, list->reloc_bos);

   list->array_length = new_length;
   list->relocs = new_relocs;
   list->reloc_bos = new_reloc_bos;

   return VK_SUCCESS;
}

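/* Record a single relocation and return the presumed address
 * (target_bo->offset + delta) that the relocated location will hold.
 */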
uint64_t
anv_reloc_list_add(struct anv_reloc_list *list,
                   const VkAllocationCallbacks *alloc,
                   uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
{
   struct drm_i915_gem_relocation_entry *entry;
   int index;

   const uint32_t domain =
      target_bo->is_winsys_bo ? I915_GEM_DOMAIN_RENDER : 0;

   anv_reloc_list_grow(list, alloc, 1);
   /* TODO: Handle failure */

   /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
   index = list->num_relocs++;
   list->reloc_bos[index] = target_bo;
   entry = &list->relocs[index];
   entry->target_handle = target_bo->gem_handle;
   entry->delta = delta;
   entry->offset = offset;
   entry->presumed_offset = target_bo->offset;
   entry->read_domains = domain;
   entry->write_domain = domain;
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));

   return target_bo->offset + delta;
}

static void
anv_reloc_list_append(struct anv_reloc_list *list,
                      const VkAllocationCallbacks *alloc,
                      struct anv_reloc_list *other, uint32_t offset)
{
   anv_reloc_list_grow(list, alloc, other->num_relocs);
   /* TODO: Handle failure */

   memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
          other->num_relocs * sizeof(other->relocs[0]));
   memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
          other->num_relocs * sizeof(other->reloc_bos[0]));

   for (uint32_t i = 0; i < other->num_relocs; i++)
      list->relocs[i + list->num_relocs].offset += offset;

   list->num_relocs += other->num_relocs;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_batch
 *-----------------------------------------------------------------------*/

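/* Reserve num_dwords dwords in the batch, calling the extend callback if the
 * current batch_bo is out of space, and return a pointer to the reserved
 * space.
 */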
void *
anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
{
   if (batch->next + num_dwords * 4 > batch->end)
      batch->extend_cb(batch, batch->user_data);

   void *p = batch->next;

   batch->next += num_dwords * 4;
   assert(batch->next <= batch->end);

   return p;
}

uint64_t
anv_batch_emit_reloc(struct anv_batch *batch,
                     void *location, struct anv_bo *bo, uint32_t delta)
{
   return anv_reloc_list_add(batch->relocs, batch->alloc,
                             location - batch->start, bo, delta);
}

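/* Splice the contents of 'other' into 'batch', copying both the command
 * dwords and the relocations, with the relocation offsets rebased to the
 * copy's position in the target batch.
 */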
void
anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
{
   uint32_t size, offset;

   size = other->next - other->start;
   assert(size % 4 == 0);

   if (batch->next + size > batch->end)
      batch->extend_cb(batch, batch->user_data);

   assert(batch->next + size <= batch->end);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
   memcpy(batch->next, other->start, size);

   offset = batch->next - batch->start;
   anv_reloc_list_append(batch->relocs, batch->alloc,
                         other->relocs, offset);

   batch->next += size;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_batch_bo
 *-----------------------------------------------------------------------*/

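/* Create a new batch_bo: the struct itself comes from the command pool
 * allocator, the BO from the device's batch_bo_pool, plus a fresh relocation
 * list.
 */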
static VkResult
anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
                    struct anv_batch_bo **bbo_out)
{
   VkResult result;

   struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
                                       8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (bbo == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
                              ANV_CMD_BUFFER_BATCH_SIZE);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
   if (result != VK_SUCCESS)
      goto fail_bo_alloc;

   *bbo_out = bbo;

   return VK_SUCCESS;

 fail_bo_alloc:
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
 fail_alloc:
   vk_free(&cmd_buffer->pool->alloc, bbo);

   return result;
}

static VkResult
anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
                   const struct anv_batch_bo *other_bbo,
                   struct anv_batch_bo **bbo_out)
{
   VkResult result;

   struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
                                       8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (bbo == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
                              other_bbo->bo.size);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
                                      &other_bbo->relocs);
   if (result != VK_SUCCESS)
      goto fail_bo_alloc;

   bbo->length = other_bbo->length;
   memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);

   *bbo_out = bbo;

   return VK_SUCCESS;

 fail_bo_alloc:
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
 fail_alloc:
   vk_free(&cmd_buffer->pool->alloc, bbo);

   return result;
}

static void
anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
                   size_t batch_padding)
{
   batch->next = batch->start = bbo->bo.map;
   batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
   batch->relocs = &bbo->relocs;
   bbo->relocs.num_relocs = 0;
}

static void
anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
                      size_t batch_padding)
{
   batch->start = bbo->bo.map;
   batch->next = bbo->bo.map + bbo->length;
   batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
   batch->relocs = &bbo->relocs;
}

static void
anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
{
   assert(batch->start == bbo->bo.map);
   bbo->length = batch->next - batch->start;
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
}

static VkResult
anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
                  struct anv_batch *batch, size_t additional,
                  size_t batch_padding)
{
   assert(batch->start == bbo->bo.map);
   bbo->length = batch->next - batch->start;

   size_t new_size = bbo->bo.size;
   while (new_size <= bbo->length + additional + batch_padding)
      new_size *= 2;

   if (new_size == bbo->bo.size)
      return VK_SUCCESS;

   struct anv_bo new_bo;
   VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
                                       &new_bo, new_size);
   if (result != VK_SUCCESS)
      return result;

   memcpy(new_bo.map, bbo->bo.map, bbo->length);

   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);

   bbo->bo = new_bo;
   anv_batch_bo_continue(bbo, batch, batch_padding);

   return VK_SUCCESS;
}

static void
anv_batch_bo_destroy(struct anv_batch_bo *bbo,
                     struct anv_cmd_buffer *cmd_buffer)
{
   anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
   vk_free(&cmd_buffer->pool->alloc, bbo);
}

static VkResult
anv_batch_bo_list_clone(const struct list_head *list,
                        struct anv_cmd_buffer *cmd_buffer,
                        struct list_head *new_list)
{
   VkResult result = VK_SUCCESS;

   list_inithead(new_list);

   struct anv_batch_bo *prev_bbo = NULL;
   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
      struct anv_batch_bo *new_bbo = NULL;
      result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
      if (result != VK_SUCCESS)
         break;
      list_addtail(&new_bbo->link, new_list);

      if (prev_bbo) {
         /* As we clone this list of batch_bo's, they chain one to the
          * other using MI_BATCH_BUFFER_START commands.  We need to fix up
          * those relocations as we go.  Fortunately, this is pretty easy
          * as it will always be the last relocation in the list.
          */
         uint32_t last_idx = prev_bbo->relocs.num_relocs - 1;
         assert(prev_bbo->relocs.reloc_bos[last_idx] == &bbo->bo);
         prev_bbo->relocs.reloc_bos[last_idx] = &new_bbo->bo;
      }

      prev_bbo = new_bbo;
   }

   if (result != VK_SUCCESS) {
      list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
         anv_batch_bo_destroy(bbo, cmd_buffer);
   }

   return result;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_cmd_buffer
 *-----------------------------------------------------------------------*/

static inline struct anv_batch_bo *
anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
{
   return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
}

struct anv_address
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   return (struct anv_address) {
      .bo = &cmd_buffer->device->surface_state_block_pool.bo,
      .offset = *(int32_t *)u_vector_head(&cmd_buffer->bt_blocks),
   };
}

static void
emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_bo *bo, uint32_t offset)
{
   /* In gen8+ the address field grew to two dwords to accommodate 48 bit
    * offsets.  The high 16 bits are in the last dword, so we can use the gen8
    * version in either case, as long as we set the instruction length in the
    * header accordingly.  This means that we always emit three dwords here
    * and all the padding and adjustment we do in this file works for all
    * gens.
    */

   const uint32_t gen7_length =
      GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
   const uint32_t gen8_length =
      GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;

   anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
      bbs.DWordLength               = cmd_buffer->device->info.gen < 8 ?
                                      gen7_length : gen8_length;
      bbs._2ndLevelBatchBuffer      = _1stlevelbatch;
      bbs.AddressSpaceIndicator     = ASI_PPGTT;
      bbs.BatchBufferStartAddress   = (struct anv_address) { bo, offset };
   }
}

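/* Chain the current batch_bo over to bbo by emitting an
 * MI_BATCH_BUFFER_START into the padding reserved at the end of the current
 * batch_bo.
 */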
static void
cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_batch_bo *bbo)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_batch_bo *current_bbo =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);

   /* We set the end of the batch a little short so we would be sure we
    * have room for the chaining command.  Since we're about to emit the
    * chaining command, let's set it back where it should go.
    */
   batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
   assert(batch->end == current_bbo->bo.map + current_bbo->bo.size);

   emit_batch_buffer_start(cmd_buffer, &bbo->bo, 0);

   anv_batch_bo_finish(current_bbo, batch);
}

static VkResult
anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
{
   struct anv_cmd_buffer *cmd_buffer = _data;
   struct anv_batch_bo *new_bbo;

   VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
   if (result != VK_SUCCESS)
      return result;

   struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
   if (seen_bbo == NULL) {
      anv_batch_bo_destroy(new_bbo, cmd_buffer);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   *seen_bbo = new_bbo;

   cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);

   list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);

   anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);

   return VK_SUCCESS;
}

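/* extend_cb for the non-chaining path: grow the current (and only) batch_bo
 * in place, copying its contents into a bigger BO.
 */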
static VkResult
anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
{
   struct anv_cmd_buffer *cmd_buffer = _data;
   struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);

   anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
                     GEN8_MI_BATCH_BUFFER_START_length * 4);

   return VK_SUCCESS;
}

/** Allocate a binding table
 *
 * This function allocates a binding table.  This is a bit more complicated
 * than one would think due to a combination of Vulkan driver design and some
 * unfortunate hardware restrictions.
 *
 * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
 * the binding table pointer which means that all binding tables need to live
 * in the bottom 64k of surface state base address.  The way the GL driver has
 * classically dealt with this restriction is to emit all surface states
 * on-the-fly into the batch and have a batch buffer smaller than 64k.  This
 * isn't really an option in Vulkan for a couple of reasons:
 *
 *  1) In Vulkan, we have growing (or chaining) batches so surface states have
 *     to live in their own buffer and we have to be able to re-emit
 *     STATE_BASE_ADDRESS as needed which requires a full pipeline stall.  In
 *     order to avoid emitting STATE_BASE_ADDRESS any more often than needed
 *     (it's not that hard to hit 64k of just binding tables), we allocate
 *     surface state objects up-front when VkImageView is created.  In order
 *     for this to work, surface state objects need to be allocated from a
 *     global buffer.
 *
 *  2) We tried to design the surface state system in such a way that it's
 *     already ready for bindless texturing.  The way bindless texturing works
 *     on our hardware is that you have a big pool of surface state objects
 *     (with its own state base address) and the bindless handles are simply
 *     offsets into that pool.  With the architecture we chose, we already
 *     have that pool and it's exactly the same pool that we use for regular
 *     surface states so we should already be ready for bindless.
 *
 *  3) For render targets, we need to be able to fill out the surface states
 *     later in vkBeginRenderPass so that we can assign clear colors
 *     correctly.  One way to do this would be to just create the surface
 *     state data and then repeatedly copy it into the surface state BO every
 *     time we have to re-emit STATE_BASE_ADDRESS.  While this works, it's
 *     rather annoying; being able to allocate them up-front and re-use them
 *     for the entire render pass is much simpler.
 *
 * While none of these are technically blockers for emitting state on the fly
 * like we do in GL, the ability to have a single surface state pool
 * simplifies things greatly.  Unfortunately, it comes at a cost...
 *
 * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
 * place the binding tables just anywhere in surface state base address.
 * Because 64k isn't a whole lot of space, we can't simply restrict the
 * surface state buffer to 64k, we have to be more clever.  The solution we've
 * chosen is to have a block pool with a maximum size of 2G that starts at
 * zero and grows in both directions.  All surface states are allocated from
 * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
 * binding tables from the bottom of the pool (negative offsets).  Every time
 * we allocate a new binding table block, we set surface state base address to
 * point to the bottom of the binding table block.  This way all of the
 * binding tables in the block are in the bottom 64k of surface state base
 * address.  When we fill out the binding table, we add the distance between
 * the bottom of our binding table block and zero of the block pool to the
 * surface state offsets so that they are correct relative to our new surface
 * state base address at the bottom of the binding table block.
 *
 * \see adjust_relocations_from_state_pool()
 * \see adjust_relocations_to_state_pool()
 *
 * \param[in]  entries        The number of surface state entries the binding
 *                            table should be able to hold.
 *
 * \param[out] state_offset   The offset into surface state base address
 *                            where the surface states live.  This must be
 *                            added to the surface state offset when it is
 *                            written into the binding table entry.
 *
 * \return                    An anv_state representing the binding table
 */
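/* An illustration of the offset math with made-up numbers: suppose the
 * current binding table block was allocated at bt_block = -4096 and
 * bt_next = 64.  The new table then lives at block_pool->map - 4096 + 64
 * and state_offset is reported as 4096.  A surface state sitting at
 * positive pool offset N is therefore written into the table as N + 4096,
 * its offset relative to the surface state base address at the bottom of
 * the block.
 */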
struct anv_state
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t entries, uint32_t *state_offset)
{
   struct anv_block_pool *block_pool =
      &cmd_buffer->device->surface_state_block_pool;
   int32_t *bt_block = u_vector_head(&cmd_buffer->bt_blocks);
   struct anv_state state;

   state.alloc_size = align_u32(entries * 4, 32);

   if (cmd_buffer->bt_next + state.alloc_size > block_pool->block_size)
      return (struct anv_state) { 0 };

   state.offset = cmd_buffer->bt_next;
   state.map = block_pool->map + *bt_block + state.offset;

   cmd_buffer->bt_next += state.alloc_size;

   assert(*bt_block < 0);
   *state_offset = -(*bt_block);

   return state;
}

struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
{
   struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
   return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
                                 isl_dev->ss.size, isl_dev->ss.align);
}

struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment)
{
   return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
                                 size, alignment);
}

VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_block_pool *block_pool =
      &cmd_buffer->device->surface_state_block_pool;

   int32_t *offset = u_vector_add(&cmd_buffer->bt_blocks);
   if (offset == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   *offset = anv_block_pool_alloc_back(block_pool);
   cmd_buffer->bt_next = 0;

   return VK_SUCCESS;
}

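/* Set up the batch_bo chain for a freshly created command buffer: one
 * batch_bo, the seen_bbos and bt_blocks vectors, the surface relocation
 * list, and the first binding table block.
 */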
VkResult
anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo;
   VkResult result;

   list_inithead(&cmd_buffer->batch_bos);

   result = anv_batch_bo_create(cmd_buffer, &batch_bo);
   if (result != VK_SUCCESS)
      return result;

   list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);

   cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
   cmd_buffer->batch.user_data = cmd_buffer;

   if (cmd_buffer->device->can_chain_batches) {
      cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
   } else {
      cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
   }

   anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);

   int success = u_vector_init(&cmd_buffer->seen_bbos,
                               sizeof(struct anv_bo *),
                               8 * sizeof(struct anv_bo *));
   if (!success)
      goto fail_batch_bo;

   *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;

   success = u_vector_init(&cmd_buffer->bt_blocks, sizeof(int32_t),
                           8 * sizeof(int32_t));
   if (!success)
      goto fail_seen_bbos;

   result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
                                &cmd_buffer->pool->alloc);
   if (result != VK_SUCCESS)
      goto fail_bt_blocks;
   cmd_buffer->last_ss_pool_center = 0;

   anv_cmd_buffer_new_binding_table_block(cmd_buffer);

   return VK_SUCCESS;

 fail_bt_blocks:
   u_vector_finish(&cmd_buffer->bt_blocks);
 fail_seen_bbos:
   u_vector_finish(&cmd_buffer->seen_bbos);
 fail_batch_bo:
   anv_batch_bo_destroy(batch_bo, cmd_buffer);

   return result;
}

void
anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   int32_t *bt_block;
   u_vector_foreach(bt_block, &cmd_buffer->bt_blocks) {
      anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
                          *bt_block);
   }
   u_vector_finish(&cmd_buffer->bt_blocks);

   anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);

   u_vector_finish(&cmd_buffer->seen_bbos);

   /* Destroy all of the batch buffers */
   list_for_each_entry_safe(struct anv_batch_bo, bbo,
                            &cmd_buffer->batch_bos, link) {
      anv_batch_bo_destroy(bbo, cmd_buffer);
   }
}

void
anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   /* Delete all but the first batch bo */
   assert(!list_empty(&cmd_buffer->batch_bos));
   while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
      list_del(&bbo->link);
      anv_batch_bo_destroy(bbo, cmd_buffer);
   }
   assert(!list_empty(&cmd_buffer->batch_bos));

   anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
                      &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);

   while (u_vector_length(&cmd_buffer->bt_blocks) > 1) {
      int32_t *bt_block = u_vector_remove(&cmd_buffer->bt_blocks);
      anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
                          *bt_block);
   }
   assert(u_vector_length(&cmd_buffer->bt_blocks) == 1);
   cmd_buffer->bt_next = 0;

   cmd_buffer->surface_relocs.num_relocs = 0;
   cmd_buffer->last_ss_pool_center = 0;

   /* Reset the list of seen buffers */
   cmd_buffer->seen_bbos.head = 0;
   cmd_buffer->seen_bbos.tail = 0;

   *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);
}

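/* Finish the batch: for primaries, emit MI_BATCH_BUFFER_END (padded to an
 * even number of dwords); for secondaries, pick the execution mode that
 * vkCmdExecuteCommands will use.
 */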
void
anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      /* When we start a batch buffer, we subtract a certain amount of
       * padding from the end to ensure that we always have room to emit a
       * BATCH_BUFFER_START to chain to the next BO.  We need to remove
       * that padding before we end the batch; otherwise, we may end up
       * with our BATCH_BUFFER_END in another BO.
       */
      cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
      assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);

      anv_batch_emit(&cmd_buffer->batch, GEN7_MI_BATCH_BUFFER_END, bbe);

      /* Round batch up to an even number of dwords. */
      if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
         anv_batch_emit(&cmd_buffer->batch, GEN7_MI_NOOP, noop);

      cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
   }

   anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
      /* If this is a secondary command buffer, we need to determine the
       * mode in which it will be executed with vkExecuteCommands.  We
       * determine this statically here so that this stays in sync with the
       * actual ExecuteCommands implementation.
       */
      if (!cmd_buffer->device->can_chain_batches) {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
      } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
                 (batch_bo->length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
         /* If the secondary has exactly one batch buffer in its list *and*
          * that batch buffer is less than half of the maximum size, we're
          * probably better off simply copying it into our batch.
          */
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
      } else if (!(cmd_buffer->usage_flags &
                   VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;

         /* When we chain, we need to add an MI_BATCH_BUFFER_START command
          * with its relocation.  In order to handle this we'll increment here
          * so we can unconditionally decrement right before adding the
          * MI_BATCH_BUFFER_START command.
          */
         batch_bo->relocs.num_relocs++;
         cmd_buffer->batch.next += GEN8_MI_BATCH_BUFFER_START_length * 4;
      } else {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
      }
   }
}

static inline VkResult
anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
                             struct list_head *list)
{
   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
      struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
      if (bbo_ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      *bbo_ptr = bbo;
   }

   return VK_SUCCESS;
}

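/* Execute a secondary command buffer from a primary, using the exec_mode
 * chosen in anv_cmd_buffer_end_batch_buffer().
 */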
void
anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                             struct anv_cmd_buffer *secondary)
{
   switch (secondary->exec_mode) {
   case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
      anv_batch_emit_batch(&primary->batch, &secondary->batch);
      break;
   case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
      unsigned length = secondary->batch.end - secondary->batch.start;
      anv_batch_bo_grow(primary, bbo, &primary->batch, length,
                        GEN8_MI_BATCH_BUFFER_START_length * 4);
      anv_batch_emit_batch(&primary->batch, &secondary->batch);
      break;
   }
   case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
      struct anv_batch_bo *first_bbo =
         list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
      struct anv_batch_bo *last_bbo =
         list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);

      emit_batch_buffer_start(primary, &first_bbo->bo, 0);

      struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
      assert(primary->batch.start == this_bbo->bo.map);
      uint32_t offset = primary->batch.next - primary->batch.start;
      const uint32_t inst_size = GEN8_MI_BATCH_BUFFER_START_length * 4;

      /* Roll back the previous MI_BATCH_BUFFER_START and its relocation so we
       * can emit a new command and relocation for the current splice.  In
       * order to handle the initial-use case, we incremented next and
       * num_relocs in end_batch_buffer() so we can always just subtract
       * here.
       */
      last_bbo->relocs.num_relocs--;
      secondary->batch.next -= inst_size;
      emit_batch_buffer_start(secondary, &this_bbo->bo, offset);
      anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);

      /* After patching up the secondary buffer, we need to clflush the
       * modified instruction in case we're on a !llc platform.  We use a
       * little loop to handle the case where the instruction crosses a cache
       * line boundary.
       */
      if (!primary->device->info.has_llc) {
         void *inst = secondary->batch.next - inst_size;
         void *p = (void *) (((uintptr_t) inst) & ~CACHELINE_MASK);
         __builtin_ia32_mfence();
         while (p < secondary->batch.next) {
            __builtin_ia32_clflush(p);
            p += CACHELINE_SIZE;
         }
      }
      break;
   }
   case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
      struct list_head copy_list;
      VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
                                                primary,
                                                &copy_list);
      if (result != VK_SUCCESS)
         return; /* FIXME */

      anv_cmd_buffer_add_seen_bbos(primary, &copy_list);

      struct anv_batch_bo *first_bbo =
         list_first_entry(&copy_list, struct anv_batch_bo, link);
      struct anv_batch_bo *last_bbo =
         list_last_entry(&copy_list, struct anv_batch_bo, link);

      cmd_buffer_chain_to_batch_bo(primary, first_bbo);

      list_splicetail(&copy_list, &primary->batch_bos);

      anv_batch_bo_continue(last_bbo, &primary->batch,
                            GEN8_MI_BATCH_BUFFER_START_length * 4);
      break;
   }
   default:
      assert(!"Invalid execution mode");
   }

   anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
                         &secondary->surface_relocs, 0);
}

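/* Everything we need to submit an execbuf2 call: the kernel struct, the
 * validation objects, and the anv_bo backing each object.
 */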
struct anv_execbuf {
   struct drm_i915_gem_execbuffer2           execbuf;

   struct drm_i915_gem_exec_object2 *        objects;
   uint32_t                                  bo_count;
   struct anv_bo **                          bos;

   /* Allocated length of the 'objects' and 'bos' arrays */
   uint32_t                                  array_length;
};

static void
anv_execbuf_init(struct anv_execbuf *exec)
{
   memset(exec, 0, sizeof(*exec));
}

static void
anv_execbuf_finish(struct anv_execbuf *exec,
                   const VkAllocationCallbacks *alloc)
{
   vk_free(alloc, exec->objects);
   vk_free(alloc, exec->bos);
}

static VkResult
anv_execbuf_add_bo(struct anv_execbuf *exec,
                   struct anv_bo *bo,
                   struct anv_reloc_list *relocs,
                   const VkAllocationCallbacks *alloc)
{
   struct drm_i915_gem_exec_object2 *obj = NULL;

   if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
      obj = &exec->objects[bo->index];

   if (obj == NULL) {
      /* We've never seen this one before.  Add it to the list and assign
       * an id that we can use later.
       */
      if (exec->bo_count >= exec->array_length) {
         uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;

         struct drm_i915_gem_exec_object2 *new_objects =
            vk_alloc(alloc, new_len * sizeof(*new_objects),
                     8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
         if (new_objects == NULL)
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

         struct anv_bo **new_bos =
            vk_alloc(alloc, new_len * sizeof(*new_bos),
                     8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
         if (new_bos == NULL) {
            vk_free(alloc, new_objects);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }

         if (exec->objects) {
            memcpy(new_objects, exec->objects,
                   exec->bo_count * sizeof(*new_objects));
            memcpy(new_bos, exec->bos,
                   exec->bo_count * sizeof(*new_bos));
         }

         vk_free(alloc, exec->objects);
         vk_free(alloc, exec->bos);

         exec->objects = new_objects;
         exec->bos = new_bos;
         exec->array_length = new_len;
      }

      assert(exec->bo_count < exec->array_length);

      bo->index = exec->bo_count++;
      obj = &exec->objects[bo->index];
      exec->bos[bo->index] = bo;

      obj->handle = bo->gem_handle;
      obj->relocation_count = 0;
      obj->relocs_ptr = 0;
      obj->alignment = 0;
      obj->offset = bo->offset;
      obj->flags = bo->is_winsys_bo ? EXEC_OBJECT_WRITE : 0;
      obj->rsvd1 = 0;
      obj->rsvd2 = 0;
   }

   if (relocs != NULL && obj->relocation_count == 0) {
      /* This is the first time we've ever seen a list of relocations for
       * this BO.  Go ahead and set the relocations and then walk the list
       * of relocations and add them all.
       */
      obj->relocation_count = relocs->num_relocs;
      obj->relocs_ptr = (uintptr_t) relocs->relocs;

      for (size_t i = 0; i < relocs->num_relocs; i++) {
         /* A quick sanity check on relocations */
         assert(relocs->relocs[i].offset < bo->size);
         anv_execbuf_add_bo(exec, relocs->reloc_bos[i], NULL, alloc);
      }
   }

   return VK_SUCCESS;
}

static void
anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_reloc_list *list)
{
   for (size_t i = 0; i < list->num_relocs; i++)
      list->relocs[i].target_handle = list->reloc_bos[i]->index;
}

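/* Write a presumed address into the batch: a sign-extended 48-bit canonical
 * 64-bit address on gen8+, a 32-bit address on older gens, optionally
 * flushing the cacheline on non-LLC platforms.
 */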
static void
write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush)
{
   unsigned reloc_size = 0;
   if (device->info.gen >= 8) {
      /* From the Broadwell PRM Vol. 2a, MI_LOAD_REGISTER_MEM::MemoryAddress:
       *
       *    "This field specifies the address of the memory location where the
       *    register value specified in the DWord above will read from.  The
       *    address specifies the DWord location of the data.  Range =
       *    GraphicsVirtualAddress[63:2] for a DWord register GraphicsAddress
       *    [63:48] are ignored by the HW and assumed to be in correct
       *    canonical form [63:48] == [47]."
       */
      const int shift = 63 - 47;
      reloc_size = sizeof(uint64_t);
      *(uint64_t *)p = (((int64_t)v) << shift) >> shift;
   } else {
      reloc_size = sizeof(uint32_t);
      *(uint32_t *)p = v;
   }

   if (flush && !device->info.has_llc)
      anv_clflush_range(p, reloc_size);
}

static void
adjust_relocations_from_state_pool(struct anv_block_pool *pool,
                                   struct anv_reloc_list *relocs,
                                   uint32_t last_pool_center_bo_offset)
{
   assert(last_pool_center_bo_offset <= pool->center_bo_offset);
   uint32_t delta = pool->center_bo_offset - last_pool_center_bo_offset;

   for (size_t i = 0; i < relocs->num_relocs; i++) {
      /* All of the relocations from this block pool to other BO's should
       * have been emitted relative to the surface block pool center.  We
       * need to add the center offset to make them relative to the
       * beginning of the actual GEM bo.
       */
      relocs->relocs[i].offset += delta;
   }
}

static void
adjust_relocations_to_state_pool(struct anv_block_pool *pool,
                                 struct anv_bo *from_bo,
                                 struct anv_reloc_list *relocs,
                                 uint32_t last_pool_center_bo_offset)
{
   assert(last_pool_center_bo_offset <= pool->center_bo_offset);
   uint32_t delta = pool->center_bo_offset - last_pool_center_bo_offset;

   /* When we initially emit relocations into a block pool, we don't
    * actually know what the final center_bo_offset will be so we just emit
    * it as if center_bo_offset == 0.  Now that we know what the center
    * offset is, we need to walk the list of relocations and adjust any
    * relocations that point to the pool bo with the correct offset.
    */
   for (size_t i = 0; i < relocs->num_relocs; i++) {
      if (relocs->reloc_bos[i] == &pool->bo) {
         /* Adjust the delta value in the relocation to correctly
          * correspond to the new delta.  Initially, this value may have
          * been negative (if treated as unsigned), but we trust in
          * uint32_t roll-over to fix that for us at this point.
          */
         relocs->relocs[i].delta += delta;

         /* Since the delta has changed, we need to update the actual
          * relocated value with the new presumed value.  This function
          * should only be called on batch buffers, so we know it isn't in
          * use by the GPU at the moment.
          */
         assert(relocs->relocs[i].offset < from_bo->size);
         write_reloc(pool->device, from_bo->map + relocs->relocs[i].offset,
                     relocs->relocs[i].presumed_offset +
                     relocs->relocs[i].delta, false);
      }
   }
}

static void
anv_reloc_list_apply(struct anv_device *device,
                     struct anv_reloc_list *list,
                     struct anv_bo *bo,
                     bool always_relocate)
{
   for (size_t i = 0; i < list->num_relocs; i++) {
      struct anv_bo *target_bo = list->reloc_bos[i];
      if (list->relocs[i].presumed_offset == target_bo->offset &&
          !always_relocate)
         continue;

      void *p = bo->map + list->relocs[i].offset;
      write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
      list->relocs[i].presumed_offset = target_bo->offset;
   }
}

/**
 * This function applies the relocation for a command buffer and writes the
 * actual addresses into the buffers as per what we were told by the kernel on
 * the previous execbuf2 call.  This should be safe to do because, for each
 * relocated address, we have two cases:
 *
 *  1) The target BO is inactive (as seen by the kernel).  In this case, it is
 *     not in use by the GPU so updating the address is 100% ok.  It won't be
 *     in-use by the GPU (from our context) again until the next execbuf2
 *     happens.  If the kernel decides to move it in the next execbuf2, it
 *     will have to do the relocations itself, but that's ok because it should
 *     have all of the information needed to do so.
 *
 *  2) The target BO is active (as seen by the kernel).  In this case, it
 *     hasn't moved since the last execbuffer2 call because GTT shuffling
 *     *only* happens when the BO is idle.  (From our perspective, it only
 *     happens inside the execbuffer2 ioctl, but the shuffling may be
 *     triggered by another ioctl, with full-ppgtt this is limited to only
 *     execbuffer2 ioctls on the same context, or memory pressure.)  Since the
 *     target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
 *     address and the relocated value we are writing into the BO will be the
 *     same as the value that is already there.
 *
 *     There is also a possibility that the target BO is active but the exact
 *     RENDER_SURFACE_STATE object we are writing the relocation into isn't in
 *     use.  In this case, the address currently in the RENDER_SURFACE_STATE
 *     may be stale but it's still safe to write the relocation because that
 *     particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
 *     won't be until the next execbuf2 call.
 *
 * By doing relocations on the CPU, we can tell the kernel that it doesn't
 * need to bother.  We want to do this because the surface state buffer is
 * used by every command buffer so, if the kernel does the relocations, it
 * will always be busy and the kernel will always stall.  This is also
 * probably the fastest mechanism for doing relocations since the kernel would
 * have to make a full copy of all the relocation lists.
 */
static bool
relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
                    struct anv_execbuf *exec)
{
   static int userspace_relocs = -1;
   if (userspace_relocs < 0)
      userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
   if (!userspace_relocs)
      return false;

   /* First, we have to check to see whether or not we can even do the
    * relocation.  New buffers which have never been submitted to the kernel
    * don't have a valid offset so we need to let the kernel do relocations so
    * that we can get offsets for them.  On future execbuf2 calls, those
    * buffers will have offsets and we will be able to skip relocating.
    * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
    */
   for (uint32_t i = 0; i < exec->bo_count; i++) {
      if (exec->bos[i]->offset == (uint64_t)-1)
         return false;
   }

   /* Since surface states are shared between command buffers and we don't
    * know what order they will be submitted to the kernel, we don't know
    * what address is actually written in the surface state object at any
    * given time.  The only option is to always relocate them.
    */
   anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
                        &cmd_buffer->device->surface_state_block_pool.bo,
                        true /* always relocate surface states */);

   /* Since we own all of the batch buffers, we know what values are stored
    * in the relocated addresses and only have to update them if the offsets
    * have changed.
    */
   struct anv_batch_bo **bbo;
   u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
      anv_reloc_list_apply(cmd_buffer->device,
                           &(*bbo)->relocs, &(*bbo)->bo, false);
   }

   for (uint32_t i = 0; i < exec->bo_count; i++)
      exec->objects[i].offset = exec->bos[i]->offset;

   return true;
}

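/* Build the validation list for the command buffer, fix up relocations and
 * object indices, then submit the batch with the execbuf2 ioctl.
 */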
VkResult
anv_cmd_buffer_execbuf(struct anv_device *device,
                       struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_block_pool *ss_pool =
      &cmd_buffer->device->surface_state_block_pool;

   struct anv_execbuf execbuf;
   anv_execbuf_init(&execbuf);

   adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
                                      cmd_buffer->last_ss_pool_center);
   anv_execbuf_add_bo(&execbuf, &ss_pool->bo, &cmd_buffer->surface_relocs,
                      &cmd_buffer->pool->alloc);

   /* First, we walk over all of the bos we've seen and add them and their
    * relocations to the validate list.
    */
   struct anv_batch_bo **bbo;
   u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
      adjust_relocations_to_state_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
                                       cmd_buffer->last_ss_pool_center);

      anv_execbuf_add_bo(&execbuf, &(*bbo)->bo, &(*bbo)->relocs,
                         &cmd_buffer->pool->alloc);
   }

   /* Now that we've adjusted all of the surface state relocations, we need to
    * record the surface state pool center so future executions of the command
    * buffer can adjust correctly.
    */
   cmd_buffer->last_ss_pool_center = ss_pool->center_bo_offset;

   struct anv_batch_bo *first_batch_bo =
      list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);

   /* The kernel requires that the last entry in the validation list be the
    * batch buffer to execute.  We can simply swap the element
    * corresponding to the first batch_bo in the chain with the last
    * element in the list.
    */
   if (first_batch_bo->bo.index != execbuf.bo_count - 1) {
      uint32_t idx = first_batch_bo->bo.index;
      uint32_t last_idx = execbuf.bo_count - 1;

      struct drm_i915_gem_exec_object2 tmp_obj = execbuf.objects[idx];
      assert(execbuf.bos[idx] == &first_batch_bo->bo);

      execbuf.objects[idx] = execbuf.objects[last_idx];
      execbuf.bos[idx] = execbuf.bos[last_idx];
      execbuf.bos[idx]->index = idx;

      execbuf.objects[last_idx] = tmp_obj;
      execbuf.bos[last_idx] = &first_batch_bo->bo;
      first_batch_bo->bo.index = last_idx;
   }

   /* Now we go through and fixup all of the relocation lists to point to
    * the correct indices in the object array.  We have to do this after we
    * reorder the list above as some of the indices may have changed.
    */
   u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
      anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);

   anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);

   if (!cmd_buffer->device->info.has_llc) {
      __builtin_ia32_mfence();
      u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
         for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
            __builtin_ia32_clflush((*bbo)->bo.map + i);
      }
   }

   execbuf.execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) execbuf.objects,
      .buffer_count = execbuf.bo_count,
      .batch_start_offset = 0,
      .batch_len = batch->next - batch->start,
      .cliprects_ptr = 0,
      .num_cliprects = 0,
      .DR1 = 0,
      .DR4 = 0,
      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER |
               I915_EXEC_CONSTANTS_REL_GENERAL,
      .rsvd1 = cmd_buffer->device->context_id,
      .rsvd2 = 0,
   };

   if (relocate_cmd_buffer(cmd_buffer, &execbuf)) {
      /* If we were able to successfully relocate everything, tell the kernel
       * that it can skip doing relocations.  The requirement for using
       * NO_RELOC is:
       *
       *  1) The addresses written in the objects must match the corresponding
       *     reloc.presumed_offset which in turn must match the corresponding
       *     execobject.offset.
       *
       *  2) To avoid stalling, execobject.offset should match the current
       *     address of that object within the active context.
       *
       * In order to satisfy all of the invariants that make userspace
       * relocations to be safe (see relocate_cmd_buffer()), we need to
       * further ensure that the addresses we use match those used by the
       * kernel for the most recent execbuf2.
       *
       * The kernel may still choose to do relocations anyway if something has
       * moved in the GTT.  In this case, the relocation list still needs to be
       * valid.  All relocations on the batch buffers are already valid and
       * kept up-to-date.  For surface state relocations, by applying the
       * relocations in relocate_cmd_buffer, we ensured that the address in
       * the RENDER_SURFACE_STATE matches presumed_offset, so it should be
       * safe for the kernel to relocate them as needed.
       */
      execbuf.execbuf.flags |= I915_EXEC_NO_RELOC;
   } else {
      /* In the case where we fall back to doing kernel relocations, we need
       * to ensure that the relocation list is valid.  All relocations on the
       * batch buffers are already valid and kept up-to-date.  Since surface
       * states are shared between command buffers and we don't know what
       * order they will be submitted to the kernel, we don't know what
       * address is actually written in the surface state object at any given
       * time.  The only option is to set a bogus presumed offset and let the
       * kernel relocate them.
       */
      for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
         cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
   }

   VkResult result = anv_device_execbuf(device, &execbuf.execbuf, execbuf.bos);

   anv_execbuf_finish(&execbuf, &cmd_buffer->pool->alloc);

   return result;
}