/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "gen7_pack.h"
#include "gen8_pack.h"
/** \file anv_batch_chain.c
 *
 * This file contains functions related to anv_cmd_buffer as a data
 * structure.  This involves everything required to create and destroy
 * the actual batch buffers as well as link them together and handle
 * relocations and surface state.  It specifically does *not* contain any
 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
 */

/*-----------------------------------------------------------------------*
 * Functions related to anv_reloc_list
 *-----------------------------------------------------------------------*/
static VkResult
anv_reloc_list_init_clone(struct anv_reloc_list *list,
                          const VkAllocationCallbacks *alloc,
                          const struct anv_reloc_list *other_list)
{
   if (other_list) {
      list->num_relocs = other_list->num_relocs;
      list->array_length = other_list->array_length;
   } else {
      list->num_relocs = 0;
      list->array_length = 256;
   }

   list->relocs =
      anv_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (list->relocs == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   list->reloc_bos =
      anv_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (list->reloc_bos == NULL) {
      anv_free(alloc, list->relocs);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   if (other_list) {
      memcpy(list->relocs, other_list->relocs,
             list->array_length * sizeof(*list->relocs));
      memcpy(list->reloc_bos, other_list->reloc_bos,
             list->array_length * sizeof(*list->reloc_bos));
   }

   return VK_SUCCESS;
}
VkResult
anv_reloc_list_init(struct anv_reloc_list *list,
                    const VkAllocationCallbacks *alloc)
{
   return anv_reloc_list_init_clone(list, alloc, NULL);
}
void
anv_reloc_list_finish(struct anv_reloc_list *list,
                      const VkAllocationCallbacks *alloc)
{
   anv_free(alloc, list->relocs);
   anv_free(alloc, list->reloc_bos);
}
static VkResult
anv_reloc_list_grow(struct anv_reloc_list *list,
                    const VkAllocationCallbacks *alloc,
                    size_t num_additional_relocs)
{
   if (list->num_relocs + num_additional_relocs <= list->array_length)
      return VK_SUCCESS;

   size_t new_length = list->array_length * 2;
   while (new_length < list->num_relocs + num_additional_relocs)
      new_length *= 2;

   struct drm_i915_gem_relocation_entry *new_relocs =
      anv_alloc(alloc, new_length * sizeof(*list->relocs), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_relocs == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   struct anv_bo **new_reloc_bos =
      anv_alloc(alloc, new_length * sizeof(*list->reloc_bos), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_reloc_bos == NULL) {
      anv_free(alloc, new_relocs);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
   memcpy(new_reloc_bos, list->reloc_bos,
          list->num_relocs * sizeof(*list->reloc_bos));

   anv_free(alloc, list->relocs);
   anv_free(alloc, list->reloc_bos);

   list->array_length = new_length;
   list->relocs = new_relocs;
   list->reloc_bos = new_reloc_bos;

   return VK_SUCCESS;
}
uint64_t
anv_reloc_list_add(struct anv_reloc_list *list,
                   const VkAllocationCallbacks *alloc,
                   uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
{
   struct drm_i915_gem_relocation_entry *entry;
   int index;

   anv_reloc_list_grow(list, alloc, 1);
   /* TODO: Handle failure */

   /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
   index = list->num_relocs++;
   list->reloc_bos[index] = target_bo;
   entry = &list->relocs[index];
   entry->target_handle = target_bo->gem_handle;
   entry->delta = delta;
   entry->offset = offset;
   entry->presumed_offset = target_bo->offset;
   entry->read_domains = 0;
   entry->write_domain = 0;

   return target_bo->offset + delta;
}
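
/* Illustrative sketch (not driver code): a caller that wants to embed a
 * pointer to `target_bo` in the batch writes the *presumed* address
 * returned above into the batch and relies on the recorded entry for any
 * later fixup, e.g.:
 *
 *    uint64_t presumed = anv_reloc_list_add(list, alloc, offset, bo, delta);
 *    *(uint32_t *)(batch_map + offset) = presumed;
 *
 * If the kernel later places the BO somewhere else, it rewrites the dword
 * at `offset` from the relocation entry; if presumed_offset was already
 * correct, the rewrite is skipped.  The names `batch_map` and `offset`
 * here are hypothetical.
 */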
static void
anv_reloc_list_append(struct anv_reloc_list *list,
                      const VkAllocationCallbacks *alloc,
                      struct anv_reloc_list *other, uint32_t offset)
{
   anv_reloc_list_grow(list, alloc, other->num_relocs);
   /* TODO: Handle failure */

   memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
          other->num_relocs * sizeof(other->relocs[0]));
   memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
          other->num_relocs * sizeof(other->reloc_bos[0]));

   for (uint32_t i = 0; i < other->num_relocs; i++)
      list->relocs[i + list->num_relocs].offset += offset;

   list->num_relocs += other->num_relocs;
}
/*-----------------------------------------------------------------------*
 * Functions related to anv_batch
 *-----------------------------------------------------------------------*/
void *
anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
{
   if (batch->next + num_dwords * 4 > batch->end)
      batch->extend_cb(batch, batch->user_data);

   void *p = batch->next;

   batch->next += num_dwords * 4;
   assert(batch->next <= batch->end);

   return p;
}
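
/* Usage sketch (hypothetical caller): reserve space and write dwords
 * directly.  The anv_batch_emit() macro used throughout this file is built
 * on this same primitive: it reserves the command's dword length and packs
 * the command template into the reserved space.
 *
 *    uint32_t *dw = anv_batch_emit_dwords(batch, 2);
 *    dw[0] = 0;   // MI_NOOP
 *    dw[1] = 0;   // MI_NOOP
 */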
uint64_t
anv_batch_emit_reloc(struct anv_batch *batch,
                     void *location, struct anv_bo *bo, uint32_t delta)
{
   return anv_reloc_list_add(batch->relocs, batch->alloc,
                             location - batch->start, bo, delta);
}
void
anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
{
   uint32_t size, offset;

   size = other->next - other->start;
   assert(size % 4 == 0);

   if (batch->next + size > batch->end)
      batch->extend_cb(batch, batch->user_data);

   assert(batch->next + size <= batch->end);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
   memcpy(batch->next, other->start, size);

   offset = batch->next - batch->start;
   anv_reloc_list_append(batch->relocs, batch->alloc,
                         other->relocs, offset);

   batch->next += size;
}
/*-----------------------------------------------------------------------*
 * Functions related to anv_batch_bo
 *-----------------------------------------------------------------------*/
static VkResult
anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
                    struct anv_batch_bo **bbo_out)
{
   VkResult result;

   struct anv_batch_bo *bbo = anv_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
                                        8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (bbo == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
   if (result != VK_SUCCESS)
      goto fail_bo_alloc;

   *bbo_out = bbo;

   return VK_SUCCESS;

 fail_bo_alloc:
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
 fail_alloc:
   anv_free(&cmd_buffer->pool->alloc, bbo);

   return result;
}
static VkResult
anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
                   const struct anv_batch_bo *other_bbo,
                   struct anv_batch_bo **bbo_out)
{
   VkResult result;

   struct anv_batch_bo *bbo = anv_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
                                        8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (bbo == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
                                      &other_bbo->relocs);
   if (result != VK_SUCCESS)
      goto fail_bo_alloc;

   bbo->length = other_bbo->length;
   memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);

   bbo->last_ss_pool_bo_offset = other_bbo->last_ss_pool_bo_offset;

   *bbo_out = bbo;

   return VK_SUCCESS;

 fail_bo_alloc:
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
 fail_alloc:
   anv_free(&cmd_buffer->pool->alloc, bbo);

   return result;
}
static void
anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
                   size_t batch_padding)
{
   batch->next = batch->start = bbo->bo.map;
   batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
   batch->relocs = &bbo->relocs;
   bbo->last_ss_pool_bo_offset = 0;
   bbo->relocs.num_relocs = 0;
}
static void
anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
                      size_t batch_padding)
{
   batch->start = bbo->bo.map;
   batch->next = bbo->bo.map + bbo->length;
   batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
   batch->relocs = &bbo->relocs;
}
static void
anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
{
   assert(batch->start == bbo->bo.map);
   bbo->length = batch->next - batch->start;
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
}
static void
anv_batch_bo_destroy(struct anv_batch_bo *bbo,
                     struct anv_cmd_buffer *cmd_buffer)
{
   anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
   anv_free(&cmd_buffer->pool->alloc, bbo);
}
static VkResult
anv_batch_bo_list_clone(const struct list_head *list,
                        struct anv_cmd_buffer *cmd_buffer,
                        struct list_head *new_list)
{
   VkResult result = VK_SUCCESS;

   list_inithead(new_list);

   struct anv_batch_bo *prev_bbo = NULL;
   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
      struct anv_batch_bo *new_bbo;
      result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
      if (result != VK_SUCCESS)
         break;
      list_addtail(&new_bbo->link, new_list);

      if (prev_bbo) {
         /* As we clone this list of batch_bo's, they chain one to the
          * other using MI_BATCH_BUFFER_START commands.  We need to fix up
          * those relocations as we go.  Fortunately, this is pretty easy
          * as it will always be the last relocation in the list.
          */
         uint32_t last_idx = prev_bbo->relocs.num_relocs - 1;
         assert(prev_bbo->relocs.reloc_bos[last_idx] == &bbo->bo);
         prev_bbo->relocs.reloc_bos[last_idx] = &new_bbo->bo;
      }

      prev_bbo = new_bbo;
   }

   if (result != VK_SUCCESS) {
      list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
         anv_batch_bo_destroy(bbo, cmd_buffer);
   }

   return result;
}
/*-----------------------------------------------------------------------*
 * Functions related to anv_cmd_buffer
 *-----------------------------------------------------------------------*/
static inline struct anv_batch_bo *
anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
{
   return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
}

struct anv_address
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   return (struct anv_address) {
      .bo = &cmd_buffer->device->surface_state_block_pool.bo,
      .offset = *(int32_t *)anv_vector_head(&cmd_buffer->bt_blocks),
   };
}
static void
emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_bo *bo, uint32_t offset)
{
   /* In gen8+ the address field grew to two dwords to accommodate 48 bit
    * offsets.  The high 16 bits are in the last dword, so we can use the
    * gen8 version in either case, as long as we set the instruction length
    * in the header accordingly.  This means that we always emit three
    * dwords here and all the padding and adjustment we do in this file
    * works for all gens.
    */

   const uint32_t gen7_length =
      GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
   const uint32_t gen8_length =
      GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;

   anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START,
      .DwordLength = cmd_buffer->device->info.gen < 8 ?
                     gen7_length : gen8_length,
      ._2ndLevelBatchBuffer = _1stlevelbatch,
      .AddressSpaceIndicator = ASI_PPGTT,
      .BatchBufferStartAddress = { bo, offset });
}
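
/* Worked example of the length arithmetic above, assuming the usual MI
 * encoding where DwordLength excludes the first `length_bias` dwords:
 * GEN8_MI_BATCH_BUFFER_START is 3 dwords with a bias of 2, so gen8_length
 * is 3 - 2 = 1; the gen7 form is 2 dwords, so gen7_length is 2 - 2 = 0.
 * Either way we emit the 3-dword gen8 layout; on gen7 the extra dword is
 * simply not consumed because the header says the command is shorter.
 */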
static void
cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_batch_bo *bbo)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_batch_bo *current_bbo =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);

   /* We set the end of the batch a little short so we would be sure we
    * have room for the chaining command.  Since we're about to emit the
    * chaining command, let's set it back where it should go.
    */
   batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
   assert(batch->end == current_bbo->bo.map + current_bbo->bo.size);

   emit_batch_buffer_start(cmd_buffer, &bbo->bo, 0);

   anv_batch_bo_finish(current_bbo, batch);
}
static VkResult
anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
{
   struct anv_cmd_buffer *cmd_buffer = _data;
   struct anv_batch_bo *new_bbo;

   VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
   if (result != VK_SUCCESS)
      return result;

   struct anv_batch_bo **seen_bbo = anv_vector_add(&cmd_buffer->seen_bbos);
   if (seen_bbo == NULL) {
      anv_batch_bo_destroy(new_bbo, cmd_buffer);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   *seen_bbo = new_bbo;

   cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);

   list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);

   anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);

   return VK_SUCCESS;
}
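
/* This function is installed as batch->extend_cb, so the growth path is
 * driven entirely from anv_batch_emit_dwords().  A sketch of the flow:
 *
 *    if (batch->next + num_dwords * 4 > batch->end)
 *       batch->extend_cb(batch, batch->user_data);
 *       // -> anv_cmd_buffer_chain_batch(): allocate a new batch_bo,
 *       //    emit MI_BATCH_BUFFER_START into the old one, then point
 *       //    batch->next/end at the new bo's map.
 */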
struct anv_state
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t entries, uint32_t *state_offset)
{
   struct anv_block_pool *block_pool =
      &cmd_buffer->device->surface_state_block_pool;
   int32_t *bt_block = anv_vector_head(&cmd_buffer->bt_blocks);
   struct anv_state state;

   state.alloc_size = align_u32(entries * 4, 32);

   if (cmd_buffer->bt_next + state.alloc_size > block_pool->block_size)
      return (struct anv_state) { 0 };

   state.offset = cmd_buffer->bt_next;
   state.map = block_pool->map + *bt_block + state.offset;

   cmd_buffer->bt_next += state.alloc_size;

   assert(*bt_block < 0);
   *state_offset = -(*bt_block);

   return state;
}
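
/* The sign convention here: binding-table blocks are allocated from the
 * *back* of the surface state block pool (see
 * anv_cmd_buffer_new_binding_table_block below), so *bt_block is negative
 * relative to the pool's center.  An illustrative layout, assuming a
 * 4 KiB block at center offset -4096:
 *
 *    state.map     = block_pool->map + (-4096) + state.offset
 *    *state_offset = 4096   // distance from the block back to the center
 *
 * so the CPU writes through the correct mapping while offsets stored in
 * the table can still be expressed relative to the pool center.
 */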
struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
{
   return anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
}

struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment)
{
   return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
                                 size, alignment);
}
VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_block_pool *block_pool =
      &cmd_buffer->device->surface_state_block_pool;

   int32_t *offset = anv_vector_add(&cmd_buffer->bt_blocks);
   if (offset == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   *offset = anv_block_pool_alloc_back(block_pool);
   cmd_buffer->bt_next = 0;

   return VK_SUCCESS;
}
VkResult
anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo;
   VkResult result;

   list_inithead(&cmd_buffer->batch_bos);

   result = anv_batch_bo_create(cmd_buffer, &batch_bo);
   if (result != VK_SUCCESS)
      return result;

   list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);

   cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
   cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
   cmd_buffer->batch.user_data = cmd_buffer;

   anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);

   int success = anv_vector_init(&cmd_buffer->seen_bbos,
                                 sizeof(struct anv_bo *),
                                 8 * sizeof(struct anv_bo *));
   if (!success) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_batch_bo;
   }

   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) = batch_bo;

   success = anv_vector_init(&cmd_buffer->bt_blocks, sizeof(int32_t),
                             8 * sizeof(int32_t));
   if (!success) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_seen_bbos;
   }

   result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
                                &cmd_buffer->pool->alloc);
   if (result != VK_SUCCESS)
      goto fail_bt_blocks;

   anv_cmd_buffer_new_binding_table_block(cmd_buffer);

   cmd_buffer->execbuf2.objects = NULL;
   cmd_buffer->execbuf2.bos = NULL;
   cmd_buffer->execbuf2.array_length = 0;

   return VK_SUCCESS;

 fail_bt_blocks:
   anv_vector_finish(&cmd_buffer->bt_blocks);
 fail_seen_bbos:
   anv_vector_finish(&cmd_buffer->seen_bbos);
 fail_batch_bo:
   anv_batch_bo_destroy(batch_bo, cmd_buffer);

   return result;
}
void
anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   int32_t *bt_block;
   anv_vector_foreach(bt_block, &cmd_buffer->bt_blocks) {
      anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
                          *bt_block);
   }
   anv_vector_finish(&cmd_buffer->bt_blocks);

   anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);

   anv_vector_finish(&cmd_buffer->seen_bbos);

   /* Destroy all of the batch buffers */
   list_for_each_entry_safe(struct anv_batch_bo, bbo,
                            &cmd_buffer->batch_bos, link) {
      anv_batch_bo_destroy(bbo, cmd_buffer);
   }

   anv_free(&cmd_buffer->pool->alloc, cmd_buffer->execbuf2.objects);
   anv_free(&cmd_buffer->pool->alloc, cmd_buffer->execbuf2.bos);
}
void
anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   /* Delete all but the first batch bo */
   assert(!list_empty(&cmd_buffer->batch_bos));
   while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
      list_del(&bbo->link);
      anv_batch_bo_destroy(bbo, cmd_buffer);
   }
   assert(!list_empty(&cmd_buffer->batch_bos));

   anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
                      &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);

   while (anv_vector_length(&cmd_buffer->bt_blocks) > 1) {
      int32_t *bt_block = anv_vector_remove(&cmd_buffer->bt_blocks);
      anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
                          *bt_block);
   }
   assert(anv_vector_length(&cmd_buffer->bt_blocks) == 1);
   cmd_buffer->bt_next = 0;

   cmd_buffer->surface_relocs.num_relocs = 0;

   /* Reset the list of seen buffers */
   cmd_buffer->seen_bbos.head = 0;
   cmd_buffer->seen_bbos.tail = 0;

   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);
}
void
anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      anv_batch_emit(&cmd_buffer->batch, GEN7_MI_BATCH_BUFFER_END);

      /* Round batch up to an even number of dwords. */
      if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
         anv_batch_emit(&cmd_buffer->batch, GEN7_MI_NOOP);

      cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
   }

   anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
      /* If this is a secondary command buffer, we need to determine the
       * mode in which it will be executed with vkExecuteCommands.  We
       * determine this statically here so that this stays in sync with the
       * actual ExecuteCommands implementation.
       */
      if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
          (anv_cmd_buffer_current_batch_bo(cmd_buffer)->length <
           ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
         /* If the secondary has exactly one batch buffer in its list *and*
          * that batch buffer is less than half of the maximum size, we're
          * probably better off simply copying it into our batch.
          */
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
      } else if (!(cmd_buffer->usage_flags &
                   VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;

         /* When we chain, we need to add an MI_BATCH_BUFFER_START command
          * with its relocation.  In order to handle this we'll increment
          * here so we can unconditionally decrement right before adding
          * the MI_BATCH_BUFFER_START command.
          */
         anv_cmd_buffer_current_batch_bo(cmd_buffer)->relocs.num_relocs++;
         cmd_buffer->batch.next += GEN8_MI_BATCH_BUFFER_START_length * 4;
      } else {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
      }
   }
}
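
/* Summary of the three secondary-execution modes chosen above (a reading
 * aid, not normative):
 *
 *    EMIT           - small, single-bo secondary: memcpy it into the
 *                     primary with anv_batch_emit_batch().
 *    CHAIN          - single-use secondary: jump into its first bo and
 *                     patch its final MI_BATCH_BUFFER_START to return.
 *    COPY_AND_CHAIN - simultaneous-use secondary: clone the whole bo
 *                     chain so each execution can be patched separately.
 */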
static inline VkResult
anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
                             struct list_head *list)
{
   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
      struct anv_batch_bo **bbo_ptr = anv_vector_add(&cmd_buffer->seen_bbos);
      if (bbo_ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      *bbo_ptr = bbo;
   }

   return VK_SUCCESS;
}
void
anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                             struct anv_cmd_buffer *secondary)
{
   switch (secondary->exec_mode) {
   case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
      anv_batch_emit_batch(&primary->batch, &secondary->batch);
      break;
   case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
      struct anv_batch_bo *first_bbo =
         list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
      struct anv_batch_bo *last_bbo =
         list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);

      emit_batch_buffer_start(primary, &first_bbo->bo, 0);

      struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
      assert(primary->batch.start == this_bbo->bo.map);
      uint32_t offset = primary->batch.next - primary->batch.start;
      const uint32_t inst_size = GEN8_MI_BATCH_BUFFER_START_length * 4;

      /* Roll back the previous MI_BATCH_BUFFER_START and its relocation so
       * we can emit a new command and relocation for the current splice.
       * In order to handle the initial-use case, we incremented next and
       * num_relocs in end_batch_buffer() so we can always just subtract
       * here.
       */
      last_bbo->relocs.num_relocs--;
      secondary->batch.next -= inst_size;
      emit_batch_buffer_start(secondary, &this_bbo->bo, offset);
      anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);

      /* After patching up the secondary buffer, we need to clflush the
       * modified instruction in case we're on a !llc platform.  We use a
       * little loop to handle the case where the instruction crosses a
       * cache line boundary.
       */
      if (!primary->device->info.has_llc) {
         void *inst = secondary->batch.next - inst_size;
         void *p = (void *) (((uintptr_t) inst) & ~CACHELINE_MASK);
         __builtin_ia32_sfence();
         while (p < secondary->batch.next) {
            __builtin_ia32_clflush(p);
            p += CACHELINE_SIZE;
         }
      }
      break;
   }
   case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
      struct list_head copy_list;
      VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
                                                primary,
                                                &copy_list);
      if (result != VK_SUCCESS)
         return; /* FIXME */

      anv_cmd_buffer_add_seen_bbos(primary, &copy_list);

      struct anv_batch_bo *first_bbo =
         list_first_entry(&copy_list, struct anv_batch_bo, link);
      struct anv_batch_bo *last_bbo =
         list_last_entry(&copy_list, struct anv_batch_bo, link);

      cmd_buffer_chain_to_batch_bo(primary, first_bbo);

      list_splicetail(&copy_list, &primary->batch_bos);

      anv_batch_bo_continue(last_bbo, &primary->batch,
                            GEN8_MI_BATCH_BUFFER_START_length * 4);

      anv_cmd_buffer_emit_state_base_address(primary);
      break;
   }
   default:
      assert(!"Invalid execution mode");
   }

   anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
                         &secondary->surface_relocs, 0);
}
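
/* Cache-line example for the flush loop above, assuming CACHELINE_SIZE is
 * 64 and CACHELINE_MASK is 63: if the 12-byte MI_BATCH_BUFFER_START starts
 * at map offset 0x7f8, it spans 0x7f8..0x803 and crosses the 0x800
 * boundary, so we round `inst` down to 0x7c0 and clflush both the 0x7c0
 * and 0x800 lines before the kernel submits the batch.
 */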
VkResult
anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
                      struct anv_bo *bo,
                      struct anv_reloc_list *relocs)
{
   struct drm_i915_gem_exec_object2 *obj = NULL;

   if (bo->index < cmd_buffer->execbuf2.bo_count &&
       cmd_buffer->execbuf2.bos[bo->index] == bo)
      obj = &cmd_buffer->execbuf2.objects[bo->index];

   if (obj == NULL) {
      /* We've never seen this one before.  Add it to the list and assign
       * an id that we can use later.
       */
      if (cmd_buffer->execbuf2.bo_count >= cmd_buffer->execbuf2.array_length) {
         uint32_t new_len = cmd_buffer->execbuf2.objects ?
                            cmd_buffer->execbuf2.array_length * 2 : 64;

         struct drm_i915_gem_exec_object2 *new_objects =
            anv_alloc(&cmd_buffer->pool->alloc, new_len * sizeof(*new_objects),
                      8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
         if (new_objects == NULL)
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

         struct anv_bo **new_bos =
            anv_alloc(&cmd_buffer->pool->alloc, new_len * sizeof(*new_bos),
                      8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
         if (new_bos == NULL) {
            anv_free(&cmd_buffer->pool->alloc, new_objects);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }

         if (cmd_buffer->execbuf2.objects) {
            memcpy(new_objects, cmd_buffer->execbuf2.objects,
                   cmd_buffer->execbuf2.bo_count * sizeof(*new_objects));
            memcpy(new_bos, cmd_buffer->execbuf2.bos,
                   cmd_buffer->execbuf2.bo_count * sizeof(*new_bos));
         }

         cmd_buffer->execbuf2.objects = new_objects;
         cmd_buffer->execbuf2.bos = new_bos;
         cmd_buffer->execbuf2.array_length = new_len;
      }

      assert(cmd_buffer->execbuf2.bo_count < cmd_buffer->execbuf2.array_length);

      bo->index = cmd_buffer->execbuf2.bo_count++;
      obj = &cmd_buffer->execbuf2.objects[bo->index];
      cmd_buffer->execbuf2.bos[bo->index] = bo;

      obj->handle = bo->gem_handle;
      obj->relocation_count = 0;
      obj->relocs_ptr = 0;
      obj->alignment = 0;
      obj->offset = bo->offset;
      obj->flags = 0;
      obj->rsvd1 = 0;
      obj->rsvd2 = 0;
   }

   if (relocs != NULL && obj->relocation_count == 0) {
      /* This is the first time we've ever seen a list of relocations for
       * this BO.  Go ahead and set the relocations and then walk the list
       * of relocations and add them all.
       */
      obj->relocation_count = relocs->num_relocs;
      obj->relocs_ptr = (uintptr_t) relocs->relocs;

      for (size_t i = 0; i < relocs->num_relocs; i++) {
         /* A quick sanity check on relocations */
         assert(relocs->relocs[i].offset < bo->size);
         anv_cmd_buffer_add_bo(cmd_buffer, relocs->reloc_bos[i], NULL);
      }
   }

   return VK_SUCCESS;
}
static void
anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_reloc_list *list)
{
   struct anv_bo *bo;

   /* If the kernel supports I915_EXEC_NO_RELOC, it will compare offset in
    * struct drm_i915_gem_exec_object2 against the bo's current offset and
    * if all bos haven't moved it will skip relocation processing
    * altogether.  If I915_EXEC_NO_RELOC is not supported, the kernel
    * ignores the incoming value of offset so we can set it either way.
    * For that to work we need to make sure all relocs use the same
    * presumed offset.
    */

   for (size_t i = 0; i < list->num_relocs; i++) {
      bo = list->reloc_bos[i];
      if (bo->offset != list->relocs[i].presumed_offset)
         cmd_buffer->execbuf2.need_reloc = true;

      list->relocs[i].target_handle = bo->index;
   }
}
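
/* A sketch of what the kernel does with these fields (paraphrased, not
 * the actual i915 code):
 *
 *    for each reloc r of each object obj:
 *       target = lut[r.target_handle];      // HANDLE_LUT: index, not handle
 *       if (target->offset != r.presumed_offset)
 *          rewrite dword at obj + r.offset; // slow path
 *
 * which is why we only set need_reloc when some presumed_offset is stale.
 */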
static void
adjust_relocations_from_block_pool(struct anv_block_pool *pool,
                                   struct anv_reloc_list *relocs)
{
   for (size_t i = 0; i < relocs->num_relocs; i++) {
      /* In general, we don't know how stale the relocated value is.  It
       * may have been used last time or it may not.  Since we don't want
       * to stomp it while the GPU may be accessing it, we haven't updated
       * it anywhere else in the code.  Instead, we just set the presumed
       * offset to what it is now based on the delta and the data in the
       * block pool.  Then the kernel will update it for us if needed.
       */
      assert(relocs->relocs[i].offset < pool->state.end);
      uint32_t *reloc_data = pool->map + relocs->relocs[i].offset;

      /* We're reading back the relocated value from potentially incoherent
       * memory here.  However, any change to the value will be from the
       * kernel writing out relocations, which will keep the CPU cache up
       * to date.
       */
      relocs->relocs[i].presumed_offset = *reloc_data - relocs->relocs[i].delta;

      /* All of the relocations from this block pool to other BO's should
       * have been emitted relative to the surface block pool center.  We
       * need to add the center offset to make them relative to the
       * beginning of the actual GEM bo.
       */
      relocs->relocs[i].offset += pool->center_bo_offset;
   }
}
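
/* Worked example of the presumed-offset reconstruction above: if the dword
 * in the pool currently reads 0x00231000 and the relocation was emitted
 * with delta == 0x1000, then the last-known address of the target bo must
 * have been 0x00231000 - 0x1000 = 0x00230000, so that is what we report
 * as presumed_offset.  (Values are illustrative.)
 */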
static void
adjust_relocations_to_block_pool(struct anv_block_pool *pool,
                                 struct anv_bo *from_bo,
                                 struct anv_reloc_list *relocs,
                                 uint32_t *last_pool_center_bo_offset)
{
   assert(*last_pool_center_bo_offset <= pool->center_bo_offset);
   uint32_t delta = pool->center_bo_offset - *last_pool_center_bo_offset;

   /* When we initially emit relocations into a block pool, we don't
    * actually know what the final center_bo_offset will be so we just emit
    * it as if center_bo_offset == 0.  Now that we know what the center
    * offset is, we need to walk the list of relocations and adjust any
    * relocations that point to the pool bo with the correct offset.
    */
   for (size_t i = 0; i < relocs->num_relocs; i++) {
      if (relocs->reloc_bos[i] == &pool->bo) {
         /* Adjust the delta value in the relocation to correctly
          * correspond to the new delta.  Initially, this value may have
          * been negative (if treated as unsigned), but we trust in
          * uint32_t roll-over to fix that for us at this point.
          */
         relocs->relocs[i].delta += delta;

         /* Since the delta has changed, we need to update the actual
          * relocated value with the new presumed value.  This function
          * should only be called on batch buffers, so we know it isn't in
          * use by the GPU at the moment.
          */
         assert(relocs->relocs[i].offset < from_bo->size);
         uint32_t *reloc_data = from_bo->map + relocs->relocs[i].offset;
         *reloc_data = relocs->relocs[i].presumed_offset +
                       relocs->relocs[i].delta;
      }
   }

   *last_pool_center_bo_offset = pool->center_bo_offset;
}
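
/* Worked example, with illustrative numbers: a reloc was emitted against
 * the pool bo with delta == -64 (0xffffffc0 as unsigned) while we assumed
 * center_bo_offset == 0.  If the pool has since grown so that
 * center_bo_offset == 4096 and last_pool_center_bo_offset == 0, then
 * delta becomes 0xffffffc0 + 4096 == 0x00000fc0 == 4032, i.e. 64 bytes
 * below the new center, exactly as intended thanks to uint32_t roll-over.
 */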
void
anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_block_pool *ss_pool =
      &cmd_buffer->device->surface_state_block_pool;

   cmd_buffer->execbuf2.bo_count = 0;
   cmd_buffer->execbuf2.need_reloc = false;

   adjust_relocations_from_block_pool(ss_pool, &cmd_buffer->surface_relocs);
   anv_cmd_buffer_add_bo(cmd_buffer, &ss_pool->bo, &cmd_buffer->surface_relocs);

   /* First, we walk over all of the bos we've seen and add them and their
    * relocations to the validate list.
    */
   struct anv_batch_bo **bbo;
   anv_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
      adjust_relocations_to_block_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
                                       &(*bbo)->last_ss_pool_bo_offset);

      anv_cmd_buffer_add_bo(cmd_buffer, &(*bbo)->bo, &(*bbo)->relocs);
   }

   struct anv_batch_bo *first_batch_bo =
      list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);

   /* The kernel requires that the last entry in the validation list be the
    * batch buffer to execute.  We can simply swap the element
    * corresponding to the first batch_bo in the chain with the last
    * element in the list.
    */
   if (first_batch_bo->bo.index != cmd_buffer->execbuf2.bo_count - 1) {
      uint32_t idx = first_batch_bo->bo.index;
      uint32_t last_idx = cmd_buffer->execbuf2.bo_count - 1;

      struct drm_i915_gem_exec_object2 tmp_obj =
         cmd_buffer->execbuf2.objects[idx];
      assert(cmd_buffer->execbuf2.bos[idx] == &first_batch_bo->bo);

      cmd_buffer->execbuf2.objects[idx] = cmd_buffer->execbuf2.objects[last_idx];
      cmd_buffer->execbuf2.bos[idx] = cmd_buffer->execbuf2.bos[last_idx];
      cmd_buffer->execbuf2.bos[idx]->index = idx;

      cmd_buffer->execbuf2.objects[last_idx] = tmp_obj;
      cmd_buffer->execbuf2.bos[last_idx] = &first_batch_bo->bo;
      first_batch_bo->bo.index = last_idx;
   }

   /* Now we go through and fixup all of the relocation lists to point to
    * the correct indices in the object array.  We have to do this after we
    * reorder the list above as some of the indices may have changed.
    */
   anv_vector_foreach(bbo, &cmd_buffer->seen_bbos)
      anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);

   anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);

   if (!cmd_buffer->device->info.has_llc) {
      __builtin_ia32_sfence();
      anv_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
         for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
            __builtin_ia32_clflush((*bbo)->bo.map + i);
      }
   }

   cmd_buffer->execbuf2.execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) cmd_buffer->execbuf2.objects,
      .buffer_count = cmd_buffer->execbuf2.bo_count,
      .batch_start_offset = 0,
      .batch_len = batch->next - batch->start,
      .cliprects_ptr = 0,
      .num_cliprects = 0,
      .DR1 = 0,
      .DR4 = 0,
      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER |
               I915_EXEC_CONSTANTS_REL_GENERAL,
      .rsvd1 = cmd_buffer->device->context_id,
      .rsvd2 = 0,
   };

   if (!cmd_buffer->execbuf2.need_reloc)
      cmd_buffer->execbuf2.execbuf.flags |= I915_EXEC_NO_RELOC;
}
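
/* The execbuf filled in above is ultimately handed to the kernel by the
 * queue-submit path.  A minimal sketch of that submission, assuming an
 * open DRM fd (the surrounding driver uses its own wrapper for this):
 *
 *    #include <xf86drm.h>
 *    #include <i915_drm.h>
 *
 *    int ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2,
 *                       &cmd_buffer->execbuf2.execbuf);
 *    if (ret != 0)
 *       return vk_error(VK_ERROR_DEVICE_LOST);
 */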