/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include "anv_private.h"

#include "genxml/gen7_pack.h"
#include "genxml/gen8_pack.h"

/** \file anv_batch_chain.c
 *
 * This file contains functions related to anv_cmd_buffer as a data
 * structure.  This involves everything required to create and destroy
 * the actual batch buffers as well as link them together and handle
 * relocations and surface state.  It specifically does *not* contain any
 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
 */

/*-----------------------------------------------------------------------*
 * Functions related to anv_reloc_list
 *-----------------------------------------------------------------------*/

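/* An anv_reloc_list keeps two parallel arrays: relocs[] holds the kernel's
 * drm_i915_gem_relocation_entry structs and reloc_bos[] holds the anv_bo
 * that each entry targets.  Entry i of one array always corresponds to
 * entry i of the other, which is why the init/grow/append helpers below
 * always operate on both arrays together.
 */
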
static VkResult
anv_reloc_list_init_clone(struct anv_reloc_list *list,
                          const VkAllocationCallbacks *alloc,
                          const struct anv_reloc_list *other_list)
{
   if (other_list) {
      list->num_relocs = other_list->num_relocs;
      list->array_length = other_list->array_length;
   } else {
      list->num_relocs = 0;
      list->array_length = 256;
   }

   list->relocs =
      anv_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (list->relocs == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   list->reloc_bos =
      anv_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (list->reloc_bos == NULL) {
      anv_free(alloc, list->relocs);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   if (other_list) {
      memcpy(list->relocs, other_list->relocs,
             list->array_length * sizeof(*list->relocs));
      memcpy(list->reloc_bos, other_list->reloc_bos,
             list->array_length * sizeof(*list->reloc_bos));
   }

   return VK_SUCCESS;
}

VkResult
anv_reloc_list_init(struct anv_reloc_list *list,
                    const VkAllocationCallbacks *alloc)
{
   return anv_reloc_list_init_clone(list, alloc, NULL);
}

void
anv_reloc_list_finish(struct anv_reloc_list *list,
                      const VkAllocationCallbacks *alloc)
{
   anv_free(alloc, list->relocs);
   anv_free(alloc, list->reloc_bos);
}

static VkResult
anv_reloc_list_grow(struct anv_reloc_list *list,
                    const VkAllocationCallbacks *alloc,
                    size_t num_additional_relocs)
{
   if (list->num_relocs + num_additional_relocs <= list->array_length)
      return VK_SUCCESS;

   size_t new_length = list->array_length * 2;
   while (new_length < list->num_relocs + num_additional_relocs)
      new_length *= 2;

   struct drm_i915_gem_relocation_entry *new_relocs =
      anv_alloc(alloc, new_length * sizeof(*list->relocs), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_relocs == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   struct anv_bo **new_reloc_bos =
      anv_alloc(alloc, new_length * sizeof(*list->reloc_bos), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_reloc_bos == NULL) {
      anv_free(alloc, new_relocs);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
   memcpy(new_reloc_bos, list->reloc_bos,
          list->num_relocs * sizeof(*list->reloc_bos));

   anv_free(alloc, list->relocs);
   anv_free(alloc, list->reloc_bos);

   list->array_length = new_length;
   list->relocs = new_relocs;
   list->reloc_bos = new_reloc_bos;

   return VK_SUCCESS;
}

uint64_t
anv_reloc_list_add(struct anv_reloc_list *list,
                   const VkAllocationCallbacks *alloc,
                   uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
{
   struct drm_i915_gem_relocation_entry *entry;
   int index;

   const uint32_t domain =
      target_bo->is_winsys_bo ? I915_GEM_DOMAIN_RENDER : 0;

   anv_reloc_list_grow(list, alloc, 1);
   /* TODO: Handle failure */

   /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
   index = list->num_relocs++;
   list->reloc_bos[index] = target_bo;
   entry = &list->relocs[index];
   entry->target_handle = target_bo->gem_handle;
   entry->delta = delta;
   entry->offset = offset;
   entry->presumed_offset = target_bo->offset;
   entry->read_domains = domain;
   entry->write_domain = domain;
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));

   return target_bo->offset + delta;
}

static void
anv_reloc_list_append(struct anv_reloc_list *list,
                      const VkAllocationCallbacks *alloc,
                      struct anv_reloc_list *other, uint32_t offset)
{
   anv_reloc_list_grow(list, alloc, other->num_relocs);
   /* TODO: Handle failure */

   memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
          other->num_relocs * sizeof(other->relocs[0]));
   memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
          other->num_relocs * sizeof(other->reloc_bos[0]));

   for (uint32_t i = 0; i < other->num_relocs; i++)
      list->relocs[i + list->num_relocs].offset += offset;

   list->num_relocs += other->num_relocs;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_batch
 *-----------------------------------------------------------------------*/

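/* An anv_batch is a simple cursor over a chunk of command-stream memory:
 * start/next/end delimit the mapped range and relocs collects the
 * relocations emitted into it.  Whenever an emit would run past end, the
 * batch calls extend_cb (with user_data) to make more room; the callbacks
 * used for command buffers are defined further down in this file.
 */
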
void *
anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
{
   if (batch->next + num_dwords * 4 > batch->end)
      batch->extend_cb(batch, batch->user_data);

   void *p = batch->next;

   batch->next += num_dwords * 4;
   assert(batch->next <= batch->end);

   return p;
}

uint64_t
anv_batch_emit_reloc(struct anv_batch *batch,
                     void *location, struct anv_bo *bo, uint32_t delta)
{
   return anv_reloc_list_add(batch->relocs, batch->alloc,
                             location - batch->start, bo, delta);
}

void
anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
{
   uint32_t size, offset;

   size = other->next - other->start;
   assert(size % 4 == 0);

   if (batch->next + size > batch->end)
      batch->extend_cb(batch, batch->user_data);

   assert(batch->next + size <= batch->end);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
   memcpy(batch->next, other->start, size);

   offset = batch->next - batch->start;
   anv_reloc_list_append(batch->relocs, batch->alloc,
                         other->relocs, offset);

   batch->next += size;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_batch_bo
 *-----------------------------------------------------------------------*/

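/* An anv_batch_bo wraps one GEM BO of batch memory together with its
 * relocation list.  A command buffer keeps a chain of these: a batch_bo is
 * created (or cloned), pointed at by an anv_batch via _start()/_continue(),
 * sealed with _finish(), optionally reallocated larger with _grow(), and
 * eventually destroyed.  The batch_padding argument used throughout
 * reserves room at the end of the BO for the MI_BATCH_BUFFER_START that
 * chains to the next batch_bo.
 */
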
static VkResult
anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
                    struct anv_batch_bo **bbo_out)
{
   VkResult result;

   struct anv_batch_bo *bbo = anv_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
                                        8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (bbo == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
                              ANV_CMD_BUFFER_BATCH_SIZE);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
   if (result != VK_SUCCESS)
      goto fail_bo_alloc;

   *bbo_out = bbo;

   return VK_SUCCESS;

 fail_bo_alloc:
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
 fail_alloc:
   anv_free(&cmd_buffer->pool->alloc, bbo);

   return result;
}

static VkResult
anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
                   const struct anv_batch_bo *other_bbo,
                   struct anv_batch_bo **bbo_out)
{
   VkResult result;

   struct anv_batch_bo *bbo = anv_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
                                        8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (bbo == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
                              other_bbo->bo.size);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
                                      &other_bbo->relocs);
   if (result != VK_SUCCESS)
      goto fail_bo_alloc;

   bbo->length = other_bbo->length;
   memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);

   bbo->last_ss_pool_bo_offset = other_bbo->last_ss_pool_bo_offset;

   *bbo_out = bbo;

   return VK_SUCCESS;

 fail_bo_alloc:
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
 fail_alloc:
   anv_free(&cmd_buffer->pool->alloc, bbo);

   return result;
}

static void
anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
                   size_t batch_padding)
{
   batch->next = batch->start = bbo->bo.map;
   batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
   batch->relocs = &bbo->relocs;
   bbo->last_ss_pool_bo_offset = 0;
   bbo->relocs.num_relocs = 0;
}

static void
anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
                      size_t batch_padding)
{
   batch->start = bbo->bo.map;
   batch->next = bbo->bo.map + bbo->length;
   batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
   batch->relocs = &bbo->relocs;
}

static void
anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
{
   assert(batch->start == bbo->bo.map);
   bbo->length = batch->next - batch->start;
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
}

static VkResult
anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
                  struct anv_batch *batch, size_t additional,
                  size_t batch_padding)
{
   assert(batch->start == bbo->bo.map);
   bbo->length = batch->next - batch->start;

   size_t new_size = bbo->bo.size;
   while (new_size <= bbo->length + additional + batch_padding)
      new_size *= 2;

   if (new_size == bbo->bo.size)
      return VK_SUCCESS;

   struct anv_bo new_bo;
   VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
                                       &new_bo, new_size);
   if (result != VK_SUCCESS)
      return result;

   memcpy(new_bo.map, bbo->bo.map, bbo->length);

   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);

   bbo->bo = new_bo;
   anv_batch_bo_continue(bbo, batch, batch_padding);

   return VK_SUCCESS;
}

static void
anv_batch_bo_destroy(struct anv_batch_bo *bbo,
                     struct anv_cmd_buffer *cmd_buffer)
{
   anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
   anv_free(&cmd_buffer->pool->alloc, bbo);
}

static VkResult
anv_batch_bo_list_clone(const struct list_head *list,
                        struct anv_cmd_buffer *cmd_buffer,
                        struct list_head *new_list)
{
   VkResult result = VK_SUCCESS;

   list_inithead(new_list);

   struct anv_batch_bo *prev_bbo = NULL;
   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
      struct anv_batch_bo *new_bbo = NULL;
      result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
      if (result != VK_SUCCESS)
         break;
      list_addtail(&new_bbo->link, new_list);

      if (prev_bbo) {
         /* As we clone this list of batch_bo's, they chain one to the
          * other using MI_BATCH_BUFFER_START commands.  We need to fix up
          * those relocations as we go.  Fortunately, this is pretty easy
          * as it will always be the last relocation in the list.
          */
         uint32_t last_idx = prev_bbo->relocs.num_relocs - 1;
         assert(prev_bbo->relocs.reloc_bos[last_idx] == &bbo->bo);
         prev_bbo->relocs.reloc_bos[last_idx] = &new_bbo->bo;
      }

      prev_bbo = new_bbo;
   }

   if (result != VK_SUCCESS) {
      list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
         anv_batch_bo_destroy(bbo, cmd_buffer);
   }

   return result;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_cmd_buffer
 *-----------------------------------------------------------------------*/

static inline struct anv_batch_bo *
anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
{
   return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
}

struct anv_address
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   return (struct anv_address) {
      .bo = &cmd_buffer->device->surface_state_block_pool.bo,
      .offset = *(int32_t *)anv_vector_head(&cmd_buffer->bt_blocks),
   };
}

static void
emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_bo *bo, uint32_t offset)
{
   /* In gen8+ the address field grew to two dwords to accommodate 48 bit
    * offsets.  The high 16 bits are in the last dword, so we can use the gen8
    * version in either case, as long as we set the instruction length in the
    * header accordingly.  This means that we always emit three dwords here
    * and all the padding and adjustment we do in this file works for all
    * gen versions.
    */

   const uint32_t gen7_length =
      GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
   const uint32_t gen8_length =
      GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;
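
   /* As a concrete example (assuming the usual genxml definitions, where
    * MI_BATCH_BUFFER_START is 2 dwords on gen7, 3 dwords on gen8+, and the
    * length bias for MI commands is 2): gen7_length works out to 0 and
    * gen8_length to 1, i.e. DWordLength encodes "total dwords minus two".
    */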

   anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START,
      .DWordLength = cmd_buffer->device->info.gen < 8 ?
                     gen7_length : gen8_length,
      ._2ndLevelBatchBuffer = _1stlevelbatch,
      .AddressSpaceIndicator = ASI_PPGTT,
      .BatchBufferStartAddress = { bo, offset });
}

static void
cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_batch_bo *bbo)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_batch_bo *current_bbo =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);

   /* We set the end of the batch a little short so we would be sure we
    * have room for the chaining command.  Since we're about to emit the
    * chaining command, let's set it back where it should go.
    */
   batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
   assert(batch->end == current_bbo->bo.map + current_bbo->bo.size);

   emit_batch_buffer_start(cmd_buffer, &bbo->bo, 0);

   anv_batch_bo_finish(current_bbo, batch);
}

static VkResult
anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
{
   struct anv_cmd_buffer *cmd_buffer = _data;
   struct anv_batch_bo *new_bbo;

   VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
   if (result != VK_SUCCESS)
      return result;

   struct anv_batch_bo **seen_bbo = anv_vector_add(&cmd_buffer->seen_bbos);
   if (seen_bbo == NULL) {
      anv_batch_bo_destroy(new_bbo, cmd_buffer);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   *seen_bbo = new_bbo;

   cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);

   list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);

   anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);

   return VK_SUCCESS;
}

static VkResult
anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
{
   struct anv_cmd_buffer *cmd_buffer = _data;
   struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);

   anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
                     GEN8_MI_BATCH_BUFFER_START_length * 4);

   return VK_SUCCESS;
}

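/* Binding tables are allocated out of the surface state block pool, but
 * from the opposite end (anv_block_pool_alloc_back), so the bt_block
 * offsets stored in cmd_buffer->bt_blocks are negative.  That is why the
 * allocator below asserts *bt_block < 0 and reports the (positive)
 * distance back to the pool center through *state_offset.
 */
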
struct anv_state
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t entries, uint32_t *state_offset)
{
   struct anv_block_pool *block_pool =
      &cmd_buffer->device->surface_state_block_pool;
   int32_t *bt_block = anv_vector_head(&cmd_buffer->bt_blocks);
   struct anv_state state;

   state.alloc_size = align_u32(entries * 4, 32);

   if (cmd_buffer->bt_next + state.alloc_size > block_pool->block_size)
      return (struct anv_state) { 0 };

   state.offset = cmd_buffer->bt_next;
   state.map = block_pool->map + *bt_block + state.offset;

   cmd_buffer->bt_next += state.alloc_size;

   assert(*bt_block < 0);
   *state_offset = -(*bt_block);

   return state;
}

struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
{
   return anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
}

struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment)
{
   return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
                                 size, alignment);
}

VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_block_pool *block_pool =
      &cmd_buffer->device->surface_state_block_pool;

   int32_t *offset = anv_vector_add(&cmd_buffer->bt_blocks);
   if (offset == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   *offset = anv_block_pool_alloc_back(block_pool);
   cmd_buffer->bt_next = 0;

   return VK_SUCCESS;
}

VkResult
anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo;
   VkResult result;

   list_inithead(&cmd_buffer->batch_bos);

   result = anv_batch_bo_create(cmd_buffer, &batch_bo);
   if (result != VK_SUCCESS)
      return result;

   list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);

   cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
   cmd_buffer->batch.user_data = cmd_buffer;

   if (cmd_buffer->device->can_chain_batches) {
      cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
   } else {
      cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
   }

   anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);

   int success = anv_vector_init(&cmd_buffer->seen_bbos,
                                 sizeof(struct anv_bo *),
                                 8 * sizeof(struct anv_bo *));
   if (!success) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_batch_bo;
   }

   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) = batch_bo;

   success = anv_vector_init(&cmd_buffer->bt_blocks, sizeof(int32_t),
                             8 * sizeof(int32_t));
   if (!success) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_seen_bbos;
   }

   result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
                                &cmd_buffer->pool->alloc);
   if (result != VK_SUCCESS)
      goto fail_bt_blocks;

   anv_cmd_buffer_new_binding_table_block(cmd_buffer);

   cmd_buffer->execbuf2.objects = NULL;
   cmd_buffer->execbuf2.bos = NULL;
   cmd_buffer->execbuf2.array_length = 0;

   return VK_SUCCESS;

 fail_bt_blocks:
   anv_vector_finish(&cmd_buffer->bt_blocks);
 fail_seen_bbos:
   anv_vector_finish(&cmd_buffer->seen_bbos);
 fail_batch_bo:
   anv_batch_bo_destroy(batch_bo, cmd_buffer);

   return result;
}

void
anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   int32_t *bt_block;
   anv_vector_foreach(bt_block, &cmd_buffer->bt_blocks) {
      anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
                          *bt_block);
   }
   anv_vector_finish(&cmd_buffer->bt_blocks);

   anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);

   anv_vector_finish(&cmd_buffer->seen_bbos);

   /* Destroy all of the batch buffers */
   list_for_each_entry_safe(struct anv_batch_bo, bbo,
                            &cmd_buffer->batch_bos, link) {
      anv_batch_bo_destroy(bbo, cmd_buffer);
   }

   anv_free(&cmd_buffer->pool->alloc, cmd_buffer->execbuf2.objects);
   anv_free(&cmd_buffer->pool->alloc, cmd_buffer->execbuf2.bos);
}

void
anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   /* Delete all but the first batch bo */
   assert(!list_empty(&cmd_buffer->batch_bos));
   while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
      list_del(&bbo->link);
      anv_batch_bo_destroy(bbo, cmd_buffer);
   }
   assert(!list_empty(&cmd_buffer->batch_bos));

   anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
                      &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);

   while (anv_vector_length(&cmd_buffer->bt_blocks) > 1) {
      int32_t *bt_block = anv_vector_remove(&cmd_buffer->bt_blocks);
      anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
                          *bt_block);
   }
   assert(anv_vector_length(&cmd_buffer->bt_blocks) == 1);
   cmd_buffer->bt_next = 0;

   cmd_buffer->surface_relocs.num_relocs = 0;

   /* Reset the list of seen buffers */
   cmd_buffer->seen_bbos.head = 0;
   cmd_buffer->seen_bbos.tail = 0;

   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);
}

void
anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      /* When we start a batch buffer, we subtract a certain amount of
       * padding from the end to ensure that we always have room to emit a
       * BATCH_BUFFER_START to chain to the next BO.  We need to remove
       * that padding before we end the batch; otherwise, we may end up
       * with our BATCH_BUFFER_END in another BO.
       */
      cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
      assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);

      anv_batch_emit(&cmd_buffer->batch, GEN7_MI_BATCH_BUFFER_END);

      /* Round batch up to an even number of dwords. */
      if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
         anv_batch_emit(&cmd_buffer->batch, GEN7_MI_NOOP);

      cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
   }

   anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
      /* If this is a secondary command buffer, we need to determine the
       * mode in which it will be executed with vkExecuteCommands.  We
       * determine this statically here so that this stays in sync with the
       * actual ExecuteCommands implementation.
       */
      if (!cmd_buffer->device->can_chain_batches) {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
      } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
                 (batch_bo->length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
         /* If the secondary has exactly one batch buffer in its list *and*
          * that batch buffer is less than half of the maximum size, we're
          * probably better off simply copying it into our batch.
          */
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
      } else if (!(cmd_buffer->usage_flags &
                   VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;

         /* When we chain, we need to add an MI_BATCH_BUFFER_START command
          * with its relocation.  In order to handle this we'll increment here
          * so we can unconditionally decrement right before adding the
          * MI_BATCH_BUFFER_START command.
          */
         batch_bo->relocs.num_relocs++;
         cmd_buffer->batch.next += GEN8_MI_BATCH_BUFFER_START_length * 4;
      } else {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
      }
   }
}

static inline VkResult
anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
                             struct list_head *list)
{
   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
      struct anv_batch_bo **bbo_ptr = anv_vector_add(&cmd_buffer->seen_bbos);
      if (bbo_ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      *bbo_ptr = bbo;
   }

   return VK_SUCCESS;
}

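/* Incorporate a secondary command buffer into a primary, according to the
 * exec_mode chosen in anv_cmd_buffer_end_batch_buffer():
 *
 *  - EMIT: memcpy the secondary's single, small batch into the primary.
 *  - GROW_AND_EMIT: grow the primary's current batch_bo first, for
 *    platforms that cannot chain batches.
 *  - CHAIN: jump into the secondary's batch_bos and patch its final
 *    MI_BATCH_BUFFER_START to return to the primary.
 *  - COPY_AND_CHAIN: like CHAIN, but on a clone of the secondary's
 *    batch_bo list so the secondary can be executed simultaneously.
 */
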
void
anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                             struct anv_cmd_buffer *secondary)
{
   switch (secondary->exec_mode) {
   case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
      anv_batch_emit_batch(&primary->batch, &secondary->batch);
      anv_cmd_buffer_emit_state_base_address(primary);
      break;
   case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
      unsigned length = secondary->batch.end - secondary->batch.start;
      anv_batch_bo_grow(primary, bbo, &primary->batch, length,
                        GEN8_MI_BATCH_BUFFER_START_length * 4);
      anv_batch_emit_batch(&primary->batch, &secondary->batch);
      anv_cmd_buffer_emit_state_base_address(primary);
      break;
   }
   case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
      struct anv_batch_bo *first_bbo =
         list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
      struct anv_batch_bo *last_bbo =
         list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);

      emit_batch_buffer_start(primary, &first_bbo->bo, 0);

      struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
      assert(primary->batch.start == this_bbo->bo.map);
      uint32_t offset = primary->batch.next - primary->batch.start;
      const uint32_t inst_size = GEN8_MI_BATCH_BUFFER_START_length * 4;

      /* Roll back the previous MI_BATCH_BUFFER_START and its relocation so we
       * can emit a new command and relocation for the current splice.  In
       * order to handle the initial-use case, we incremented next and
       * num_relocs in end_batch_buffer() so we can always just subtract
       * here.
       */
      last_bbo->relocs.num_relocs--;
      secondary->batch.next -= inst_size;
      emit_batch_buffer_start(secondary, &this_bbo->bo, offset);
      anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);

      /* After patching up the secondary buffer, we need to clflush the
       * modified instruction in case we're on a !llc platform. We use a
       * little loop to handle the case where the instruction crosses a cache
       * line boundary.
       */
      if (!primary->device->info.has_llc) {
         void *inst = secondary->batch.next - inst_size;
         void *p = (void *) (((uintptr_t) inst) & ~CACHELINE_MASK);
         __builtin_ia32_mfence();
         while (p < secondary->batch.next) {
            __builtin_ia32_clflush(p);
            p += CACHELINE_SIZE;
         }
      }

      anv_cmd_buffer_emit_state_base_address(primary);
      break;
   }
   case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
      struct list_head copy_list;
      VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
                                                secondary, &copy_list);
      if (result != VK_SUCCESS)
         return; /* FIXME */

      anv_cmd_buffer_add_seen_bbos(primary, &copy_list);

      struct anv_batch_bo *first_bbo =
         list_first_entry(&copy_list, struct anv_batch_bo, link);
      struct anv_batch_bo *last_bbo =
         list_last_entry(&copy_list, struct anv_batch_bo, link);

      cmd_buffer_chain_to_batch_bo(primary, first_bbo);

      list_splicetail(&copy_list, &primary->batch_bos);

      anv_batch_bo_continue(last_bbo, &primary->batch,
                            GEN8_MI_BATCH_BUFFER_START_length * 4);

      anv_cmd_buffer_emit_state_base_address(primary);
      break;
   }
   default:
      assert(!"Invalid execution mode");
   }

   anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
                         &secondary->surface_relocs, 0);
}

static VkResult
anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
                      struct anv_bo *bo,
                      struct anv_reloc_list *relocs)
{
   struct drm_i915_gem_exec_object2 *obj = NULL;

   if (bo->index < cmd_buffer->execbuf2.bo_count &&
       cmd_buffer->execbuf2.bos[bo->index] == bo)
      obj = &cmd_buffer->execbuf2.objects[bo->index];

   if (obj == NULL) {
      /* We've never seen this one before.  Add it to the list and assign
       * an id that we can use later.
       */
      if (cmd_buffer->execbuf2.bo_count >= cmd_buffer->execbuf2.array_length) {
         uint32_t new_len = cmd_buffer->execbuf2.objects ?
                            cmd_buffer->execbuf2.array_length * 2 : 64;

         struct drm_i915_gem_exec_object2 *new_objects =
            anv_alloc(&cmd_buffer->pool->alloc, new_len * sizeof(*new_objects),
                      8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
         if (new_objects == NULL)
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

         struct anv_bo **new_bos =
            anv_alloc(&cmd_buffer->pool->alloc, new_len * sizeof(*new_bos),
                      8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
         if (new_bos == NULL) {
            anv_free(&cmd_buffer->pool->alloc, new_objects);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }

         if (cmd_buffer->execbuf2.objects) {
            memcpy(new_objects, cmd_buffer->execbuf2.objects,
                   cmd_buffer->execbuf2.bo_count * sizeof(*new_objects));
            memcpy(new_bos, cmd_buffer->execbuf2.bos,
                   cmd_buffer->execbuf2.bo_count * sizeof(*new_bos));
         }

         cmd_buffer->execbuf2.objects = new_objects;
         cmd_buffer->execbuf2.bos = new_bos;
         cmd_buffer->execbuf2.array_length = new_len;
      }

      assert(cmd_buffer->execbuf2.bo_count < cmd_buffer->execbuf2.array_length);

      bo->index = cmd_buffer->execbuf2.bo_count++;
      obj = &cmd_buffer->execbuf2.objects[bo->index];
      cmd_buffer->execbuf2.bos[bo->index] = bo;

      obj->handle = bo->gem_handle;
      obj->relocation_count = 0;
      obj->relocs_ptr = 0;
      obj->alignment = 0;
      obj->offset = bo->offset;
      obj->flags = bo->is_winsys_bo ? EXEC_OBJECT_WRITE : 0;
      obj->rsvd1 = 0;
      obj->rsvd2 = 0;
   }

   if (relocs != NULL && obj->relocation_count == 0) {
      /* This is the first time we've ever seen a list of relocations for
       * this BO.  Go ahead and set the relocations and then walk the list
       * of relocations and add them all.
       */
      obj->relocation_count = relocs->num_relocs;
      obj->relocs_ptr = (uintptr_t) relocs->relocs;

      for (size_t i = 0; i < relocs->num_relocs; i++) {
         /* A quick sanity check on relocations */
         assert(relocs->relocs[i].offset < bo->size);
         anv_cmd_buffer_add_bo(cmd_buffer, relocs->reloc_bos[i], NULL);
      }
   }

   return VK_SUCCESS;
}

static void
anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_reloc_list *list)
{
   struct anv_bo *bo;

   /* If the kernel supports I915_EXEC_NO_RELOC, it will compare offset in
    * struct drm_i915_gem_exec_object2 against the bo's current offset and,
    * if no bos have moved, it will skip relocation processing altogether.
    * If I915_EXEC_NO_RELOC is not supported, the kernel ignores the incoming
    * value of offset so we can set it either way.  For that to work we need
    * to make sure all relocs use the same presumed offset.
    */

   for (size_t i = 0; i < list->num_relocs; i++) {
      bo = list->reloc_bos[i];
      if (bo->offset != list->relocs[i].presumed_offset)
         cmd_buffer->execbuf2.need_reloc = true;

      list->relocs[i].target_handle = bo->index;
   }
}

static uint64_t
read_reloc(const struct anv_device *device, const void *p)
{
   if (device->info.gen >= 8)
      return *(uint64_t *)p;
   else
      return *(uint32_t *)p;
}

static void
write_reloc(const struct anv_device *device, void *p, uint64_t v)
{
   if (device->info.gen >= 8)
      *(uint64_t *)p = v;
   else
      *(uint32_t *)p = v;
}

static void
adjust_relocations_from_block_pool(struct anv_block_pool *pool,
                                   struct anv_reloc_list *relocs)
{
   for (size_t i = 0; i < relocs->num_relocs; i++) {
      /* In general, we don't know how stale the relocated value is.  It
       * may have been used last time or it may not.  Since we don't want
       * to stomp it while the GPU may be accessing it, we haven't updated
       * it anywhere else in the code.  Instead, we just set the presumed
       * offset to what it is now based on the delta and the data in the
       * block pool.  Then the kernel will update it for us if needed.
       */
      assert(relocs->relocs[i].offset < pool->state.end);
      const void *p = pool->map + relocs->relocs[i].offset;

      /* We're reading back the relocated value from potentially incoherent
       * memory here. However, any change to the value will be from the kernel
       * writing out relocations, which will keep the CPU cache up to date.
       */
      relocs->relocs[i].presumed_offset =
         read_reloc(pool->device, p) - relocs->relocs[i].delta;

      /* All of the relocations from this block pool to other BO's should
       * have been emitted relative to the surface block pool center.  We
       * need to add the center offset to make them relative to the
       * beginning of the actual GEM bo.
       */
      relocs->relocs[i].offset += pool->center_bo_offset;
   }
}

static void
adjust_relocations_to_block_pool(struct anv_block_pool *pool,
                                 struct anv_bo *from_bo,
                                 struct anv_reloc_list *relocs,
                                 uint32_t *last_pool_center_bo_offset)
{
   assert(*last_pool_center_bo_offset <= pool->center_bo_offset);
   uint32_t delta = pool->center_bo_offset - *last_pool_center_bo_offset;

   /* When we initially emit relocations into a block pool, we don't
    * actually know what the final center_bo_offset will be so we just emit
    * it as if center_bo_offset == 0.  Now that we know what the center
    * offset is, we need to walk the list of relocations and adjust any
    * relocations that point to the pool bo with the correct offset.
    */
   for (size_t i = 0; i < relocs->num_relocs; i++) {
      if (relocs->reloc_bos[i] == &pool->bo) {
         /* Adjust the delta value in the relocation to correctly
          * correspond to the new delta.  Initially, this value may have
          * been negative (if treated as unsigned), but we trust in
          * uint32_t roll-over to fix that for us at this point.
          */
         relocs->relocs[i].delta += delta;

         /* Since the delta has changed, we need to update the actual
          * relocated value with the new presumed value.  This function
          * should only be called on batch buffers, so we know it isn't in
          * use by the GPU at the moment.
          */
         assert(relocs->relocs[i].offset < from_bo->size);
         write_reloc(pool->device, from_bo->map + relocs->relocs[i].offset,
                     relocs->relocs[i].presumed_offset +
                     relocs->relocs[i].delta);
      }
   }

   *last_pool_center_bo_offset = pool->center_bo_offset;
}

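/* Build the execbuf2 call for this command buffer: fix up relocations
 * against the surface state pool, gather every BO we've seen into the
 * validation list (with the batch BO moved to the end, as the kernel
 * requires), rewrite relocation target handles to validation-list indices,
 * and flush CPU caches on non-LLC platforms before handing the batch to
 * the kernel.
 */
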
void
anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_block_pool *ss_pool =
      &cmd_buffer->device->surface_state_block_pool;

   cmd_buffer->execbuf2.bo_count = 0;
   cmd_buffer->execbuf2.need_reloc = false;

   adjust_relocations_from_block_pool(ss_pool, &cmd_buffer->surface_relocs);
   anv_cmd_buffer_add_bo(cmd_buffer, &ss_pool->bo, &cmd_buffer->surface_relocs);

   /* First, we walk over all of the bos we've seen and add them and their
    * relocations to the validate list.
    */
   struct anv_batch_bo **bbo;
   anv_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
      adjust_relocations_to_block_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
                                       &(*bbo)->last_ss_pool_bo_offset);

      anv_cmd_buffer_add_bo(cmd_buffer, &(*bbo)->bo, &(*bbo)->relocs);
   }

   struct anv_batch_bo *first_batch_bo =
      list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);

   /* The kernel requires that the last entry in the validation list be the
    * batch buffer to execute.  We can simply swap the element
    * corresponding to the first batch_bo in the chain with the last
    * element in the list.
    */
   if (first_batch_bo->bo.index != cmd_buffer->execbuf2.bo_count - 1) {
      uint32_t idx = first_batch_bo->bo.index;
      uint32_t last_idx = cmd_buffer->execbuf2.bo_count - 1;

      struct drm_i915_gem_exec_object2 tmp_obj =
         cmd_buffer->execbuf2.objects[idx];
      assert(cmd_buffer->execbuf2.bos[idx] == &first_batch_bo->bo);

      cmd_buffer->execbuf2.objects[idx] = cmd_buffer->execbuf2.objects[last_idx];
      cmd_buffer->execbuf2.bos[idx] = cmd_buffer->execbuf2.bos[last_idx];
      cmd_buffer->execbuf2.bos[idx]->index = idx;

      cmd_buffer->execbuf2.objects[last_idx] = tmp_obj;
      cmd_buffer->execbuf2.bos[last_idx] = &first_batch_bo->bo;
      first_batch_bo->bo.index = last_idx;
   }

   /* Now we go through and fixup all of the relocation lists to point to
    * the correct indices in the object array.  We have to do this after we
    * reorder the list above as some of the indices may have changed.
    */
   anv_vector_foreach(bbo, &cmd_buffer->seen_bbos)
      anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);

   anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);

   if (!cmd_buffer->device->info.has_llc) {
      __builtin_ia32_mfence();
      anv_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
         for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
            __builtin_ia32_clflush((*bbo)->bo.map + i);
      }
   }

   cmd_buffer->execbuf2.execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) cmd_buffer->execbuf2.objects,
      .buffer_count = cmd_buffer->execbuf2.bo_count,
      .batch_start_offset = 0,
      .batch_len = batch->next - batch->start,
      .cliprects_ptr = 0,
      .num_cliprects = 0,
      .DR1 = 0,
      .DR4 = 0,
      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER |
               I915_EXEC_CONSTANTS_REL_GENERAL,
      .rsvd1 = cmd_buffer->device->context_id,
      .rsvd2 = 0,
   };

   if (!cmd_buffer->execbuf2.need_reloc)
      cmd_buffer->execbuf2.execbuf.flags |= I915_EXEC_NO_RELOC;
}