/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include "anv_private.h"
/** \file anv_cmd_buffer.c
 *
 * This file contains functions related to anv_cmd_buffer as a data
 * structure.  This involves everything required to create and destroy
 * the actual batch buffers as well as link them together and handle
 * relocations and surface state.  It specifically does *not* contain any
 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
 */
/*-----------------------------------------------------------------------*
 * Functions related to anv_reloc_list
 *-----------------------------------------------------------------------*/
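
/* A sketch of the fields this file relies on, inferred from the usage below
 * (the authoritative definition lives in anv_private.h):
 *
 *    struct anv_reloc_list {
 *       size_t                                 num_relocs;
 *       size_t                                 array_length;
 *       struct drm_i915_gem_relocation_entry  *relocs;
 *       struct anv_bo                        **reloc_bos;
 *    };
 *
 * relocs[i] is the kernel-visible relocation entry and reloc_bos[i] is the
 * buffer object it targets; the two arrays always grow in lockstep.
 */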
VkResult
anv_reloc_list_init(struct anv_reloc_list *list, struct anv_device *device)
{
   list->num_relocs = 0;
   list->array_length = 256;
   list->relocs =
      anv_device_alloc(device, list->array_length * sizeof(*list->relocs), 8,
                       VK_SYSTEM_ALLOC_TYPE_INTERNAL);

   if (list->relocs == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   list->reloc_bos =
      anv_device_alloc(device, list->array_length * sizeof(*list->reloc_bos),
                       8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);

   if (list->reloc_bos == NULL) {
      anv_device_free(device, list->relocs);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   return VK_SUCCESS;
}
void
anv_reloc_list_finish(struct anv_reloc_list *list, struct anv_device *device)
{
   anv_device_free(device, list->relocs);
   anv_device_free(device, list->reloc_bos);
}
static VkResult
anv_reloc_list_grow(struct anv_reloc_list *list, struct anv_device *device,
                    size_t num_additional_relocs)
{
   if (list->num_relocs + num_additional_relocs <= list->array_length)
      return VK_SUCCESS;

   size_t new_length = list->array_length * 2;
   while (new_length < list->num_relocs + num_additional_relocs)
      new_length *= 2;

   struct drm_i915_gem_relocation_entry *new_relocs =
      anv_device_alloc(device, new_length * sizeof(*list->relocs), 8,
                       VK_SYSTEM_ALLOC_TYPE_INTERNAL);
   if (new_relocs == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   struct anv_bo **new_reloc_bos =
      anv_device_alloc(device, new_length * sizeof(*list->reloc_bos), 8,
                       VK_SYSTEM_ALLOC_TYPE_INTERNAL);
   if (new_reloc_bos == NULL) {
      anv_device_free(device, new_relocs);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
   memcpy(new_reloc_bos, list->reloc_bos,
          list->num_relocs * sizeof(*list->reloc_bos));

   anv_device_free(device, list->relocs);
   anv_device_free(device, list->reloc_bos);

   list->relocs = new_relocs;
   list->reloc_bos = new_reloc_bos;
   list->array_length = new_length;

   return VK_SUCCESS;
}
uint64_t
anv_reloc_list_add(struct anv_reloc_list *list, struct anv_device *device,
                   uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
{
   struct drm_i915_gem_relocation_entry *entry;
   int index;

   anv_reloc_list_grow(list, device, 1);
   /* TODO: Handle failure */

   /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
   index = list->num_relocs++;
   list->reloc_bos[index] = target_bo;
   entry = &list->relocs[index];
   entry->target_handle = target_bo->gem_handle;
   entry->delta = delta;
   entry->offset = offset;
   entry->presumed_offset = target_bo->offset;
   entry->read_domains = 0;
   entry->write_domain = 0;

   return target_bo->offset + delta;
}
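
/* The value returned above is the *presumed* GPU address of the target
 * (target_bo->offset + delta).  A caller writes it into the batch right
 * away; if the kernel later places target_bo somewhere else, it uses the
 * recorded relocation entry to patch that location during execbuf2.
 */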
static void
anv_reloc_list_append(struct anv_reloc_list *list, struct anv_device *device,
                      struct anv_reloc_list *other, uint32_t offset)
{
   anv_reloc_list_grow(list, device, other->num_relocs);
   /* TODO: Handle failure */

   memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
          other->num_relocs * sizeof(other->relocs[0]));
   memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
          other->num_relocs * sizeof(other->reloc_bos[0]));

   for (uint32_t i = 0; i < other->num_relocs; i++)
      list->relocs[i + list->num_relocs].offset += offset;

   list->num_relocs += other->num_relocs;
}
/*-----------------------------------------------------------------------*
 * Functions related to anv_batch
 *-----------------------------------------------------------------------*/
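
/* The anv_batch fields used below, sketched from this file's usage (see
 * anv_private.h for the real definition): start/next/end delimit the
 * writable region of the current batch bo, relocs collects relocations for
 * that bo, and extend_cb(batch, user_data) is invoked whenever a requested
 * emit would run past end.
 */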
void *
anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
{
   if (batch->next + num_dwords * 4 > batch->end)
      batch->extend_cb(batch, batch->user_data);

   void *p = batch->next;

   batch->next += num_dwords * 4;
   assert(batch->next <= batch->end);

   return p;
}
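
/* Usage sketch (hypothetical caller): reserve the dwords first, then write
 * through the returned pointer.  MY_PACKET_DW0/DW1 are placeholders here,
 * not real packet definitions:
 *
 *    uint32_t *dw = anv_batch_emit_dwords(batch, 2);
 *    dw[0] = MY_PACKET_DW0;
 *    dw[1] = MY_PACKET_DW1;
 */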
uint64_t
anv_batch_emit_reloc(struct anv_batch *batch,
                     void *location, struct anv_bo *bo, uint32_t delta)
{
   return anv_reloc_list_add(batch->relocs, batch->device,
                             location - batch->start, bo, delta);
}
void
anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
{
   uint32_t size, offset;

   size = other->next - other->start;
   assert(size % 4 == 0);

   if (batch->next + size > batch->end)
      batch->extend_cb(batch, batch->user_data);

   assert(batch->next + size <= batch->end);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
   memcpy(batch->next, other->start, size);

   offset = batch->next - batch->start;
   anv_reloc_list_append(batch->relocs, batch->device,
                         other->relocs, offset);

   batch->next += size;
}
/*-----------------------------------------------------------------------*
 * Functions related to anv_batch_bo
 *-----------------------------------------------------------------------*/
static VkResult
anv_batch_bo_create(struct anv_device *device, struct anv_batch_bo **bbo_out)
{
   VkResult result;

   struct anv_batch_bo *bbo =
      anv_device_alloc(device, sizeof(*bbo), 8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
   if (bbo == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_pool_alloc(&device->batch_bo_pool, &bbo->bo);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = anv_reloc_list_init(&bbo->relocs, device);
   if (result != VK_SUCCESS)
      goto fail_bo_alloc;

   *bbo_out = bbo;

   return VK_SUCCESS;

 fail_bo_alloc:
   anv_bo_pool_free(&device->batch_bo_pool, &bbo->bo);
 fail_alloc:
   anv_device_free(device, bbo);

   return result;
}
static void
anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
                   size_t batch_padding)
{
   batch->next = batch->start = bbo->bo.map;
   batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
   batch->relocs = &bbo->relocs;
   bbo->relocs.num_relocs = 0;
}
static void
anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
{
   assert(batch->start == bbo->bo.map);
   bbo->length = batch->next - batch->start;
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
}
static void
anv_batch_bo_destroy(struct anv_batch_bo *bbo, struct anv_device *device)
{
   anv_reloc_list_finish(&bbo->relocs, device);
   anv_bo_pool_free(&device->batch_bo_pool, &bbo->bo);
   anv_device_free(device, bbo);
}
/*-----------------------------------------------------------------------*
 * Functions related to anv_cmd_buffer
 *-----------------------------------------------------------------------*/
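
/* A command buffer owns two chains of batch bos: batch_bos holds the actual
 * command stream and surface_bos holds binding tables and surface state.
 * The bo currently being filled is always the tail (prev) of its list, and
 * the seen_bbos vector remembers every bbo ever attached to this command
 * buffer so that execbuf preparation can walk them all.
 */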
static inline struct anv_batch_bo *
anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
{
   return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
}

static inline struct anv_batch_bo *
anv_cmd_buffer_current_surface_bbo(struct anv_cmd_buffer *cmd_buffer)
{
   return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->surface_bos.prev, link);
}

struct anv_bo *
anv_cmd_buffer_current_surface_bo(struct anv_cmd_buffer *cmd_buffer)
{
   return &anv_cmd_buffer_current_surface_bbo(cmd_buffer)->bo;
}

struct anv_reloc_list *
anv_cmd_buffer_current_surface_relocs(struct anv_cmd_buffer *cmd_buffer)
{
   return &anv_cmd_buffer_current_surface_bbo(cmd_buffer)->relocs;
}
static VkResult
anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
{
   struct anv_cmd_buffer *cmd_buffer = _data;
   struct anv_batch_bo *new_bbo, *old_bbo =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);

   VkResult result = anv_batch_bo_create(cmd_buffer->device, &new_bbo);
   if (result != VK_SUCCESS)
      return result;

   struct anv_batch_bo **seen_bbo = anv_vector_add(&cmd_buffer->seen_bbos);
   if (seen_bbo == NULL) {
      anv_batch_bo_destroy(new_bbo, cmd_buffer->device);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   *seen_bbo = new_bbo;

   /* We set the end of the batch a little short so we would be sure we
    * have room for the chaining command.  Since we're about to emit the
    * chaining command, let's set it back where it should go.
    */
   batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
   assert(batch->end == old_bbo->bo.map + old_bbo->bo.size);

   anv_batch_emit(batch, GEN8_MI_BATCH_BUFFER_START,
                  GEN8_MI_BATCH_BUFFER_START_header,
                  ._2ndLevelBatchBuffer = _1stlevelbatch,
                  .AddressSpaceIndicator = ASI_PPGTT,
                  .BatchBufferStartAddress = { &new_bbo->bo, 0 });

   anv_batch_bo_finish(old_bbo, batch);

   list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);

   anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);

   return VK_SUCCESS;
}
struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment)
{
   struct anv_bo *surface_bo =
      anv_cmd_buffer_current_surface_bo(cmd_buffer);
   struct anv_state state;

   state.offset = align_u32(cmd_buffer->surface_next, alignment);
   if (state.offset + size > surface_bo->size)
      return (struct anv_state) { 0 };

   state.map = surface_bo->map + state.offset;
   state.alloc_size = size;
   cmd_buffer->surface_next = state.offset + size;

   assert(state.offset + size <= surface_bo->size);

   return state;
}
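
/* Usage sketch: allocate surface state out of the current surface bo and
 * roll over to a fresh bo when it fills up (the size/alignment of 64 is
 * illustrative only, not taken from this file):
 *
 *    struct anv_state state =
 *       anv_cmd_buffer_alloc_surface_state(cmd_buffer, 64, 64);
 *    if (state.map == NULL) {
 *       anv_cmd_buffer_new_surface_state_bo(cmd_buffer);
 *       state = anv_cmd_buffer_alloc_surface_state(cmd_buffer, 64, 64);
 *    }
 */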
struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment)
{
   return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
                                 size, alignment);
}
VkResult
anv_cmd_buffer_new_surface_state_bo(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *new_bbo, *old_bbo =
      anv_cmd_buffer_current_surface_bbo(cmd_buffer);

   /* Finish off the old buffer */
   old_bbo->length = cmd_buffer->surface_next;

   VkResult result = anv_batch_bo_create(cmd_buffer->device, &new_bbo);
   if (result != VK_SUCCESS)
      return result;

   struct anv_batch_bo **seen_bbo = anv_vector_add(&cmd_buffer->seen_bbos);
   if (seen_bbo == NULL) {
      anv_batch_bo_destroy(new_bbo, cmd_buffer->device);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   *seen_bbo = new_bbo;

   cmd_buffer->surface_next = 1;

   list_addtail(&new_bbo->link, &cmd_buffer->surface_bos);

   return VK_SUCCESS;
}
VkResult
anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo, *surface_bbo;
   struct anv_device *device = cmd_buffer->device;
   VkResult result;

   list_inithead(&cmd_buffer->batch_bos);
   list_inithead(&cmd_buffer->surface_bos);

   result = anv_batch_bo_create(device, &batch_bo);
   if (result != VK_SUCCESS)
      return result;

   list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);

   cmd_buffer->batch.device = device;
   cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
   cmd_buffer->batch.user_data = cmd_buffer;

   anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);

   result = anv_batch_bo_create(device, &surface_bbo);
   if (result != VK_SUCCESS)
      goto fail_batch_bo;

   list_addtail(&surface_bbo->link, &cmd_buffer->surface_bos);

   int success = anv_vector_init(&cmd_buffer->seen_bbos,
                                 sizeof(struct anv_bo *),
                                 8 * sizeof(struct anv_bo *));
   if (!success)
      goto fail_surface_bo;

   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) = surface_bbo;

   /* Start surface_next at 1 so surface offset 0 is invalid. */
   cmd_buffer->surface_next = 1;

   cmd_buffer->execbuf2.objects = NULL;
   cmd_buffer->execbuf2.bos = NULL;
   cmd_buffer->execbuf2.array_length = 0;

   return VK_SUCCESS;

 fail_surface_bo:
   anv_batch_bo_destroy(surface_bbo, device);
 fail_batch_bo:
   anv_batch_bo_destroy(batch_bo, device);

   return result;
}
void
anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;

   anv_vector_finish(&cmd_buffer->seen_bbos);

   /* Destroy all of the batch buffers */
   list_for_each_entry_safe(struct anv_batch_bo, bbo,
                            &cmd_buffer->batch_bos, link) {
      anv_batch_bo_destroy(bbo, device);
   }

   /* Destroy all of the surface state buffers */
   list_for_each_entry_safe(struct anv_batch_bo, bbo,
                            &cmd_buffer->surface_bos, link) {
      anv_batch_bo_destroy(bbo, device);
   }

   anv_device_free(device, cmd_buffer->execbuf2.objects);
   anv_device_free(device, cmd_buffer->execbuf2.bos);
}
void
anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;

   /* Delete all but the first batch bo */
   assert(!list_empty(&cmd_buffer->batch_bos));
   while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
      list_del(&bbo->link);
      anv_batch_bo_destroy(bbo, device);
   }
   assert(!list_empty(&cmd_buffer->batch_bos));

   anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
                      &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);

   /* Delete all but the first surface state bo */
   assert(!list_empty(&cmd_buffer->surface_bos));
   while (cmd_buffer->surface_bos.next != cmd_buffer->surface_bos.prev) {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_surface_bbo(cmd_buffer);
      list_del(&bbo->link);
      anv_batch_bo_destroy(bbo, device);
   }
   assert(!list_empty(&cmd_buffer->surface_bos));

   anv_cmd_buffer_current_surface_bbo(cmd_buffer)->relocs.num_relocs = 0;

   cmd_buffer->surface_next = 1;

   /* Reset the list of seen buffers */
   cmd_buffer->seen_bbos.head = 0;
   cmd_buffer->seen_bbos.tail = 0;

   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);
   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) =
      anv_cmd_buffer_current_surface_bbo(cmd_buffer);
}
void
anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
   struct anv_batch_bo *surface_bbo =
      anv_cmd_buffer_current_surface_bbo(cmd_buffer);

   anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END);

   /* Round batch up to an even number of dwords. */
   if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
      anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP);

   anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);

   surface_bbo->length = cmd_buffer->surface_next;
}
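
/* anv_cmd_buffer_add_bo adds a bo (and, optionally, its relocation list) to
 * the execbuf2 validation list.  Note the recursion below is at most one
 * level deep: the nested calls pass relocs == NULL, so they only add the
 * relocation targets themselves without walking further lists.
 */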
static VkResult
anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
                      struct anv_bo *bo,
                      struct anv_reloc_list *relocs)
{
   struct drm_i915_gem_exec_object2 *obj = NULL;

   if (bo->index < cmd_buffer->execbuf2.bo_count &&
       cmd_buffer->execbuf2.bos[bo->index] == bo)
      obj = &cmd_buffer->execbuf2.objects[bo->index];

   if (obj == NULL) {
      /* We've never seen this one before.  Add it to the list and assign
       * an id that we can use later.
       */
      if (cmd_buffer->execbuf2.bo_count >= cmd_buffer->execbuf2.array_length) {
         uint32_t new_len = cmd_buffer->execbuf2.objects ?
                            cmd_buffer->execbuf2.array_length * 2 : 64;

         struct drm_i915_gem_exec_object2 *new_objects =
            anv_device_alloc(cmd_buffer->device, new_len * sizeof(*new_objects),
                             8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
         if (new_objects == NULL)
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

         struct anv_bo **new_bos =
            anv_device_alloc(cmd_buffer->device, new_len * sizeof(*new_bos),
                             8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
         if (new_bos == NULL) {
            anv_device_free(cmd_buffer->device, new_objects);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }

         if (cmd_buffer->execbuf2.objects) {
            memcpy(new_objects, cmd_buffer->execbuf2.objects,
                   cmd_buffer->execbuf2.bo_count * sizeof(*new_objects));
            memcpy(new_bos, cmd_buffer->execbuf2.bos,
                   cmd_buffer->execbuf2.bo_count * sizeof(*new_bos));
         }

         cmd_buffer->execbuf2.objects = new_objects;
         cmd_buffer->execbuf2.bos = new_bos;
         cmd_buffer->execbuf2.array_length = new_len;
      }

      assert(cmd_buffer->execbuf2.bo_count < cmd_buffer->execbuf2.array_length);

      bo->index = cmd_buffer->execbuf2.bo_count++;
      obj = &cmd_buffer->execbuf2.objects[bo->index];
      cmd_buffer->execbuf2.bos[bo->index] = bo;

      obj->handle = bo->gem_handle;
      obj->relocation_count = 0;
      obj->relocs_ptr = 0;
      obj->alignment = 0;
      obj->offset = bo->offset;
      obj->flags = 0;
      obj->rsvd1 = 0;
      obj->rsvd2 = 0;
   }

   if (relocs != NULL && obj->relocation_count == 0) {
      /* This is the first time we've ever seen a list of relocations for
       * this BO.  Go ahead and set the relocations and then walk the list
       * of relocations and add them all.
       */
      obj->relocation_count = relocs->num_relocs;
      obj->relocs_ptr = (uintptr_t) relocs->relocs;

      for (size_t i = 0; i < relocs->num_relocs; i++)
         anv_cmd_buffer_add_bo(cmd_buffer, relocs->reloc_bos[i], NULL);
   }

   return VK_SUCCESS;
}
static void
anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_reloc_list *list)
{
   struct anv_bo *bo;

   /* If the kernel supports I915_EXEC_NO_RELOC, it will compare offset in
    * struct drm_i915_gem_exec_object2 against the bo's current offset and,
    * if none of the bos have moved, it will skip relocation processing
    * altogether.  If I915_EXEC_NO_RELOC is not supported, the kernel
    * ignores the incoming value of offset so we can set it either way.
    * For that to work we need to make sure all relocs use the same
    * presumed offset.
    */

   for (size_t i = 0; i < list->num_relocs; i++) {
      bo = list->reloc_bos[i];
      if (bo->offset != list->relocs[i].presumed_offset)
         cmd_buffer->execbuf2.need_reloc = true;

      list->relocs[i].target_handle = bo->index;
   }
}
void
anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch *batch = &cmd_buffer->batch;

   cmd_buffer->execbuf2.bo_count = 0;
   cmd_buffer->execbuf2.need_reloc = false;

   /* First, we walk over all of the bos we've seen and add them and their
    * relocations to the validate list.
    */
   struct anv_batch_bo **bbo;
   anv_vector_foreach(bbo, &cmd_buffer->seen_bbos)
      anv_cmd_buffer_add_bo(cmd_buffer, &(*bbo)->bo, &(*bbo)->relocs);

   struct anv_batch_bo *first_batch_bo =
      list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);

   /* The kernel requires that the last entry in the validation list be the
    * batch buffer to execute.  We can simply swap the element
    * corresponding to the first batch_bo in the chain with the last
    * element in the list.
    */
   if (first_batch_bo->bo.index != cmd_buffer->execbuf2.bo_count - 1) {
      uint32_t idx = first_batch_bo->bo.index;

      struct drm_i915_gem_exec_object2 tmp_obj =
         cmd_buffer->execbuf2.objects[idx];
      assert(cmd_buffer->execbuf2.bos[idx] == &first_batch_bo->bo);

      cmd_buffer->execbuf2.objects[idx] =
         cmd_buffer->execbuf2.objects[cmd_buffer->execbuf2.bo_count - 1];
      cmd_buffer->execbuf2.bos[idx] =
         cmd_buffer->execbuf2.bos[cmd_buffer->execbuf2.bo_count - 1];
      cmd_buffer->execbuf2.bos[idx]->index = idx;

      cmd_buffer->execbuf2.objects[cmd_buffer->execbuf2.bo_count - 1] = tmp_obj;
      cmd_buffer->execbuf2.bos[cmd_buffer->execbuf2.bo_count - 1] =
         &first_batch_bo->bo;
      first_batch_bo->bo.index = cmd_buffer->execbuf2.bo_count - 1;
   }

   /* Now we go through and fixup all of the relocation lists to point to
    * the correct indices in the object array.  We have to do this after we
    * reorder the list above as some of the indices may have changed.
    */
   anv_vector_foreach(bbo, &cmd_buffer->seen_bbos)
      anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);

   cmd_buffer->execbuf2.execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) cmd_buffer->execbuf2.objects,
      .buffer_count = cmd_buffer->execbuf2.bo_count,
      .batch_start_offset = 0,
      .batch_len = batch->next - batch->start,
      .cliprects_ptr = 0,
      .num_cliprects = 0,
      .DR1 = 0,
      .DR4 = 0,
      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
      .rsvd1 = cmd_buffer->device->context_id,
      .rsvd2 = 0,
   };

   if (!cmd_buffer->execbuf2.need_reloc)
      cmd_buffer->execbuf2.execbuf.flags |= I915_EXEC_NO_RELOC;
}
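
/* Submission sketch (not part of this file): once prepared, the execbuf2
 * struct is handed to the kernel.  In this driver that presumably happens
 * in the queue-submit path via a thin ioctl wrapper, along the lines of:
 *
 *    anv_cmd_buffer_prepare_execbuf(cmd_buffer);
 *    int ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
 *
 * (anv_gem_execbuffer is assumed here; it would wrap
 * DRM_IOCTL_I915_GEM_EXECBUFFER2.)
 */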