anv: add a new execution mode for secondary command buffers
[mesa.git] / src / intel / vulkan / anv_batch_chain.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "genxml/gen8_pack.h"
33 #include "genxml/genX_bits.h"
34
35 #include "util/debug.h"
36
37 /** \file anv_batch_chain.c
38 *
39 * This file contains functions related to anv_cmd_buffer as a data
40 * structure. This involves everything required to create and destroy
41 * the actual batch buffers as well as link them together and handle
42 * relocations and surface state. It specifically does *not* contain any
43 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
44 */
45
46 /*-----------------------------------------------------------------------*
47 * Functions related to anv_reloc_list
48 *-----------------------------------------------------------------------*/
49
50 VkResult
51 anv_reloc_list_init(struct anv_reloc_list *list,
52 const VkAllocationCallbacks *alloc)
53 {
54 memset(list, 0, sizeof(*list));
55 return VK_SUCCESS;
56 }
57
58 static VkResult
59 anv_reloc_list_init_clone(struct anv_reloc_list *list,
60 const VkAllocationCallbacks *alloc,
61 const struct anv_reloc_list *other_list)
62 {
63 list->num_relocs = other_list->num_relocs;
64 list->array_length = other_list->array_length;
65
66 if (list->num_relocs > 0) {
67 list->relocs =
68 vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
69 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
70 if (list->relocs == NULL)
71 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
72
73 list->reloc_bos =
74 vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
75 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
76 if (list->reloc_bos == NULL) {
77 vk_free(alloc, list->relocs);
78 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
79 }
80
81 memcpy(list->relocs, other_list->relocs,
82 list->array_length * sizeof(*list->relocs));
83 memcpy(list->reloc_bos, other_list->reloc_bos,
84 list->array_length * sizeof(*list->reloc_bos));
85 } else {
86 list->relocs = NULL;
87 list->reloc_bos = NULL;
88 }
89
90 list->dep_words = other_list->dep_words;
91
92 if (list->dep_words > 0) {
93 list->deps =
94 vk_alloc(alloc, list->dep_words * sizeof(BITSET_WORD), 8,
95 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
96 memcpy(list->deps, other_list->deps,
97 list->dep_words * sizeof(BITSET_WORD));
98 } else {
99 list->deps = NULL;
100 }
101
102 return VK_SUCCESS;
103 }
104
105 void
106 anv_reloc_list_finish(struct anv_reloc_list *list,
107 const VkAllocationCallbacks *alloc)
108 {
109 vk_free(alloc, list->relocs);
110 vk_free(alloc, list->reloc_bos);
111 vk_free(alloc, list->deps);
112 }
113
114 static VkResult
115 anv_reloc_list_grow(struct anv_reloc_list *list,
116 const VkAllocationCallbacks *alloc,
117 size_t num_additional_relocs)
118 {
119 if (list->num_relocs + num_additional_relocs <= list->array_length)
120 return VK_SUCCESS;
121
122 size_t new_length = MAX2(16, list->array_length * 2);
123 while (new_length < list->num_relocs + num_additional_relocs)
124 new_length *= 2;
125
126 struct drm_i915_gem_relocation_entry *new_relocs =
127 vk_realloc(alloc, list->relocs,
128 new_length * sizeof(*list->relocs), 8,
129 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
130 if (new_relocs == NULL)
131 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
132 list->relocs = new_relocs;
133
134 struct anv_bo **new_reloc_bos =
135 vk_realloc(alloc, list->reloc_bos,
136 new_length * sizeof(*list->reloc_bos), 8,
137 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
138 if (new_reloc_bos == NULL)
139 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
140 list->reloc_bos = new_reloc_bos;
141
142 list->array_length = new_length;
143
144 return VK_SUCCESS;
145 }
146
147 static VkResult
148 anv_reloc_list_grow_deps(struct anv_reloc_list *list,
149 const VkAllocationCallbacks *alloc,
150 uint32_t min_num_words)
151 {
152 if (min_num_words <= list->dep_words)
153 return VK_SUCCESS;
154
155 uint32_t new_length = MAX2(32, list->dep_words * 2);
156 while (new_length < min_num_words)
157 new_length *= 2;
158
159 BITSET_WORD *new_deps =
160 vk_realloc(alloc, list->deps, new_length * sizeof(BITSET_WORD), 8,
161 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
162 if (new_deps == NULL)
163 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
164 list->deps = new_deps;
165
166 /* Zero out the new data */
167 memset(list->deps + list->dep_words, 0,
168 (new_length - list->dep_words) * sizeof(BITSET_WORD));
169 list->dep_words = new_length;
170
171 return VK_SUCCESS;
172 }
173
174 #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
175
176 VkResult
177 anv_reloc_list_add(struct anv_reloc_list *list,
178 const VkAllocationCallbacks *alloc,
179 uint32_t offset, struct anv_bo *target_bo, uint32_t delta,
180 uint64_t *address_u64_out)
181 {
182 struct drm_i915_gem_relocation_entry *entry;
183 int index;
184
185 struct anv_bo *unwrapped_target_bo = anv_bo_unwrap(target_bo);
186 uint64_t target_bo_offset = READ_ONCE(unwrapped_target_bo->offset);
187 if (address_u64_out)
188 *address_u64_out = target_bo_offset + delta;
189
190 if (unwrapped_target_bo->flags & EXEC_OBJECT_PINNED) {
191 assert(!target_bo->is_wrapper);
192 uint32_t idx = unwrapped_target_bo->gem_handle;
193 anv_reloc_list_grow_deps(list, alloc, (idx / BITSET_WORDBITS) + 1);
194 BITSET_SET(list->deps, unwrapped_target_bo->gem_handle);
195 return VK_SUCCESS;
196 }
197
198 VkResult result = anv_reloc_list_grow(list, alloc, 1);
199 if (result != VK_SUCCESS)
200 return result;
201
202 /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
203 index = list->num_relocs++;
204 list->reloc_bos[index] = target_bo;
205 entry = &list->relocs[index];
206 entry->target_handle = -1; /* See also anv_cmd_buffer_process_relocs() */
207 entry->delta = delta;
208 entry->offset = offset;
209 entry->presumed_offset = target_bo_offset;
210 entry->read_domains = 0;
211 entry->write_domain = 0;
212 VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
213
214 return VK_SUCCESS;
215 }
216
217 static void
218 anv_reloc_list_clear(struct anv_reloc_list *list)
219 {
220 list->num_relocs = 0;
221 if (list->dep_words > 0)
222 memset(list->deps, 0, list->dep_words * sizeof(BITSET_WORD));
223 }
224
225 static VkResult
226 anv_reloc_list_append(struct anv_reloc_list *list,
227 const VkAllocationCallbacks *alloc,
228 struct anv_reloc_list *other, uint32_t offset)
229 {
230 VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
231 if (result != VK_SUCCESS)
232 return result;
233
234 if (other->num_relocs > 0) {
235 memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
236 other->num_relocs * sizeof(other->relocs[0]));
237 memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
238 other->num_relocs * sizeof(other->reloc_bos[0]));
239
240 for (uint32_t i = 0; i < other->num_relocs; i++)
241 list->relocs[i + list->num_relocs].offset += offset;
242
243 list->num_relocs += other->num_relocs;
244 }
245
246 anv_reloc_list_grow_deps(list, alloc, other->dep_words);
247 for (uint32_t w = 0; w < other->dep_words; w++)
248 list->deps[w] |= other->deps[w];
249
250 return VK_SUCCESS;
251 }
252
253 /*-----------------------------------------------------------------------*
254 * Functions related to anv_batch
255 *-----------------------------------------------------------------------*/
256
257 void *
258 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
259 {
260 if (batch->next + num_dwords * 4 > batch->end) {
261 VkResult result = batch->extend_cb(batch, batch->user_data);
262 if (result != VK_SUCCESS) {
263 anv_batch_set_error(batch, result);
264 return NULL;
265 }
266 }
267
268 void *p = batch->next;
269
270 batch->next += num_dwords * 4;
271 assert(batch->next <= batch->end);
272
273 return p;
274 }
275
276 uint64_t
277 anv_batch_emit_reloc(struct anv_batch *batch,
278 void *location, struct anv_bo *bo, uint32_t delta)
279 {
280 uint64_t address_u64 = 0;
281 VkResult result = anv_reloc_list_add(batch->relocs, batch->alloc,
282 location - batch->start, bo, delta,
283 &address_u64);
284 if (result != VK_SUCCESS) {
285 anv_batch_set_error(batch, result);
286 return 0;
287 }
288
289 return address_u64;
290 }
291
292 struct anv_address
293 anv_batch_address(struct anv_batch *batch, void *batch_location)
294 {
295 assert(batch->start < batch_location);
296
297 /* Allow a jump at the current location of the batch. */
298 assert(batch->next >= batch_location);
299
300 return anv_address_add(batch->start_addr, batch_location - batch->start);
301 }
302
303 void
304 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
305 {
306 uint32_t size, offset;
307
308 size = other->next - other->start;
309 assert(size % 4 == 0);
310
311 if (batch->next + size > batch->end) {
312 VkResult result = batch->extend_cb(batch, batch->user_data);
313 if (result != VK_SUCCESS) {
314 anv_batch_set_error(batch, result);
315 return;
316 }
317 }
318
319 assert(batch->next + size <= batch->end);
320
321 VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
322 memcpy(batch->next, other->start, size);
323
324 offset = batch->next - batch->start;
325 VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
326 other->relocs, offset);
327 if (result != VK_SUCCESS) {
328 anv_batch_set_error(batch, result);
329 return;
330 }
331
332 batch->next += size;
333 }
334
335 /*-----------------------------------------------------------------------*
336 * Functions related to anv_batch_bo
337 *-----------------------------------------------------------------------*/
338
339 static VkResult
340 anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
341 struct anv_batch_bo **bbo_out)
342 {
343 VkResult result;
344
345 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
346 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
347 if (bbo == NULL)
348 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
349
350 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
351 ANV_CMD_BUFFER_BATCH_SIZE, &bbo->bo);
352 if (result != VK_SUCCESS)
353 goto fail_alloc;
354
355 result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
356 if (result != VK_SUCCESS)
357 goto fail_bo_alloc;
358
359 *bbo_out = bbo;
360
361 return VK_SUCCESS;
362
363 fail_bo_alloc:
364 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
365 fail_alloc:
366 vk_free(&cmd_buffer->pool->alloc, bbo);
367
368 return result;
369 }
370
371 static VkResult
372 anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
373 const struct anv_batch_bo *other_bbo,
374 struct anv_batch_bo **bbo_out)
375 {
376 VkResult result;
377
378 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
379 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
380 if (bbo == NULL)
381 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
382
383 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
384 other_bbo->bo->size, &bbo->bo);
385 if (result != VK_SUCCESS)
386 goto fail_alloc;
387
388 result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
389 &other_bbo->relocs);
390 if (result != VK_SUCCESS)
391 goto fail_bo_alloc;
392
393 bbo->length = other_bbo->length;
394 memcpy(bbo->bo->map, other_bbo->bo->map, other_bbo->length);
395 *bbo_out = bbo;
396
397 return VK_SUCCESS;
398
399 fail_bo_alloc:
400 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
401 fail_alloc:
402 vk_free(&cmd_buffer->pool->alloc, bbo);
403
404 return result;
405 }
406
407 static void
408 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
409 size_t batch_padding)
410 {
411 batch->start_addr = (struct anv_address) { .bo = bbo->bo, };
412 batch->next = batch->start = bbo->bo->map;
413 batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
414 batch->relocs = &bbo->relocs;
415 anv_reloc_list_clear(&bbo->relocs);
416 }
417
418 static void
419 anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
420 size_t batch_padding)
421 {
422 batch->start_addr = (struct anv_address) { .bo = bbo->bo, };
423 batch->start = bbo->bo->map;
424 batch->next = bbo->bo->map + bbo->length;
425 batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
426 batch->relocs = &bbo->relocs;
427 }
428
429 static void
430 anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
431 {
432 assert(batch->start == bbo->bo->map);
433 bbo->length = batch->next - batch->start;
434 VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
435 }
436
437 static VkResult
438 anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
439 struct anv_batch *batch, size_t aditional,
440 size_t batch_padding)
441 {
442 assert(batch->start == bbo->bo->map);
443 bbo->length = batch->next - batch->start;
444
445 size_t new_size = bbo->bo->size;
446 while (new_size <= bbo->length + aditional + batch_padding)
447 new_size *= 2;
448
449 if (new_size == bbo->bo->size)
450 return VK_SUCCESS;
451
452 struct anv_bo *new_bo;
453 VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
454 new_size, &new_bo);
455 if (result != VK_SUCCESS)
456 return result;
457
458 memcpy(new_bo->map, bbo->bo->map, bbo->length);
459
460 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
461
462 bbo->bo = new_bo;
463 anv_batch_bo_continue(bbo, batch, batch_padding);
464
465 return VK_SUCCESS;
466 }
467
468 static void
469 anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
470 struct anv_batch_bo *prev_bbo,
471 struct anv_batch_bo *next_bbo,
472 uint32_t next_bbo_offset)
473 {
474 const uint32_t bb_start_offset =
475 prev_bbo->length - GEN8_MI_BATCH_BUFFER_START_length * 4;
476 ASSERTED const uint32_t *bb_start = prev_bbo->bo->map + bb_start_offset;
477
478 /* Make sure we're looking at a MI_BATCH_BUFFER_START */
479 assert(((*bb_start >> 29) & 0x07) == 0);
480 assert(((*bb_start >> 23) & 0x3f) == 49);
481
482 if (cmd_buffer->device->physical->use_softpin) {
483 assert(prev_bbo->bo->flags & EXEC_OBJECT_PINNED);
484 assert(next_bbo->bo->flags & EXEC_OBJECT_PINNED);
485
486 write_reloc(cmd_buffer->device,
487 prev_bbo->bo->map + bb_start_offset + 4,
488 next_bbo->bo->offset + next_bbo_offset, true);
489 } else {
490 uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
491 assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);
492
493 prev_bbo->relocs.reloc_bos[reloc_idx] = next_bbo->bo;
494 prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;
495
496 /* Use a bogus presumed offset to force a relocation */
497 prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
498 }
499 }
500
501 static void
502 anv_batch_bo_destroy(struct anv_batch_bo *bbo,
503 struct anv_cmd_buffer *cmd_buffer)
504 {
505 anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
506 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
507 vk_free(&cmd_buffer->pool->alloc, bbo);
508 }
509
510 static VkResult
511 anv_batch_bo_list_clone(const struct list_head *list,
512 struct anv_cmd_buffer *cmd_buffer,
513 struct list_head *new_list)
514 {
515 VkResult result = VK_SUCCESS;
516
517 list_inithead(new_list);
518
519 struct anv_batch_bo *prev_bbo = NULL;
520 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
521 struct anv_batch_bo *new_bbo = NULL;
522 result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
523 if (result != VK_SUCCESS)
524 break;
525 list_addtail(&new_bbo->link, new_list);
526
527 if (prev_bbo)
528 anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);
529
530 prev_bbo = new_bbo;
531 }
532
533 if (result != VK_SUCCESS) {
534 list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link) {
535 list_del(&bbo->link);
536 anv_batch_bo_destroy(bbo, cmd_buffer);
537 }
538 }
539
540 return result;
541 }
542
543 /*-----------------------------------------------------------------------*
544 * Functions related to anv_batch_bo
545 *-----------------------------------------------------------------------*/
546
547 static struct anv_batch_bo *
548 anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
549 {
550 return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
551 }
552
553 struct anv_address
554 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
555 {
556 struct anv_state_pool *pool = anv_binding_table_pool(cmd_buffer->device);
557 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
558 return (struct anv_address) {
559 .bo = pool->block_pool.bo,
560 .offset = bt_block->offset - pool->start_offset,
561 };
562 }
563
564 static void
565 emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
566 struct anv_bo *bo, uint32_t offset)
567 {
568 /* In gen8+ the address field grew to two dwords to accomodate 48 bit
569 * offsets. The high 16 bits are in the last dword, so we can use the gen8
570 * version in either case, as long as we set the instruction length in the
571 * header accordingly. This means that we always emit three dwords here
572 * and all the padding and adjustment we do in this file works for all
573 * gens.
574 */
575
576 #define GEN7_MI_BATCH_BUFFER_START_length 2
577 #define GEN7_MI_BATCH_BUFFER_START_length_bias 2
578
579 const uint32_t gen7_length =
580 GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
581 const uint32_t gen8_length =
582 GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;
583
584 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
585 bbs.DWordLength = cmd_buffer->device->info.gen < 8 ?
586 gen7_length : gen8_length;
587 bbs.SecondLevelBatchBuffer = Firstlevelbatch;
588 bbs.AddressSpaceIndicator = ASI_PPGTT;
589 bbs.BatchBufferStartAddress = (struct anv_address) { bo, offset };
590 }
591 }
592
593 static void
594 cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
595 struct anv_batch_bo *bbo)
596 {
597 struct anv_batch *batch = &cmd_buffer->batch;
598 struct anv_batch_bo *current_bbo =
599 anv_cmd_buffer_current_batch_bo(cmd_buffer);
600
601 /* We set the end of the batch a little short so we would be sure we
602 * have room for the chaining command. Since we're about to emit the
603 * chaining command, let's set it back where it should go.
604 */
605 batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
606 assert(batch->end == current_bbo->bo->map + current_bbo->bo->size);
607
608 emit_batch_buffer_start(cmd_buffer, bbo->bo, 0);
609
610 anv_batch_bo_finish(current_bbo, batch);
611 }
612
613 static VkResult
614 anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
615 {
616 struct anv_cmd_buffer *cmd_buffer = _data;
617 struct anv_batch_bo *new_bbo;
618
619 VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
620 if (result != VK_SUCCESS)
621 return result;
622
623 struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
624 if (seen_bbo == NULL) {
625 anv_batch_bo_destroy(new_bbo, cmd_buffer);
626 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
627 }
628 *seen_bbo = new_bbo;
629
630 cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
631
632 list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
633
634 anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);
635
636 return VK_SUCCESS;
637 }
638
639 static VkResult
640 anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
641 {
642 struct anv_cmd_buffer *cmd_buffer = _data;
643 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
644
645 anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
646 GEN8_MI_BATCH_BUFFER_START_length * 4);
647
648 return VK_SUCCESS;
649 }
650
651 /** Allocate a binding table
652 *
653 * This function allocates a binding table. This is a bit more complicated
654 * than one would think due to a combination of Vulkan driver design and some
655 * unfortunate hardware restrictions.
656 *
657 * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
658 * the binding table pointer which means that all binding tables need to live
659 * in the bottom 64k of surface state base address. The way the GL driver has
660 * classically dealt with this restriction is to emit all surface states
661 * on-the-fly into the batch and have a batch buffer smaller than 64k. This
662 * isn't really an option in Vulkan for a couple of reasons:
663 *
664 * 1) In Vulkan, we have growing (or chaining) batches so surface states have
665 * to live in their own buffer and we have to be able to re-emit
666 * STATE_BASE_ADDRESS as needed which requires a full pipeline stall. In
667 * order to avoid emitting STATE_BASE_ADDRESS any more often than needed
668 * (it's not that hard to hit 64k of just binding tables), we allocate
669 * surface state objects up-front when VkImageView is created. In order
670 * for this to work, surface state objects need to be allocated from a
671 * global buffer.
672 *
673 * 2) We tried to design the surface state system in such a way that it's
674 * already ready for bindless texturing. The way bindless texturing works
675 * on our hardware is that you have a big pool of surface state objects
676 * (with its own state base address) and the bindless handles are simply
677 * offsets into that pool. With the architecture we chose, we already
678 * have that pool and it's exactly the same pool that we use for regular
679 * surface states so we should already be ready for bindless.
680 *
681 * 3) For render targets, we need to be able to fill out the surface states
682 * later in vkBeginRenderPass so that we can assign clear colors
683 * correctly. One way to do this would be to just create the surface
684 * state data and then repeatedly copy it into the surface state BO every
685 * time we have to re-emit STATE_BASE_ADDRESS. While this works, it's
686 * rather annoying and just being able to allocate them up-front and
687 * re-use them for the entire render pass.
688 *
689 * While none of these are technically blockers for emitting state on the fly
690 * like we do in GL, the ability to have a single surface state pool is
691 * simplifies things greatly. Unfortunately, it comes at a cost...
692 *
693 * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
694 * place the binding tables just anywhere in surface state base address.
695 * Because 64k isn't a whole lot of space, we can't simply restrict the
696 * surface state buffer to 64k, we have to be more clever. The solution we've
697 * chosen is to have a block pool with a maximum size of 2G that starts at
698 * zero and grows in both directions. All surface states are allocated from
699 * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
700 * binding tables from the bottom of the pool (negative offsets). Every time
701 * we allocate a new binding table block, we set surface state base address to
702 * point to the bottom of the binding table block. This way all of the
703 * binding tables in the block are in the bottom 64k of surface state base
704 * address. When we fill out the binding table, we add the distance between
705 * the bottom of our binding table block and zero of the block pool to the
706 * surface state offsets so that they are correct relative to out new surface
707 * state base address at the bottom of the binding table block.
708 *
709 * \see adjust_relocations_from_block_pool()
710 * \see adjust_relocations_too_block_pool()
711 *
712 * \param[in] entries The number of surface state entries the binding
713 * table should be able to hold.
714 *
715 * \param[out] state_offset The offset surface surface state base address
716 * where the surface states live. This must be
717 * added to the surface state offset when it is
718 * written into the binding table entry.
719 *
720 * \return An anv_state representing the binding table
721 */
722 struct anv_state
723 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
724 uint32_t entries, uint32_t *state_offset)
725 {
726 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
727
728 uint32_t bt_size = align_u32(entries * 4, 32);
729
730 struct anv_state state = cmd_buffer->bt_next;
731 if (bt_size > state.alloc_size)
732 return (struct anv_state) { 0 };
733
734 state.alloc_size = bt_size;
735 cmd_buffer->bt_next.offset += bt_size;
736 cmd_buffer->bt_next.map += bt_size;
737 cmd_buffer->bt_next.alloc_size -= bt_size;
738
739 assert(bt_block->offset < 0);
740 *state_offset = -bt_block->offset;
741
742 return state;
743 }
744
745 struct anv_state
746 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
747 {
748 struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
749 return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
750 isl_dev->ss.size, isl_dev->ss.align);
751 }
752
753 struct anv_state
754 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
755 uint32_t size, uint32_t alignment)
756 {
757 return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
758 size, alignment);
759 }
760
761 VkResult
762 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
763 {
764 struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
765 if (bt_block == NULL) {
766 anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
767 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
768 }
769
770 *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
771
772 /* The bt_next state is a rolling state (we update it as we suballocate
773 * from it) which is relative to the start of the binding table block.
774 */
775 cmd_buffer->bt_next = *bt_block;
776 cmd_buffer->bt_next.offset = 0;
777
778 return VK_SUCCESS;
779 }
780
781 VkResult
782 anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
783 {
784 struct anv_batch_bo *batch_bo;
785 VkResult result;
786
787 list_inithead(&cmd_buffer->batch_bos);
788
789 result = anv_batch_bo_create(cmd_buffer, &batch_bo);
790 if (result != VK_SUCCESS)
791 return result;
792
793 list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
794
795 cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
796 cmd_buffer->batch.user_data = cmd_buffer;
797
798 if (cmd_buffer->device->can_chain_batches) {
799 cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
800 } else {
801 cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
802 }
803
804 anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
805 GEN8_MI_BATCH_BUFFER_START_length * 4);
806
807 int success = u_vector_init(&cmd_buffer->seen_bbos,
808 sizeof(struct anv_bo *),
809 8 * sizeof(struct anv_bo *));
810 if (!success)
811 goto fail_batch_bo;
812
813 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
814
815 /* u_vector requires power-of-two size elements */
816 unsigned pow2_state_size = util_next_power_of_two(sizeof(struct anv_state));
817 success = u_vector_init(&cmd_buffer->bt_block_states,
818 pow2_state_size, 8 * pow2_state_size);
819 if (!success)
820 goto fail_seen_bbos;
821
822 result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
823 &cmd_buffer->pool->alloc);
824 if (result != VK_SUCCESS)
825 goto fail_bt_blocks;
826 cmd_buffer->last_ss_pool_center = 0;
827
828 result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
829 if (result != VK_SUCCESS)
830 goto fail_bt_blocks;
831
832 return VK_SUCCESS;
833
834 fail_bt_blocks:
835 u_vector_finish(&cmd_buffer->bt_block_states);
836 fail_seen_bbos:
837 u_vector_finish(&cmd_buffer->seen_bbos);
838 fail_batch_bo:
839 anv_batch_bo_destroy(batch_bo, cmd_buffer);
840
841 return result;
842 }
843
844 void
845 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
846 {
847 struct anv_state *bt_block;
848 u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
849 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
850 u_vector_finish(&cmd_buffer->bt_block_states);
851
852 anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
853
854 u_vector_finish(&cmd_buffer->seen_bbos);
855
856 /* Destroy all of the batch buffers */
857 list_for_each_entry_safe(struct anv_batch_bo, bbo,
858 &cmd_buffer->batch_bos, link) {
859 list_del(&bbo->link);
860 anv_batch_bo_destroy(bbo, cmd_buffer);
861 }
862 }
863
864 void
865 anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
866 {
867 /* Delete all but the first batch bo */
868 assert(!list_is_empty(&cmd_buffer->batch_bos));
869 while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
870 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
871 list_del(&bbo->link);
872 anv_batch_bo_destroy(bbo, cmd_buffer);
873 }
874 assert(!list_is_empty(&cmd_buffer->batch_bos));
875
876 anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
877 &cmd_buffer->batch,
878 GEN8_MI_BATCH_BUFFER_START_length * 4);
879
880 while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
881 struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
882 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
883 }
884 assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
885 cmd_buffer->bt_next = *(struct anv_state *)u_vector_head(&cmd_buffer->bt_block_states);
886 cmd_buffer->bt_next.offset = 0;
887
888 anv_reloc_list_clear(&cmd_buffer->surface_relocs);
889 cmd_buffer->last_ss_pool_center = 0;
890
891 /* Reset the list of seen buffers */
892 cmd_buffer->seen_bbos.head = 0;
893 cmd_buffer->seen_bbos.tail = 0;
894
895 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) =
896 anv_cmd_buffer_current_batch_bo(cmd_buffer);
897 }
898
899 void
900 anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
901 {
902 struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
903
904 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
905 /* When we start a batch buffer, we subtract a certain amount of
906 * padding from the end to ensure that we always have room to emit a
907 * BATCH_BUFFER_START to chain to the next BO. We need to remove
908 * that padding before we end the batch; otherwise, we may end up
909 * with our BATCH_BUFFER_END in another BO.
910 */
911 cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
912 assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
913
914 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END, bbe);
915
916 /* Round batch up to an even number of dwords. */
917 if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
918 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);
919
920 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
921 } else {
922 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
923 /* If this is a secondary command buffer, we need to determine the
924 * mode in which it will be executed with vkExecuteCommands. We
925 * determine this statically here so that this stays in sync with the
926 * actual ExecuteCommands implementation.
927 */
928 const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
929 if (!cmd_buffer->device->can_chain_batches) {
930 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
931 } else if (cmd_buffer->device->physical->use_softpin) {
932 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN;
933 /* If the secondary command buffer begins & ends in the same BO and
934 * its length is less than the length of CS prefetch, add some NOOPs
935 * instructions so the last MI_BATCH_BUFFER_START is outside the CS
936 * prefetch.
937 */
938 if (cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) {
939 int32_t batch_len =
940 cmd_buffer->batch.next - cmd_buffer->batch.start;
941
942 for (int32_t i = 0; i < (512 - batch_len); i += 4)
943 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);
944 }
945
946 void *jump_addr =
947 anv_batch_emitn(&cmd_buffer->batch,
948 GEN8_MI_BATCH_BUFFER_START_length,
949 GEN8_MI_BATCH_BUFFER_START,
950 .AddressSpaceIndicator = ASI_PPGTT,
951 .SecondLevelBatchBuffer = Firstlevelbatch) +
952 (GEN8_MI_BATCH_BUFFER_START_BatchBufferStartAddress_start / 8);
953 cmd_buffer->return_addr = anv_batch_address(&cmd_buffer->batch, jump_addr);
954 } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
955 (length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
956 /* If the secondary has exactly one batch buffer in its list *and*
957 * that batch buffer is less than half of the maximum size, we're
958 * probably better of simply copying it into our batch.
959 */
960 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
961 } else if (!(cmd_buffer->usage_flags &
962 VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
963 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
964
965 /* In order to chain, we need this command buffer to contain an
966 * MI_BATCH_BUFFER_START which will jump back to the calling batch.
967 * It doesn't matter where it points now so long as has a valid
968 * relocation. We'll adjust it later as part of the chaining
969 * process.
970 *
971 * We set the end of the batch a little short so we would be sure we
972 * have room for the chaining command. Since we're about to emit the
973 * chaining command, let's set it back where it should go.
974 */
975 cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
976 assert(cmd_buffer->batch.start == batch_bo->bo->map);
977 assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
978
979 emit_batch_buffer_start(cmd_buffer, batch_bo->bo, 0);
980 assert(cmd_buffer->batch.start == batch_bo->bo->map);
981 } else {
982 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
983 }
984 }
985
986 anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
987 }
988
989 static VkResult
990 anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
991 struct list_head *list)
992 {
993 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
994 struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
995 if (bbo_ptr == NULL)
996 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
997
998 *bbo_ptr = bbo;
999 }
1000
1001 return VK_SUCCESS;
1002 }
1003
1004 void
1005 anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
1006 struct anv_cmd_buffer *secondary)
1007 {
1008 switch (secondary->exec_mode) {
1009 case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
1010 anv_batch_emit_batch(&primary->batch, &secondary->batch);
1011 break;
1012 case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
1013 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
1014 unsigned length = secondary->batch.end - secondary->batch.start;
1015 anv_batch_bo_grow(primary, bbo, &primary->batch, length,
1016 GEN8_MI_BATCH_BUFFER_START_length * 4);
1017 anv_batch_emit_batch(&primary->batch, &secondary->batch);
1018 break;
1019 }
1020 case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
1021 struct anv_batch_bo *first_bbo =
1022 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1023 struct anv_batch_bo *last_bbo =
1024 list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1025
1026 emit_batch_buffer_start(primary, first_bbo->bo, 0);
1027
1028 struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
1029 assert(primary->batch.start == this_bbo->bo->map);
1030 uint32_t offset = primary->batch.next - primary->batch.start;
1031
1032 /* Make the tail of the secondary point back to right after the
1033 * MI_BATCH_BUFFER_START in the primary batch.
1034 */
1035 anv_batch_bo_link(primary, last_bbo, this_bbo, offset);
1036
1037 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
1038 break;
1039 }
1040 case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
1041 struct list_head copy_list;
1042 VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
1043 secondary,
1044 &copy_list);
1045 if (result != VK_SUCCESS)
1046 return; /* FIXME */
1047
1048 anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
1049
1050 struct anv_batch_bo *first_bbo =
1051 list_first_entry(&copy_list, struct anv_batch_bo, link);
1052 struct anv_batch_bo *last_bbo =
1053 list_last_entry(&copy_list, struct anv_batch_bo, link);
1054
1055 cmd_buffer_chain_to_batch_bo(primary, first_bbo);
1056
1057 list_splicetail(&copy_list, &primary->batch_bos);
1058
1059 anv_batch_bo_continue(last_bbo, &primary->batch,
1060 GEN8_MI_BATCH_BUFFER_START_length * 4);
1061 break;
1062 }
1063 case ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN: {
1064 struct anv_batch_bo *first_bbo =
1065 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1066
1067 uint64_t *write_return_addr =
1068 anv_batch_emitn(&primary->batch,
1069 GEN8_MI_STORE_DATA_IMM_length + 1 /* QWord write */,
1070 GEN8_MI_STORE_DATA_IMM,
1071 .Address = secondary->return_addr)
1072 + (GEN8_MI_STORE_DATA_IMM_ImmediateData_start / 8);
1073
1074 emit_batch_buffer_start(primary, first_bbo->bo, 0);
1075
1076 *write_return_addr =
1077 anv_address_physical(anv_batch_address(&primary->batch,
1078 primary->batch.next));
1079
1080 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
1081 break;
1082 }
1083 default:
1084 assert(!"Invalid execution mode");
1085 }
1086
1087 anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
1088 &secondary->surface_relocs, 0);
1089 }
1090
1091 struct anv_execbuf {
1092 struct drm_i915_gem_execbuffer2 execbuf;
1093
1094 struct drm_i915_gem_exec_object2 * objects;
1095 uint32_t bo_count;
1096 struct anv_bo ** bos;
1097
1098 /* Allocated length of the 'objects' and 'bos' arrays */
1099 uint32_t array_length;
1100
1101 bool has_relocs;
1102
1103 const VkAllocationCallbacks * alloc;
1104 VkSystemAllocationScope alloc_scope;
1105 };
1106
1107 static void
1108 anv_execbuf_init(struct anv_execbuf *exec)
1109 {
1110 memset(exec, 0, sizeof(*exec));
1111 }
1112
1113 static void
1114 anv_execbuf_finish(struct anv_execbuf *exec)
1115 {
1116 vk_free(exec->alloc, exec->objects);
1117 vk_free(exec->alloc, exec->bos);
1118 }
1119
1120 static VkResult
1121 anv_execbuf_add_bo_bitset(struct anv_device *device,
1122 struct anv_execbuf *exec,
1123 uint32_t dep_words,
1124 BITSET_WORD *deps,
1125 uint32_t extra_flags);
1126
1127 static VkResult
1128 anv_execbuf_add_bo(struct anv_device *device,
1129 struct anv_execbuf *exec,
1130 struct anv_bo *bo,
1131 struct anv_reloc_list *relocs,
1132 uint32_t extra_flags)
1133 {
1134 struct drm_i915_gem_exec_object2 *obj = NULL;
1135
1136 bo = anv_bo_unwrap(bo);
1137
1138 if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
1139 obj = &exec->objects[bo->index];
1140
1141 if (obj == NULL) {
1142 /* We've never seen this one before. Add it to the list and assign
1143 * an id that we can use later.
1144 */
1145 if (exec->bo_count >= exec->array_length) {
1146 uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
1147
1148 struct drm_i915_gem_exec_object2 *new_objects =
1149 vk_alloc(exec->alloc, new_len * sizeof(*new_objects), 8, exec->alloc_scope);
1150 if (new_objects == NULL)
1151 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1152
1153 struct anv_bo **new_bos =
1154 vk_alloc(exec->alloc, new_len * sizeof(*new_bos), 8, exec->alloc_scope);
1155 if (new_bos == NULL) {
1156 vk_free(exec->alloc, new_objects);
1157 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1158 }
1159
1160 if (exec->objects) {
1161 memcpy(new_objects, exec->objects,
1162 exec->bo_count * sizeof(*new_objects));
1163 memcpy(new_bos, exec->bos,
1164 exec->bo_count * sizeof(*new_bos));
1165 }
1166
1167 vk_free(exec->alloc, exec->objects);
1168 vk_free(exec->alloc, exec->bos);
1169
1170 exec->objects = new_objects;
1171 exec->bos = new_bos;
1172 exec->array_length = new_len;
1173 }
1174
1175 assert(exec->bo_count < exec->array_length);
1176
1177 bo->index = exec->bo_count++;
1178 obj = &exec->objects[bo->index];
1179 exec->bos[bo->index] = bo;
1180
1181 obj->handle = bo->gem_handle;
1182 obj->relocation_count = 0;
1183 obj->relocs_ptr = 0;
1184 obj->alignment = 0;
1185 obj->offset = bo->offset;
1186 obj->flags = bo->flags | extra_flags;
1187 obj->rsvd1 = 0;
1188 obj->rsvd2 = 0;
1189 }
1190
1191 if (extra_flags & EXEC_OBJECT_WRITE) {
1192 obj->flags |= EXEC_OBJECT_WRITE;
1193 obj->flags &= ~EXEC_OBJECT_ASYNC;
1194 }
1195
1196 if (relocs != NULL) {
1197 assert(obj->relocation_count == 0);
1198
1199 if (relocs->num_relocs > 0) {
1200 /* This is the first time we've ever seen a list of relocations for
1201 * this BO. Go ahead and set the relocations and then walk the list
1202 * of relocations and add them all.
1203 */
1204 exec->has_relocs = true;
1205 obj->relocation_count = relocs->num_relocs;
1206 obj->relocs_ptr = (uintptr_t) relocs->relocs;
1207
1208 for (size_t i = 0; i < relocs->num_relocs; i++) {
1209 VkResult result;
1210
1211 /* A quick sanity check on relocations */
1212 assert(relocs->relocs[i].offset < bo->size);
1213 result = anv_execbuf_add_bo(device, exec, relocs->reloc_bos[i],
1214 NULL, extra_flags);
1215 if (result != VK_SUCCESS)
1216 return result;
1217 }
1218 }
1219
1220 return anv_execbuf_add_bo_bitset(device, exec, relocs->dep_words,
1221 relocs->deps, extra_flags);
1222 }
1223
1224 return VK_SUCCESS;
1225 }
1226
1227 /* Add BO dependencies to execbuf */
1228 static VkResult
1229 anv_execbuf_add_bo_bitset(struct anv_device *device,
1230 struct anv_execbuf *exec,
1231 uint32_t dep_words,
1232 BITSET_WORD *deps,
1233 uint32_t extra_flags)
1234 {
1235 for (uint32_t w = 0; w < dep_words; w++) {
1236 BITSET_WORD mask = deps[w];
1237 while (mask) {
1238 int i = u_bit_scan(&mask);
1239 uint32_t gem_handle = w * BITSET_WORDBITS + i;
1240 struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
1241 assert(bo->refcount > 0);
1242 VkResult result =
1243 anv_execbuf_add_bo(device, exec, bo, NULL, extra_flags);
1244 if (result != VK_SUCCESS)
1245 return result;
1246 }
1247 }
1248
1249 return VK_SUCCESS;
1250 }
1251
1252 static void
1253 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
1254 struct anv_reloc_list *list)
1255 {
1256 for (size_t i = 0; i < list->num_relocs; i++)
1257 list->relocs[i].target_handle = anv_bo_unwrap(list->reloc_bos[i])->index;
1258 }
1259
1260 static void
1261 adjust_relocations_from_state_pool(struct anv_state_pool *pool,
1262 struct anv_reloc_list *relocs,
1263 uint32_t last_pool_center_bo_offset)
1264 {
1265 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1266 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1267
1268 for (size_t i = 0; i < relocs->num_relocs; i++) {
1269 /* All of the relocations from this block pool to other BO's should
1270 * have been emitted relative to the surface block pool center. We
1271 * need to add the center offset to make them relative to the
1272 * beginning of the actual GEM bo.
1273 */
1274 relocs->relocs[i].offset += delta;
1275 }
1276 }
1277
1278 static void
1279 adjust_relocations_to_state_pool(struct anv_state_pool *pool,
1280 struct anv_bo *from_bo,
1281 struct anv_reloc_list *relocs,
1282 uint32_t last_pool_center_bo_offset)
1283 {
1284 assert(!from_bo->is_wrapper);
1285 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1286 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1287
1288 /* When we initially emit relocations into a block pool, we don't
1289 * actually know what the final center_bo_offset will be so we just emit
1290 * it as if center_bo_offset == 0. Now that we know what the center
1291 * offset is, we need to walk the list of relocations and adjust any
1292 * relocations that point to the pool bo with the correct offset.
1293 */
1294 for (size_t i = 0; i < relocs->num_relocs; i++) {
1295 if (relocs->reloc_bos[i] == pool->block_pool.bo) {
1296 /* Adjust the delta value in the relocation to correctly
1297 * correspond to the new delta. Initially, this value may have
1298 * been negative (if treated as unsigned), but we trust in
1299 * uint32_t roll-over to fix that for us at this point.
1300 */
1301 relocs->relocs[i].delta += delta;
1302
1303 /* Since the delta has changed, we need to update the actual
1304 * relocated value with the new presumed value. This function
1305 * should only be called on batch buffers, so we know it isn't in
1306 * use by the GPU at the moment.
1307 */
1308 assert(relocs->relocs[i].offset < from_bo->size);
1309 write_reloc(pool->block_pool.device,
1310 from_bo->map + relocs->relocs[i].offset,
1311 relocs->relocs[i].presumed_offset +
1312 relocs->relocs[i].delta, false);
1313 }
1314 }
1315 }
1316
1317 static void
1318 anv_reloc_list_apply(struct anv_device *device,
1319 struct anv_reloc_list *list,
1320 struct anv_bo *bo,
1321 bool always_relocate)
1322 {
1323 bo = anv_bo_unwrap(bo);
1324
1325 for (size_t i = 0; i < list->num_relocs; i++) {
1326 struct anv_bo *target_bo = anv_bo_unwrap(list->reloc_bos[i]);
1327 if (list->relocs[i].presumed_offset == target_bo->offset &&
1328 !always_relocate)
1329 continue;
1330
1331 void *p = bo->map + list->relocs[i].offset;
1332 write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
1333 list->relocs[i].presumed_offset = target_bo->offset;
1334 }
1335 }
1336
1337 /**
1338 * This function applies the relocation for a command buffer and writes the
1339 * actual addresses into the buffers as per what we were told by the kernel on
1340 * the previous execbuf2 call. This should be safe to do because, for each
1341 * relocated address, we have two cases:
1342 *
1343 * 1) The target BO is inactive (as seen by the kernel). In this case, it is
1344 * not in use by the GPU so updating the address is 100% ok. It won't be
1345 * in-use by the GPU (from our context) again until the next execbuf2
1346 * happens. If the kernel decides to move it in the next execbuf2, it
1347 * will have to do the relocations itself, but that's ok because it should
1348 * have all of the information needed to do so.
1349 *
1350 * 2) The target BO is active (as seen by the kernel). In this case, it
1351 * hasn't moved since the last execbuffer2 call because GTT shuffling
1352 * *only* happens when the BO is idle. (From our perspective, it only
1353 * happens inside the execbuffer2 ioctl, but the shuffling may be
1354 * triggered by another ioctl, with full-ppgtt this is limited to only
1355 * execbuffer2 ioctls on the same context, or memory pressure.) Since the
1356 * target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
1357 * address and the relocated value we are writing into the BO will be the
1358 * same as the value that is already there.
1359 *
1360 * There is also a possibility that the target BO is active but the exact
1361 * RENDER_SURFACE_STATE object we are writing the relocation into isn't in
1362 * use. In this case, the address currently in the RENDER_SURFACE_STATE
1363 * may be stale but it's still safe to write the relocation because that
1364 * particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
1365 * won't be until the next execbuf2 call.
1366 *
1367 * By doing relocations on the CPU, we can tell the kernel that it doesn't
1368 * need to bother. We want to do this because the surface state buffer is
1369 * used by every command buffer so, if the kernel does the relocations, it
1370 * will always be busy and the kernel will always stall. This is also
1371 * probably the fastest mechanism for doing relocations since the kernel would
1372 * have to make a full copy of all the relocations lists.
1373 */
1374 static bool
1375 relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
1376 struct anv_execbuf *exec)
1377 {
1378 if (!exec->has_relocs)
1379 return true;
1380
1381 static int userspace_relocs = -1;
1382 if (userspace_relocs < 0)
1383 userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
1384 if (!userspace_relocs)
1385 return false;
1386
1387 /* First, we have to check to see whether or not we can even do the
1388 * relocation. New buffers which have never been submitted to the kernel
1389 * don't have a valid offset so we need to let the kernel do relocations so
1390 * that we can get offsets for them. On future execbuf2 calls, those
1391 * buffers will have offsets and we will be able to skip relocating.
1392 * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
1393 */
1394 for (uint32_t i = 0; i < exec->bo_count; i++) {
1395 assert(!exec->bos[i]->is_wrapper);
1396 if (exec->bos[i]->offset == (uint64_t)-1)
1397 return false;
1398 }
1399
1400 /* Since surface states are shared between command buffers and we don't
1401 * know what order they will be submitted to the kernel, we don't know
1402 * what address is actually written in the surface state object at any
1403 * given time. The only option is to always relocate them.
1404 */
1405 struct anv_bo *surface_state_bo =
1406 anv_bo_unwrap(cmd_buffer->device->surface_state_pool.block_pool.bo);
1407 anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
1408 surface_state_bo,
1409 true /* always relocate surface states */);
1410
1411 /* Since we own all of the batch buffers, we know what values are stored
1412 * in the relocated addresses and only have to update them if the offsets
1413 * have changed.
1414 */
1415 struct anv_batch_bo **bbo;
1416 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1417 anv_reloc_list_apply(cmd_buffer->device,
1418 &(*bbo)->relocs, (*bbo)->bo, false);
1419 }
1420
1421 for (uint32_t i = 0; i < exec->bo_count; i++)
1422 exec->objects[i].offset = exec->bos[i]->offset;
1423
1424 return true;
1425 }
1426
1427 static VkResult
1428 setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
1429 struct anv_cmd_buffer *cmd_buffer)
1430 {
1431 struct anv_batch *batch = &cmd_buffer->batch;
1432 struct anv_state_pool *ss_pool =
1433 &cmd_buffer->device->surface_state_pool;
1434
1435 adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
1436 cmd_buffer->last_ss_pool_center);
1437 VkResult result;
1438 if (cmd_buffer->device->physical->use_softpin) {
1439 anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
1440 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1441 bo, NULL, 0);
1442 if (result != VK_SUCCESS)
1443 return result;
1444 }
1445 /* Add surface dependencies (BOs) to the execbuf */
1446 anv_execbuf_add_bo_bitset(cmd_buffer->device, execbuf,
1447 cmd_buffer->surface_relocs.dep_words,
1448 cmd_buffer->surface_relocs.deps, 0);
1449
1450 /* Add the BOs for all memory objects */
1451 list_for_each_entry(struct anv_device_memory, mem,
1452 &cmd_buffer->device->memory_objects, link) {
1453 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1454 mem->bo, NULL, 0);
1455 if (result != VK_SUCCESS)
1456 return result;
1457 }
1458
1459 struct anv_block_pool *pool;
1460 pool = &cmd_buffer->device->dynamic_state_pool.block_pool;
1461 anv_block_pool_foreach_bo(bo, pool) {
1462 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1463 bo, NULL, 0);
1464 if (result != VK_SUCCESS)
1465 return result;
1466 }
1467
1468 pool = &cmd_buffer->device->instruction_state_pool.block_pool;
1469 anv_block_pool_foreach_bo(bo, pool) {
1470 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1471 bo, NULL, 0);
1472 if (result != VK_SUCCESS)
1473 return result;
1474 }
1475
1476 pool = &cmd_buffer->device->binding_table_pool.block_pool;
1477 anv_block_pool_foreach_bo(bo, pool) {
1478 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1479 bo, NULL, 0);
1480 if (result != VK_SUCCESS)
1481 return result;
1482 }
1483 } else {
1484 /* Since we aren't in the softpin case, all of our STATE_BASE_ADDRESS BOs
1485 * will get added automatically by processing relocations on the batch
1486 * buffer. We have to add the surface state BO manually because it has
1487 * relocations of its own that we need to be sure are processsed.
1488 */
1489 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1490 ss_pool->block_pool.bo,
1491 &cmd_buffer->surface_relocs, 0);
1492 if (result != VK_SUCCESS)
1493 return result;
1494 }
1495
1496 /* First, we walk over all of the bos we've seen and add them and their
1497 * relocations to the validate list.
1498 */
1499 struct anv_batch_bo **bbo;
1500 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1501 adjust_relocations_to_state_pool(ss_pool, (*bbo)->bo, &(*bbo)->relocs,
1502 cmd_buffer->last_ss_pool_center);
1503
1504 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1505 (*bbo)->bo, &(*bbo)->relocs, 0);
1506 if (result != VK_SUCCESS)
1507 return result;
1508 }
1509
1510 /* Now that we've adjusted all of the surface state relocations, we need to
1511 * record the surface state pool center so future executions of the command
1512 * buffer can adjust correctly.
1513 */
1514 cmd_buffer->last_ss_pool_center = ss_pool->block_pool.center_bo_offset;
1515
1516 struct anv_batch_bo *first_batch_bo =
1517 list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
1518
1519 /* The kernel requires that the last entry in the validation list be the
1520 * batch buffer to execute. We can simply swap the element
1521 * corresponding to the first batch_bo in the chain with the last
1522 * element in the list.
1523 */
1524 if (first_batch_bo->bo->index != execbuf->bo_count - 1) {
1525 uint32_t idx = first_batch_bo->bo->index;
1526 uint32_t last_idx = execbuf->bo_count - 1;
1527
1528 struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
1529 assert(execbuf->bos[idx] == first_batch_bo->bo);
1530
1531 execbuf->objects[idx] = execbuf->objects[last_idx];
1532 execbuf->bos[idx] = execbuf->bos[last_idx];
1533 execbuf->bos[idx]->index = idx;
1534
1535 execbuf->objects[last_idx] = tmp_obj;
1536 execbuf->bos[last_idx] = first_batch_bo->bo;
1537 first_batch_bo->bo->index = last_idx;
1538 }
1539
1540 /* If we are pinning our BOs, we shouldn't have to relocate anything */
1541 if (cmd_buffer->device->physical->use_softpin)
1542 assert(!execbuf->has_relocs);
1543
1544 /* Now we go through and fixup all of the relocation lists to point to
1545 * the correct indices in the object array. We have to do this after we
1546 * reorder the list above as some of the indices may have changed.
1547 */
1548 if (execbuf->has_relocs) {
1549 u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
1550 anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
1551
1552 anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
1553 }
1554
1555 if (!cmd_buffer->device->info.has_llc) {
1556 __builtin_ia32_mfence();
1557 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1558 for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
1559 __builtin_ia32_clflush((*bbo)->bo->map + i);
1560 }
1561 }
1562
1563 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1564 .buffers_ptr = (uintptr_t) execbuf->objects,
1565 .buffer_count = execbuf->bo_count,
1566 .batch_start_offset = 0,
1567 .batch_len = batch->next - batch->start,
1568 .cliprects_ptr = 0,
1569 .num_cliprects = 0,
1570 .DR1 = 0,
1571 .DR4 = 0,
1572 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
1573 .rsvd1 = cmd_buffer->device->context_id,
1574 .rsvd2 = 0,
1575 };
1576
1577 if (relocate_cmd_buffer(cmd_buffer, execbuf)) {
1578 /* If we were able to successfully relocate everything, tell the kernel
1579 * that it can skip doing relocations. The requirement for using
1580 * NO_RELOC is:
1581 *
1582 * 1) The addresses written in the objects must match the corresponding
1583 * reloc.presumed_offset which in turn must match the corresponding
1584 * execobject.offset.
1585 *
1586 * 2) To avoid stalling, execobject.offset should match the current
1587 * address of that object within the active context.
1588 *
1589 * In order to satisfy all of the invariants that make userspace
1590 * relocations to be safe (see relocate_cmd_buffer()), we need to
1591 * further ensure that the addresses we use match those used by the
1592 * kernel for the most recent execbuf2.
1593 *
1594 * The kernel may still choose to do relocations anyway if something has
1595 * moved in the GTT. In this case, the relocation list still needs to be
1596 * valid. All relocations on the batch buffers are already valid and
1597 * kept up-to-date. For surface state relocations, by applying the
1598 * relocations in relocate_cmd_buffer, we ensured that the address in
1599 * the RENDER_SURFACE_STATE matches presumed_offset, so it should be
1600 * safe for the kernel to relocate them as needed.
1601 */
1602 execbuf->execbuf.flags |= I915_EXEC_NO_RELOC;
1603 } else {
1604 /* In the case where we fall back to doing kernel relocations, we need
1605 * to ensure that the relocation list is valid. All relocations on the
1606 * batch buffers are already valid and kept up-to-date. Since surface
1607 * states are shared between command buffers and we don't know what
1608 * order they will be submitted to the kernel, we don't know what
1609 * address is actually written in the surface state object at any given
1610 * time. The only option is to set a bogus presumed offset and let the
1611 * kernel relocate them.
1612 */
1613 for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
1614 cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
1615 }
1616
1617 return VK_SUCCESS;
1618 }
1619
1620 static VkResult
1621 setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_device *device)
1622 {
1623 VkResult result = anv_execbuf_add_bo(device, execbuf,
1624 device->trivial_batch_bo,
1625 NULL, 0);
1626 if (result != VK_SUCCESS)
1627 return result;
1628
1629 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1630 .buffers_ptr = (uintptr_t) execbuf->objects,
1631 .buffer_count = execbuf->bo_count,
1632 .batch_start_offset = 0,
1633 .batch_len = 8, /* GEN7_MI_BATCH_BUFFER_END and NOOP */
1634 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
1635 .rsvd1 = device->context_id,
1636 .rsvd2 = 0,
1637 };
1638
1639 return VK_SUCCESS;
1640 }
1641
1642 /* We lock around execbuf for three main reasons:
1643 *
1644 * 1) When a block pool is resized, we create a new gem handle with a
1645 * different size and, in the case of surface states, possibly a different
1646 * center offset but we re-use the same anv_bo struct when we do so. If
1647 * this happens in the middle of setting up an execbuf, we could end up
1648 * with our list of BOs out of sync with our list of gem handles.
1649 *
1650 * 2) The algorithm we use for building the list of unique buffers isn't
1651 * thread-safe. While the client is supposed to syncronize around
1652 * QueueSubmit, this would be extremely difficult to debug if it ever came
1653 * up in the wild due to a broken app. It's better to play it safe and
1654 * just lock around QueueSubmit.
1655 *
1656 * 3) The anv_cmd_buffer_execbuf function may perform relocations in
1657 * userspace. Due to the fact that the surface state buffer is shared
1658 * between batches, we can't afford to have that happen from multiple
1659 * threads at the same time. Even though the user is supposed to ensure
1660 * this doesn't happen, we play it safe as in (2) above.
1661 *
1662 * Since the only other things that ever take the device lock such as block
1663 * pool resize only rarely happen, this will almost never be contended so
1664 * taking a lock isn't really an expensive operation in this case.
1665 */
1666 VkResult
1667 anv_queue_execbuf_locked(struct anv_queue *queue,
1668 struct anv_queue_submit *submit)
1669 {
1670 struct anv_device *device = queue->device;
1671 struct anv_execbuf execbuf;
1672 anv_execbuf_init(&execbuf);
1673 execbuf.alloc = submit->alloc;
1674 execbuf.alloc_scope = submit->alloc_scope;
1675
1676 VkResult result;
1677
1678 for (uint32_t i = 0; i < submit->fence_bo_count; i++) {
1679 int signaled;
1680 struct anv_bo *bo = anv_unpack_ptr(submit->fence_bos[i], 1, &signaled);
1681
1682 result = anv_execbuf_add_bo(device, &execbuf, bo, NULL,
1683 signaled ? EXEC_OBJECT_WRITE : 0);
1684 if (result != VK_SUCCESS)
1685 goto error;
1686 }
1687
1688 if (submit->cmd_buffer) {
1689 result = setup_execbuf_for_cmd_buffer(&execbuf, submit->cmd_buffer);
1690 } else if (submit->simple_bo) {
1691 result = anv_execbuf_add_bo(device, &execbuf, submit->simple_bo, NULL, 0);
1692 if (result != VK_SUCCESS)
1693 goto error;
1694
1695 execbuf.execbuf = (struct drm_i915_gem_execbuffer2) {
1696 .buffers_ptr = (uintptr_t) execbuf.objects,
1697 .buffer_count = execbuf.bo_count,
1698 .batch_start_offset = 0,
1699 .batch_len = submit->simple_bo_size,
1700 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
1701 .rsvd1 = device->context_id,
1702 .rsvd2 = 0,
1703 };
1704 } else {
1705 result = setup_empty_execbuf(&execbuf, queue->device);
1706 }
1707
1708 if (result != VK_SUCCESS)
1709 goto error;
1710
1711 if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
1712 if (submit->cmd_buffer) {
1713 struct anv_batch_bo **bo = u_vector_tail(&submit->cmd_buffer->seen_bbos);
1714
1715 device->cmd_buffer_being_decoded = submit->cmd_buffer;
1716 gen_print_batch(&device->decoder_ctx, (*bo)->bo->map,
1717 (*bo)->bo->size, (*bo)->bo->offset, false);
1718 device->cmd_buffer_being_decoded = NULL;
1719 } else if (submit->simple_bo) {
1720 gen_print_batch(&device->decoder_ctx, submit->simple_bo->map,
1721 submit->simple_bo->size, submit->simple_bo->offset, false);
1722 } else {
1723 gen_print_batch(&device->decoder_ctx,
1724 device->trivial_batch_bo->map,
1725 device->trivial_batch_bo->size,
1726 device->trivial_batch_bo->offset, false);
1727 }
1728 }
1729
1730 if (submit->fence_count > 0) {
1731 assert(device->physical->has_syncobj);
1732 execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
1733 execbuf.execbuf.num_cliprects = submit->fence_count;
1734 execbuf.execbuf.cliprects_ptr = (uintptr_t)submit->fences;
1735 }
1736
1737 if (submit->in_fence != -1) {
1738 execbuf.execbuf.flags |= I915_EXEC_FENCE_IN;
1739 execbuf.execbuf.rsvd2 |= (uint32_t)submit->in_fence;
1740 }
1741
1742 if (submit->need_out_fence)
1743 execbuf.execbuf.flags |= I915_EXEC_FENCE_OUT;
1744
1745 int ret = queue->device->no_hw ? 0 :
1746 anv_gem_execbuffer(queue->device, &execbuf.execbuf);
1747 if (ret)
1748 result = anv_queue_set_lost(queue, "execbuf2 failed: %m");
1749
1750 struct drm_i915_gem_exec_object2 *objects = execbuf.objects;
1751 for (uint32_t k = 0; k < execbuf.bo_count; k++) {
1752 if (execbuf.bos[k]->flags & EXEC_OBJECT_PINNED)
1753 assert(execbuf.bos[k]->offset == objects[k].offset);
1754 execbuf.bos[k]->offset = objects[k].offset;
1755 }
1756
1757 if (result == VK_SUCCESS && submit->need_out_fence)
1758 submit->out_fence = execbuf.execbuf.rsvd2 >> 32;
1759
1760 error:
1761 pthread_cond_broadcast(&device->queue_submit);
1762
1763 anv_execbuf_finish(&execbuf);
1764
1765 return result;
1766 }