1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "genxml/gen8_pack.h"
33 #include "genxml/genX_bits.h"
34 #include "perf/gen_perf.h"
35
36 #include "util/debug.h"
37
38 /** \file anv_batch_chain.c
39 *
40 * This file contains functions related to anv_cmd_buffer as a data
41 * structure. This involves everything required to create and destroy
42 * the actual batch buffers as well as link them together and handle
43 * relocations and surface state. It specifically does *not* contain any
44 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
45 */
46
47 /*-----------------------------------------------------------------------*
48 * Functions related to anv_reloc_list
49 *-----------------------------------------------------------------------*/
50
51 VkResult
52 anv_reloc_list_init(struct anv_reloc_list *list,
53 const VkAllocationCallbacks *alloc)
54 {
55 memset(list, 0, sizeof(*list));
56 return VK_SUCCESS;
57 }
58
59 static VkResult
60 anv_reloc_list_init_clone(struct anv_reloc_list *list,
61 const VkAllocationCallbacks *alloc,
62 const struct anv_reloc_list *other_list)
63 {
64 list->num_relocs = other_list->num_relocs;
65 list->array_length = other_list->array_length;
66
67 if (list->num_relocs > 0) {
68 list->relocs =
69 vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
70 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
71 if (list->relocs == NULL)
72 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
73
74 list->reloc_bos =
75 vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
76 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
77 if (list->reloc_bos == NULL) {
78 vk_free(alloc, list->relocs);
79 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
80 }
81
82 memcpy(list->relocs, other_list->relocs,
83 list->array_length * sizeof(*list->relocs));
84 memcpy(list->reloc_bos, other_list->reloc_bos,
85 list->array_length * sizeof(*list->reloc_bos));
86 } else {
87 list->relocs = NULL;
88 list->reloc_bos = NULL;
89 }
90
91 list->dep_words = other_list->dep_words;
92
93 if (list->dep_words > 0) {
94 list->deps =
95 vk_alloc(alloc, list->dep_words * sizeof(BITSET_WORD), 8,
96 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
97 memcpy(list->deps, other_list->deps,
98 list->dep_words * sizeof(BITSET_WORD));
99 } else {
100 list->deps = NULL;
101 }
102
103 return VK_SUCCESS;
104 }
105
106 void
107 anv_reloc_list_finish(struct anv_reloc_list *list,
108 const VkAllocationCallbacks *alloc)
109 {
110 vk_free(alloc, list->relocs);
111 vk_free(alloc, list->reloc_bos);
112 vk_free(alloc, list->deps);
113 }
114
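/* Ensure the list has room for at least num_additional_relocs more entries,
 * growing the relocs and reloc_bos arrays geometrically when necessary.
 */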
115 static VkResult
116 anv_reloc_list_grow(struct anv_reloc_list *list,
117 const VkAllocationCallbacks *alloc,
118 size_t num_additional_relocs)
119 {
120 if (list->num_relocs + num_additional_relocs <= list->array_length)
121 return VK_SUCCESS;
122
123 size_t new_length = MAX2(16, list->array_length * 2);
124 while (new_length < list->num_relocs + num_additional_relocs)
125 new_length *= 2;
126
127 struct drm_i915_gem_relocation_entry *new_relocs =
128 vk_realloc(alloc, list->relocs,
129 new_length * sizeof(*list->relocs), 8,
130 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
131 if (new_relocs == NULL)
132 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
133 list->relocs = new_relocs;
134
135 struct anv_bo **new_reloc_bos =
136 vk_realloc(alloc, list->reloc_bos,
137 new_length * sizeof(*list->reloc_bos), 8,
138 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
139 if (new_reloc_bos == NULL)
140 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
141 list->reloc_bos = new_reloc_bos;
142
143 list->array_length = new_length;
144
145 return VK_SUCCESS;
146 }
147
148 static VkResult
149 anv_reloc_list_grow_deps(struct anv_reloc_list *list,
150 const VkAllocationCallbacks *alloc,
151 uint32_t min_num_words)
152 {
153 if (min_num_words <= list->dep_words)
154 return VK_SUCCESS;
155
156 uint32_t new_length = MAX2(32, list->dep_words * 2);
157 while (new_length < min_num_words)
158 new_length *= 2;
159
160 BITSET_WORD *new_deps =
161 vk_realloc(alloc, list->deps, new_length * sizeof(BITSET_WORD), 8,
162 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
163 if (new_deps == NULL)
164 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
165 list->deps = new_deps;
166
167 /* Zero out the new data */
168 memset(list->deps + list->dep_words, 0,
169 (new_length - list->dep_words) * sizeof(BITSET_WORD));
170 list->dep_words = new_length;
171
172 return VK_SUCCESS;
173 }
174
175 #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
176
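/* Record a relocation at `offset` bytes into the containing BO that points at
 * target_bo + delta, optionally returning the presumed address.  For pinned
 * (softpin) targets no relocation entry is needed; the target is only
 * recorded in the dependency bitset.
 */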
177 VkResult
178 anv_reloc_list_add(struct anv_reloc_list *list,
179 const VkAllocationCallbacks *alloc,
180 uint32_t offset, struct anv_bo *target_bo, uint32_t delta,
181 uint64_t *address_u64_out)
182 {
183 struct drm_i915_gem_relocation_entry *entry;
184 int index;
185
186 struct anv_bo *unwrapped_target_bo = anv_bo_unwrap(target_bo);
187 uint64_t target_bo_offset = READ_ONCE(unwrapped_target_bo->offset);
188 if (address_u64_out)
189 *address_u64_out = target_bo_offset + delta;
190
191 if (unwrapped_target_bo->flags & EXEC_OBJECT_PINNED) {
192 assert(!target_bo->is_wrapper);
193 uint32_t idx = unwrapped_target_bo->gem_handle;
194 anv_reloc_list_grow_deps(list, alloc, (idx / BITSET_WORDBITS) + 1);
195 BITSET_SET(list->deps, unwrapped_target_bo->gem_handle);
196 return VK_SUCCESS;
197 }
198
199 VkResult result = anv_reloc_list_grow(list, alloc, 1);
200 if (result != VK_SUCCESS)
201 return result;
202
203 /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
204 index = list->num_relocs++;
205 list->reloc_bos[index] = target_bo;
206 entry = &list->relocs[index];
207 entry->target_handle = -1; /* See also anv_cmd_buffer_process_relocs() */
208 entry->delta = delta;
209 entry->offset = offset;
210 entry->presumed_offset = target_bo_offset;
211 entry->read_domains = 0;
212 entry->write_domain = 0;
213 VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
214
215 return VK_SUCCESS;
216 }
217
218 static void
219 anv_reloc_list_clear(struct anv_reloc_list *list)
220 {
221 list->num_relocs = 0;
222 if (list->dep_words > 0)
223 memset(list->deps, 0, list->dep_words * sizeof(BITSET_WORD));
224 }
225
226 static VkResult
227 anv_reloc_list_append(struct anv_reloc_list *list,
228 const VkAllocationCallbacks *alloc,
229 struct anv_reloc_list *other, uint32_t offset)
230 {
231 VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
232 if (result != VK_SUCCESS)
233 return result;
234
235 if (other->num_relocs > 0) {
236 memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
237 other->num_relocs * sizeof(other->relocs[0]));
238 memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
239 other->num_relocs * sizeof(other->reloc_bos[0]));
240
241 for (uint32_t i = 0; i < other->num_relocs; i++)
242 list->relocs[i + list->num_relocs].offset += offset;
243
244 list->num_relocs += other->num_relocs;
245 }
246
247 anv_reloc_list_grow_deps(list, alloc, other->dep_words);
248 for (uint32_t w = 0; w < other->dep_words; w++)
249 list->deps[w] |= other->deps[w];
250
251 return VK_SUCCESS;
252 }
253
254 /*-----------------------------------------------------------------------*
255 * Functions related to anv_batch
256 *-----------------------------------------------------------------------*/
257
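/* Reserve num_dwords dwords of space in the batch, invoking extend_cb if the
 * batch would overflow, and return a pointer to the reserved space (NULL on
 * error).  Callers normally reach this through the anv_batch_emit() and
 * anv_batch_emitn() macros.
 */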
258 void *
259 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
260 {
261 if (batch->next + num_dwords * 4 > batch->end) {
262 VkResult result = batch->extend_cb(batch, batch->user_data);
263 if (result != VK_SUCCESS) {
264 anv_batch_set_error(batch, result);
265 return NULL;
266 }
267 }
268
269 void *p = batch->next;
270
271 batch->next += num_dwords * 4;
272 assert(batch->next <= batch->end);
273
274 return p;
275 }
276
277 uint64_t
278 anv_batch_emit_reloc(struct anv_batch *batch,
279 void *location, struct anv_bo *bo, uint32_t delta)
280 {
281 uint64_t address_u64 = 0;
282 VkResult result = anv_reloc_list_add(batch->relocs, batch->alloc,
283 location - batch->start, bo, delta,
284 &address_u64);
285 if (result != VK_SUCCESS) {
286 anv_batch_set_error(batch, result);
287 return 0;
288 }
289
290 return address_u64;
291 }
292
293 struct anv_address
294 anv_batch_address(struct anv_batch *batch, void *batch_location)
295 {
296 assert(batch->start < batch_location);
297
298 /* Allow a jump at the current location of the batch. */
299 assert(batch->next >= batch_location);
300
301 return anv_address_add(batch->start_addr, batch_location - batch->start);
302 }
303
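/* Copy everything emitted into `other` so far into `batch`, appending the
 * relocation list of `other` with its offsets adjusted to the new location.
 */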
304 void
305 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
306 {
307 uint32_t size, offset;
308
309 size = other->next - other->start;
310 assert(size % 4 == 0);
311
312 if (batch->next + size > batch->end) {
313 VkResult result = batch->extend_cb(batch, batch->user_data);
314 if (result != VK_SUCCESS) {
315 anv_batch_set_error(batch, result);
316 return;
317 }
318 }
319
320 assert(batch->next + size <= batch->end);
321
322 VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
323 memcpy(batch->next, other->start, size);
324
325 offset = batch->next - batch->start;
326 VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
327 other->relocs, offset);
328 if (result != VK_SUCCESS) {
329 anv_batch_set_error(batch, result);
330 return;
331 }
332
333 batch->next += size;
334 }
335
336 /*-----------------------------------------------------------------------*
337 * Functions related to anv_batch_bo
338 *-----------------------------------------------------------------------*/
339
340 static VkResult
341 anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
342 struct anv_batch_bo **bbo_out)
343 {
344 VkResult result;
345
346 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
347 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
348 if (bbo == NULL)
349 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
350
351 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
352 ANV_CMD_BUFFER_BATCH_SIZE, &bbo->bo);
353 if (result != VK_SUCCESS)
354 goto fail_alloc;
355
356 result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
357 if (result != VK_SUCCESS)
358 goto fail_bo_alloc;
359
360 *bbo_out = bbo;
361
362 return VK_SUCCESS;
363
364 fail_bo_alloc:
365 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
366 fail_alloc:
367 vk_free(&cmd_buffer->pool->alloc, bbo);
368
369 return result;
370 }
371
372 static VkResult
373 anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
374 const struct anv_batch_bo *other_bbo,
375 struct anv_batch_bo **bbo_out)
376 {
377 VkResult result;
378
379 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
380 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
381 if (bbo == NULL)
382 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
383
384 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
385 other_bbo->bo->size, &bbo->bo);
386 if (result != VK_SUCCESS)
387 goto fail_alloc;
388
389 result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
390 &other_bbo->relocs);
391 if (result != VK_SUCCESS)
392 goto fail_bo_alloc;
393
394 bbo->length = other_bbo->length;
395 memcpy(bbo->bo->map, other_bbo->bo->map, other_bbo->length);
396 *bbo_out = bbo;
397
398 return VK_SUCCESS;
399
400 fail_bo_alloc:
401 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
402 fail_alloc:
403 vk_free(&cmd_buffer->pool->alloc, bbo);
404
405 return result;
406 }
407
408 static void
409 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
410 size_t batch_padding)
411 {
412 batch->start_addr = (struct anv_address) { .bo = bbo->bo, };
413 batch->next = batch->start = bbo->bo->map;
414 batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
415 batch->relocs = &bbo->relocs;
416 anv_reloc_list_clear(&bbo->relocs);
417 }
418
419 static void
420 anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
421 size_t batch_padding)
422 {
423 batch->start_addr = (struct anv_address) { .bo = bbo->bo, };
424 batch->start = bbo->bo->map;
425 batch->next = bbo->bo->map + bbo->length;
426 batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
427 batch->relocs = &bbo->relocs;
428 }
429
430 static void
431 anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
432 {
433 assert(batch->start == bbo->bo->map);
434 bbo->length = batch->next - batch->start;
435 VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
436 }
437
438 static VkResult
439 anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
 440                   struct anv_batch *batch, size_t additional,
441 size_t batch_padding)
442 {
443 assert(batch->start == bbo->bo->map);
444 bbo->length = batch->next - batch->start;
445
446 size_t new_size = bbo->bo->size;
 447    while (new_size <= bbo->length + additional + batch_padding)
448 new_size *= 2;
449
450 if (new_size == bbo->bo->size)
451 return VK_SUCCESS;
452
453 struct anv_bo *new_bo;
454 VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
455 new_size, &new_bo);
456 if (result != VK_SUCCESS)
457 return result;
458
459 memcpy(new_bo->map, bbo->bo->map, bbo->length);
460
461 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
462
463 bbo->bo = new_bo;
464 anv_batch_bo_continue(bbo, batch, batch_padding);
465
466 return VK_SUCCESS;
467 }
468
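/* Patch the MI_BATCH_BUFFER_START at the end of prev_bbo to jump to next_bbo
 * at next_bbo_offset.  With softpin the address is rewritten in place;
 * otherwise the existing relocation entry is retargeted and given a bogus
 * presumed offset so the kernel is forced to process it.
 */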
469 static void
470 anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
471 struct anv_batch_bo *prev_bbo,
472 struct anv_batch_bo *next_bbo,
473 uint32_t next_bbo_offset)
474 {
475 const uint32_t bb_start_offset =
476 prev_bbo->length - GEN8_MI_BATCH_BUFFER_START_length * 4;
477 ASSERTED const uint32_t *bb_start = prev_bbo->bo->map + bb_start_offset;
478
479 /* Make sure we're looking at a MI_BATCH_BUFFER_START */
480 assert(((*bb_start >> 29) & 0x07) == 0);
481 assert(((*bb_start >> 23) & 0x3f) == 49);
482
483 if (cmd_buffer->device->physical->use_softpin) {
484 assert(prev_bbo->bo->flags & EXEC_OBJECT_PINNED);
485 assert(next_bbo->bo->flags & EXEC_OBJECT_PINNED);
486
487 write_reloc(cmd_buffer->device,
488 prev_bbo->bo->map + bb_start_offset + 4,
489 next_bbo->bo->offset + next_bbo_offset, true);
490 } else {
491 uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
492 assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);
493
494 prev_bbo->relocs.reloc_bos[reloc_idx] = next_bbo->bo;
495 prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;
496
497 /* Use a bogus presumed offset to force a relocation */
498 prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
499 }
500 }
501
502 static void
503 anv_batch_bo_destroy(struct anv_batch_bo *bbo,
504 struct anv_cmd_buffer *cmd_buffer)
505 {
506 anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
507 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
508 vk_free(&cmd_buffer->pool->alloc, bbo);
509 }
510
511 static VkResult
512 anv_batch_bo_list_clone(const struct list_head *list,
513 struct anv_cmd_buffer *cmd_buffer,
514 struct list_head *new_list)
515 {
516 VkResult result = VK_SUCCESS;
517
518 list_inithead(new_list);
519
520 struct anv_batch_bo *prev_bbo = NULL;
521 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
522 struct anv_batch_bo *new_bbo = NULL;
523 result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
524 if (result != VK_SUCCESS)
525 break;
526 list_addtail(&new_bbo->link, new_list);
527
528 if (prev_bbo)
529 anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);
530
531 prev_bbo = new_bbo;
532 }
533
534 if (result != VK_SUCCESS) {
535 list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link) {
536 list_del(&bbo->link);
537 anv_batch_bo_destroy(bbo, cmd_buffer);
538 }
539 }
540
541 return result;
542 }
543
544 /*-----------------------------------------------------------------------*
545 * Functions related to anv_batch_bo
546 *-----------------------------------------------------------------------*/
547
548 static struct anv_batch_bo *
549 anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
550 {
551 return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
552 }
553
554 struct anv_address
555 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
556 {
557 struct anv_state_pool *pool = anv_binding_table_pool(cmd_buffer->device);
558 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
559 return (struct anv_address) {
560 .bo = pool->block_pool.bo,
561 .offset = bt_block->offset - pool->start_offset,
562 };
563 }
564
565 static void
566 emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
567 struct anv_bo *bo, uint32_t offset)
568 {
 569    /* In gen8+ the address field grew to two dwords to accommodate 48 bit
570 * offsets. The high 16 bits are in the last dword, so we can use the gen8
571 * version in either case, as long as we set the instruction length in the
572 * header accordingly. This means that we always emit three dwords here
573 * and all the padding and adjustment we do in this file works for all
574 * gens.
575 */
576
577 #define GEN7_MI_BATCH_BUFFER_START_length 2
578 #define GEN7_MI_BATCH_BUFFER_START_length_bias 2
579
580 const uint32_t gen7_length =
581 GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
582 const uint32_t gen8_length =
583 GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;
584
585 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
586 bbs.DWordLength = cmd_buffer->device->info.gen < 8 ?
587 gen7_length : gen8_length;
588 bbs.SecondLevelBatchBuffer = Firstlevelbatch;
589 bbs.AddressSpaceIndicator = ASI_PPGTT;
590 bbs.BatchBufferStartAddress = (struct anv_address) { bo, offset };
591 }
592 }
593
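/* Finish the current batch BO by emitting an MI_BATCH_BUFFER_START into the
 * padding we reserved for it, jumping to the start of bbo.
 */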
594 static void
595 cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
596 struct anv_batch_bo *bbo)
597 {
598 struct anv_batch *batch = &cmd_buffer->batch;
599 struct anv_batch_bo *current_bbo =
600 anv_cmd_buffer_current_batch_bo(cmd_buffer);
601
602 /* We set the end of the batch a little short so we would be sure we
603 * have room for the chaining command. Since we're about to emit the
604 * chaining command, let's set it back where it should go.
605 */
606 batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
607 assert(batch->end == current_bbo->bo->map + current_bbo->bo->size);
608
609 emit_batch_buffer_start(cmd_buffer, bbo->bo, 0);
610
611 anv_batch_bo_finish(current_bbo, batch);
612 }
613
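/* Batch extend_cb used when batch chaining is supported: allocate a fresh
 * batch BO, chain the current one to it, and continue emitting there.
 */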
614 static VkResult
615 anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
616 {
617 struct anv_cmd_buffer *cmd_buffer = _data;
618 struct anv_batch_bo *new_bbo;
619
620 VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
621 if (result != VK_SUCCESS)
622 return result;
623
624 struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
625 if (seen_bbo == NULL) {
626 anv_batch_bo_destroy(new_bbo, cmd_buffer);
627 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
628 }
629 *seen_bbo = new_bbo;
630
631 cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
632
633 list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
634
635 anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);
636
637 return VK_SUCCESS;
638 }
639
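/* Batch extend_cb used when batch chaining isn't available: replace the
 * current batch BO with a larger one and copy the contents over.
 */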
640 static VkResult
641 anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
642 {
643 struct anv_cmd_buffer *cmd_buffer = _data;
644 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
645
646 anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
647 GEN8_MI_BATCH_BUFFER_START_length * 4);
648
649 return VK_SUCCESS;
650 }
651
652 /** Allocate a binding table
653 *
654 * This function allocates a binding table. This is a bit more complicated
655 * than one would think due to a combination of Vulkan driver design and some
656 * unfortunate hardware restrictions.
657 *
658 * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
659 * the binding table pointer which means that all binding tables need to live
660 * in the bottom 64k of surface state base address. The way the GL driver has
661 * classically dealt with this restriction is to emit all surface states
662 * on-the-fly into the batch and have a batch buffer smaller than 64k. This
663 * isn't really an option in Vulkan for a couple of reasons:
664 *
665 * 1) In Vulkan, we have growing (or chaining) batches so surface states have
666 * to live in their own buffer and we have to be able to re-emit
667 * STATE_BASE_ADDRESS as needed which requires a full pipeline stall. In
668 * order to avoid emitting STATE_BASE_ADDRESS any more often than needed
669 * (it's not that hard to hit 64k of just binding tables), we allocate
670 * surface state objects up-front when VkImageView is created. In order
671 * for this to work, surface state objects need to be allocated from a
672 * global buffer.
673 *
674 * 2) We tried to design the surface state system in such a way that it's
675 * already ready for bindless texturing. The way bindless texturing works
676 * on our hardware is that you have a big pool of surface state objects
677 * (with its own state base address) and the bindless handles are simply
678 * offsets into that pool. With the architecture we chose, we already
679 * have that pool and it's exactly the same pool that we use for regular
680 * surface states so we should already be ready for bindless.
681 *
682 * 3) For render targets, we need to be able to fill out the surface states
683 * later in vkBeginRenderPass so that we can assign clear colors
684 * correctly. One way to do this would be to just create the surface
685 * state data and then repeatedly copy it into the surface state BO every
 686  *      time we have to re-emit STATE_BASE_ADDRESS.  While this works, it's
 687  *      rather annoying, and simply being able to allocate them up-front and
 688  *      re-use them for the entire render pass is much nicer.
689 *
690 * While none of these are technically blockers for emitting state on the fly
 691  * like we do in GL, the ability to have a single surface state pool
 692  * simplifies things greatly.  Unfortunately, it comes at a cost...
693 *
694 * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
695 * place the binding tables just anywhere in surface state base address.
696 * Because 64k isn't a whole lot of space, we can't simply restrict the
 697  * surface state buffer to 64k; we have to be more clever.  The solution we've
698 * chosen is to have a block pool with a maximum size of 2G that starts at
699 * zero and grows in both directions. All surface states are allocated from
700 * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
701 * binding tables from the bottom of the pool (negative offsets). Every time
702 * we allocate a new binding table block, we set surface state base address to
703 * point to the bottom of the binding table block. This way all of the
704 * binding tables in the block are in the bottom 64k of surface state base
705 * address. When we fill out the binding table, we add the distance between
706 * the bottom of our binding table block and zero of the block pool to the
 707  * surface state offsets so that they are correct relative to our new surface
708 * state base address at the bottom of the binding table block.
709 *
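 * As a rough worked example (illustrative numbers only): if the current
 * binding table block was allocated at block pool offset -4096, surface
 * state base address points at -4096 and *state_offset is 4096, so a
 * surface state allocated at pool offset 8192 is written into its binding
 * table entry as 8192 + 4096 = 12288.
 *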
 710  * \see adjust_relocations_from_state_pool()
 711  * \see adjust_relocations_to_state_pool()
712 *
713 * \param[in] entries The number of surface state entries the binding
714 * table should be able to hold.
715 *
 716  * \param[out] state_offset   The offset from surface state base address
717 * where the surface states live. This must be
718 * added to the surface state offset when it is
719 * written into the binding table entry.
720 *
721 * \return An anv_state representing the binding table
722 */
723 struct anv_state
724 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
725 uint32_t entries, uint32_t *state_offset)
726 {
727 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
728
729 uint32_t bt_size = align_u32(entries * 4, 32);
730
731 struct anv_state state = cmd_buffer->bt_next;
732 if (bt_size > state.alloc_size)
733 return (struct anv_state) { 0 };
734
735 state.alloc_size = bt_size;
736 cmd_buffer->bt_next.offset += bt_size;
737 cmd_buffer->bt_next.map += bt_size;
738 cmd_buffer->bt_next.alloc_size -= bt_size;
739
740 assert(bt_block->offset < 0);
741 *state_offset = -bt_block->offset;
742
743 return state;
744 }
745
746 struct anv_state
747 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
748 {
749 struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
750 return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
751 isl_dev->ss.size, isl_dev->ss.align);
752 }
753
754 struct anv_state
755 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
756 uint32_t size, uint32_t alignment)
757 {
758 return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
759 size, alignment);
760 }
761
762 VkResult
763 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
764 {
765 struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
766 if (bt_block == NULL) {
767 anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
768 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
769 }
770
771 *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
772
773 /* The bt_next state is a rolling state (we update it as we suballocate
774 * from it) which is relative to the start of the binding table block.
775 */
776 cmd_buffer->bt_next = *bt_block;
777 cmd_buffer->bt_next.offset = 0;
778
779 return VK_SUCCESS;
780 }
781
782 VkResult
783 anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
784 {
785 struct anv_batch_bo *batch_bo;
786 VkResult result;
787
788 list_inithead(&cmd_buffer->batch_bos);
789
790 result = anv_batch_bo_create(cmd_buffer, &batch_bo);
791 if (result != VK_SUCCESS)
792 return result;
793
794 list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
795
796 cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
797 cmd_buffer->batch.user_data = cmd_buffer;
798
799 if (cmd_buffer->device->can_chain_batches) {
800 cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
801 } else {
802 cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
803 }
804
805 anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
806 GEN8_MI_BATCH_BUFFER_START_length * 4);
807
808 int success = u_vector_init(&cmd_buffer->seen_bbos,
809 sizeof(struct anv_bo *),
810 8 * sizeof(struct anv_bo *));
811 if (!success)
812 goto fail_batch_bo;
813
814 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
815
816 /* u_vector requires power-of-two size elements */
817 unsigned pow2_state_size = util_next_power_of_two(sizeof(struct anv_state));
818 success = u_vector_init(&cmd_buffer->bt_block_states,
819 pow2_state_size, 8 * pow2_state_size);
820 if (!success)
821 goto fail_seen_bbos;
822
823 result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
824 &cmd_buffer->pool->alloc);
825 if (result != VK_SUCCESS)
826 goto fail_bt_blocks;
827 cmd_buffer->last_ss_pool_center = 0;
828
829 result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
830 if (result != VK_SUCCESS)
831 goto fail_bt_blocks;
832
833 return VK_SUCCESS;
834
835 fail_bt_blocks:
836 u_vector_finish(&cmd_buffer->bt_block_states);
837 fail_seen_bbos:
838 u_vector_finish(&cmd_buffer->seen_bbos);
839 fail_batch_bo:
840 anv_batch_bo_destroy(batch_bo, cmd_buffer);
841
842 return result;
843 }
844
845 void
846 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
847 {
848 struct anv_state *bt_block;
849 u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
850 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
851 u_vector_finish(&cmd_buffer->bt_block_states);
852
853 anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
854
855 u_vector_finish(&cmd_buffer->seen_bbos);
856
857 /* Destroy all of the batch buffers */
858 list_for_each_entry_safe(struct anv_batch_bo, bbo,
859 &cmd_buffer->batch_bos, link) {
860 list_del(&bbo->link);
861 anv_batch_bo_destroy(bbo, cmd_buffer);
862 }
863 }
864
865 void
866 anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
867 {
868 /* Delete all but the first batch bo */
869 assert(!list_is_empty(&cmd_buffer->batch_bos));
870 while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
871 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
872 list_del(&bbo->link);
873 anv_batch_bo_destroy(bbo, cmd_buffer);
874 }
875 assert(!list_is_empty(&cmd_buffer->batch_bos));
876
877 anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
878 &cmd_buffer->batch,
879 GEN8_MI_BATCH_BUFFER_START_length * 4);
880
881 while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
882 struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
883 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
884 }
885 assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
886 cmd_buffer->bt_next = *(struct anv_state *)u_vector_head(&cmd_buffer->bt_block_states);
887 cmd_buffer->bt_next.offset = 0;
888
889 anv_reloc_list_clear(&cmd_buffer->surface_relocs);
890 cmd_buffer->last_ss_pool_center = 0;
891
892 /* Reset the list of seen buffers */
893 cmd_buffer->seen_bbos.head = 0;
894 cmd_buffer->seen_bbos.tail = 0;
895
896 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) =
897 anv_cmd_buffer_current_batch_bo(cmd_buffer);
898 }
899
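/* Terminate the command buffer's batch.  Primaries get MI_BATCH_BUFFER_END
 * (padded to an even number of dwords); for secondaries we instead pick the
 * execution mode that vkCmdExecuteCommands will use to incorporate them.
 */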
900 void
901 anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
902 {
903 struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
904
905 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
906 /* When we start a batch buffer, we subtract a certain amount of
907 * padding from the end to ensure that we always have room to emit a
908 * BATCH_BUFFER_START to chain to the next BO. We need to remove
909 * that padding before we end the batch; otherwise, we may end up
910 * with our BATCH_BUFFER_END in another BO.
911 */
912 cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
913 assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
914
915 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END, bbe);
916
917 /* Round batch up to an even number of dwords. */
918 if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
919 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);
920
921 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
922 } else {
923 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
924 /* If this is a secondary command buffer, we need to determine the
925 * mode in which it will be executed with vkExecuteCommands. We
926 * determine this statically here so that this stays in sync with the
927 * actual ExecuteCommands implementation.
928 */
929 const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
930 if (!cmd_buffer->device->can_chain_batches) {
931 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
932 } else if (cmd_buffer->device->physical->use_softpin) {
933 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN;
934 /* If the secondary command buffer begins & ends in the same BO and
 935       * its length is less than the length of CS prefetch, add some NOOP
936 * instructions so the last MI_BATCH_BUFFER_START is outside the CS
937 * prefetch.
938 */
939 if (cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) {
940 int32_t batch_len =
941 cmd_buffer->batch.next - cmd_buffer->batch.start;
942
943 for (int32_t i = 0; i < (512 - batch_len); i += 4)
944 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);
945 }
946
947 void *jump_addr =
948 anv_batch_emitn(&cmd_buffer->batch,
949 GEN8_MI_BATCH_BUFFER_START_length,
950 GEN8_MI_BATCH_BUFFER_START,
951 .AddressSpaceIndicator = ASI_PPGTT,
952 .SecondLevelBatchBuffer = Firstlevelbatch) +
953 (GEN8_MI_BATCH_BUFFER_START_BatchBufferStartAddress_start / 8);
954 cmd_buffer->return_addr = anv_batch_address(&cmd_buffer->batch, jump_addr);
955 } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
956 (length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
957 /* If the secondary has exactly one batch buffer in its list *and*
958 * that batch buffer is less than half of the maximum size, we're
 959       * probably better off simply copying it into our batch.
960 */
961 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
962 } else if (!(cmd_buffer->usage_flags &
963 VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
964 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
965
966 /* In order to chain, we need this command buffer to contain an
967 * MI_BATCH_BUFFER_START which will jump back to the calling batch.
 968       * It doesn't matter where it points now so long as it has a valid
969 * relocation. We'll adjust it later as part of the chaining
970 * process.
971 *
972 * We set the end of the batch a little short so we would be sure we
973 * have room for the chaining command. Since we're about to emit the
974 * chaining command, let's set it back where it should go.
975 */
976 cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
977 assert(cmd_buffer->batch.start == batch_bo->bo->map);
978 assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
979
980 emit_batch_buffer_start(cmd_buffer, batch_bo->bo, 0);
981 assert(cmd_buffer->batch.start == batch_bo->bo->map);
982 } else {
983 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
984 }
985 }
986
987 anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
988 }
989
990 static VkResult
991 anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
992 struct list_head *list)
993 {
994 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
995 struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
996 if (bbo_ptr == NULL)
997 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
998
999 *bbo_ptr = bbo;
1000 }
1001
1002 return VK_SUCCESS;
1003 }
1004
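/* Incorporate a recorded secondary command buffer into the primary according
 * to the execution mode chosen in anv_cmd_buffer_end_batch_buffer(): emit a
 * copy, grow-and-emit, chain, clone-and-chain, or call-and-return.
 */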
1005 void
1006 anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
1007 struct anv_cmd_buffer *secondary)
1008 {
1009 switch (secondary->exec_mode) {
1010 case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
1011 anv_batch_emit_batch(&primary->batch, &secondary->batch);
1012 break;
1013 case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
1014 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
1015 unsigned length = secondary->batch.end - secondary->batch.start;
1016 anv_batch_bo_grow(primary, bbo, &primary->batch, length,
1017 GEN8_MI_BATCH_BUFFER_START_length * 4);
1018 anv_batch_emit_batch(&primary->batch, &secondary->batch);
1019 break;
1020 }
1021 case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
1022 struct anv_batch_bo *first_bbo =
1023 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1024 struct anv_batch_bo *last_bbo =
1025 list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1026
1027 emit_batch_buffer_start(primary, first_bbo->bo, 0);
1028
1029 struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
1030 assert(primary->batch.start == this_bbo->bo->map);
1031 uint32_t offset = primary->batch.next - primary->batch.start;
1032
1033 /* Make the tail of the secondary point back to right after the
1034 * MI_BATCH_BUFFER_START in the primary batch.
1035 */
1036 anv_batch_bo_link(primary, last_bbo, this_bbo, offset);
1037
1038 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
1039 break;
1040 }
1041 case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
1042 struct list_head copy_list;
1043 VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
1044 secondary,
1045 &copy_list);
1046 if (result != VK_SUCCESS)
1047 return; /* FIXME */
1048
1049 anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
1050
1051 struct anv_batch_bo *first_bbo =
1052 list_first_entry(&copy_list, struct anv_batch_bo, link);
1053 struct anv_batch_bo *last_bbo =
1054 list_last_entry(&copy_list, struct anv_batch_bo, link);
1055
1056 cmd_buffer_chain_to_batch_bo(primary, first_bbo);
1057
1058 list_splicetail(&copy_list, &primary->batch_bos);
1059
1060 anv_batch_bo_continue(last_bbo, &primary->batch,
1061 GEN8_MI_BATCH_BUFFER_START_length * 4);
1062 break;
1063 }
1064 case ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN: {
1065 struct anv_batch_bo *first_bbo =
1066 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1067
1068 uint64_t *write_return_addr =
1069 anv_batch_emitn(&primary->batch,
1070 GEN8_MI_STORE_DATA_IMM_length + 1 /* QWord write */,
1071 GEN8_MI_STORE_DATA_IMM,
1072 .Address = secondary->return_addr)
1073 + (GEN8_MI_STORE_DATA_IMM_ImmediateData_start / 8);
1074
1075 emit_batch_buffer_start(primary, first_bbo->bo, 0);
1076
1077 *write_return_addr =
1078 anv_address_physical(anv_batch_address(&primary->batch,
1079 primary->batch.next));
1080
1081 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
1082 break;
1083 }
1084 default:
1085 assert(!"Invalid execution mode");
1086 }
1087
1088 anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
1089 &secondary->surface_relocs, 0);
1090 }
1091
1092 struct anv_execbuf {
1093 struct drm_i915_gem_execbuffer2 execbuf;
1094
1095 struct drm_i915_gem_exec_object2 * objects;
1096 uint32_t bo_count;
1097 struct anv_bo ** bos;
1098
1099 /* Allocated length of the 'objects' and 'bos' arrays */
1100 uint32_t array_length;
1101
1102 bool has_relocs;
1103
1104 const VkAllocationCallbacks * alloc;
1105 VkSystemAllocationScope alloc_scope;
1106
1107 int perf_query_pass;
1108 };
1109
1110 static void
1111 anv_execbuf_init(struct anv_execbuf *exec)
1112 {
1113 memset(exec, 0, sizeof(*exec));
1114 }
1115
1116 static void
1117 anv_execbuf_finish(struct anv_execbuf *exec)
1118 {
1119 vk_free(exec->alloc, exec->objects);
1120 vk_free(exec->alloc, exec->bos);
1121 }
1122
1123 static VkResult
1124 anv_execbuf_add_bo_bitset(struct anv_device *device,
1125 struct anv_execbuf *exec,
1126 uint32_t dep_words,
1127 BITSET_WORD *deps,
1128 uint32_t extra_flags);
1129
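/* Add a BO to the execbuf validation list if it isn't already there and, if a
 * relocation list is given, recursively add every BO it references, whether
 * through relocation entries or through the softpin dependency bitset.
 */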
1130 static VkResult
1131 anv_execbuf_add_bo(struct anv_device *device,
1132 struct anv_execbuf *exec,
1133 struct anv_bo *bo,
1134 struct anv_reloc_list *relocs,
1135 uint32_t extra_flags)
1136 {
1137 struct drm_i915_gem_exec_object2 *obj = NULL;
1138
1139 bo = anv_bo_unwrap(bo);
1140
1141 if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
1142 obj = &exec->objects[bo->index];
1143
1144 if (obj == NULL) {
1145 /* We've never seen this one before. Add it to the list and assign
1146 * an id that we can use later.
1147 */
1148 if (exec->bo_count >= exec->array_length) {
1149 uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
1150
1151 struct drm_i915_gem_exec_object2 *new_objects =
1152 vk_alloc(exec->alloc, new_len * sizeof(*new_objects), 8, exec->alloc_scope);
1153 if (new_objects == NULL)
1154 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1155
1156 struct anv_bo **new_bos =
1157 vk_alloc(exec->alloc, new_len * sizeof(*new_bos), 8, exec->alloc_scope);
1158 if (new_bos == NULL) {
1159 vk_free(exec->alloc, new_objects);
1160 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1161 }
1162
1163 if (exec->objects) {
1164 memcpy(new_objects, exec->objects,
1165 exec->bo_count * sizeof(*new_objects));
1166 memcpy(new_bos, exec->bos,
1167 exec->bo_count * sizeof(*new_bos));
1168 }
1169
1170 vk_free(exec->alloc, exec->objects);
1171 vk_free(exec->alloc, exec->bos);
1172
1173 exec->objects = new_objects;
1174 exec->bos = new_bos;
1175 exec->array_length = new_len;
1176 }
1177
1178 assert(exec->bo_count < exec->array_length);
1179
1180 bo->index = exec->bo_count++;
1181 obj = &exec->objects[bo->index];
1182 exec->bos[bo->index] = bo;
1183
1184 obj->handle = bo->gem_handle;
1185 obj->relocation_count = 0;
1186 obj->relocs_ptr = 0;
1187 obj->alignment = 0;
1188 obj->offset = bo->offset;
1189 obj->flags = bo->flags | extra_flags;
1190 obj->rsvd1 = 0;
1191 obj->rsvd2 = 0;
1192 }
1193
1194 if (extra_flags & EXEC_OBJECT_WRITE) {
1195 obj->flags |= EXEC_OBJECT_WRITE;
1196 obj->flags &= ~EXEC_OBJECT_ASYNC;
1197 }
1198
1199 if (relocs != NULL) {
1200 assert(obj->relocation_count == 0);
1201
1202 if (relocs->num_relocs > 0) {
1203 /* This is the first time we've ever seen a list of relocations for
1204 * this BO. Go ahead and set the relocations and then walk the list
1205 * of relocations and add them all.
1206 */
1207 exec->has_relocs = true;
1208 obj->relocation_count = relocs->num_relocs;
1209 obj->relocs_ptr = (uintptr_t) relocs->relocs;
1210
1211 for (size_t i = 0; i < relocs->num_relocs; i++) {
1212 VkResult result;
1213
1214 /* A quick sanity check on relocations */
1215 assert(relocs->relocs[i].offset < bo->size);
1216 result = anv_execbuf_add_bo(device, exec, relocs->reloc_bos[i],
1217 NULL, extra_flags);
1218 if (result != VK_SUCCESS)
1219 return result;
1220 }
1221 }
1222
1223 return anv_execbuf_add_bo_bitset(device, exec, relocs->dep_words,
1224 relocs->deps, extra_flags);
1225 }
1226
1227 return VK_SUCCESS;
1228 }
1229
1230 /* Add BO dependencies to execbuf */
1231 static VkResult
1232 anv_execbuf_add_bo_bitset(struct anv_device *device,
1233 struct anv_execbuf *exec,
1234 uint32_t dep_words,
1235 BITSET_WORD *deps,
1236 uint32_t extra_flags)
1237 {
1238 for (uint32_t w = 0; w < dep_words; w++) {
1239 BITSET_WORD mask = deps[w];
1240 while (mask) {
1241 int i = u_bit_scan(&mask);
1242 uint32_t gem_handle = w * BITSET_WORDBITS + i;
1243 struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
1244 assert(bo->refcount > 0);
1245 VkResult result =
1246 anv_execbuf_add_bo(device, exec, bo, NULL, extra_flags);
1247 if (result != VK_SUCCESS)
1248 return result;
1249 }
1250 }
1251
1252 return VK_SUCCESS;
1253 }
1254
1255 static void
1256 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
1257 struct anv_reloc_list *list)
1258 {
1259 for (size_t i = 0; i < list->num_relocs; i++)
1260 list->relocs[i].target_handle = anv_bo_unwrap(list->reloc_bos[i])->index;
1261 }
1262
1263 static void
1264 adjust_relocations_from_state_pool(struct anv_state_pool *pool,
1265 struct anv_reloc_list *relocs,
1266 uint32_t last_pool_center_bo_offset)
1267 {
1268 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1269 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1270
1271 for (size_t i = 0; i < relocs->num_relocs; i++) {
1272 /* All of the relocations from this block pool to other BO's should
1273 * have been emitted relative to the surface block pool center. We
1274 * need to add the center offset to make them relative to the
1275 * beginning of the actual GEM bo.
1276 */
1277 relocs->relocs[i].offset += delta;
1278 }
1279 }
1280
1281 static void
1282 adjust_relocations_to_state_pool(struct anv_state_pool *pool,
1283 struct anv_bo *from_bo,
1284 struct anv_reloc_list *relocs,
1285 uint32_t last_pool_center_bo_offset)
1286 {
1287 assert(!from_bo->is_wrapper);
1288 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1289 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1290
1291 /* When we initially emit relocations into a block pool, we don't
1292 * actually know what the final center_bo_offset will be so we just emit
1293 * it as if center_bo_offset == 0. Now that we know what the center
1294 * offset is, we need to walk the list of relocations and adjust any
1295 * relocations that point to the pool bo with the correct offset.
1296 */
1297 for (size_t i = 0; i < relocs->num_relocs; i++) {
1298 if (relocs->reloc_bos[i] == pool->block_pool.bo) {
1299 /* Adjust the delta value in the relocation to correctly
1300 * correspond to the new delta. Initially, this value may have
1301 * been negative (if treated as unsigned), but we trust in
1302 * uint32_t roll-over to fix that for us at this point.
1303 */
1304 relocs->relocs[i].delta += delta;
1305
1306 /* Since the delta has changed, we need to update the actual
1307 * relocated value with the new presumed value. This function
1308 * should only be called on batch buffers, so we know it isn't in
1309 * use by the GPU at the moment.
1310 */
1311 assert(relocs->relocs[i].offset < from_bo->size);
1312 write_reloc(pool->block_pool.device,
1313 from_bo->map + relocs->relocs[i].offset,
1314 relocs->relocs[i].presumed_offset +
1315 relocs->relocs[i].delta, false);
1316 }
1317 }
1318 }
1319
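/* Apply a relocation list on the CPU by writing the presumed addresses into
 * the mapped BO.  Unless always_relocate is set, entries whose presumed
 * offset already matches the target BO's current offset are skipped.
 */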
1320 static void
1321 anv_reloc_list_apply(struct anv_device *device,
1322 struct anv_reloc_list *list,
1323 struct anv_bo *bo,
1324 bool always_relocate)
1325 {
1326 bo = anv_bo_unwrap(bo);
1327
1328 for (size_t i = 0; i < list->num_relocs; i++) {
1329 struct anv_bo *target_bo = anv_bo_unwrap(list->reloc_bos[i]);
1330 if (list->relocs[i].presumed_offset == target_bo->offset &&
1331 !always_relocate)
1332 continue;
1333
1334 void *p = bo->map + list->relocs[i].offset;
1335 write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
1336 list->relocs[i].presumed_offset = target_bo->offset;
1337 }
1338 }
1339
1340 /**
1341 * This function applies the relocation for a command buffer and writes the
1342 * actual addresses into the buffers as per what we were told by the kernel on
1343 * the previous execbuf2 call. This should be safe to do because, for each
1344 * relocated address, we have two cases:
1345 *
1346 * 1) The target BO is inactive (as seen by the kernel). In this case, it is
1347 * not in use by the GPU so updating the address is 100% ok. It won't be
1348 * in-use by the GPU (from our context) again until the next execbuf2
1349 * happens. If the kernel decides to move it in the next execbuf2, it
1350 * will have to do the relocations itself, but that's ok because it should
1351 * have all of the information needed to do so.
1352 *
1353 * 2) The target BO is active (as seen by the kernel). In this case, it
1354 * hasn't moved since the last execbuffer2 call because GTT shuffling
1355 * *only* happens when the BO is idle. (From our perspective, it only
1356 * happens inside the execbuffer2 ioctl, but the shuffling may be
1357 * triggered by another ioctl, with full-ppgtt this is limited to only
1358 * execbuffer2 ioctls on the same context, or memory pressure.) Since the
1359 * target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
1360 * address and the relocated value we are writing into the BO will be the
1361 * same as the value that is already there.
1362 *
1363 * There is also a possibility that the target BO is active but the exact
1364 * RENDER_SURFACE_STATE object we are writing the relocation into isn't in
1365 * use. In this case, the address currently in the RENDER_SURFACE_STATE
1366 * may be stale but it's still safe to write the relocation because that
1367 * particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
1368 * won't be until the next execbuf2 call.
1369 *
1370 * By doing relocations on the CPU, we can tell the kernel that it doesn't
1371 * need to bother. We want to do this because the surface state buffer is
1372 * used by every command buffer so, if the kernel does the relocations, it
1373 * will always be busy and the kernel will always stall. This is also
1374 * probably the fastest mechanism for doing relocations since the kernel would
1375 * have to make a full copy of all the relocations lists.
1376 */
1377 static bool
1378 relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
1379 struct anv_execbuf *exec)
1380 {
1381 if (cmd_buffer->perf_query_pool)
1382 return false;
1383
1384 if (!exec->has_relocs)
1385 return true;
1386
1387 static int userspace_relocs = -1;
1388 if (userspace_relocs < 0)
1389 userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
1390 if (!userspace_relocs)
1391 return false;
1392
1393 /* First, we have to check to see whether or not we can even do the
1394 * relocation. New buffers which have never been submitted to the kernel
1395 * don't have a valid offset so we need to let the kernel do relocations so
1396 * that we can get offsets for them. On future execbuf2 calls, those
1397 * buffers will have offsets and we will be able to skip relocating.
1398 * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
1399 */
1400 for (uint32_t i = 0; i < exec->bo_count; i++) {
1401 assert(!exec->bos[i]->is_wrapper);
1402 if (exec->bos[i]->offset == (uint64_t)-1)
1403 return false;
1404 }
1405
1406 /* Since surface states are shared between command buffers and we don't
1407 * know what order they will be submitted to the kernel, we don't know
1408 * what address is actually written in the surface state object at any
1409 * given time. The only option is to always relocate them.
1410 */
1411 struct anv_bo *surface_state_bo =
1412 anv_bo_unwrap(cmd_buffer->device->surface_state_pool.block_pool.bo);
1413 anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
1414 surface_state_bo,
1415 true /* always relocate surface states */);
1416
1417 /* Since we own all of the batch buffers, we know what values are stored
1418 * in the relocated addresses and only have to update them if the offsets
1419 * have changed.
1420 */
1421 struct anv_batch_bo **bbo;
1422 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1423 anv_reloc_list_apply(cmd_buffer->device,
1424 &(*bbo)->relocs, (*bbo)->bo, false);
1425 }
1426
1427 for (uint32_t i = 0; i < exec->bo_count; i++)
1428 exec->objects[i].offset = exec->bos[i]->offset;
1429
1430 return true;
1431 }
1432
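/* Build the execbuf2 validation list for a command buffer: add the state pool
 * and batch BOs, move the first batch BO to the end of the list as the kernel
 * requires, fix up relocation target indices after the reorder, and decide
 * whether the kernel may skip relocations (I915_EXEC_NO_RELOC).
 */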
1433 static VkResult
1434 setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
1435 struct anv_cmd_buffer *cmd_buffer)
1436 {
1437 struct anv_batch *batch = &cmd_buffer->batch;
1438 struct anv_state_pool *ss_pool =
1439 &cmd_buffer->device->surface_state_pool;
1440
1441 adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
1442 cmd_buffer->last_ss_pool_center);
1443 VkResult result;
1444 if (cmd_buffer->device->physical->use_softpin) {
1445 anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
1446 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1447 bo, NULL, 0);
1448 if (result != VK_SUCCESS)
1449 return result;
1450 }
1451 /* Add surface dependencies (BOs) to the execbuf */
1452 anv_execbuf_add_bo_bitset(cmd_buffer->device, execbuf,
1453 cmd_buffer->surface_relocs.dep_words,
1454 cmd_buffer->surface_relocs.deps, 0);
1455
1456 /* Add the BOs for all memory objects */
1457 list_for_each_entry(struct anv_device_memory, mem,
1458 &cmd_buffer->device->memory_objects, link) {
1459 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1460 mem->bo, NULL, 0);
1461 if (result != VK_SUCCESS)
1462 return result;
1463 }
1464
1465 struct anv_block_pool *pool;
1466 pool = &cmd_buffer->device->dynamic_state_pool.block_pool;
1467 anv_block_pool_foreach_bo(bo, pool) {
1468 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1469 bo, NULL, 0);
1470 if (result != VK_SUCCESS)
1471 return result;
1472 }
1473
1474 pool = &cmd_buffer->device->instruction_state_pool.block_pool;
1475 anv_block_pool_foreach_bo(bo, pool) {
1476 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1477 bo, NULL, 0);
1478 if (result != VK_SUCCESS)
1479 return result;
1480 }
1481
1482 pool = &cmd_buffer->device->binding_table_pool.block_pool;
1483 anv_block_pool_foreach_bo(bo, pool) {
1484 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1485 bo, NULL, 0);
1486 if (result != VK_SUCCESS)
1487 return result;
1488 }
1489 } else {
1490 /* Since we aren't in the softpin case, all of our STATE_BASE_ADDRESS BOs
1491 * will get added automatically by processing relocations on the batch
1492 * buffer. We have to add the surface state BO manually because it has
1493        * relocations of its own that we need to be sure are processed.
1494 */
1495 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1496 ss_pool->block_pool.bo,
1497 &cmd_buffer->surface_relocs, 0);
1498 if (result != VK_SUCCESS)
1499 return result;
1500 }
1501
1502 /* First, we walk over all of the bos we've seen and add them and their
1503 * relocations to the validate list.
1504 */
1505 struct anv_batch_bo **bbo;
1506 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1507 adjust_relocations_to_state_pool(ss_pool, (*bbo)->bo, &(*bbo)->relocs,
1508 cmd_buffer->last_ss_pool_center);
1509
1510 result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1511 (*bbo)->bo, &(*bbo)->relocs, 0);
1512 if (result != VK_SUCCESS)
1513 return result;
1514 }
1515
1516 /* Now that we've adjusted all of the surface state relocations, we need to
1517 * record the surface state pool center so future executions of the command
1518 * buffer can adjust correctly.
1519 */
1520 cmd_buffer->last_ss_pool_center = ss_pool->block_pool.center_bo_offset;
1521
1522 struct anv_batch_bo *first_batch_bo =
1523 list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
1524
1525 /* The kernel requires that the last entry in the validation list be the
1526 * batch buffer to execute. We can simply swap the element
1527 * corresponding to the first batch_bo in the chain with the last
1528 * element in the list.
1529 */
1530 if (first_batch_bo->bo->index != execbuf->bo_count - 1) {
1531 uint32_t idx = first_batch_bo->bo->index;
1532 uint32_t last_idx = execbuf->bo_count - 1;
1533
1534 struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
1535 assert(execbuf->bos[idx] == first_batch_bo->bo);
1536
1537 execbuf->objects[idx] = execbuf->objects[last_idx];
1538 execbuf->bos[idx] = execbuf->bos[last_idx];
1539 execbuf->bos[idx]->index = idx;
1540
1541 execbuf->objects[last_idx] = tmp_obj;
1542 execbuf->bos[last_idx] = first_batch_bo->bo;
1543 first_batch_bo->bo->index = last_idx;
1544 }
1545
1546 /* If we are pinning our BOs, we shouldn't have to relocate anything */
1547 if (cmd_buffer->device->physical->use_softpin)
1548 assert(!execbuf->has_relocs);
1549
1550 /* Now we go through and fixup all of the relocation lists to point to
1551 * the correct indices in the object array. We have to do this after we
1552 * reorder the list above as some of the indices may have changed.
1553 */
1554 if (execbuf->has_relocs) {
1555 u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
1556 anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
1557
1558 anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
1559 }
1560
1561 if (!cmd_buffer->device->info.has_llc) {
1562 __builtin_ia32_mfence();
1563 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1564 for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
1565 __builtin_ia32_clflush((*bbo)->bo->map + i);
1566 }
1567 }
1568
1569 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1570 .buffers_ptr = (uintptr_t) execbuf->objects,
1571 .buffer_count = execbuf->bo_count,
1572 .batch_start_offset = 0,
1573 .batch_len = batch->next - batch->start,
1574 .cliprects_ptr = 0,
1575 .num_cliprects = 0,
1576 .DR1 = 0,
1577 .DR4 = 0,
1578 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
1579 .rsvd1 = cmd_buffer->device->context_id,
1580 .rsvd2 = 0,
1581 };
1582
1583 if (relocate_cmd_buffer(cmd_buffer, execbuf)) {
1584 /* If we were able to successfully relocate everything, tell the kernel
1585 * that it can skip doing relocations. The requirement for using
1586 * NO_RELOC is:
1587 *
1588 * 1) The addresses written in the objects must match the corresponding
1589 * reloc.presumed_offset which in turn must match the corresponding
1590 * execobject.offset.
1591 *
1592 * 2) To avoid stalling, execobject.offset should match the current
1593 * address of that object within the active context.
1594 *
1595 * In order to satisfy all of the invariants that make userspace
1596 * relocations safe (see relocate_cmd_buffer()), we need to
1597 * further ensure that the addresses we use match those used by the
1598 * kernel for the most recent execbuf2.
1599 *
1600 * The kernel may still choose to do relocations if something has
1601 * moved in the GTT. In this case, the relocation list still needs to be
1602 * valid. All relocations on the batch buffers are already valid and
1603 * kept up-to-date. For surface state relocations, by applying the
1604 * relocations in relocate_cmd_buffer, we ensured that the address in
1605 * the RENDER_SURFACE_STATE matches presumed_offset, so it should be
1606 * safe for the kernel to relocate them as needed.
1607 */
1608 execbuf->execbuf.flags |= I915_EXEC_NO_RELOC;
1609 } else {
1610 /* In the case where we fall back to doing kernel relocations, we need
1611 * to ensure that the relocation list is valid. All relocations on the
1612 * batch buffers are already valid and kept up-to-date. Since surface
1613 * states are shared between command buffers and we don't know what
1614 * order they will be submitted to the kernel, we don't know what
1615 * address is actually written in the surface state object at any given
1616 * time. The only option is to set a bogus presumed offset and let the
1617 * kernel relocate them.
1618 */
1619 for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
1620 cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
1621 }
1622
1623 return VK_SUCCESS;
1624 }
1625
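/* A submission with no command buffer and no simple BO still needs a batch to
 * execute, so point the kernel at the device's shared trivial batch, which
 * contains nothing but MI_BATCH_BUFFER_END followed by a NOOP.
 */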
1626 static VkResult
1627 setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_device *device)
1628 {
1629 VkResult result = anv_execbuf_add_bo(device, execbuf,
1630 device->trivial_batch_bo,
1631 NULL, 0);
1632 if (result != VK_SUCCESS)
1633 return result;
1634
1635 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1636 .buffers_ptr = (uintptr_t) execbuf->objects,
1637 .buffer_count = execbuf->bo_count,
1638 .batch_start_offset = 0,
1639 .batch_len = 8, /* GEN7_MI_BATCH_BUFFER_END and NOOP */
1640 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
1641 .rsvd1 = device->context_id,
1642 .rsvd2 = 0,
1643 };
1644
1645 return VK_SUCCESS;
1646 }
1647
1648 /* We lock around execbuf for three main reasons:
1649 *
1650 * 1) When a block pool is resized, we create a new gem handle with a
1651 * different size and, in the case of surface states, possibly a different
1652 * center offset but we re-use the same anv_bo struct when we do so. If
1653 * this happens in the middle of setting up an execbuf, we could end up
1654 * with our list of BOs out of sync with our list of gem handles.
1655 *
1656 * 2) The algorithm we use for building the list of unique buffers isn't
1657 * thread-safe. While the client is supposed to synchronize around
1658 * QueueSubmit, this would be extremely difficult to debug if it ever came
1659 * up in the wild due to a broken app. It's better to play it safe and
1660 * just lock around QueueSubmit.
1661 *
1662 * 3) setup_execbuf_for_cmd_buffer() may perform relocations in
1663 * userspace. Because the surface state buffer is shared between
1664 * between batches, we can't afford to have that happen from multiple
1665 * threads at the same time. Even though the user is supposed to ensure
1666 * this doesn't happen, we play it safe as in (2) above.
1667 *
1668 * Since the only other things that ever take the device lock, such as block
1669 * pool resizes, happen only rarely, the lock will almost never be contended,
1670 * so taking it is not an expensive operation in this case.
1671 */
1672 VkResult
1673 anv_queue_execbuf_locked(struct anv_queue *queue,
1674 struct anv_queue_submit *submit)
1675 {
1676 struct anv_device *device = queue->device;
1677 struct anv_execbuf execbuf;
1678 anv_execbuf_init(&execbuf);
1679 execbuf.alloc = submit->alloc;
1680 execbuf.alloc_scope = submit->alloc_scope;
1681 execbuf.perf_query_pass = submit->perf_query_pass;
1682
1683 VkResult result;
1684
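   /* Each fence BO pointer has a "signaled" bit packed into its low bit.  BOs
    * for fences this submission signals are added as writes, so that later
    * waiters can depend on this batch through the kernel's implicit
    * synchronization; the rest are added read-only.
    */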
1685 for (uint32_t i = 0; i < submit->fence_bo_count; i++) {
1686 int signaled;
1687 struct anv_bo *bo = anv_unpack_ptr(submit->fence_bos[i], 1, &signaled);
1688
1689 result = anv_execbuf_add_bo(device, &execbuf, bo, NULL,
1690 signaled ? EXEC_OBJECT_WRITE : 0);
1691 if (result != VK_SUCCESS)
1692 goto error;
1693 }
1694
1695 if (submit->cmd_buffer) {
1696 result = setup_execbuf_for_cmd_buffer(&execbuf, submit->cmd_buffer);
1697 } else if (submit->simple_bo) {
1698 result = anv_execbuf_add_bo(device, &execbuf, submit->simple_bo, NULL, 0);
1699 if (result != VK_SUCCESS)
1700 goto error;
1701
1702 execbuf.execbuf = (struct drm_i915_gem_execbuffer2) {
1703 .buffers_ptr = (uintptr_t) execbuf.objects,
1704 .buffer_count = execbuf.bo_count,
1705 .batch_start_offset = 0,
1706 .batch_len = submit->simple_bo_size,
1707 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
1708 .rsvd1 = device->context_id,
1709 .rsvd2 = 0,
1710 };
1711 } else {
1712 result = setup_empty_execbuf(&execbuf, queue->device);
1713 }
1714
1715 if (result != VK_SUCCESS)
1716 goto error;
1717
1718 const bool has_perf_query =
1719 submit->perf_query_pass >= 0 &&
1720 submit->cmd_buffer &&
1721 submit->cmd_buffer->perf_query_pool;
1722
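   /* When batch decoding is enabled via INTEL_DEBUG, decode and print what is
    * about to be submitted: the perf-query pass preamble (if any) and the
    * command buffer's batch, or the simple BO, or the trivial batch.
    */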
1723 if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
1724 if (submit->cmd_buffer) {
1725 if (has_perf_query) {
1726 struct anv_query_pool *query_pool = submit->cmd_buffer->perf_query_pool;
1727 struct anv_bo *pass_batch_bo = query_pool->bo;
1728 uint64_t pass_batch_offset =
1729 khr_perf_query_preamble_offset(query_pool,
1730 submit->perf_query_pass);
1731
1732 gen_print_batch(&device->decoder_ctx,
1733 pass_batch_bo->map + pass_batch_offset, 64,
1734 pass_batch_bo->offset + pass_batch_offset, false);
1735 }
1736
1737 struct anv_batch_bo **bo = u_vector_tail(&submit->cmd_buffer->seen_bbos);
1738 device->cmd_buffer_being_decoded = submit->cmd_buffer;
1739 gen_print_batch(&device->decoder_ctx, (*bo)->bo->map,
1740 (*bo)->bo->size, (*bo)->bo->offset, false);
1741 device->cmd_buffer_being_decoded = NULL;
1742 } else if (submit->simple_bo) {
1743 gen_print_batch(&device->decoder_ctx, submit->simple_bo->map,
1744 submit->simple_bo->size, submit->simple_bo->offset, false);
1745 } else {
1746 gen_print_batch(&device->decoder_ctx,
1747 device->trivial_batch_bo->map,
1748 device->trivial_batch_bo->size,
1749 device->trivial_batch_bo->offset, false);
1750 }
1751 }
1752
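   /* Syncobj fences ride along through the execbuf2 fence array, which
    * repurposes the legacy cliprect fields: cliprects_ptr points at an array
    * of struct drm_i915_gem_exec_fence and num_cliprects holds its length.
    */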
1753 if (submit->fence_count > 0) {
1754 assert(device->physical->has_syncobj);
1755 execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
1756 execbuf.execbuf.num_cliprects = submit->fence_count;
1757 execbuf.execbuf.cliprects_ptr = (uintptr_t)submit->fences;
1758 }
1759
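   /* Sync-file fds are packed into rsvd2: the input fence goes in the low 32
    * bits and, when I915_EXEC_FENCE_OUT is set, the kernel returns the output
    * fence fd in the high 32 bits (read back after the execbuf below).
    */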
1760 if (submit->in_fence != -1) {
1761 execbuf.execbuf.flags |= I915_EXEC_FENCE_IN;
1762 execbuf.execbuf.rsvd2 |= (uint32_t)submit->in_fence;
1763 }
1764
1765 if (submit->need_out_fence)
1766 execbuf.execbuf.flags |= I915_EXEC_FENCE_OUT;
1767
1768 if (has_perf_query) {
1769 struct anv_query_pool *query_pool = submit->cmd_buffer->perf_query_pool;
1770 assert(submit->perf_query_pass < query_pool->n_passes);
1771 struct gen_perf_query_info *query_info =
1772 query_pool->pass_query[submit->perf_query_pass];
1773
1774 /* Some performance queries use just the pipeline statistics HW; no OA is
1775 * needed in that case, so there is no need to reconfigure it.
1776 */
1777 if (likely((INTEL_DEBUG & DEBUG_NO_OACONFIG) == 0) &&
1778 (query_info->kind == GEN_PERF_QUERY_TYPE_OA ||
1779 query_info->kind == GEN_PERF_QUERY_TYPE_RAW)) {
1780 int ret = gen_ioctl(device->perf_fd, I915_PERF_IOCTL_CONFIG,
1781 (void *)(uintptr_t) query_info->oa_metrics_set_id);
1782 if (ret < 0) {
1783 result = anv_device_set_lost(device,
1784 "i915-perf config failed: %s",
1785 strerror(errno));
1786 }
1787 }
1788
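   /* The pass preamble recorded in the query pool BO is submitted as its own
    * single-BO execbuf ahead of the main batch, so that whatever per-pass
    * selection it programs is in place before the command buffer executes.
    */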
1789 struct anv_bo *pass_batch_bo = query_pool->bo;
1790
1791 struct drm_i915_gem_exec_object2 query_pass_object = {
1792 .handle = pass_batch_bo->gem_handle,
1793 .offset = pass_batch_bo->offset,
1794 .flags = pass_batch_bo->flags,
1795 };
1796 struct drm_i915_gem_execbuffer2 query_pass_execbuf = {
1797 .buffers_ptr = (uintptr_t) &query_pass_object,
1798 .buffer_count = 1,
1799 .batch_start_offset = khr_perf_query_preamble_offset(query_pool,
1800 submit->perf_query_pass),
1801 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
1802 .rsvd1 = device->context_id,
1803 };
1804
1805 int ret = queue->device->no_hw ? 0 :
1806 anv_gem_execbuffer(queue->device, &query_pass_execbuf);
1807 if (ret)
1808 result = anv_queue_set_lost(queue, "execbuf2 failed: %m");
1809 }
1810
1811 int ret = queue->device->no_hw ? 0 :
1812 anv_gem_execbuffer(queue->device, &execbuf.execbuf);
1813 if (ret)
1814 result = anv_queue_set_lost(queue, "execbuf2 failed: %m");
1815
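   /* The kernel reports each BO's final GPU address back through its exec
    * object's offset field.  Cache those addresses so future submissions
    * start from up-to-date presumed offsets; pinned (softpin) BOs must not
    * have moved, hence the assert.
    */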
1816 struct drm_i915_gem_exec_object2 *objects = execbuf.objects;
1817 for (uint32_t k = 0; k < execbuf.bo_count; k++) {
1818 if (execbuf.bos[k]->flags & EXEC_OBJECT_PINNED)
1819 assert(execbuf.bos[k]->offset == objects[k].offset);
1820 execbuf.bos[k]->offset = objects[k].offset;
1821 }
1822
1823 if (result == VK_SUCCESS && submit->need_out_fence)
1824 submit->out_fence = execbuf.execbuf.rsvd2 >> 32;
1825
1826 error:
1827 pthread_cond_broadcast(&device->queue_submit);
1828
1829 anv_execbuf_finish(&execbuf);
1830
1831 return result;
1832 }