anv: implement VK_KHR_timeline_semaphore
[mesa.git] src/intel/vulkan/anv_batch_chain.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "genxml/gen8_pack.h"

#include "util/debug.h"

/** \file anv_batch_chain.c
 *
 * This file contains functions related to anv_cmd_buffer as a data
 * structure. This involves everything required to create and destroy
 * the actual batch buffers as well as link them together and handle
 * relocations and surface state. It specifically does *not* contain any
 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
 */

/*-----------------------------------------------------------------------*
 * Functions related to anv_reloc_list
 *-----------------------------------------------------------------------*/

VkResult
anv_reloc_list_init(struct anv_reloc_list *list,
                    const VkAllocationCallbacks *alloc)
{
   memset(list, 0, sizeof(*list));
   return VK_SUCCESS;
}

static VkResult
anv_reloc_list_init_clone(struct anv_reloc_list *list,
                          const VkAllocationCallbacks *alloc,
                          const struct anv_reloc_list *other_list)
{
   list->num_relocs = other_list->num_relocs;
   list->array_length = other_list->array_length;

   if (list->num_relocs > 0) {
      list->relocs =
         vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (list->relocs == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      list->reloc_bos =
         vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (list->reloc_bos == NULL) {
         vk_free(alloc, list->relocs);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      memcpy(list->relocs, other_list->relocs,
             list->array_length * sizeof(*list->relocs));
      memcpy(list->reloc_bos, other_list->reloc_bos,
             list->array_length * sizeof(*list->reloc_bos));
   } else {
      list->relocs = NULL;
      list->reloc_bos = NULL;
   }

   list->dep_words = other_list->dep_words;

   if (list->dep_words > 0) {
      list->deps =
         vk_alloc(alloc, list->dep_words * sizeof(BITSET_WORD), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (list->deps == NULL) {
         vk_free(alloc, list->relocs);
         vk_free(alloc, list->reloc_bos);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }
      memcpy(list->deps, other_list->deps,
             list->dep_words * sizeof(BITSET_WORD));
   } else {
      list->deps = NULL;
   }

   return VK_SUCCESS;
}

void
anv_reloc_list_finish(struct anv_reloc_list *list,
                      const VkAllocationCallbacks *alloc)
{
   vk_free(alloc, list->relocs);
   vk_free(alloc, list->reloc_bos);
   vk_free(alloc, list->deps);
}

static VkResult
anv_reloc_list_grow(struct anv_reloc_list *list,
                    const VkAllocationCallbacks *alloc,
                    size_t num_additional_relocs)
{
   if (list->num_relocs + num_additional_relocs <= list->array_length)
      return VK_SUCCESS;

   size_t new_length = MAX2(16, list->array_length * 2);
   while (new_length < list->num_relocs + num_additional_relocs)
      new_length *= 2;

   struct drm_i915_gem_relocation_entry *new_relocs =
      vk_realloc(alloc, list->relocs,
                 new_length * sizeof(*list->relocs), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_relocs == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   list->relocs = new_relocs;

   struct anv_bo **new_reloc_bos =
      vk_realloc(alloc, list->reloc_bos,
                 new_length * sizeof(*list->reloc_bos), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_reloc_bos == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   list->reloc_bos = new_reloc_bos;

   list->array_length = new_length;

   return VK_SUCCESS;
}

static VkResult
anv_reloc_list_grow_deps(struct anv_reloc_list *list,
                         const VkAllocationCallbacks *alloc,
                         uint32_t min_num_words)
{
   if (min_num_words <= list->dep_words)
      return VK_SUCCESS;

   uint32_t new_length = MAX2(32, list->dep_words * 2);
   while (new_length < min_num_words)
      new_length *= 2;

   BITSET_WORD *new_deps =
      vk_realloc(alloc, list->deps, new_length * sizeof(BITSET_WORD), 8,
                 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_deps == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   list->deps = new_deps;

   /* Zero out the new data */
   memset(list->deps + list->dep_words, 0,
          (new_length - list->dep_words) * sizeof(BITSET_WORD));
   list->dep_words = new_length;

   return VK_SUCCESS;
}

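/* Read a value exactly once, without letting the compiler re-load or tear
 * it. We use this below to snapshot a BO offset that another thread may be
 * updating concurrently; any consistent stale value is acceptable, but a
 * torn read is not.
 */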
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

VkResult
anv_reloc_list_add(struct anv_reloc_list *list,
                   const VkAllocationCallbacks *alloc,
                   uint32_t offset, struct anv_bo *target_bo, uint32_t delta,
                   uint64_t *address_u64_out)
{
   struct drm_i915_gem_relocation_entry *entry;
   int index;

   struct anv_bo *unwrapped_target_bo = anv_bo_unwrap(target_bo);
   uint64_t target_bo_offset = READ_ONCE(unwrapped_target_bo->offset);
   if (address_u64_out)
      *address_u64_out = target_bo_offset + delta;

   if (unwrapped_target_bo->flags & EXEC_OBJECT_PINNED) {
      assert(!target_bo->is_wrapper);
      uint32_t idx = unwrapped_target_bo->gem_handle;
      VkResult result = anv_reloc_list_grow_deps(list, alloc,
                                                 (idx / BITSET_WORDBITS) + 1);
      if (result != VK_SUCCESS)
         return result;
      BITSET_SET(list->deps, idx);
      return VK_SUCCESS;
   }

   VkResult result = anv_reloc_list_grow(list, alloc, 1);
   if (result != VK_SUCCESS)
      return result;

   /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
   index = list->num_relocs++;
   list->reloc_bos[index] = target_bo;
   entry = &list->relocs[index];
   entry->target_handle = -1; /* See also anv_cmd_buffer_process_relocs() */
   entry->delta = delta;
   entry->offset = offset;
   entry->presumed_offset = target_bo_offset;
   entry->read_domains = 0;
   entry->write_domain = 0;
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));

   return VK_SUCCESS;
}
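
/* A minimal usage sketch (hypothetical caller, for illustration only):
 *
 *    uint64_t addr;
 *    VkResult res = anv_reloc_list_add(&bbo->relocs, alloc,
 *                                      byte_offset_in_bo, target_bo,
 *                                      0, &addr);
 *
 * With softpin (EXEC_OBJECT_PINNED), the call only records target_bo's gem
 * handle in the dependency bitset; otherwise it appends a
 * drm_i915_gem_relocation_entry for the kernel (or relocate_cmd_buffer())
 * to patch later.
 */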

static void
anv_reloc_list_clear(struct anv_reloc_list *list)
{
   list->num_relocs = 0;
   if (list->dep_words > 0)
      memset(list->deps, 0, list->dep_words * sizeof(BITSET_WORD));
}

static VkResult
anv_reloc_list_append(struct anv_reloc_list *list,
                      const VkAllocationCallbacks *alloc,
                      struct anv_reloc_list *other, uint32_t offset)
{
   VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
   if (result != VK_SUCCESS)
      return result;

   if (other->num_relocs > 0) {
      memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
             other->num_relocs * sizeof(other->relocs[0]));
      memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
             other->num_relocs * sizeof(other->reloc_bos[0]));

      for (uint32_t i = 0; i < other->num_relocs; i++)
         list->relocs[i + list->num_relocs].offset += offset;

      list->num_relocs += other->num_relocs;
   }

   result = anv_reloc_list_grow_deps(list, alloc, other->dep_words);
   if (result != VK_SUCCESS)
      return result;

   for (uint32_t w = 0; w < other->dep_words; w++)
      list->deps[w] |= other->deps[w];

   return VK_SUCCESS;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_batch
 *-----------------------------------------------------------------------*/

void *
anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
{
   if (batch->next + num_dwords * 4 > batch->end) {
      VkResult result = batch->extend_cb(batch, batch->user_data);
      if (result != VK_SUCCESS) {
         anv_batch_set_error(batch, result);
         return NULL;
      }
   }

   void *p = batch->next;

   batch->next += num_dwords * 4;
   assert(batch->next <= batch->end);

   return p;
}
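
/* Illustrative only: a caller reserves space and then writes dwords through
 * the returned pointer, e.g. two MI_NOOPs (a NOOP encodes as all zeros):
 *
 *    uint32_t *dw = anv_batch_emit_dwords(batch, 2);
 *    if (dw != NULL) {
 *       dw[0] = 0;
 *       dw[1] = 0;
 *    }
 *
 * A NULL return means the extend callback failed and the error has already
 * been recorded with anv_batch_set_error().
 */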

uint64_t
anv_batch_emit_reloc(struct anv_batch *batch,
                     void *location, struct anv_bo *bo, uint32_t delta)
{
   uint64_t address_u64 = 0;
   VkResult result = anv_reloc_list_add(batch->relocs, batch->alloc,
                                        location - batch->start, bo, delta,
                                        &address_u64);
   if (result != VK_SUCCESS) {
      anv_batch_set_error(batch, result);
      return 0;
   }

   return address_u64;
}

void
anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
{
   uint32_t size, offset;

   size = other->next - other->start;
   assert(size % 4 == 0);

   if (batch->next + size > batch->end) {
      VkResult result = batch->extend_cb(batch, batch->user_data);
      if (result != VK_SUCCESS) {
         anv_batch_set_error(batch, result);
         return;
      }
   }

   assert(batch->next + size <= batch->end);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
   memcpy(batch->next, other->start, size);

   offset = batch->next - batch->start;
   VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
                                           other->relocs, offset);
   if (result != VK_SUCCESS) {
      anv_batch_set_error(batch, result);
      return;
   }

   batch->next += size;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_batch_bo
 *-----------------------------------------------------------------------*/

static VkResult
anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
                    struct anv_batch_bo **bbo_out)
{
   VkResult result;

   struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
                                       8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (bbo == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
                              ANV_CMD_BUFFER_BATCH_SIZE, &bbo->bo);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
   if (result != VK_SUCCESS)
      goto fail_bo_alloc;

   *bbo_out = bbo;

   return VK_SUCCESS;

 fail_bo_alloc:
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
 fail_alloc:
   vk_free(&cmd_buffer->pool->alloc, bbo);

   return result;
}

static VkResult
anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
                   const struct anv_batch_bo *other_bbo,
                   struct anv_batch_bo **bbo_out)
{
   VkResult result;

   struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
                                       8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (bbo == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
                              other_bbo->bo->size, &bbo->bo);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
                                      &other_bbo->relocs);
   if (result != VK_SUCCESS)
      goto fail_bo_alloc;

   bbo->length = other_bbo->length;
   memcpy(bbo->bo->map, other_bbo->bo->map, other_bbo->length);
   *bbo_out = bbo;

   return VK_SUCCESS;

 fail_bo_alloc:
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
 fail_alloc:
   vk_free(&cmd_buffer->pool->alloc, bbo);

   return result;
}

static void
anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
                   size_t batch_padding)
{
   batch->next = batch->start = bbo->bo->map;
   batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
   batch->relocs = &bbo->relocs;
   anv_reloc_list_clear(&bbo->relocs);
}

static void
anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
                      size_t batch_padding)
{
   batch->start = bbo->bo->map;
   batch->next = bbo->bo->map + bbo->length;
   batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
   batch->relocs = &bbo->relocs;
}

static void
anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
{
   assert(batch->start == bbo->bo->map);
   bbo->length = batch->next - batch->start;
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
}

static VkResult
anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
                  struct anv_batch *batch, size_t additional,
                  size_t batch_padding)
{
   assert(batch->start == bbo->bo->map);
   bbo->length = batch->next - batch->start;

   size_t new_size = bbo->bo->size;
   while (new_size <= bbo->length + additional + batch_padding)
      new_size *= 2;

   if (new_size == bbo->bo->size)
      return VK_SUCCESS;

   struct anv_bo *new_bo;
   VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
                                       new_size, &new_bo);
   if (result != VK_SUCCESS)
      return result;

   memcpy(new_bo->map, bbo->bo->map, bbo->length);

   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);

   bbo->bo = new_bo;
   anv_batch_bo_continue(bbo, batch, batch_padding);

   return VK_SUCCESS;
}

static void
anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
                  struct anv_batch_bo *prev_bbo,
                  struct anv_batch_bo *next_bbo,
                  uint32_t next_bbo_offset)
{
   const uint32_t bb_start_offset =
      prev_bbo->length - GEN8_MI_BATCH_BUFFER_START_length * 4;
   ASSERTED const uint32_t *bb_start = prev_bbo->bo->map + bb_start_offset;

   /* Make sure we're looking at a MI_BATCH_BUFFER_START (instruction type 0,
    * MI opcode 0x31 == 49)
    */
   assert(((*bb_start >> 29) & 0x07) == 0);
   assert(((*bb_start >> 23) & 0x3f) == 49);

   if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
      assert(prev_bbo->bo->flags & EXEC_OBJECT_PINNED);
      assert(next_bbo->bo->flags & EXEC_OBJECT_PINNED);

      write_reloc(cmd_buffer->device,
                  prev_bbo->bo->map + bb_start_offset + 4,
                  next_bbo->bo->offset + next_bbo_offset, true);
   } else {
      uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
      assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);

      prev_bbo->relocs.reloc_bos[reloc_idx] = next_bbo->bo;
      prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;

      /* Use a bogus presumed offset to force a relocation */
      prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
   }
}
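
/* In either path above, the jump target lives in the address dwords that
 * follow the MI_BATCH_BUFFER_START header, i.e. at bb_start_offset + 4.
 * Conceptually (an informal sketch, not the genxml encoding):
 *
 *    dw[0] = header (type / opcode / length)
 *    dw[1] = low 32 bits of the target graphics address
 *    dw[2] = high 16 bits of the target graphics address
 *
 * With softpin we know the final address and patch it directly; without
 * softpin we rewrite the existing relocation entry so the kernel patches
 * it for us.
 */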

static void
anv_batch_bo_destroy(struct anv_batch_bo *bbo,
                     struct anv_cmd_buffer *cmd_buffer)
{
   anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
   vk_free(&cmd_buffer->pool->alloc, bbo);
}

static VkResult
anv_batch_bo_list_clone(const struct list_head *list,
                        struct anv_cmd_buffer *cmd_buffer,
                        struct list_head *new_list)
{
   VkResult result = VK_SUCCESS;

   list_inithead(new_list);

   struct anv_batch_bo *prev_bbo = NULL;
   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
      struct anv_batch_bo *new_bbo = NULL;
      result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
      if (result != VK_SUCCESS)
         break;
      list_addtail(&new_bbo->link, new_list);

      if (prev_bbo)
         anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);

      prev_bbo = new_bbo;
   }

   if (result != VK_SUCCESS) {
      list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link) {
         list_del(&bbo->link);
         anv_batch_bo_destroy(bbo, cmd_buffer);
      }
   }

   return result;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_cmd_buffer
 *-----------------------------------------------------------------------*/

static struct anv_batch_bo *
anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
{
   return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
}

struct anv_address
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
   return (struct anv_address) {
      .bo = anv_binding_table_pool(cmd_buffer->device)->block_pool.bo,
      .offset = bt_block->offset,
   };
}

static void
emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_bo *bo, uint32_t offset)
{
   /* In gen8+ the address field grew to two dwords to accommodate 48 bit
    * offsets. The high 16 bits are in the last dword, so we can use the gen8
    * version in either case, as long as we set the instruction length in the
    * header accordingly. This means that we always emit three dwords here
    * and all the padding and adjustment we do in this file works for all
    * gens.
    */

#define GEN7_MI_BATCH_BUFFER_START_length 2
#define GEN7_MI_BATCH_BUFFER_START_length_bias 2

   const uint32_t gen7_length =
      GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
   const uint32_t gen8_length =
      GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;

   anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
      bbs.DWordLength = cmd_buffer->device->info.gen < 8 ?
                        gen7_length : gen8_length;
      bbs.SecondLevelBatchBuffer = Firstlevelbatch;
      bbs.AddressSpaceIndicator = ASI_PPGTT;
      bbs.BatchBufferStartAddress = (struct anv_address) { bo, offset };
   }
}
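
/* To make the length bias concrete: DWordLength stores "total dwords - 2",
 * so the 2-dword gen7 form encodes 0 and the 3-dword gen8 form encodes 1.
 * A hypothetical sanity check for the sketch above:
 *
 *    assert(GEN8_MI_BATCH_BUFFER_START_length == 3);
 *    assert(gen7_length == 0 && gen8_length == 1);
 */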

static void
cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_batch_bo *bbo)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_batch_bo *current_bbo =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);

   /* We set the end of the batch a little short so we would be sure we
    * have room for the chaining command. Since we're about to emit the
    * chaining command, let's set it back where it should go.
    */
   batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
   assert(batch->end == current_bbo->bo->map + current_bbo->bo->size);

   emit_batch_buffer_start(cmd_buffer, bbo->bo, 0);

   anv_batch_bo_finish(current_bbo, batch);
}

static VkResult
anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
{
   struct anv_cmd_buffer *cmd_buffer = _data;
   struct anv_batch_bo *new_bbo;

   VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
   if (result != VK_SUCCESS)
      return result;

   struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
   if (seen_bbo == NULL) {
      anv_batch_bo_destroy(new_bbo, cmd_buffer);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   *seen_bbo = new_bbo;

   cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);

   list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);

   anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);

   return VK_SUCCESS;
}

static VkResult
anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
{
   struct anv_cmd_buffer *cmd_buffer = _data;
   struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);

   return anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
                            GEN8_MI_BATCH_BUFFER_START_length * 4);
}

/** Allocate a binding table
 *
 * This function allocates a binding table. This is a bit more complicated
 * than one would think due to a combination of Vulkan driver design and some
 * unfortunate hardware restrictions.
 *
 * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
 * the binding table pointer which means that all binding tables need to live
 * in the bottom 64k of surface state base address. The way the GL driver has
 * classically dealt with this restriction is to emit all surface states
 * on-the-fly into the batch and have a batch buffer smaller than 64k. This
 * isn't really an option in Vulkan for a couple of reasons:
 *
 * 1) In Vulkan, we have growing (or chaining) batches so surface states have
 *    to live in their own buffer and we have to be able to re-emit
 *    STATE_BASE_ADDRESS as needed which requires a full pipeline stall. In
 *    order to avoid emitting STATE_BASE_ADDRESS any more often than needed
 *    (it's not that hard to hit 64k of just binding tables), we allocate
 *    surface state objects up-front when VkImageView is created. In order
 *    for this to work, surface state objects need to be allocated from a
 *    global buffer.
 *
 * 2) We tried to design the surface state system in such a way that it's
 *    already ready for bindless texturing. The way bindless texturing works
 *    on our hardware is that you have a big pool of surface state objects
 *    (with its own state base address) and the bindless handles are simply
 *    offsets into that pool. With the architecture we chose, we already
 *    have that pool and it's exactly the same pool that we use for regular
 *    surface states so we should already be ready for bindless.
 *
 * 3) For render targets, we need to be able to fill out the surface states
 *    later in vkBeginRenderPass so that we can assign clear colors
 *    correctly. One way to do this would be to just create the surface
 *    state data and then repeatedly copy it into the surface state BO every
 *    time we have to re-emit STATE_BASE_ADDRESS. While this works, it's
 *    rather annoying; it's much simpler to allocate the surface states
 *    up-front and re-use them for the entire render pass.
 *
 * While none of these are technically blockers for emitting state on the fly
 * like we do in GL, the ability to have a single surface state pool
 * simplifies things greatly. Unfortunately, it comes at a cost...
 *
 * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
 * place the binding tables just anywhere in surface state base address.
 * Because 64k isn't a whole lot of space, we can't simply restrict the
 * surface state buffer to 64k, we have to be more clever. The solution we've
 * chosen is to have a block pool with a maximum size of 2G that starts at
 * zero and grows in both directions. All surface states are allocated from
 * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
 * binding tables from the bottom of the pool (negative offsets). Every time
 * we allocate a new binding table block, we set surface state base address to
 * point to the bottom of the binding table block. This way all of the
 * binding tables in the block are in the bottom 64k of surface state base
 * address. When we fill out the binding table, we add the distance between
 * the bottom of our binding table block and zero of the block pool to the
 * surface state offsets so that they are correct relative to our new surface
 * state base address at the bottom of the binding table block.
 *
 * \see adjust_relocations_from_state_pool()
 * \see adjust_relocations_to_state_pool()
 *
 * \param[in]  entries      The number of surface state entries the binding
 *                          table should be able to hold.
 *
 * \param[out] state_offset The offset from surface state base address at
 *                          which the surface states live. This must be
 *                          added to the surface state offset when it is
 *                          written into the binding table entry.
 *
 * \return An anv_state representing the binding table
 */
struct anv_state
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t entries, uint32_t *state_offset)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_state_pool *state_pool = &device->surface_state_pool;
   struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
   struct anv_state state;

   state.alloc_size = align_u32(entries * 4, 32);

   if (cmd_buffer->bt_next + state.alloc_size > state_pool->block_size)
      return (struct anv_state) { 0 };

   state.offset = cmd_buffer->bt_next;
   state.map = anv_block_pool_map(&anv_binding_table_pool(device)->block_pool,
                                  bt_block->offset + state.offset);

   cmd_buffer->bt_next += state.alloc_size;

   if (device->instance->physicalDevice.use_softpin) {
      assert(bt_block->offset >= 0);
      *state_offset = device->surface_state_pool.block_pool.start_address -
         device->binding_table_pool.block_pool.start_address - bt_block->offset;
   } else {
      assert(bt_block->offset < 0);
      *state_offset = -bt_block->offset;
   }

   return state;
}
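
/* A worked example of the non-softpin math above (hypothetical numbers):
 * if the current binding table block was allocated at bt_block->offset
 * -4096 (bottom of the pool), *state_offset is 4096, and a surface state
 * at pool offset 256 is written into the table as 256 + 4096, which is
 * exactly its distance from the new surface state base address.
 */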

struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
{
   struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
   return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
                                 isl_dev->ss.size, isl_dev->ss.align);
}

struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment)
{
   return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
                                 size, alignment);
}

VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
   if (bt_block == NULL) {
      anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
   cmd_buffer->bt_next = 0;

   return VK_SUCCESS;
}

VkResult
anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo;
   VkResult result;

   list_inithead(&cmd_buffer->batch_bos);

   result = anv_batch_bo_create(cmd_buffer, &batch_bo);
   if (result != VK_SUCCESS)
      return result;

   list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);

   cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
   cmd_buffer->batch.user_data = cmd_buffer;

   if (cmd_buffer->device->can_chain_batches) {
      cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
   } else {
      cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
   }

   anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);

   int success = u_vector_init(&cmd_buffer->seen_bbos,
                               sizeof(struct anv_batch_bo *),
                               8 * sizeof(struct anv_batch_bo *));
   if (!success)
      goto fail_batch_bo;

   *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;

   /* u_vector requires power-of-two size elements */
   unsigned pow2_state_size = util_next_power_of_two(sizeof(struct anv_state));
   success = u_vector_init(&cmd_buffer->bt_block_states,
                           pow2_state_size, 8 * pow2_state_size);
   if (!success)
      goto fail_seen_bbos;

   result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
                                &cmd_buffer->pool->alloc);
   if (result != VK_SUCCESS)
      goto fail_bt_blocks;
   cmd_buffer->last_ss_pool_center = 0;

   result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail_bt_blocks;

   return VK_SUCCESS;

 fail_bt_blocks:
   u_vector_finish(&cmd_buffer->bt_block_states);
 fail_seen_bbos:
   u_vector_finish(&cmd_buffer->seen_bbos);
 fail_batch_bo:
   anv_batch_bo_destroy(batch_bo, cmd_buffer);

   return result;
}

void
anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_state *bt_block;
   u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
      anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
   u_vector_finish(&cmd_buffer->bt_block_states);

   anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);

   u_vector_finish(&cmd_buffer->seen_bbos);

   /* Destroy all of the batch buffers */
   list_for_each_entry_safe(struct anv_batch_bo, bbo,
                            &cmd_buffer->batch_bos, link) {
      list_del(&bbo->link);
      anv_batch_bo_destroy(bbo, cmd_buffer);
   }
}

void
anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   /* Delete all but the first batch bo */
   assert(!list_is_empty(&cmd_buffer->batch_bos));
   while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
      list_del(&bbo->link);
      anv_batch_bo_destroy(bbo, cmd_buffer);
   }
   assert(!list_is_empty(&cmd_buffer->batch_bos));

   anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
                      &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);

   while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
      struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
      anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
   }
   assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
   cmd_buffer->bt_next = 0;

   anv_reloc_list_clear(&cmd_buffer->surface_relocs);
   cmd_buffer->last_ss_pool_center = 0;

   /* Reset the list of seen buffers */
   cmd_buffer->seen_bbos.head = 0;
   cmd_buffer->seen_bbos.tail = 0;

   *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);
}

void
anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      /* When we start a batch buffer, we subtract a certain amount of
       * padding from the end to ensure that we always have room to emit a
       * BATCH_BUFFER_START to chain to the next BO. We need to remove
       * that padding before we end the batch; otherwise, we may end up
       * with our BATCH_BUFFER_END in another BO.
       */
      cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
      assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);

      anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END, bbe);

      /* Round batch up to an even number of dwords. */
      if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
         anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);

      cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
   } else {
      assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
      /* If this is a secondary command buffer, we need to determine the
       * mode in which it will be executed with vkExecuteCommands. We
       * determine this statically here so that this stays in sync with the
       * actual ExecuteCommands implementation.
       */
      const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
      if (!cmd_buffer->device->can_chain_batches) {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
      } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
                 (length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
         /* If the secondary has exactly one batch buffer in its list *and*
          * that batch buffer is less than half of the maximum size, we're
          * probably better off simply copying it into our batch.
          */
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
      } else if (!(cmd_buffer->usage_flags &
                   VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;

         /* In order to chain, we need this command buffer to contain an
          * MI_BATCH_BUFFER_START which will jump back to the calling batch.
          * It doesn't matter where it points now so long as it has a valid
          * relocation. We'll adjust it later as part of the chaining
          * process.
          *
          * We set the end of the batch a little short so we would be sure we
          * have room for the chaining command. Since we're about to emit the
          * chaining command, let's set it back where it should go.
          */
         cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
         assert(cmd_buffer->batch.start == batch_bo->bo->map);
         assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);

         emit_batch_buffer_start(cmd_buffer, batch_bo->bo, 0);
         assert(cmd_buffer->batch.start == batch_bo->bo->map);
      } else {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
      }
   }

   anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
}
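
/* Summary of the secondary execution modes chosen above (informal):
 *
 *    GROW_AND_EMIT:  no chaining support; the primary grows its batch and
 *                    copies the secondary in.
 *    EMIT:           single, small batch; cheapest to just copy it.
 *    CHAIN:          jump into the secondary's BOs and patch its trailing
 *                    MI_BATCH_BUFFER_START to return to the primary.
 *    COPY_AND_CHAIN: like CHAIN, but on a clone of the BO list so the same
 *                    secondary can be executed simultaneously.
 */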

static VkResult
anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
                             struct list_head *list)
{
   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
      struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
      if (bbo_ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      *bbo_ptr = bbo;
   }

   return VK_SUCCESS;
}

void
anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                             struct anv_cmd_buffer *secondary)
{
   switch (secondary->exec_mode) {
   case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
      anv_batch_emit_batch(&primary->batch, &secondary->batch);
      break;
   case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
      unsigned length = secondary->batch.end - secondary->batch.start;
      VkResult result = anv_batch_bo_grow(primary, bbo, &primary->batch,
                                          length,
                                          GEN8_MI_BATCH_BUFFER_START_length * 4);
      if (result != VK_SUCCESS) {
         anv_batch_set_error(&primary->batch, result);
         return;
      }
      anv_batch_emit_batch(&primary->batch, &secondary->batch);
      break;
   }
   case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
      struct anv_batch_bo *first_bbo =
         list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
      struct anv_batch_bo *last_bbo =
         list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);

      emit_batch_buffer_start(primary, first_bbo->bo, 0);

      struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
      assert(primary->batch.start == this_bbo->bo->map);
      uint32_t offset = primary->batch.next - primary->batch.start;

      /* Make the tail of the secondary point back to right after the
       * MI_BATCH_BUFFER_START in the primary batch.
       */
      anv_batch_bo_link(primary, last_bbo, this_bbo, offset);

      anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
      break;
   }
   case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
      struct list_head copy_list;
      VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
                                                secondary,
                                                &copy_list);
      if (result != VK_SUCCESS)
         return; /* FIXME */

      anv_cmd_buffer_add_seen_bbos(primary, &copy_list);

      struct anv_batch_bo *first_bbo =
         list_first_entry(&copy_list, struct anv_batch_bo, link);
      struct anv_batch_bo *last_bbo =
         list_last_entry(&copy_list, struct anv_batch_bo, link);

      cmd_buffer_chain_to_batch_bo(primary, first_bbo);

      list_splicetail(&copy_list, &primary->batch_bos);

      anv_batch_bo_continue(last_bbo, &primary->batch,
                            GEN8_MI_BATCH_BUFFER_START_length * 4);
      break;
   }
   default:
      assert(!"Invalid execution mode");
   }

   anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
                         &secondary->surface_relocs, 0);
}

struct anv_execbuf {
   struct drm_i915_gem_execbuffer2           execbuf;

   struct drm_i915_gem_exec_object2 *        objects;
   uint32_t                                  bo_count;
   struct anv_bo **                          bos;

   /* Allocated length of the 'objects' and 'bos' arrays */
   uint32_t                                  array_length;

   bool                                      has_relocs;

   const VkAllocationCallbacks *             alloc;
   VkSystemAllocationScope                   alloc_scope;
};
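
/* Rough lifecycle of an anv_execbuf (informal sketch):
 *
 *    struct anv_execbuf execbuf;
 *    anv_execbuf_init(&execbuf);
 *    ... anv_execbuf_add_bo() for every BO the submission touches ...
 *    ... fill execbuf.execbuf and call the execbuffer2 ioctl ...
 *    anv_execbuf_finish(&execbuf);
 *
 * The objects[] and bos[] arrays are kept parallel: objects[i] is the
 * kernel-visible description of bos[i], and anv_bo::index records a BO's
 * position so duplicates can be detected in O(1).
 */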

static void
anv_execbuf_init(struct anv_execbuf *exec)
{
   memset(exec, 0, sizeof(*exec));
}

static void
anv_execbuf_finish(struct anv_execbuf *exec)
{
   vk_free(exec->alloc, exec->objects);
   vk_free(exec->alloc, exec->bos);
}

static VkResult
anv_execbuf_add_bo_bitset(struct anv_device *device,
                          struct anv_execbuf *exec,
                          uint32_t dep_words,
                          BITSET_WORD *deps,
                          uint32_t extra_flags);

static VkResult
anv_execbuf_add_bo(struct anv_device *device,
                   struct anv_execbuf *exec,
                   struct anv_bo *bo,
                   struct anv_reloc_list *relocs,
                   uint32_t extra_flags)
{
   struct drm_i915_gem_exec_object2 *obj = NULL;

   bo = anv_bo_unwrap(bo);

   if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
      obj = &exec->objects[bo->index];

   if (obj == NULL) {
      /* We've never seen this one before. Add it to the list and assign
       * an id that we can use later.
       */
      if (exec->bo_count >= exec->array_length) {
         uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;

         struct drm_i915_gem_exec_object2 *new_objects =
            vk_alloc(exec->alloc, new_len * sizeof(*new_objects), 8, exec->alloc_scope);
         if (new_objects == NULL)
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

         struct anv_bo **new_bos =
            vk_alloc(exec->alloc, new_len * sizeof(*new_bos), 8, exec->alloc_scope);
         if (new_bos == NULL) {
            vk_free(exec->alloc, new_objects);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }

         if (exec->objects) {
            memcpy(new_objects, exec->objects,
                   exec->bo_count * sizeof(*new_objects));
            memcpy(new_bos, exec->bos,
                   exec->bo_count * sizeof(*new_bos));
         }

         vk_free(exec->alloc, exec->objects);
         vk_free(exec->alloc, exec->bos);

         exec->objects = new_objects;
         exec->bos = new_bos;
         exec->array_length = new_len;
      }

      assert(exec->bo_count < exec->array_length);

      bo->index = exec->bo_count++;
      obj = &exec->objects[bo->index];
      exec->bos[bo->index] = bo;

      obj->handle = bo->gem_handle;
      obj->relocation_count = 0;
      obj->relocs_ptr = 0;
      obj->alignment = 0;
      obj->offset = bo->offset;
      obj->flags = bo->flags | extra_flags;
      obj->rsvd1 = 0;
      obj->rsvd2 = 0;
   }

   if (relocs != NULL) {
      assert(obj->relocation_count == 0);

      if (relocs->num_relocs > 0) {
         /* This is the first time we've ever seen a list of relocations for
          * this BO. Go ahead and set the relocations and then walk the list
          * of relocations and add them all.
          */
         exec->has_relocs = true;
         obj->relocation_count = relocs->num_relocs;
         obj->relocs_ptr = (uintptr_t) relocs->relocs;

         for (size_t i = 0; i < relocs->num_relocs; i++) {
            VkResult result;

            /* A quick sanity check on relocations */
            assert(relocs->relocs[i].offset < bo->size);
            result = anv_execbuf_add_bo(device, exec, relocs->reloc_bos[i],
                                        NULL, extra_flags);
            if (result != VK_SUCCESS)
               return result;
         }
      }

      return anv_execbuf_add_bo_bitset(device, exec, relocs->dep_words,
                                       relocs->deps, extra_flags);
   }

   return VK_SUCCESS;
}

/* Add BO dependencies to execbuf */
static VkResult
anv_execbuf_add_bo_bitset(struct anv_device *device,
                          struct anv_execbuf *exec,
                          uint32_t dep_words,
                          BITSET_WORD *deps,
                          uint32_t extra_flags)
{
   for (uint32_t w = 0; w < dep_words; w++) {
      BITSET_WORD mask = deps[w];
      while (mask) {
         int i = u_bit_scan(&mask);
         uint32_t gem_handle = w * BITSET_WORDBITS + i;
         struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
         assert(bo->refcount > 0);
         VkResult result =
            anv_execbuf_add_bo(device, exec, bo, NULL, extra_flags);
         if (result != VK_SUCCESS)
            return result;
      }
   }

   return VK_SUCCESS;
}
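
/* The bitset is indexed directly by gem handle: handle h lives at bit
 * (h % BITSET_WORDBITS) of word (h / BITSET_WORDBITS). For example, with
 * 32-bit words, a dependency on gem handle 70 (a hypothetical value) would
 * have been recorded by anv_reloc_list_add() as bit 6 of deps[2], and the
 * loop above recovers 70 from exactly that word/bit pair.
 */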

static void
anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_reloc_list *list)
{
   for (size_t i = 0; i < list->num_relocs; i++)
      list->relocs[i].target_handle = anv_bo_unwrap(list->reloc_bos[i])->index;
}

static void
adjust_relocations_from_state_pool(struct anv_state_pool *pool,
                                   struct anv_reloc_list *relocs,
                                   uint32_t last_pool_center_bo_offset)
{
   assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
   uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;

   for (size_t i = 0; i < relocs->num_relocs; i++) {
      /* All of the relocations from this block pool to other BO's should
       * have been emitted relative to the surface block pool center. We
       * need to add the center offset to make them relative to the
       * beginning of the actual GEM bo.
       */
      relocs->relocs[i].offset += delta;
   }
}

static void
adjust_relocations_to_state_pool(struct anv_state_pool *pool,
                                 struct anv_bo *from_bo,
                                 struct anv_reloc_list *relocs,
                                 uint32_t last_pool_center_bo_offset)
{
   assert(!from_bo->is_wrapper);
   assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
   uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;

   /* When we initially emit relocations into a block pool, we don't
    * actually know what the final center_bo_offset will be so we just emit
    * it as if center_bo_offset == 0. Now that we know what the center
    * offset is, we need to walk the list of relocations and adjust any
    * relocations that point to the pool bo with the correct offset.
    */
   for (size_t i = 0; i < relocs->num_relocs; i++) {
      if (relocs->reloc_bos[i] == pool->block_pool.bo) {
         /* Adjust the delta value in the relocation to correctly
          * correspond to the new delta. Initially, this value may have
          * been negative (if treated as unsigned), but we trust in
          * uint32_t roll-over to fix that for us at this point.
          */
         relocs->relocs[i].delta += delta;

         /* Since the delta has changed, we need to update the actual
          * relocated value with the new presumed value. This function
          * should only be called on batch buffers, so we know it isn't in
          * use by the GPU at the moment.
          */
         assert(relocs->relocs[i].offset < from_bo->size);
         write_reloc(pool->block_pool.device,
                     from_bo->map + relocs->relocs[i].offset,
                     relocs->relocs[i].presumed_offset +
                     relocs->relocs[i].delta, false);
      }
   }
}

static void
anv_reloc_list_apply(struct anv_device *device,
                     struct anv_reloc_list *list,
                     struct anv_bo *bo,
                     bool always_relocate)
{
   bo = anv_bo_unwrap(bo);

   for (size_t i = 0; i < list->num_relocs; i++) {
      struct anv_bo *target_bo = anv_bo_unwrap(list->reloc_bos[i]);
      if (list->relocs[i].presumed_offset == target_bo->offset &&
          !always_relocate)
         continue;

      void *p = bo->map + list->relocs[i].offset;
      write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
      list->relocs[i].presumed_offset = target_bo->offset;
   }
}

/**
 * This function applies the relocations for a command buffer and writes the
 * actual addresses into the buffers as per what we were told by the kernel on
 * the previous execbuf2 call. This should be safe to do because, for each
 * relocated address, we have two cases:
 *
 * 1) The target BO is inactive (as seen by the kernel). In this case, it is
 *    not in use by the GPU so updating the address is 100% ok. It won't be
 *    in-use by the GPU (from our context) again until the next execbuf2
 *    happens. If the kernel decides to move it in the next execbuf2, it
 *    will have to do the relocations itself, but that's ok because it should
 *    have all of the information needed to do so.
 *
 * 2) The target BO is active (as seen by the kernel). In this case, it
 *    hasn't moved since the last execbuffer2 call because GTT shuffling
 *    *only* happens when the BO is idle. (From our perspective, it only
 *    happens inside the execbuffer2 ioctl, but the shuffling may be
 *    triggered by another ioctl, with full-ppgtt this is limited to only
 *    execbuffer2 ioctls on the same context, or memory pressure.) Since the
 *    target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
 *    address and the relocated value we are writing into the BO will be the
 *    same as the value that is already there.
 *
 *    There is also a possibility that the target BO is active but the exact
 *    RENDER_SURFACE_STATE object we are writing the relocation into isn't in
 *    use. In this case, the address currently in the RENDER_SURFACE_STATE
 *    may be stale but it's still safe to write the relocation because that
 *    particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
 *    won't be until the next execbuf2 call.
 *
 * By doing relocations on the CPU, we can tell the kernel that it doesn't
 * need to bother. We want to do this because the surface state buffer is
 * used by every command buffer so, if the kernel does the relocations, it
 * will always be busy and the kernel will always stall. This is also
 * probably the fastest mechanism for doing relocations since the kernel would
 * have to make a full copy of all the relocation lists.
 */
static bool
relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
                    struct anv_execbuf *exec)
{
   if (!exec->has_relocs)
      return true;

   static int userspace_relocs = -1;
   if (userspace_relocs < 0)
      userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
   if (!userspace_relocs)
      return false;

   /* First, we have to check to see whether or not we can even do the
    * relocation. New buffers which have never been submitted to the kernel
    * don't have a valid offset so we need to let the kernel do relocations so
    * that we can get offsets for them. On future execbuf2 calls, those
    * buffers will have offsets and we will be able to skip relocating.
    * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
    */
   for (uint32_t i = 0; i < exec->bo_count; i++) {
      assert(!exec->bos[i]->is_wrapper);
      if (exec->bos[i]->offset == (uint64_t)-1)
         return false;
   }

   /* Since surface states are shared between command buffers and we don't
    * know what order they will be submitted to the kernel, we don't know
    * what address is actually written in the surface state object at any
    * given time. The only option is to always relocate them.
    */
   struct anv_bo *surface_state_bo =
      anv_bo_unwrap(cmd_buffer->device->surface_state_pool.block_pool.bo);
   anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
                        surface_state_bo,
                        true /* always relocate surface states */);

   /* Since we own all of the batch buffers, we know what values are stored
    * in the relocated addresses and only have to update them if the offsets
    * have changed.
    */
   struct anv_batch_bo **bbo;
   u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
      anv_reloc_list_apply(cmd_buffer->device,
                           &(*bbo)->relocs, (*bbo)->bo, false);
   }

   for (uint32_t i = 0; i < exec->bo_count; i++)
      exec->objects[i].offset = exec->bos[i]->offset;

   return true;
}

static VkResult
setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
                             struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_state_pool *ss_pool =
      &cmd_buffer->device->surface_state_pool;

   adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
                                      cmd_buffer->last_ss_pool_center);
   VkResult result;
   if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
      anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
         result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                     bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }
      /* Add surface dependencies (BOs) to the execbuf */
      anv_execbuf_add_bo_bitset(cmd_buffer->device, execbuf,
                                cmd_buffer->surface_relocs.dep_words,
                                cmd_buffer->surface_relocs.deps, 0);

      /* Add the BOs for all memory objects */
      list_for_each_entry(struct anv_device_memory, mem,
                          &cmd_buffer->device->memory_objects, link) {
         result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                     mem->bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }

      struct anv_block_pool *pool;
      pool = &cmd_buffer->device->dynamic_state_pool.block_pool;
      anv_block_pool_foreach_bo(bo, pool) {
         result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                     bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }

      pool = &cmd_buffer->device->instruction_state_pool.block_pool;
      anv_block_pool_foreach_bo(bo, pool) {
         result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                     bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }

      pool = &cmd_buffer->device->binding_table_pool.block_pool;
      anv_block_pool_foreach_bo(bo, pool) {
         result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                     bo, NULL, 0);
         if (result != VK_SUCCESS)
            return result;
      }
   } else {
      /* Since we aren't in the softpin case, all of our STATE_BASE_ADDRESS BOs
       * will get added automatically by processing relocations on the batch
       * buffer. We have to add the surface state BO manually because it has
       * relocations of its own that we need to be sure are processed.
       */
      result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                  ss_pool->block_pool.bo,
                                  &cmd_buffer->surface_relocs, 0);
      if (result != VK_SUCCESS)
         return result;
   }

   /* First, we walk over all of the bos we've seen and add them and their
    * relocations to the validate list.
    */
   struct anv_batch_bo **bbo;
   u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
      adjust_relocations_to_state_pool(ss_pool, (*bbo)->bo, &(*bbo)->relocs,
                                       cmd_buffer->last_ss_pool_center);

      result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
                                  (*bbo)->bo, &(*bbo)->relocs, 0);
      if (result != VK_SUCCESS)
         return result;
   }

   /* Now that we've adjusted all of the surface state relocations, we need to
    * record the surface state pool center so future executions of the command
    * buffer can adjust correctly.
    */
   cmd_buffer->last_ss_pool_center = ss_pool->block_pool.center_bo_offset;

   struct anv_batch_bo *first_batch_bo =
      list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);

   /* The kernel requires that the last entry in the validation list be the
    * batch buffer to execute. We can simply swap the element
    * corresponding to the first batch_bo in the chain with the last
    * element in the list.
    */
   if (first_batch_bo->bo->index != execbuf->bo_count - 1) {
      uint32_t idx = first_batch_bo->bo->index;
      uint32_t last_idx = execbuf->bo_count - 1;

      struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
      assert(execbuf->bos[idx] == first_batch_bo->bo);

      execbuf->objects[idx] = execbuf->objects[last_idx];
      execbuf->bos[idx] = execbuf->bos[last_idx];
      execbuf->bos[idx]->index = idx;

      execbuf->objects[last_idx] = tmp_obj;
      execbuf->bos[last_idx] = first_batch_bo->bo;
      first_batch_bo->bo->index = last_idx;
   }

   /* If we are pinning our BOs, we shouldn't have to relocate anything */
   if (cmd_buffer->device->instance->physicalDevice.use_softpin)
      assert(!execbuf->has_relocs);

   /* Now we go through and fixup all of the relocation lists to point to
    * the correct indices in the object array. We have to do this after we
    * reorder the list above as some of the indices may have changed.
    */
   if (execbuf->has_relocs) {
      u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
         anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);

      anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
   }

   if (!cmd_buffer->device->info.has_llc) {
      __builtin_ia32_mfence();
      u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
         for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
            __builtin_ia32_clflush((*bbo)->bo->map + i);
      }
   }

   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) execbuf->objects,
      .buffer_count = execbuf->bo_count,
      .batch_start_offset = 0,
      .batch_len = batch->next - batch->start,
      .cliprects_ptr = 0,
      .num_cliprects = 0,
      .DR1 = 0,
      .DR4 = 0,
      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
      .rsvd1 = cmd_buffer->device->context_id,
      .rsvd2 = 0,
   };

   if (relocate_cmd_buffer(cmd_buffer, execbuf)) {
      /* If we were able to successfully relocate everything, tell the kernel
       * that it can skip doing relocations. The requirements for using
       * NO_RELOC are:
       *
       * 1) The addresses written in the objects must match the corresponding
       *    reloc.presumed_offset which in turn must match the corresponding
       *    execobject.offset.
       *
       * 2) To avoid stalling, execobject.offset should match the current
       *    address of that object within the active context.
       *
       * In order to satisfy all of the invariants that make userspace
       * relocations safe (see relocate_cmd_buffer()), we need to further
       * ensure that the addresses we use match those used by the kernel for
       * the most recent execbuf2.
       *
       * The kernel may still choose to do relocations anyway if something has
       * moved in the GTT. In this case, the relocation list still needs to be
       * valid. All relocations on the batch buffers are already valid and
       * kept up-to-date. For surface state relocations, by applying the
       * relocations in relocate_cmd_buffer, we ensured that the address in
       * the RENDER_SURFACE_STATE matches presumed_offset, so it should be
       * safe for the kernel to relocate them as needed.
       */
      execbuf->execbuf.flags |= I915_EXEC_NO_RELOC;
   } else {
      /* In the case where we fall back to doing kernel relocations, we need
       * to ensure that the relocation list is valid. All relocations on the
       * batch buffers are already valid and kept up-to-date. Since surface
       * states are shared between command buffers and we don't know what
       * order they will be submitted to the kernel, we don't know what
       * address is actually written in the surface state object at any given
       * time. The only option is to set a bogus presumed offset and let the
       * kernel relocate them.
       */
      for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
         cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
   }

   return VK_SUCCESS;
}

static VkResult
setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_device *device)
{
   VkResult result = anv_execbuf_add_bo(device, execbuf,
                                        device->trivial_batch_bo,
                                        NULL, 0);
   if (result != VK_SUCCESS)
      return result;

   execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) execbuf->objects,
      .buffer_count = execbuf->bo_count,
      .batch_start_offset = 0,
      .batch_len = 8, /* GEN7_MI_BATCH_BUFFER_END and NOOP */
      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
      .rsvd1 = device->context_id,
      .rsvd2 = 0,
   };

   return VK_SUCCESS;
}

/* We lock around execbuf for three main reasons:
 *
 * 1) When a block pool is resized, we create a new gem handle with a
 *    different size and, in the case of surface states, possibly a different
 *    center offset but we re-use the same anv_bo struct when we do so. If
 *    this happens in the middle of setting up an execbuf, we could end up
 *    with our list of BOs out of sync with our list of gem handles.
 *
 * 2) The algorithm we use for building the list of unique buffers isn't
 *    thread-safe. While the client is supposed to synchronize around
 *    QueueSubmit, this would be extremely difficult to debug if it ever came
 *    up in the wild due to a broken app. It's better to play it safe and
 *    just lock around QueueSubmit.
 *
 * 3) The anv_cmd_buffer_execbuf function may perform relocations in
 *    userspace. Due to the fact that the surface state buffer is shared
 *    between batches, we can't afford to have that happen from multiple
 *    threads at the same time. Even though the user is supposed to ensure
 *    this doesn't happen, we play it safe as in (2) above.
 *
 * Since the only other things that ever take the device lock, such as block
 * pool resizes, happen only rarely, this will almost never be contended, so
 * taking a lock isn't really an expensive operation in this case.
 */
VkResult
anv_queue_execbuf_locked(struct anv_queue *queue,
                         struct anv_queue_submit *submit)
{
   struct anv_device *device = queue->device;
   struct anv_execbuf execbuf;
   anv_execbuf_init(&execbuf);
   execbuf.alloc = submit->alloc;
   execbuf.alloc_scope = submit->alloc_scope;

   VkResult result;

   for (uint32_t i = 0; i < submit->fence_bo_count; i++) {
      int signaled;
      struct anv_bo *bo = anv_unpack_ptr(submit->fence_bos[i], 1, &signaled);

      result = anv_execbuf_add_bo(device, &execbuf, bo, NULL,
                                  signaled ? EXEC_OBJECT_WRITE : 0);
      if (result != VK_SUCCESS)
         goto error;
   }

   if (submit->cmd_buffer) {
      result = setup_execbuf_for_cmd_buffer(&execbuf, submit->cmd_buffer);
   } else if (submit->simple_bo) {
      result = anv_execbuf_add_bo(device, &execbuf, submit->simple_bo, NULL, 0);
      if (result != VK_SUCCESS)
         goto error;

      execbuf.execbuf = (struct drm_i915_gem_execbuffer2) {
         .buffers_ptr = (uintptr_t) execbuf.objects,
         .buffer_count = execbuf.bo_count,
         .batch_start_offset = 0,
         .batch_len = submit->simple_bo_size,
         .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
         .rsvd1 = device->context_id,
         .rsvd2 = 0,
      };
   } else {
      result = setup_empty_execbuf(&execbuf, queue->device);
   }

   if (result != VK_SUCCESS)
      goto error;

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
      if (submit->cmd_buffer) {
         struct anv_batch_bo **bo = u_vector_tail(&submit->cmd_buffer->seen_bbos);

         device->cmd_buffer_being_decoded = submit->cmd_buffer;
         gen_print_batch(&device->decoder_ctx, (*bo)->bo->map,
                         (*bo)->bo->size, (*bo)->bo->offset, false);
         device->cmd_buffer_being_decoded = NULL;
      } else if (submit->simple_bo) {
         gen_print_batch(&device->decoder_ctx, submit->simple_bo->map,
                         submit->simple_bo->size, submit->simple_bo->offset, false);
      } else {
         gen_print_batch(&device->decoder_ctx,
                         device->trivial_batch_bo->map,
                         device->trivial_batch_bo->size,
                         device->trivial_batch_bo->offset, false);
      }
   }

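   /* With I915_EXEC_FENCE_ARRAY, the kernel reuses the otherwise-unused
    * cliprects fields to pass the syncobj array: num_cliprects holds the
    * fence count and cliprects_ptr points to an array of
    * drm_i915_gem_exec_fence. This is how semaphores backed by DRM syncobjs
    * get signaled and waited on at submission time.
    */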
   if (submit->fence_count > 0) {
      assert(device->instance->physicalDevice.has_syncobj);
      execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
      execbuf.execbuf.num_cliprects = submit->fence_count;
      execbuf.execbuf.cliprects_ptr = (uintptr_t)submit->fences;
   }

   if (submit->in_fence != -1) {
      execbuf.execbuf.flags |= I915_EXEC_FENCE_IN;
      execbuf.execbuf.rsvd2 |= (uint32_t)submit->in_fence;
   }

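   /* With I915_EXEC_FENCE_IN/OUT, rsvd2 is split in two: the low 32 bits
    * carry the input sync file fd and, after the ioctl, the high 32 bits
    * return the output sync file fd (extracted below with rsvd2 >> 32).
    */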
   if (submit->need_out_fence)
      execbuf.execbuf.flags |= I915_EXEC_FENCE_OUT;

   int ret = queue->device->no_hw ? 0 :
      anv_gem_execbuffer(queue->device, &execbuf.execbuf);
   if (ret) {
      result = anv_queue_set_lost(queue, "execbuf2 failed: %m");
   }

   struct drm_i915_gem_exec_object2 *objects = execbuf.objects;
   for (uint32_t k = 0; k < execbuf.bo_count; k++) {
      if (execbuf.bos[k]->flags & EXEC_OBJECT_PINNED)
         assert(execbuf.bos[k]->offset == objects[k].offset);
      execbuf.bos[k]->offset = objects[k].offset;
   }

   if (result == VK_SUCCESS && submit->need_out_fence)
      submit->out_fence = execbuf.execbuf.rsvd2 >> 32;

 error:
   pthread_cond_broadcast(&device->queue_submit);

   anv_execbuf_finish(&execbuf);

   return result;
}