anv: Don't even bother processing relocs if we have softpin
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "genxml/gen8_pack.h"
33
34 #include "util/debug.h"
35
36 /** \file anv_batch_chain.c
37 *
38 * This file contains functions related to anv_cmd_buffer as a data
39 * structure. This involves everything required to create and destroy
40 * the actual batch buffers as well as link them together and handle
41 * relocations and surface state. It specifically does *not* contain any
42 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
43 */
44
45 /*-----------------------------------------------------------------------*
46 * Functions related to anv_reloc_list
47 *-----------------------------------------------------------------------*/
48
49 static VkResult
50 anv_reloc_list_init_clone(struct anv_reloc_list *list,
51 const VkAllocationCallbacks *alloc,
52 const struct anv_reloc_list *other_list)
53 {
54 if (other_list) {
55 list->num_relocs = other_list->num_relocs;
56 list->array_length = other_list->array_length;
57 } else {
58 list->num_relocs = 0;
59 list->array_length = 256;
60 }
61
62 list->relocs =
63 vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
64 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
65
66 if (list->relocs == NULL)
67 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
68
69 list->reloc_bos =
70 vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
71 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
72
73 if (list->reloc_bos == NULL) {
74 vk_free(alloc, list->relocs);
75 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
76 }
77
78 list->deps = _mesa_set_create(NULL, _mesa_hash_pointer,
79 _mesa_key_pointer_equal);
80
81 if (!list->deps) {
82 vk_free(alloc, list->relocs);
83 vk_free(alloc, list->reloc_bos);
84 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
85 }
86
87 if (other_list) {
88 memcpy(list->relocs, other_list->relocs,
89 list->array_length * sizeof(*list->relocs));
90 memcpy(list->reloc_bos, other_list->reloc_bos,
91 list->array_length * sizeof(*list->reloc_bos));
92 struct set_entry *entry;
93 set_foreach(other_list->deps, entry) {
94 _mesa_set_add_pre_hashed(list->deps, entry->hash, entry->key);
95 }
96 }
97
98 return VK_SUCCESS;
99 }
100
101 VkResult
102 anv_reloc_list_init(struct anv_reloc_list *list,
103 const VkAllocationCallbacks *alloc)
104 {
105 return anv_reloc_list_init_clone(list, alloc, NULL);
106 }
107
108 void
109 anv_reloc_list_finish(struct anv_reloc_list *list,
110 const VkAllocationCallbacks *alloc)
111 {
112 vk_free(alloc, list->relocs);
113 vk_free(alloc, list->reloc_bos);
114 _mesa_set_destroy(list->deps, NULL);
115 }
116
117 static VkResult
118 anv_reloc_list_grow(struct anv_reloc_list *list,
119 const VkAllocationCallbacks *alloc,
120 size_t num_additional_relocs)
121 {
122 if (list->num_relocs + num_additional_relocs <= list->array_length)
123 return VK_SUCCESS;
124
125 size_t new_length = list->array_length * 2;
126 while (new_length < list->num_relocs + num_additional_relocs)
127 new_length *= 2;
128
129 struct drm_i915_gem_relocation_entry *new_relocs =
130 vk_alloc(alloc, new_length * sizeof(*list->relocs), 8,
131 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
132 if (new_relocs == NULL)
133 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
134
135 struct anv_bo **new_reloc_bos =
136 vk_alloc(alloc, new_length * sizeof(*list->reloc_bos), 8,
137 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
138 if (new_reloc_bos == NULL) {
139 vk_free(alloc, new_relocs);
140 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
141 }
142
143 memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
144 memcpy(new_reloc_bos, list->reloc_bos,
145 list->num_relocs * sizeof(*list->reloc_bos));
146
147 vk_free(alloc, list->relocs);
148 vk_free(alloc, list->reloc_bos);
149
150 list->array_length = new_length;
151 list->relocs = new_relocs;
152 list->reloc_bos = new_reloc_bos;
153
154 return VK_SUCCESS;
155 }
156
157 VkResult
158 anv_reloc_list_add(struct anv_reloc_list *list,
159 const VkAllocationCallbacks *alloc,
160 uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
161 {
162 struct drm_i915_gem_relocation_entry *entry;
163 int index;
164
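/* With softpin, the kernel never moves a pinned BO, so its address is known
 * up front and no relocation entry is needed. Just record the BO as a
 * dependency so it still lands in the execbuf validation list.
 */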
165 if (target_bo->flags & EXEC_OBJECT_PINNED) {
166 _mesa_set_add(list->deps, target_bo);
167 return VK_SUCCESS;
168 }
169
170 VkResult result = anv_reloc_list_grow(list, alloc, 1);
171 if (result != VK_SUCCESS)
172 return result;
173
174 /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
175 index = list->num_relocs++;
176 list->reloc_bos[index] = target_bo;
177 entry = &list->relocs[index];
178 entry->target_handle = target_bo->gem_handle;
179 entry->delta = delta;
180 entry->offset = offset;
181 entry->presumed_offset = target_bo->offset;
182 entry->read_domains = 0;
183 entry->write_domain = 0;
184 VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
185
186 return VK_SUCCESS;
187 }
188
189 static VkResult
190 anv_reloc_list_append(struct anv_reloc_list *list,
191 const VkAllocationCallbacks *alloc,
192 struct anv_reloc_list *other, uint32_t offset)
193 {
194 VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
195 if (result != VK_SUCCESS)
196 return result;
197
198 memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
199 other->num_relocs * sizeof(other->relocs[0]));
200 memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
201 other->num_relocs * sizeof(other->reloc_bos[0]));
202
203 for (uint32_t i = 0; i < other->num_relocs; i++)
204 list->relocs[i + list->num_relocs].offset += offset;
205
206 list->num_relocs += other->num_relocs;
207
208 struct set_entry *entry;
209 set_foreach(other->deps, entry) {
210 _mesa_set_add_pre_hashed(list->deps, entry->hash, entry->key);
211 }
212
213 return VK_SUCCESS;
214 }
215
216 /*-----------------------------------------------------------------------*
217 * Functions related to anv_batch
218 *-----------------------------------------------------------------------*/
219
220 void *
221 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
222 {
223 if (batch->next + num_dwords * 4 > batch->end) {
224 VkResult result = batch->extend_cb(batch, batch->user_data);
225 if (result != VK_SUCCESS) {
226 anv_batch_set_error(batch, result);
227 return NULL;
228 }
229 }
230
231 void *p = batch->next;
232
233 batch->next += num_dwords * 4;
234 assert(batch->next <= batch->end);
235
236 return p;
237 }
238
239 uint64_t
240 anv_batch_emit_reloc(struct anv_batch *batch,
241 void *location, struct anv_bo *bo, uint32_t delta)
242 {
243 VkResult result = anv_reloc_list_add(batch->relocs, batch->alloc,
244 location - batch->start, bo, delta);
245 if (result != VK_SUCCESS) {
246 anv_batch_set_error(batch, result);
247 return 0;
248 }
249
250 return bo->offset + delta;
251 }
252
253 void
254 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
255 {
256 uint32_t size, offset;
257
258 size = other->next - other->start;
259 assert(size % 4 == 0);
260
261 if (batch->next + size > batch->end) {
262 VkResult result = batch->extend_cb(batch, batch->user_data);
263 if (result != VK_SUCCESS) {
264 anv_batch_set_error(batch, result);
265 return;
266 }
267 }
268
269 assert(batch->next + size <= batch->end);
270
271 VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
272 memcpy(batch->next, other->start, size);
273
274 offset = batch->next - batch->start;
275 VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
276 other->relocs, offset);
277 if (result != VK_SUCCESS) {
278 anv_batch_set_error(batch, result);
279 return;
280 }
281
282 batch->next += size;
283 }
284
285 /*-----------------------------------------------------------------------*
286 * Functions related to anv_batch_bo
287 *-----------------------------------------------------------------------*/
288
289 static VkResult
290 anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
291 struct anv_batch_bo **bbo_out)
292 {
293 VkResult result;
294
295 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
296 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
297 if (bbo == NULL)
298 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
299
300 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
301 ANV_CMD_BUFFER_BATCH_SIZE);
302 if (result != VK_SUCCESS)
303 goto fail_alloc;
304
305 result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
306 if (result != VK_SUCCESS)
307 goto fail_bo_alloc;
308
309 *bbo_out = bbo;
310
311 return VK_SUCCESS;
312
313 fail_bo_alloc:
314 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
315 fail_alloc:
316 vk_free(&cmd_buffer->pool->alloc, bbo);
317
318 return result;
319 }
320
321 static VkResult
322 anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
323 const struct anv_batch_bo *other_bbo,
324 struct anv_batch_bo **bbo_out)
325 {
326 VkResult result;
327
328 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
329 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
330 if (bbo == NULL)
331 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
332
333 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
334 other_bbo->bo.size);
335 if (result != VK_SUCCESS)
336 goto fail_alloc;
337
338 result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
339 &other_bbo->relocs);
340 if (result != VK_SUCCESS)
341 goto fail_bo_alloc;
342
343 bbo->length = other_bbo->length;
344 memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);
345
346 *bbo_out = bbo;
347
348 return VK_SUCCESS;
349
350 fail_bo_alloc:
351 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
352 fail_alloc:
353 vk_free(&cmd_buffer->pool->alloc, bbo);
354
355 return result;
356 }
357
358 static void
359 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
360 size_t batch_padding)
361 {
362 batch->next = batch->start = bbo->bo.map;
363 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
364 batch->relocs = &bbo->relocs;
365 bbo->relocs.num_relocs = 0;
366 _mesa_set_clear(bbo->relocs.deps, NULL);
367 }
368
369 static void
370 anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
371 size_t batch_padding)
372 {
373 batch->start = bbo->bo.map;
374 batch->next = bbo->bo.map + bbo->length;
375 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
376 batch->relocs = &bbo->relocs;
377 }
378
379 static void
380 anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
381 {
382 assert(batch->start == bbo->bo.map);
383 bbo->length = batch->next - batch->start;
384 VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
385 }
386
387 static VkResult
388 anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
389 struct anv_batch *batch, size_t additional,
390 size_t batch_padding)
391 {
392 assert(batch->start == bbo->bo.map);
393 bbo->length = batch->next - batch->start;
394
395 size_t new_size = bbo->bo.size;
396 while (new_size <= bbo->length + additional + batch_padding)
397 new_size *= 2;
398
399 if (new_size == bbo->bo.size)
400 return VK_SUCCESS;
401
402 struct anv_bo new_bo;
403 VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
404 &new_bo, new_size);
405 if (result != VK_SUCCESS)
406 return result;
407
408 memcpy(new_bo.map, bbo->bo.map, bbo->length);
409
410 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
411
412 bbo->bo = new_bo;
413 anv_batch_bo_continue(bbo, batch, batch_padding);
414
415 return VK_SUCCESS;
416 }
417
418 static void
419 anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
420 struct anv_batch_bo *prev_bbo,
421 struct anv_batch_bo *next_bbo,
422 uint32_t next_bbo_offset)
423 {
424 MAYBE_UNUSED const uint32_t bb_start_offset =
425 prev_bbo->length - GEN8_MI_BATCH_BUFFER_START_length * 4;
426 MAYBE_UNUSED const uint32_t *bb_start = prev_bbo->bo.map + bb_start_offset;
427
428 /* Make sure we're looking at a MI_BATCH_BUFFER_START */
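/* The command type in bits 31:29 must be 0 (an MI command) and the MI opcode
 * in bits 28:23 must be 49 (0x31), which is MI_BATCH_BUFFER_START.
 */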
429 assert(((*bb_start >> 29) & 0x07) == 0);
430 assert(((*bb_start >> 23) & 0x3f) == 49);
431
432 if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
433 assert(prev_bbo->bo.flags & EXEC_OBJECT_PINNED);
434 assert(next_bbo->bo.flags & EXEC_OBJECT_PINNED);
435
436 write_reloc(cmd_buffer->device,
437 prev_bbo->bo.map + bb_start_offset + 4,
438 next_bbo->bo.offset + next_bbo_offset, true);
439 } else {
440 uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
441 assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);
442
443 prev_bbo->relocs.reloc_bos[reloc_idx] = &next_bbo->bo;
444 prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;
445
446 /* Use a bogus presumed offset to force a relocation */
447 prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
448 }
449 }
450
451 static void
452 anv_batch_bo_destroy(struct anv_batch_bo *bbo,
453 struct anv_cmd_buffer *cmd_buffer)
454 {
455 anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
456 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
457 vk_free(&cmd_buffer->pool->alloc, bbo);
458 }
459
460 static VkResult
461 anv_batch_bo_list_clone(const struct list_head *list,
462 struct anv_cmd_buffer *cmd_buffer,
463 struct list_head *new_list)
464 {
465 VkResult result = VK_SUCCESS;
466
467 list_inithead(new_list);
468
469 struct anv_batch_bo *prev_bbo = NULL;
470 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
471 struct anv_batch_bo *new_bbo = NULL;
472 result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
473 if (result != VK_SUCCESS)
474 break;
475 list_addtail(&new_bbo->link, new_list);
476
477 if (prev_bbo)
478 anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);
479
480 prev_bbo = new_bbo;
481 }
482
483 if (result != VK_SUCCESS) {
484 list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
485 anv_batch_bo_destroy(bbo, cmd_buffer);
486 }
487
488 return result;
489 }
490
491 /*-----------------------------------------------------------------------*
492 * Functions related to anv_batch_bo
493 *-----------------------------------------------------------------------*/
494
495 static struct anv_batch_bo *
496 anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
497 {
498 return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
499 }
500
501 struct anv_address
502 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
503 {
504 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
505 return (struct anv_address) {
506 .bo = &anv_binding_table_pool(cmd_buffer->device)->block_pool.bo,
507 .offset = bt_block->offset,
508 };
509 }
510
511 static void
512 emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
513 struct anv_bo *bo, uint32_t offset)
514 {
515 /* In gen8+ the address field grew to two dwords to accommodate 48-bit
516 * offsets. The high 16 bits are in the last dword, so we can use the gen8
517 * version in either case, as long as we set the instruction length in the
518 * header accordingly. This means that we always emit three dwords here
519 * and all the padding and adjustment we do in this file works for all
520 * gens.
521 */
522
523 #define GEN7_MI_BATCH_BUFFER_START_length 2
524 #define GEN7_MI_BATCH_BUFFER_START_length_bias 2
525
526 const uint32_t gen7_length =
527 GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
528 const uint32_t gen8_length =
529 GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;
530
531 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
532 bbs.DWordLength = cmd_buffer->device->info.gen < 8 ?
533 gen7_length : gen8_length;
534 bbs._2ndLevelBatchBuffer = _1stlevelbatch;
535 bbs.AddressSpaceIndicator = ASI_PPGTT;
536 bbs.BatchBufferStartAddress = (struct anv_address) { bo, offset };
537 }
538 }
539
540 static void
541 cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
542 struct anv_batch_bo *bbo)
543 {
544 struct anv_batch *batch = &cmd_buffer->batch;
545 struct anv_batch_bo *current_bbo =
546 anv_cmd_buffer_current_batch_bo(cmd_buffer);
547
548 /* We set the end of the batch a little short so we would be sure we
549 * have room for the chaining command. Since we're about to emit the
550 * chaining command, let's set it back where it should go.
551 */
552 batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
553 assert(batch->end == current_bbo->bo.map + current_bbo->bo.size);
554
555 emit_batch_buffer_start(cmd_buffer, &bbo->bo, 0);
556
557 anv_batch_bo_finish(current_bbo, batch);
558 }
559
560 static VkResult
561 anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
562 {
563 struct anv_cmd_buffer *cmd_buffer = _data;
564 struct anv_batch_bo *new_bbo;
565
566 VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
567 if (result != VK_SUCCESS)
568 return result;
569
570 struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
571 if (seen_bbo == NULL) {
572 anv_batch_bo_destroy(new_bbo, cmd_buffer);
573 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
574 }
575 *seen_bbo = new_bbo;
576
577 cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
578
579 list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
580
581 anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);
582
583 return VK_SUCCESS;
584 }
585
586 static VkResult
587 anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
588 {
589 struct anv_cmd_buffer *cmd_buffer = _data;
590 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
591
592 anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
593 GEN8_MI_BATCH_BUFFER_START_length * 4);
594
595 return VK_SUCCESS;
596 }
597
598 /** Allocate a binding table
599 *
600 * This function allocates a binding table. This is a bit more complicated
601 * than one would think due to a combination of Vulkan driver design and some
602 * unfortunate hardware restrictions.
603 *
604 * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
605 * the binding table pointer which means that all binding tables need to live
606 * in the bottom 64k of surface state base address. The way the GL driver has
607 * classically dealt with this restriction is to emit all surface states
608 * on-the-fly into the batch and have a batch buffer smaller than 64k. This
609 * isn't really an option in Vulkan for a couple of reasons:
610 *
611 * 1) In Vulkan, we have growing (or chaining) batches so surface states have
612 * to live in their own buffer and we have to be able to re-emit
613 * STATE_BASE_ADDRESS as needed which requires a full pipeline stall. In
614 * order to avoid emitting STATE_BASE_ADDRESS any more often than needed
615 * (it's not that hard to hit 64k of just binding tables), we allocate
616 * surface state objects up-front when VkImageView is created. In order
617 * for this to work, surface state objects need to be allocated from a
618 * global buffer.
619 *
620 * 2) We tried to design the surface state system in such a way that it's
621 * already ready for bindless texturing. The way bindless texturing works
622 * on our hardware is that you have a big pool of surface state objects
623 * (with its own state base address) and the bindless handles are simply
624 * offsets into that pool. With the architecture we chose, we already
625 * have that pool and it's exactly the same pool that we use for regular
626 * surface states so we should already be ready for bindless.
627 *
628 * 3) For render targets, we need to be able to fill out the surface states
629 * later in vkBeginRenderPass so that we can assign clear colors
630 * correctly. One way to do this would be to just create the surface
631 * state data and then repeatedly copy it into the surface state BO every
632 * time we have to re-emit STATE_BASE_ADDRESS. While this works, it's
633 * rather annoying, and it's much simpler to just allocate them up-front
634 * and re-use them for the entire render pass.
635 *
636 * While none of these are technically blockers for emitting state on the fly
637 * like we do in GL, the ability to have a single surface state pool
638 * simplifies things greatly. Unfortunately, it comes at a cost...
639 *
640 * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
641 * place the binding tables just anywhere in surface state base address.
642 * Because 64k isn't a whole lot of space, we can't simply restrict the
643 * surface state buffer to 64k; we have to be more clever. The solution we've
644 * chosen is to have a block pool with a maximum size of 2G that starts at
645 * zero and grows in both directions. All surface states are allocated from
646 * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
647 * binding tables from the bottom of the pool (negative offsets). Every time
648 * we allocate a new binding table block, we set surface state base address to
649 * point to the bottom of the binding table block. This way all of the
650 * binding tables in the block are in the bottom 64k of surface state base
651 * address. When we fill out the binding table, we add the distance between
652 * the bottom of our binding table block and zero of the block pool to the
653 * surface state offsets so that they are correct relative to our new surface
654 * state base address at the bottom of the binding table block.
655 *
656 * \see adjust_relocations_from_state_pool()
657 * \see adjust_relocations_to_state_pool()
658 *
659 * \param[in] entries The number of surface state entries the binding
660 * table should be able to hold.
661 *
662 * \param[out] state_offset The offset from surface state base address
663 * where the surface states live. This must be
664 * added to the surface state offset when it is
665 * written into the binding table entry.
666 *
667 * \return An anv_state representing the binding table
668 */
669 struct anv_state
670 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
671 uint32_t entries, uint32_t *state_offset)
672 {
673 struct anv_device *device = cmd_buffer->device;
674 struct anv_state_pool *state_pool = &device->surface_state_pool;
675 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
676 struct anv_state state;
677
678 state.alloc_size = align_u32(entries * 4, 32);
679
680 if (cmd_buffer->bt_next + state.alloc_size > state_pool->block_size)
681 return (struct anv_state) { 0 };
682
683 state.offset = cmd_buffer->bt_next;
684 state.map = anv_binding_table_pool(device)->block_pool.map +
685 bt_block->offset + state.offset;
686
687 cmd_buffer->bt_next += state.alloc_size;
688
689 if (device->instance->physicalDevice.use_softpin) {
690 assert(bt_block->offset >= 0);
691 *state_offset = device->surface_state_pool.block_pool.start_address -
692 device->binding_table_pool.block_pool.start_address - bt_block->offset;
693 } else {
694 assert(bt_block->offset < 0);
695 *state_offset = -bt_block->offset;
696 }
697
698 return state;
699 }
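
/* A worked example of the math above (hypothetical numbers, not derived from
 * any real allocation): without softpin, suppose the current binding table
 * block sits at bt_block->offset == -8192 in the two-sided block pool and a
 * surface state was allocated at +256 relative to the pool's zero point.
 * Surface state base address points at the bottom of the binding table block,
 * so *state_offset == 8192 and the value written into the binding table entry
 * is 256 + 8192 == 8448, i.e. the surface state's distance from the new base.
 */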
700
701 struct anv_state
702 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
703 {
704 struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
705 return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
706 isl_dev->ss.size, isl_dev->ss.align);
707 }
708
709 struct anv_state
710 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
711 uint32_t size, uint32_t alignment)
712 {
713 return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
714 size, alignment);
715 }
716
717 VkResult
718 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
719 {
720 struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
721 if (bt_block == NULL) {
722 anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
723 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
724 }
725
726 *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
727 cmd_buffer->bt_next = 0;
728
729 return VK_SUCCESS;
730 }
731
732 VkResult
733 anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
734 {
735 struct anv_batch_bo *batch_bo;
736 VkResult result;
737
738 list_inithead(&cmd_buffer->batch_bos);
739
740 result = anv_batch_bo_create(cmd_buffer, &batch_bo);
741 if (result != VK_SUCCESS)
742 return result;
743
744 list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
745
746 cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
747 cmd_buffer->batch.user_data = cmd_buffer;
748
749 if (cmd_buffer->device->can_chain_batches) {
750 cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
751 } else {
752 cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
753 }
754
755 anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
756 GEN8_MI_BATCH_BUFFER_START_length * 4);
757
758 int success = u_vector_init(&cmd_buffer->seen_bbos,
759 sizeof(struct anv_bo *),
760 8 * sizeof(struct anv_bo *));
761 if (!success)
762 goto fail_batch_bo;
763
764 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
765
766 /* u_vector requires power-of-two size elements */
767 unsigned pow2_state_size = util_next_power_of_two(sizeof(struct anv_state));
768 success = u_vector_init(&cmd_buffer->bt_block_states,
769 pow2_state_size, 8 * pow2_state_size);
770 if (!success)
771 goto fail_seen_bbos;
772
773 result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
774 &cmd_buffer->pool->alloc);
775 if (result != VK_SUCCESS)
776 goto fail_bt_blocks;
777 cmd_buffer->last_ss_pool_center = 0;
778
779 result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
780 if (result != VK_SUCCESS)
781 goto fail_bt_blocks;
782
783 return VK_SUCCESS;
784
785 fail_bt_blocks:
786 u_vector_finish(&cmd_buffer->bt_block_states);
787 fail_seen_bbos:
788 u_vector_finish(&cmd_buffer->seen_bbos);
789 fail_batch_bo:
790 anv_batch_bo_destroy(batch_bo, cmd_buffer);
791
792 return result;
793 }
794
795 void
796 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
797 {
798 struct anv_state *bt_block;
799 u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
800 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
801 u_vector_finish(&cmd_buffer->bt_block_states);
802
803 anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
804
805 u_vector_finish(&cmd_buffer->seen_bbos);
806
807 /* Destroy all of the batch buffers */
808 list_for_each_entry_safe(struct anv_batch_bo, bbo,
809 &cmd_buffer->batch_bos, link) {
810 anv_batch_bo_destroy(bbo, cmd_buffer);
811 }
812 }
813
814 void
815 anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
816 {
817 /* Delete all but the first batch bo */
818 assert(!list_empty(&cmd_buffer->batch_bos));
819 while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
820 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
821 list_del(&bbo->link);
822 anv_batch_bo_destroy(bbo, cmd_buffer);
823 }
824 assert(!list_empty(&cmd_buffer->batch_bos));
825
826 anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
827 &cmd_buffer->batch,
828 GEN8_MI_BATCH_BUFFER_START_length * 4);
829
830 while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
831 struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
832 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
833 }
834 assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
835 cmd_buffer->bt_next = 0;
836
837 cmd_buffer->surface_relocs.num_relocs = 0;
838 _mesa_set_clear(cmd_buffer->surface_relocs.deps, NULL);
839 cmd_buffer->last_ss_pool_center = 0;
840
841 /* Reset the list of seen buffers */
842 cmd_buffer->seen_bbos.head = 0;
843 cmd_buffer->seen_bbos.tail = 0;
844
845 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) =
846 anv_cmd_buffer_current_batch_bo(cmd_buffer);
847 }
848
849 void
850 anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
851 {
852 struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
853
854 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
855 /* When we start a batch buffer, we subtract a certain amount of
856 * padding from the end to ensure that we always have room to emit a
857 * BATCH_BUFFER_START to chain to the next BO. We need to remove
858 * that padding before we end the batch; otherwise, we may end up
859 * with our BATCH_BUFFER_END in another BO.
860 */
861 cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
862 assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);
863
864 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END, bbe);
865
866 /* Round batch up to an even number of dwords. */
867 if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
868 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);
869
870 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
871 } else {
872 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
873 /* If this is a secondary command buffer, we need to determine the
874 * mode in which it will be executed with vkCmdExecuteCommands. We
875 * determine this statically here so that this stays in sync with the
876 * actual ExecuteCommands implementation.
877 */
878 const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
879 if (!cmd_buffer->device->can_chain_batches) {
880 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
881 } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
882 (length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
883 /* If the secondary has exactly one batch buffer in its list *and*
884 * that batch buffer is less than half of the maximum size, we're
885 * probably better off simply copying it into our batch.
886 */
887 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
888 } else if (!(cmd_buffer->usage_flags &
889 VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
890 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
891
892 /* In order to chain, we need this command buffer to contain an
893 * MI_BATCH_BUFFER_START which will jump back to the calling batch.
894 * It doesn't matter where it points now so long as it has a valid
895 * relocation. We'll adjust it later as part of the chaining
896 * process.
897 */
898 emit_batch_buffer_start(cmd_buffer, &batch_bo->bo, 0);
899 } else {
900 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
901 }
902 }
903
904 anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
905 }
906
907 static VkResult
908 anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
909 struct list_head *list)
910 {
911 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
912 struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
913 if (bbo_ptr == NULL)
914 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
915
916 *bbo_ptr = bbo;
917 }
918
919 return VK_SUCCESS;
920 }
921
922 void
923 anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
924 struct anv_cmd_buffer *secondary)
925 {
926 switch (secondary->exec_mode) {
927 case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
928 anv_batch_emit_batch(&primary->batch, &secondary->batch);
929 break;
930 case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
931 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
932 unsigned length = secondary->batch.end - secondary->batch.start;
933 anv_batch_bo_grow(primary, bbo, &primary->batch, length,
934 GEN8_MI_BATCH_BUFFER_START_length * 4);
935 anv_batch_emit_batch(&primary->batch, &secondary->batch);
936 break;
937 }
938 case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
939 struct anv_batch_bo *first_bbo =
940 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
941 struct anv_batch_bo *last_bbo =
942 list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
943
944 emit_batch_buffer_start(primary, &first_bbo->bo, 0);
945
946 struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
947 assert(primary->batch.start == this_bbo->bo.map);
948 uint32_t offset = primary->batch.next - primary->batch.start;
949
950 /* Make the tail of the secondary point back to right after the
951 * MI_BATCH_BUFFER_START in the primary batch.
952 */
953 anv_batch_bo_link(primary, last_bbo, this_bbo, offset);
954
955 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
956 break;
957 }
958 case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
959 struct list_head copy_list;
960 VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
961 secondary,
962 &copy_list);
963 if (result != VK_SUCCESS)
964 return; /* FIXME */
965
966 anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
967
968 struct anv_batch_bo *first_bbo =
969 list_first_entry(&copy_list, struct anv_batch_bo, link);
970 struct anv_batch_bo *last_bbo =
971 list_last_entry(&copy_list, struct anv_batch_bo, link);
972
973 cmd_buffer_chain_to_batch_bo(primary, first_bbo);
974
975 list_splicetail(&copy_list, &primary->batch_bos);
976
977 anv_batch_bo_continue(last_bbo, &primary->batch,
978 GEN8_MI_BATCH_BUFFER_START_length * 4);
979 break;
980 }
981 default:
982 assert(!"Invalid execution mode");
983 }
984
985 anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
986 &secondary->surface_relocs, 0);
987 }
988
989 struct anv_execbuf {
990 struct drm_i915_gem_execbuffer2 execbuf;
991
992 struct drm_i915_gem_exec_object2 * objects;
993 uint32_t bo_count;
994 struct anv_bo ** bos;
995
996 /* Allocated length of the 'objects' and 'bos' arrays */
997 uint32_t array_length;
998
999 bool has_relocs;
1000
1001 uint32_t fence_count;
1002 uint32_t fence_array_length;
1003 struct drm_i915_gem_exec_fence * fences;
1004 struct anv_syncobj ** syncobjs;
1005 };
1006
1007 static void
1008 anv_execbuf_init(struct anv_execbuf *exec)
1009 {
1010 memset(exec, 0, sizeof(*exec));
1011 }
1012
1013 static void
1014 anv_execbuf_finish(struct anv_execbuf *exec,
1015 const VkAllocationCallbacks *alloc)
1016 {
1017 vk_free(alloc, exec->objects);
1018 vk_free(alloc, exec->bos);
1019 vk_free(alloc, exec->fences);
1020 vk_free(alloc, exec->syncobjs);
1021 }
1022
1023 static int
1024 _compare_bo_handles(const void *_bo1, const void *_bo2)
1025 {
1026 struct anv_bo * const *bo1 = _bo1;
1027 struct anv_bo * const *bo2 = _bo2;
1028
1029 return (*bo1)->gem_handle - (*bo2)->gem_handle;
1030 }
1031
1032 static VkResult
1033 anv_execbuf_add_bo(struct anv_execbuf *exec,
1034 struct anv_bo *bo,
1035 struct anv_reloc_list *relocs,
1036 uint32_t extra_flags,
1037 const VkAllocationCallbacks *alloc)
1038 {
1039 struct drm_i915_gem_exec_object2 *obj = NULL;
1040
1041 if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
1042 obj = &exec->objects[bo->index];
1043
1044 if (obj == NULL) {
1045 /* We've never seen this one before. Add it to the list and assign
1046 * an id that we can use later.
1047 */
1048 if (exec->bo_count >= exec->array_length) {
1049 uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
1050
1051 struct drm_i915_gem_exec_object2 *new_objects =
1052 vk_alloc(alloc, new_len * sizeof(*new_objects),
1053 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1054 if (new_objects == NULL)
1055 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1056
1057 struct anv_bo **new_bos =
1058 vk_alloc(alloc, new_len * sizeof(*new_bos),
1059 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1060 if (new_bos == NULL) {
1061 vk_free(alloc, new_objects);
1062 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1063 }
1064
1065 if (exec->objects) {
1066 memcpy(new_objects, exec->objects,
1067 exec->bo_count * sizeof(*new_objects));
1068 memcpy(new_bos, exec->bos,
1069 exec->bo_count * sizeof(*new_bos));
1070 }
1071
1072 vk_free(alloc, exec->objects);
1073 vk_free(alloc, exec->bos);
1074
1075 exec->objects = new_objects;
1076 exec->bos = new_bos;
1077 exec->array_length = new_len;
1078 }
1079
1080 assert(exec->bo_count < exec->array_length);
1081
1082 bo->index = exec->bo_count++;
1083 obj = &exec->objects[bo->index];
1084 exec->bos[bo->index] = bo;
1085
1086 obj->handle = bo->gem_handle;
1087 obj->relocation_count = 0;
1088 obj->relocs_ptr = 0;
1089 obj->alignment = 0;
1090 obj->offset = bo->offset;
1091 obj->flags = bo->flags | extra_flags;
1092 obj->rsvd1 = 0;
1093 obj->rsvd2 = 0;
1094 }
1095
1096 if (relocs != NULL) {
1097 assert(obj->relocation_count == 0);
1098
1099 if (relocs->num_relocs > 0) {
1100 /* This is the first time we've ever seen a list of relocations for
1101 * this BO. Go ahead and set the relocations and then walk the list
1102 * of relocations and add them all.
1103 */
1104 exec->has_relocs = true;
1105 obj->relocation_count = relocs->num_relocs;
1106 obj->relocs_ptr = (uintptr_t) relocs->relocs;
1107
1108 for (size_t i = 0; i < relocs->num_relocs; i++) {
1109 VkResult result;
1110
1111 /* A quick sanity check on relocations */
1112 assert(relocs->relocs[i].offset < bo->size);
1113 result = anv_execbuf_add_bo(exec, relocs->reloc_bos[i], NULL,
1114 extra_flags, alloc);
1115
1116 if (result != VK_SUCCESS)
1117 return result;
1118 }
1119 }
1120
1121 if (relocs->deps && relocs->deps->entries > 0) {
1122 const uint32_t entries = relocs->deps->entries;
1123 struct anv_bo **bos =
1124 vk_alloc(alloc, entries * sizeof(*bos),
1125 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1126 if (bos == NULL)
1127 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1128
1129 struct set_entry *entry;
1130 struct anv_bo **bo = bos;
1131 set_foreach(relocs->deps, entry) {
1132 *bo++ = (void *)entry->key;
1133 }
1134
1135 qsort(bos, entries, sizeof(struct anv_bo*), _compare_bo_handles);
1136
1137 VkResult result = VK_SUCCESS;
1138 for (bo = bos; bo < bos + entries; bo++) {
1139 result = anv_execbuf_add_bo(exec, *bo, NULL, extra_flags, alloc);
1140 if (result != VK_SUCCESS)
1141 break;
1142 }
1143
1144 vk_free(alloc, bos);
1145
1146 if (result != VK_SUCCESS)
1147 return result;
1148 }
1149 }
1150
1151 return VK_SUCCESS;
1152 }
1153
1154 static VkResult
1155 anv_execbuf_add_syncobj(struct anv_execbuf *exec,
1156 uint32_t handle, uint32_t flags,
1157 const VkAllocationCallbacks *alloc)
1158 {
1159 assert(flags != 0);
1160
1161 if (exec->fence_count >= exec->fence_array_length) {
1162 uint32_t new_len = MAX2(exec->fence_array_length * 2, 64);
1163
1164 exec->fences = vk_realloc(alloc, exec->fences,
1165 new_len * sizeof(*exec->fences),
1166 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1167 if (exec->fences == NULL)
1168 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1169
1170 exec->fence_array_length = new_len;
1171 }
1172
1173 exec->fences[exec->fence_count] = (struct drm_i915_gem_exec_fence) {
1174 .handle = handle,
1175 .flags = flags,
1176 };
1177
1178 exec->fence_count++;
1179
1180 return VK_SUCCESS;
1181 }
1182
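/* The execbuf below is submitted with I915_EXEC_HANDLE_LUT, so the kernel
 * treats each relocation's target_handle as an index into the validation
 * object list rather than as a GEM handle. Rewrite the handles once every
 * BO has been assigned its final index.
 */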
1183 static void
1184 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
1185 struct anv_reloc_list *list)
1186 {
1187 for (size_t i = 0; i < list->num_relocs; i++)
1188 list->relocs[i].target_handle = list->reloc_bos[i]->index;
1189 }
1190
1191 static void
1192 adjust_relocations_from_state_pool(struct anv_state_pool *pool,
1193 struct anv_reloc_list *relocs,
1194 uint32_t last_pool_center_bo_offset)
1195 {
1196 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1197 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1198
1199 for (size_t i = 0; i < relocs->num_relocs; i++) {
1200 /* All of the relocations from this block pool to other BO's should
1201 * have been emitted relative to the surface block pool center. We
1202 * need to add the center offset to make them relative to the
1203 * beginning of the actual GEM bo.
1204 */
1205 relocs->relocs[i].offset += delta;
1206 }
1207 }
1208
1209 static void
1210 adjust_relocations_to_state_pool(struct anv_state_pool *pool,
1211 struct anv_bo *from_bo,
1212 struct anv_reloc_list *relocs,
1213 uint32_t last_pool_center_bo_offset)
1214 {
1215 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1216 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1217
1218 /* When we initially emit relocations into a block pool, we don't
1219 * actually know what the final center_bo_offset will be so we just emit
1220 * it as if center_bo_offset == 0. Now that we know what the center
1221 * offset is, we need to walk the list of relocations and adjust any
1222 * relocations that point to the pool bo with the correct offset.
1223 */
1224 for (size_t i = 0; i < relocs->num_relocs; i++) {
1225 if (relocs->reloc_bos[i] == &pool->block_pool.bo) {
1226 /* Adjust the delta value in the relocation to correctly
1227 * correspond to the new delta. Initially, this value may have
1228 * been negative (if treated as unsigned), but we trust in
1229 * uint32_t roll-over to fix that for us at this point.
1230 */
1231 relocs->relocs[i].delta += delta;
1232
1233 /* Since the delta has changed, we need to update the actual
1234 * relocated value with the new presumed value. This function
1235 * should only be called on batch buffers, so we know it isn't in
1236 * use by the GPU at the moment.
1237 */
1238 assert(relocs->relocs[i].offset < from_bo->size);
1239 write_reloc(pool->block_pool.device,
1240 from_bo->map + relocs->relocs[i].offset,
1241 relocs->relocs[i].presumed_offset +
1242 relocs->relocs[i].delta, false);
1243 }
1244 }
1245 }
1246
1247 static void
1248 anv_reloc_list_apply(struct anv_device *device,
1249 struct anv_reloc_list *list,
1250 struct anv_bo *bo,
1251 bool always_relocate)
1252 {
1253 for (size_t i = 0; i < list->num_relocs; i++) {
1254 struct anv_bo *target_bo = list->reloc_bos[i];
1255 if (list->relocs[i].presumed_offset == target_bo->offset &&
1256 !always_relocate)
1257 continue;
1258
1259 void *p = bo->map + list->relocs[i].offset;
1260 write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
1261 list->relocs[i].presumed_offset = target_bo->offset;
1262 }
1263 }
1264
1265 /**
1266 * This function applies the relocations for a command buffer and writes the
1267 * actual addresses into the buffers as per what we were told by the kernel on
1268 * the previous execbuf2 call. This should be safe to do because, for each
1269 * relocated address, we have two cases:
1270 *
1271 * 1) The target BO is inactive (as seen by the kernel). In this case, it is
1272 * not in use by the GPU so updating the address is 100% ok. It won't be
1273 * in-use by the GPU (from our context) again until the next execbuf2
1274 * happens. If the kernel decides to move it in the next execbuf2, it
1275 * will have to do the relocations itself, but that's ok because it should
1276 * have all of the information needed to do so.
1277 *
1278 * 2) The target BO is active (as seen by the kernel). In this case, it
1279 * hasn't moved since the last execbuffer2 call because GTT shuffling
1280 * *only* happens when the BO is idle. (From our perspective, it only
1281 * happens inside the execbuffer2 ioctl, but the shuffling may be
1282 * triggered by another ioctl, which with full-ppgtt is limited to
1283 * execbuffer2 ioctls on the same context, or by memory pressure.) Since the
1284 * target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
1285 * address and the relocated value we are writing into the BO will be the
1286 * same as the value that is already there.
1287 *
1288 * There is also a possibility that the target BO is active but the exact
1289 * RENDER_SURFACE_STATE object we are writing the relocation into isn't in
1290 * use. In this case, the address currently in the RENDER_SURFACE_STATE
1291 * may be stale but it's still safe to write the relocation because that
1292 * particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
1293 * won't be until the next execbuf2 call.
1294 *
1295 * By doing relocations on the CPU, we can tell the kernel that it doesn't
1296 * need to bother. We want to do this because the surface state buffer is
1297 * used by every command buffer so, if the kernel does the relocations, it
1298 * will always be busy and the kernel will always stall. This is also
1299 * probably the fastest mechanism for doing relocations since the kernel would
1300 * have to make a full copy of all the relocation lists.
1301 */
1302 static bool
1303 relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
1304 struct anv_execbuf *exec)
1305 {
1306 if (!exec->has_relocs)
1307 return true;
1308
1309 static int userspace_relocs = -1;
1310 if (userspace_relocs < 0)
1311 userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
1312 if (!userspace_relocs)
1313 return false;
1314
1315 /* First, we have to check to see whether or not we can even do the
1316 * relocation. New buffers which have never been submitted to the kernel
1317 * don't have a valid offset so we need to let the kernel do relocations so
1318 * that we can get offsets for them. On future execbuf2 calls, those
1319 * buffers will have offsets and we will be able to skip relocating.
1320 * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
1321 */
1322 for (uint32_t i = 0; i < exec->bo_count; i++) {
1323 if (exec->bos[i]->offset == (uint64_t)-1)
1324 return false;
1325 }
1326
1327 /* Since surface states are shared between command buffers and we don't
1328 * know what order they will be submitted to the kernel, we don't know
1329 * what address is actually written in the surface state object at any
1330 * given time. The only option is to always relocate them.
1331 */
1332 anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
1333 &cmd_buffer->device->surface_state_pool.block_pool.bo,
1334 true /* always relocate surface states */);
1335
1336 /* Since we own all of the batch buffers, we know what values are stored
1337 * in the relocated addresses and only have to update them if the offsets
1338 * have changed.
1339 */
1340 struct anv_batch_bo **bbo;
1341 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1342 anv_reloc_list_apply(cmd_buffer->device,
1343 &(*bbo)->relocs, &(*bbo)->bo, false);
1344 }
1345
1346 for (uint32_t i = 0; i < exec->bo_count; i++)
1347 exec->objects[i].offset = exec->bos[i]->offset;
1348
1349 return true;
1350 }
1351
1352 static VkResult
1353 setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
1354 struct anv_cmd_buffer *cmd_buffer)
1355 {
1356 struct anv_batch *batch = &cmd_buffer->batch;
1357 struct anv_state_pool *ss_pool =
1358 &cmd_buffer->device->surface_state_pool;
1359
1360 adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
1361 cmd_buffer->last_ss_pool_center);
1362 VkResult result = anv_execbuf_add_bo(execbuf, &ss_pool->block_pool.bo,
1363 &cmd_buffer->surface_relocs, 0,
1364 &cmd_buffer->device->alloc);
1365 if (result != VK_SUCCESS)
1366 return result;
1367
1368 /* First, we walk over all of the bos we've seen and add them and their
1369 * relocations to the validate list.
1370 */
1371 struct anv_batch_bo **bbo;
1372 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1373 adjust_relocations_to_state_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
1374 cmd_buffer->last_ss_pool_center);
1375
1376 result = anv_execbuf_add_bo(execbuf, &(*bbo)->bo, &(*bbo)->relocs, 0,
1377 &cmd_buffer->device->alloc);
1378 if (result != VK_SUCCESS)
1379 return result;
1380 }
1381
1382 /* Now that we've adjusted all of the surface state relocations, we need to
1383 * record the surface state pool center so future executions of the command
1384 * buffer can adjust correctly.
1385 */
1386 cmd_buffer->last_ss_pool_center = ss_pool->block_pool.center_bo_offset;
1387
1388 struct anv_batch_bo *first_batch_bo =
1389 list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
1390
1391 /* The kernel requires that the last entry in the validation list be the
1392 * batch buffer to execute. We can simply swap the element
1393 * corresponding to the first batch_bo in the chain with the last
1394 * element in the list.
1395 */
1396 if (first_batch_bo->bo.index != execbuf->bo_count - 1) {
1397 uint32_t idx = first_batch_bo->bo.index;
1398 uint32_t last_idx = execbuf->bo_count - 1;
1399
1400 struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
1401 assert(execbuf->bos[idx] == &first_batch_bo->bo);
1402
1403 execbuf->objects[idx] = execbuf->objects[last_idx];
1404 execbuf->bos[idx] = execbuf->bos[last_idx];
1405 execbuf->bos[idx]->index = idx;
1406
1407 execbuf->objects[last_idx] = tmp_obj;
1408 execbuf->bos[last_idx] = &first_batch_bo->bo;
1409 first_batch_bo->bo.index = last_idx;
1410 }
1411
1412 /* If we are pinning our BOs, we shouldn't have to relocate anything */
1413 if (cmd_buffer->device->instance->physicalDevice.use_softpin)
1414 assert(!execbuf->has_relocs);
1415
1416 /* Now we go through and fixup all of the relocation lists to point to
1417 * the correct indices in the object array. We have to do this after we
1418 * reorder the list above as some of the indices may have changed.
1419 */
1420 if (execbuf->has_relocs) {
1421 u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
1422 anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
1423
1424 anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
1425 }
1426
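/* Without LLC, CPU writes to the batch are not coherent with the GPU's reads,
 * so fence the writes and flush every batch BO's cachelines out to memory
 * before handing the batch to the kernel.
 */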
1427 if (!cmd_buffer->device->info.has_llc) {
1428 __builtin_ia32_mfence();
1429 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1430 for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
1431 __builtin_ia32_clflush((*bbo)->bo.map + i);
1432 }
1433 }
1434
1435 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1436 .buffers_ptr = (uintptr_t) execbuf->objects,
1437 .buffer_count = execbuf->bo_count,
1438 .batch_start_offset = 0,
1439 .batch_len = batch->next - batch->start,
1440 .cliprects_ptr = 0,
1441 .num_cliprects = 0,
1442 .DR1 = 0,
1443 .DR4 = 0,
1444 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
1445 .rsvd1 = cmd_buffer->device->context_id,
1446 .rsvd2 = 0,
1447 };
1448
1449 if (relocate_cmd_buffer(cmd_buffer, execbuf)) {
1450 /* If we were able to successfully relocate everything, tell the kernel
1451 * that it can skip doing relocations. The requirement for using
1452 * NO_RELOC is:
1453 *
1454 * 1) The addresses written in the objects must match the corresponding
1455 * reloc.presumed_offset which in turn must match the corresponding
1456 * execobject.offset.
1457 *
1458 * 2) To avoid stalling, execobject.offset should match the current
1459 * address of that object within the active context.
1460 *
1461 * In order to satisfy all of the invariants that make userspace
1462 * relocations to be safe (see relocate_cmd_buffer()), we need to
1463 * further ensure that the addresses we use match those used by the
1464 * kernel for the most recent execbuf2.
1465 *
1466 * The kernel may still choose to do relocations anyway if something has
1467 * moved in the GTT. In this case, the relocation list still needs to be
1468 * valid. All relocations on the batch buffers are already valid and
1469 * kept up-to-date. For surface state relocations, by applying the
1470 * relocations in relocate_cmd_buffer, we ensured that the address in
1471 * the RENDER_SURFACE_STATE matches presumed_offset, so it should be
1472 * safe for the kernel to relocate them as needed.
1473 */
1474 execbuf->execbuf.flags |= I915_EXEC_NO_RELOC;
1475 } else {
1476 /* In the case where we fall back to doing kernel relocations, we need
1477 * to ensure that the relocation list is valid. All relocations on the
1478 * batch buffers are already valid and kept up-to-date. Since surface
1479 * states are shared between command buffers and we don't know what
1480 * order they will be submitted to the kernel, we don't know what
1481 * address is actually written in the surface state object at any given
1482 * time. The only option is to set a bogus presumed offset and let the
1483 * kernel relocate them.
1484 */
1485 for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
1486 cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
1487 }
1488
1489 return VK_SUCCESS;
1490 }
1491
1492 static VkResult
1493 setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_device *device)
1494 {
1495 VkResult result = anv_execbuf_add_bo(execbuf, &device->trivial_batch_bo,
1496 NULL, 0, &device->alloc);
1497 if (result != VK_SUCCESS)
1498 return result;
1499
1500 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1501 .buffers_ptr = (uintptr_t) execbuf->objects,
1502 .buffer_count = execbuf->bo_count,
1503 .batch_start_offset = 0,
1504 .batch_len = 8, /* GEN7_MI_BATCH_BUFFER_END and NOOP */
1505 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
1506 .rsvd1 = device->context_id,
1507 .rsvd2 = 0,
1508 };
1509
1510 return VK_SUCCESS;
1511 }
1512
1513 VkResult
1514 anv_cmd_buffer_execbuf(struct anv_device *device,
1515 struct anv_cmd_buffer *cmd_buffer,
1516 const VkSemaphore *in_semaphores,
1517 uint32_t num_in_semaphores,
1518 const VkSemaphore *out_semaphores,
1519 uint32_t num_out_semaphores,
1520 VkFence _fence)
1521 {
1522 ANV_FROM_HANDLE(anv_fence, fence, _fence);
1523
1524 struct anv_execbuf execbuf;
1525 anv_execbuf_init(&execbuf);
1526
1527 int in_fence = -1;
1528 VkResult result = VK_SUCCESS;
1529 for (uint32_t i = 0; i < num_in_semaphores; i++) {
1530 ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
1531 struct anv_semaphore_impl *impl =
1532 semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
1533 &semaphore->temporary : &semaphore->permanent;
1534
1535 switch (impl->type) {
1536 case ANV_SEMAPHORE_TYPE_BO:
1537 result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
1538 0, &device->alloc);
1539 if (result != VK_SUCCESS)
1540 return result;
1541 break;
1542
1543 case ANV_SEMAPHORE_TYPE_SYNC_FILE:
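/* execbuf2 accepts at most one sync-file in-fence (I915_EXEC_FENCE_IN), so
 * merge all sync-file waits into a single fd.
 */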
1544 if (in_fence == -1) {
1545 in_fence = impl->fd;
1546 } else {
1547 int merge = anv_gem_sync_file_merge(device, in_fence, impl->fd);
1548 if (merge == -1)
1549 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
1550
1551 close(impl->fd);
1552 close(in_fence);
1553 in_fence = merge;
1554 }
1555
1556 impl->fd = -1;
1557 break;
1558
1559 case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
1560 result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
1561 I915_EXEC_FENCE_WAIT,
1562 &device->alloc);
1563 if (result != VK_SUCCESS)
1564 return result;
1565 break;
1566
1567 default:
1568 break;
1569 }
1570 }
1571
1572 bool need_out_fence = false;
1573 for (uint32_t i = 0; i < num_out_semaphores; i++) {
1574 ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
1575
1576 /* Under most circumstances, out fences won't be temporary. However,
1577 * the spec does allow it for opaque_fd. From the Vulkan 1.0.53 spec:
1578 *
1579 * "If the import is temporary, the implementation must restore the
1580 * semaphore to its prior permanent state after submitting the next
1581 * semaphore wait operation."
1582 *
1583 * The spec says nothing whatsoever about signal operations on
1584 * temporarily imported semaphores so it appears they are allowed.
1585 * There are also CTS tests that require this to work.
1586 */
1587 struct anv_semaphore_impl *impl =
1588 semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
1589 &semaphore->temporary : &semaphore->permanent;
1590
1591 switch (impl->type) {
1592 case ANV_SEMAPHORE_TYPE_BO:
1593 result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
1594 EXEC_OBJECT_WRITE, &device->alloc);
1595 if (result != VK_SUCCESS)
1596 return result;
1597 break;
1598
1599 case ANV_SEMAPHORE_TYPE_SYNC_FILE:
1600 need_out_fence = true;
1601 break;
1602
1603 case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
1604 result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
1605 I915_EXEC_FENCE_SIGNAL,
1606 &device->alloc);
1607 if (result != VK_SUCCESS)
1608 return result;
1609 break;
1610
1611 default:
1612 break;
1613 }
1614 }
1615
1616 if (fence) {
1617 /* Under most circumstances, out fences won't be temporary. However,
1618 * the spec does allow it for opaque_fd. From the Vulkan 1.0.53 spec:
1619 *
1620 * "If the import is temporary, the implementation must restore the
1621 * semaphore to its prior permanent state after submitting the next
1622 * semaphore wait operation."
1623 *
1624 * The spec says nothing whatsoever about signal operations on
1625 * temporarily imported semaphores so it appears they are allowed.
1626 * There are also CTS tests that require this to work.
1627 */
1628 struct anv_fence_impl *impl =
1629 fence->temporary.type != ANV_FENCE_TYPE_NONE ?
1630 &fence->temporary : &fence->permanent;
1631
1632 switch (impl->type) {
1633 case ANV_FENCE_TYPE_BO:
1634 result = anv_execbuf_add_bo(&execbuf, &impl->bo.bo, NULL,
1635 EXEC_OBJECT_WRITE, &device->alloc);
1636 if (result != VK_SUCCESS)
1637 return result;
1638 break;
1639
1640 case ANV_FENCE_TYPE_SYNCOBJ:
1641 result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
1642 I915_EXEC_FENCE_SIGNAL,
1643 &device->alloc);
1644 if (result != VK_SUCCESS)
1645 return result;
1646 break;
1647
1648 default:
1649 unreachable("Invalid fence type");
1650 }
1651 }
1652
1653 if (cmd_buffer)
1654 result = setup_execbuf_for_cmd_buffer(&execbuf, cmd_buffer);
1655 else
1656 result = setup_empty_execbuf(&execbuf, device);
1657
1658 if (result != VK_SUCCESS)
1659 return result;
1660
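/* When I915_EXEC_FENCE_ARRAY is set, the kernel reuses the otherwise unused
 * cliprects fields to pass the array of drm_i915_gem_exec_fence entries.
 */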
1661 if (execbuf.fence_count > 0) {
1662 assert(device->instance->physicalDevice.has_syncobj);
1663 execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
1664 execbuf.execbuf.num_cliprects = execbuf.fence_count;
1665 execbuf.execbuf.cliprects_ptr = (uintptr_t) execbuf.fences;
1666 }
1667
1668 if (in_fence != -1) {
1669 execbuf.execbuf.flags |= I915_EXEC_FENCE_IN;
1670 execbuf.execbuf.rsvd2 |= (uint32_t)in_fence;
1671 }
1672
1673 if (need_out_fence)
1674 execbuf.execbuf.flags |= I915_EXEC_FENCE_OUT;
1675
1676 result = anv_device_execbuf(device, &execbuf.execbuf, execbuf.bos);
1677
1678 /* Execbuf does not consume the in_fence. It's our job to close it. */
1679 if (in_fence != -1)
1680 close(in_fence);
1681
1682 for (uint32_t i = 0; i < num_in_semaphores; i++) {
1683 ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
1684 /* From the Vulkan 1.0.53 spec:
1685 *
1686 * "If the import is temporary, the implementation must restore the
1687 * semaphore to its prior permanent state after submitting the next
1688 * semaphore wait operation."
1689 *
1690 * This has to happen after the execbuf in case we close any syncobjs in
1691 * the process.
1692 */
1693 anv_semaphore_reset_temporary(device, semaphore);
1694 }
1695
1696 if (fence && fence->permanent.type == ANV_FENCE_TYPE_BO) {
1697 /* BO fences can't be shared, so they can't be temporary. */
1698 assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
1699
1700 /* Once the execbuf has returned, we need to set the fence state to
1701 * SUBMITTED. We can't do this before calling execbuf because
1702 * anv_GetFenceStatus does take the global device lock before checking
1703 * fence->state.
1704 *
1705 * We set the fence state to SUBMITTED regardless of whether or not the
1706 * execbuf succeeds because we need to ensure that vkWaitForFences() and
1707 * vkGetFenceStatus() return a valid result (VK_ERROR_DEVICE_LOST or
1708 * VK_SUCCESS) in a finite amount of time even if execbuf fails.
1709 */
1710 fence->permanent.bo.state = ANV_BO_FENCE_STATE_SUBMITTED;
1711 }
1712
1713 if (result == VK_SUCCESS && need_out_fence) {
1714 int out_fence = execbuf.execbuf.rsvd2 >> 32;
1715 for (uint32_t i = 0; i < num_out_semaphores; i++) {
1716 ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
1717 /* Out fences can't have temporary state because that would imply
1718 * that we imported a sync file and are trying to signal it.
1719 */
1720 assert(semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE);
1721 struct anv_semaphore_impl *impl = &semaphore->permanent;
1722
1723 if (impl->type == ANV_SEMAPHORE_TYPE_SYNC_FILE) {
1724 assert(impl->fd == -1);
1725 impl->fd = dup(out_fence);
1726 }
1727 }
1728 close(out_fence);
1729 }
1730
1731 anv_execbuf_finish(&execbuf, &device->alloc);
1732
1733 return result;
1734 }