1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "common/gen_decoder.h"
33
34 #include "genxml/gen8_pack.h"
35
36 #include "util/debug.h"
37
38 /** \file anv_batch_chain.c
39 *
40 * This file contains functions related to anv_cmd_buffer as a data
41 * structure. This involves everything required to create and destroy
42 * the actual batch buffers as well as link them together and handle
43 * relocations and surface state. It specifically does *not* contain any
44 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
45 */
46
47 /*-----------------------------------------------------------------------*
48 * Functions related to anv_reloc_list
49 *-----------------------------------------------------------------------*/
50
51 static VkResult
52 anv_reloc_list_init_clone(struct anv_reloc_list *list,
53 const VkAllocationCallbacks *alloc,
54 const struct anv_reloc_list *other_list)
55 {
56 if (other_list) {
57 list->num_relocs = other_list->num_relocs;
58 list->array_length = other_list->array_length;
59 } else {
60 list->num_relocs = 0;
61 list->array_length = 256;
62 }
63
64 list->relocs =
65 vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
66 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
67
68 if (list->relocs == NULL)
69 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
70
71 list->reloc_bos =
72 vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
73 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
74
75 if (list->reloc_bos == NULL) {
76 vk_free(alloc, list->relocs);
77 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
78 }
79
80 list->deps = _mesa_pointer_set_create(NULL);
81
82 if (!list->deps) {
83 vk_free(alloc, list->relocs);
84 vk_free(alloc, list->reloc_bos);
85 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
86 }
87
88 if (other_list) {
89 memcpy(list->relocs, other_list->relocs,
90 list->array_length * sizeof(*list->relocs));
91 memcpy(list->reloc_bos, other_list->reloc_bos,
92 list->array_length * sizeof(*list->reloc_bos));
93 set_foreach(other_list->deps, entry) {
94 _mesa_set_add_pre_hashed(list->deps, entry->hash, entry->key);
95 }
96 }
97
98 return VK_SUCCESS;
99 }
100
101 VkResult
102 anv_reloc_list_init(struct anv_reloc_list *list,
103 const VkAllocationCallbacks *alloc)
104 {
105 return anv_reloc_list_init_clone(list, alloc, NULL);
106 }
107
108 void
109 anv_reloc_list_finish(struct anv_reloc_list *list,
110 const VkAllocationCallbacks *alloc)
111 {
112 vk_free(alloc, list->relocs);
113 vk_free(alloc, list->reloc_bos);
114 _mesa_set_destroy(list->deps, NULL);
115 }
116
117 static VkResult
118 anv_reloc_list_grow(struct anv_reloc_list *list,
119 const VkAllocationCallbacks *alloc,
120 size_t num_additional_relocs)
121 {
122 if (list->num_relocs + num_additional_relocs <= list->array_length)
123 return VK_SUCCESS;
124
125 size_t new_length = list->array_length * 2;
126 while (new_length < list->num_relocs + num_additional_relocs)
127 new_length *= 2;
128
129 struct drm_i915_gem_relocation_entry *new_relocs =
130 vk_alloc(alloc, new_length * sizeof(*list->relocs), 8,
131 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
132 if (new_relocs == NULL)
133 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
134
135 struct anv_bo **new_reloc_bos =
136 vk_alloc(alloc, new_length * sizeof(*list->reloc_bos), 8,
137 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
138 if (new_reloc_bos == NULL) {
139 vk_free(alloc, new_relocs);
140 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
141 }
142
143 memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
144 memcpy(new_reloc_bos, list->reloc_bos,
145 list->num_relocs * sizeof(*list->reloc_bos));
146
147 vk_free(alloc, list->relocs);
148 vk_free(alloc, list->reloc_bos);
149
150 list->array_length = new_length;
151 list->relocs = new_relocs;
152 list->reloc_bos = new_reloc_bos;
153
154 return VK_SUCCESS;
155 }
156
157 VkResult
158 anv_reloc_list_add(struct anv_reloc_list *list,
159 const VkAllocationCallbacks *alloc,
160 uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
161 {
162 struct drm_i915_gem_relocation_entry *entry;
163 int index;
164
165 if (target_bo->flags & EXEC_OBJECT_PINNED) {
166 _mesa_set_add(list->deps, target_bo);
167 return VK_SUCCESS;
168 }
169
170 VkResult result = anv_reloc_list_grow(list, alloc, 1);
171 if (result != VK_SUCCESS)
172 return result;
173
174 /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
175 index = list->num_relocs++;
176 list->reloc_bos[index] = target_bo;
177 entry = &list->relocs[index];
178 entry->target_handle = target_bo->gem_handle;
179 entry->delta = delta;
180 entry->offset = offset;
181 entry->presumed_offset = target_bo->offset;
182 entry->read_domains = 0;
183 entry->write_domain = 0;
184 VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
185
186 return VK_SUCCESS;
187 }
188
189 static VkResult
190 anv_reloc_list_append(struct anv_reloc_list *list,
191 const VkAllocationCallbacks *alloc,
192 struct anv_reloc_list *other, uint32_t offset)
193 {
194 VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
195 if (result != VK_SUCCESS)
196 return result;
197
198 memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
199 other->num_relocs * sizeof(other->relocs[0]));
200 memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
201 other->num_relocs * sizeof(other->reloc_bos[0]));
202
203 for (uint32_t i = 0; i < other->num_relocs; i++)
204 list->relocs[i + list->num_relocs].offset += offset;
205
206 list->num_relocs += other->num_relocs;
207
208 set_foreach(other->deps, entry) {
209 _mesa_set_add_pre_hashed(list->deps, entry->hash, entry->key);
210 }
211
212 return VK_SUCCESS;
213 }
214
215 /*-----------------------------------------------------------------------*
216 * Functions related to anv_batch
217 *-----------------------------------------------------------------------*/
218
219 void *
220 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
221 {
222 if (batch->next + num_dwords * 4 > batch->end) {
223 VkResult result = batch->extend_cb(batch, batch->user_data);
224 if (result != VK_SUCCESS) {
225 anv_batch_set_error(batch, result);
226 return NULL;
227 }
228 }
229
230 void *p = batch->next;
231
232 batch->next += num_dwords * 4;
233 assert(batch->next <= batch->end);
234
235 return p;
236 }
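/* A minimal usage sketch (not part of the driver): reserve space with
 * anv_batch_emit_dwords() and fill the dwords by hand.  Real callers go
 * through the genxml pack helpers instead; the zero dwords below are just
 * MI_NOOPs and only illustrate the contract that the returned pointer is
 * valid for exactly num_dwords * 4 bytes (or NULL if growing the batch
 * failed, in which case the error has already been recorded on the batch).
 */
static inline void
example_emit_two_noops(struct anv_batch *batch)
{
   uint32_t *dw = anv_batch_emit_dwords(batch, 2);
   if (dw == NULL)
      return;

   dw[0] = 0; /* MI_NOOP */
   dw[1] = 0; /* MI_NOOP */
}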
237
238 uint64_t
239 anv_batch_emit_reloc(struct anv_batch *batch,
240 void *location, struct anv_bo *bo, uint32_t delta)
241 {
242 VkResult result = anv_reloc_list_add(batch->relocs, batch->alloc,
243 location - batch->start, bo, delta);
244 if (result != VK_SUCCESS) {
245 anv_batch_set_error(batch, result);
246 return 0;
247 }
248
249 return bo->offset + delta;
250 }
251
252 void
253 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
254 {
255 uint32_t size, offset;
256
257 size = other->next - other->start;
258 assert(size % 4 == 0);
259
260 if (batch->next + size > batch->end) {
261 VkResult result = batch->extend_cb(batch, batch->user_data);
262 if (result != VK_SUCCESS) {
263 anv_batch_set_error(batch, result);
264 return;
265 }
266 }
267
268 assert(batch->next + size <= batch->end);
269
270 VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
271 memcpy(batch->next, other->start, size);
272
273 offset = batch->next - batch->start;
274 VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
275 other->relocs, offset);
276 if (result != VK_SUCCESS) {
277 anv_batch_set_error(batch, result);
278 return;
279 }
280
281 batch->next += size;
282 }
283
284 /*-----------------------------------------------------------------------*
285 * Functions related to anv_batch_bo
286 *-----------------------------------------------------------------------*/
287
288 static VkResult
289 anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
290 struct anv_batch_bo **bbo_out)
291 {
292 VkResult result;
293
294 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
295 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
296 if (bbo == NULL)
297 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
298
299 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
300 ANV_CMD_BUFFER_BATCH_SIZE);
301 if (result != VK_SUCCESS)
302 goto fail_alloc;
303
304 result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
305 if (result != VK_SUCCESS)
306 goto fail_bo_alloc;
307
308 *bbo_out = bbo;
309
310 return VK_SUCCESS;
311
312 fail_bo_alloc:
313 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
314 fail_alloc:
315 vk_free(&cmd_buffer->pool->alloc, bbo);
316
317 return result;
318 }
319
320 static VkResult
321 anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
322 const struct anv_batch_bo *other_bbo,
323 struct anv_batch_bo **bbo_out)
324 {
325 VkResult result;
326
327 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
328 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
329 if (bbo == NULL)
330 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
331
332 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
333 other_bbo->bo.size);
334 if (result != VK_SUCCESS)
335 goto fail_alloc;
336
337 result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
338 &other_bbo->relocs);
339 if (result != VK_SUCCESS)
340 goto fail_bo_alloc;
341
342 bbo->length = other_bbo->length;
343 memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);
344
345 *bbo_out = bbo;
346
347 return VK_SUCCESS;
348
349 fail_bo_alloc:
350 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
351 fail_alloc:
352 vk_free(&cmd_buffer->pool->alloc, bbo);
353
354 return result;
355 }
356
357 static void
358 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
359 size_t batch_padding)
360 {
361 batch->next = batch->start = bbo->bo.map;
362 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
363 batch->relocs = &bbo->relocs;
364 bbo->relocs.num_relocs = 0;
365 _mesa_set_clear(bbo->relocs.deps, NULL);
366 }
367
368 static void
369 anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
370 size_t batch_padding)
371 {
372 batch->start = bbo->bo.map;
373 batch->next = bbo->bo.map + bbo->length;
374 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
375 batch->relocs = &bbo->relocs;
376 }
377
378 static void
379 anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
380 {
381 assert(batch->start == bbo->bo.map);
382 bbo->length = batch->next - batch->start;
383 VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
384 }
385
386 static VkResult
387 anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
388 struct anv_batch *batch, size_t additional,
389 size_t batch_padding)
390 {
391 assert(batch->start == bbo->bo.map);
392 bbo->length = batch->next - batch->start;
393
394 size_t new_size = bbo->bo.size;
395 while (new_size <= bbo->length + additional + batch_padding)
396 new_size *= 2;
397
398 if (new_size == bbo->bo.size)
399 return VK_SUCCESS;
400
401 struct anv_bo new_bo;
402 VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
403 &new_bo, new_size);
404 if (result != VK_SUCCESS)
405 return result;
406
407 memcpy(new_bo.map, bbo->bo.map, bbo->length);
408
409 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
410
411 bbo->bo = new_bo;
412 anv_batch_bo_continue(bbo, batch, batch_padding);
413
414 return VK_SUCCESS;
415 }
416
417 static void
418 anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
419 struct anv_batch_bo *prev_bbo,
420 struct anv_batch_bo *next_bbo,
421 uint32_t next_bbo_offset)
422 {
423 MAYBE_UNUSED const uint32_t bb_start_offset =
424 prev_bbo->length - GEN8_MI_BATCH_BUFFER_START_length * 4;
425 MAYBE_UNUSED const uint32_t *bb_start = prev_bbo->bo.map + bb_start_offset;
426
427 /* Make sure we're looking at a MI_BATCH_BUFFER_START */
428 assert(((*bb_start >> 29) & 0x07) == 0);
429 assert(((*bb_start >> 23) & 0x3f) == 49);
430
431 if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
432 assert(prev_bbo->bo.flags & EXEC_OBJECT_PINNED);
433 assert(next_bbo->bo.flags & EXEC_OBJECT_PINNED);
434
435 write_reloc(cmd_buffer->device,
436 prev_bbo->bo.map + bb_start_offset + 4,
437 next_bbo->bo.offset + next_bbo_offset, true);
438 } else {
439 uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
440 assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);
441
442 prev_bbo->relocs.reloc_bos[reloc_idx] = &next_bbo->bo;
443 prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;
444
445 /* Use a bogus presumed offset to force a relocation */
446 prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
447 }
448 }
449
450 static void
451 anv_batch_bo_destroy(struct anv_batch_bo *bbo,
452 struct anv_cmd_buffer *cmd_buffer)
453 {
454 anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
455 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
456 vk_free(&cmd_buffer->pool->alloc, bbo);
457 }
458
459 static VkResult
460 anv_batch_bo_list_clone(const struct list_head *list,
461 struct anv_cmd_buffer *cmd_buffer,
462 struct list_head *new_list)
463 {
464 VkResult result = VK_SUCCESS;
465
466 list_inithead(new_list);
467
468 struct anv_batch_bo *prev_bbo = NULL;
469 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
470 struct anv_batch_bo *new_bbo = NULL;
471 result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
472 if (result != VK_SUCCESS)
473 break;
474 list_addtail(&new_bbo->link, new_list);
475
476 if (prev_bbo)
477 anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);
478
479 prev_bbo = new_bbo;
480 }
481
482 if (result != VK_SUCCESS) {
483 list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
484 anv_batch_bo_destroy(bbo, cmd_buffer);
485 }
486
487 return result;
488 }
489
490 /*-----------------------------------------------------------------------*
491 * Functions related to anv_batch_bo
492 *-----------------------------------------------------------------------*/
493
494 static struct anv_batch_bo *
495 anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
496 {
497 return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
498 }
499
500 struct anv_address
501 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
502 {
503 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
504 return (struct anv_address) {
505 .bo = anv_binding_table_pool(cmd_buffer->device)->block_pool.bo,
506 .offset = bt_block->offset,
507 };
508 }
509
510 static void
511 emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
512 struct anv_bo *bo, uint32_t offset)
513 {
514 /* In gen8+ the address field grew to two dwords to accommodate 48 bit
515 * offsets. The high 16 bits are in the last dword, so we can use the gen8
516 * version in either case, as long as we set the instruction length in the
517 * header accordingly. This means that we always emit three dwords here
518 * and all the padding and adjustment we do in this file works for all
519 * gens.
520 */
521
522 #define GEN7_MI_BATCH_BUFFER_START_length 2
523 #define GEN7_MI_BATCH_BUFFER_START_length_bias 2
524
525 const uint32_t gen7_length =
526 GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
527 const uint32_t gen8_length =
528 GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;
529
530 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
531 bbs.DWordLength = cmd_buffer->device->info.gen < 8 ?
532 gen7_length : gen8_length;
533 bbs.SecondLevelBatchBuffer = Firstlevelbatch;
534 bbs.AddressSpaceIndicator = ASI_PPGTT;
535 bbs.BatchBufferStartAddress = (struct anv_address) { bo, offset };
536 }
537 }
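/* A worked example of the length fields above, assuming the usual genxml
 * conventions: the DWordLength field of an MI command stores the total
 * command length in dwords minus a bias of 2.  MI_BATCH_BUFFER_START is
 * 3 dwords on gen8+ and 2 dwords on gen7, so
 *
 *    gen8_length = 3 - 2 = 1   (header + 48-bit address in two dwords)
 *    gen7_length = 2 - 2 = 0   (header + 32-bit address in one dword)
 *
 * Since we always pack the 3-dword gen8 layout, on gen7 the extra dword
 * holds the high address bits, which are zero there and therefore decode
 * as an MI_NOOP following the command.
 */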
538
539 static void
540 cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
541 struct anv_batch_bo *bbo)
542 {
543 struct anv_batch *batch = &cmd_buffer->batch;
544 struct anv_batch_bo *current_bbo =
545 anv_cmd_buffer_current_batch_bo(cmd_buffer);
546
547 /* We set the end of the batch a little short so we would be sure we
548 * have room for the chaining command. Since we're about to emit the
549 * chaining command, let's set it back where it should go.
550 */
551 batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
552 assert(batch->end == current_bbo->bo.map + current_bbo->bo.size);
553
554 emit_batch_buffer_start(cmd_buffer, &bbo->bo, 0);
555
556 anv_batch_bo_finish(current_bbo, batch);
557 }
558
559 static VkResult
560 anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
561 {
562 struct anv_cmd_buffer *cmd_buffer = _data;
563 struct anv_batch_bo *new_bbo;
564
565 VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
566 if (result != VK_SUCCESS)
567 return result;
568
569 struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
570 if (seen_bbo == NULL) {
571 anv_batch_bo_destroy(new_bbo, cmd_buffer);
572 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
573 }
574 *seen_bbo = new_bbo;
575
576 cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
577
578 list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
579
580 anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);
581
582 return VK_SUCCESS;
583 }
584
585 static VkResult
586 anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
587 {
588 struct anv_cmd_buffer *cmd_buffer = _data;
589 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
590
591 anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
592 GEN8_MI_BATCH_BUFFER_START_length * 4);
593
594 return VK_SUCCESS;
595 }
596
597 /** Allocate a binding table
598 *
599 * This function allocates a binding table. This is a bit more complicated
600 * than one would think due to a combination of Vulkan driver design and some
601 * unfortunate hardware restrictions.
602 *
603 * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
604 * the binding table pointer which means that all binding tables need to live
605 * in the bottom 64k of surface state base address. The way the GL driver has
606 * classically dealt with this restriction is to emit all surface states
607 * on-the-fly into the batch and have a batch buffer smaller than 64k. This
608 * isn't really an option in Vulkan for a couple of reasons:
609 *
610 * 1) In Vulkan, we have growing (or chaining) batches so surface states have
611 * to live in their own buffer and we have to be able to re-emit
612 * STATE_BASE_ADDRESS as needed which requires a full pipeline stall. In
613 * order to avoid emitting STATE_BASE_ADDRESS any more often than needed
614 * (it's not that hard to hit 64k of just binding tables), we allocate
615 * surface state objects up-front when VkImageView is created. In order
616 * for this to work, surface state objects need to be allocated from a
617 * global buffer.
618 *
619 * 2) We tried to design the surface state system in such a way that it's
620 * already ready for bindless texturing. The way bindless texturing works
621 * on our hardware is that you have a big pool of surface state objects
622 * (with its own state base address) and the bindless handles are simply
623 * offsets into that pool. With the architecture we chose, we already
624 * have that pool and it's exactly the same pool that we use for regular
625 * surface states so we should already be ready for bindless.
626 *
627 * 3) For render targets, we need to be able to fill out the surface states
628 * later in vkBeginRenderPass so that we can assign clear colors
629 * correctly. One way to do this would be to just create the surface
630 * state data and then repeatedly copy it into the surface state BO every
631 * time we have to re-emit STATE_BASE_ADDRESS. While this works, it's
632 * rather annoying; it is much simpler to just allocate them up-front and
633 * re-use them for the entire render pass.
634 *
635 * While none of these are technically blockers for emitting state on the fly
636 * like we do in GL, the ability to have a single surface state pool
637 * simplifies things greatly. Unfortunately, it comes at a cost...
638 *
639 * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
640 * place the binding tables just anywhere in surface state base address.
641 * Because 64k isn't a whole lot of space, we can't simply restrict the
642 * surface state buffer to 64k; we have to be more clever. The solution we've
643 * chosen is to have a block pool with a maximum size of 2G that starts at
644 * zero and grows in both directions. All surface states are allocated from
645 * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
646 * binding tables from the bottom of the pool (negative offsets). Every time
647 * we allocate a new binding table block, we set surface state base address to
648 * point to the bottom of the binding table block. This way all of the
649 * binding tables in the block are in the bottom 64k of surface state base
650 * address. When we fill out the binding table, we add the distance between
651 * the bottom of our binding table block and zero of the block pool to the
652 * surface state offsets so that they are correct relative to our new surface
653 * state base address at the bottom of the binding table block.
654 *
655 * \see adjust_relocations_from_state_pool()
656 * \see adjust_relocations_to_state_pool()
657 *
658 * \param[in] entries The number of surface state entries the binding
659 * table should be able to hold.
660 *
661 * \param[out] state_offset The offset from surface state base address
662 * where the surface states live. This must be
663 * added to the surface state offset when it is
664 * written into the binding table entry.
665 *
666 * \return An anv_state representing the binding table
667 */
668 struct anv_state
669 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
670 uint32_t entries, uint32_t *state_offset)
671 {
672 struct anv_device *device = cmd_buffer->device;
673 struct anv_state_pool *state_pool = &device->surface_state_pool;
674 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
675 struct anv_state state;
676
677 state.alloc_size = align_u32(entries * 4, 32);
678
679 if (cmd_buffer->bt_next + state.alloc_size > state_pool->block_size)
680 return (struct anv_state) { 0 };
681
682 state.offset = cmd_buffer->bt_next;
683 state.map = anv_block_pool_map(&anv_binding_table_pool(device)->block_pool,
684 bt_block->offset + state.offset);
685
686 cmd_buffer->bt_next += state.alloc_size;
687
688 if (device->instance->physicalDevice.use_softpin) {
689 assert(bt_block->offset >= 0);
690 *state_offset = device->surface_state_pool.block_pool.start_address -
691 device->binding_table_pool.block_pool.start_address - bt_block->offset;
692 } else {
693 assert(bt_block->offset < 0);
694 *state_offset = -bt_block->offset;
695 }
696
697 return state;
698 }
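/* A rough usage sketch with a hypothetical caller (the real callers live in
 * genX_cmd_buffer.c and retry via anv_cmd_buffer_new_binding_table_block()
 * when the current block is full): a binding table is just an array of
 * 32-bit offsets relative to surface state base address, which is why
 * state_offset must be added to every surface state offset written into it.
 */
static inline VkResult
example_fill_binding_table(struct anv_cmd_buffer *cmd_buffer,
                           const struct anv_state *surface_states,
                           uint32_t count)
{
   uint32_t state_offset;
   struct anv_state bt =
      anv_cmd_buffer_alloc_binding_table(cmd_buffer, count, &state_offset);
   if (bt.map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   uint32_t *bt_map = bt.map;
   for (uint32_t i = 0; i < count; i++)
      bt_map[i] = surface_states[i].offset + state_offset;

   return VK_SUCCESS;
}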
699
700 struct anv_state
701 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
702 {
703 struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
704 return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
705 isl_dev->ss.size, isl_dev->ss.align);
706 }
707
708 struct anv_state
709 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
710 uint32_t size, uint32_t alignment)
711 {
712 return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
713 size, alignment);
714 }
715
716 VkResult
717 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
718 {
719 struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
720 if (bt_block == NULL) {
721 anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
722 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
723 }
724
725 *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
726 cmd_buffer->bt_next = 0;
727
728 return VK_SUCCESS;
729 }
730
731 VkResult
732 anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
733 {
734 struct anv_batch_bo *batch_bo;
735 VkResult result;
736
737 list_inithead(&cmd_buffer->batch_bos);
738
739 result = anv_batch_bo_create(cmd_buffer, &batch_bo);
740 if (result != VK_SUCCESS)
741 return result;
742
743 list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
744
745 cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
746 cmd_buffer->batch.user_data = cmd_buffer;
747
748 if (cmd_buffer->device->can_chain_batches) {
749 cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
750 } else {
751 cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
752 }
753
754 anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
755 GEN8_MI_BATCH_BUFFER_START_length * 4);
756
757 int success = u_vector_init(&cmd_buffer->seen_bbos,
758 sizeof(struct anv_bo *),
759 8 * sizeof(struct anv_bo *));
760 if (!success)
761 goto fail_batch_bo;
762
763 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
764
765 /* u_vector requires power-of-two size elements */
766 unsigned pow2_state_size = util_next_power_of_two(sizeof(struct anv_state));
767 success = u_vector_init(&cmd_buffer->bt_block_states,
768 pow2_state_size, 8 * pow2_state_size);
769 if (!success)
770 goto fail_seen_bbos;
771
772 result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
773 &cmd_buffer->pool->alloc);
774 if (result != VK_SUCCESS)
775 goto fail_bt_blocks;
776 cmd_buffer->last_ss_pool_center = 0;
777
778 result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
779 if (result != VK_SUCCESS)
780 goto fail_bt_blocks;
781
782 return VK_SUCCESS;
783
784 fail_bt_blocks:
785 u_vector_finish(&cmd_buffer->bt_block_states);
786 fail_seen_bbos:
787 u_vector_finish(&cmd_buffer->seen_bbos);
788 fail_batch_bo:
789 anv_batch_bo_destroy(batch_bo, cmd_buffer);
790
791 return result;
792 }
793
794 void
795 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
796 {
797 struct anv_state *bt_block;
798 u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
799 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
800 u_vector_finish(&cmd_buffer->bt_block_states);
801
802 anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
803
804 u_vector_finish(&cmd_buffer->seen_bbos);
805
806 /* Destroy all of the batch buffers */
807 list_for_each_entry_safe(struct anv_batch_bo, bbo,
808 &cmd_buffer->batch_bos, link) {
809 anv_batch_bo_destroy(bbo, cmd_buffer);
810 }
811 }
812
813 void
814 anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
815 {
816 /* Delete all but the first batch bo */
817 assert(!list_empty(&cmd_buffer->batch_bos));
818 while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
819 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
820 list_del(&bbo->link);
821 anv_batch_bo_destroy(bbo, cmd_buffer);
822 }
823 assert(!list_empty(&cmd_buffer->batch_bos));
824
825 anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
826 &cmd_buffer->batch,
827 GEN8_MI_BATCH_BUFFER_START_length * 4);
828
829 while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
830 struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
831 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
832 }
833 assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
834 cmd_buffer->bt_next = 0;
835
836 cmd_buffer->surface_relocs.num_relocs = 0;
837 _mesa_set_clear(cmd_buffer->surface_relocs.deps, NULL);
838 cmd_buffer->last_ss_pool_center = 0;
839
840 /* Reset the list of seen buffers */
841 cmd_buffer->seen_bbos.head = 0;
842 cmd_buffer->seen_bbos.tail = 0;
843
844 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) =
845 anv_cmd_buffer_current_batch_bo(cmd_buffer);
846 }
847
848 void
849 anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
850 {
851 struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
852
853 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
854 /* When we start a batch buffer, we subtract a certain amount of
855 * padding from the end to ensure that we always have room to emit a
856 * BATCH_BUFFER_START to chain to the next BO. We need to remove
857 * that padding before we end the batch; otherwise, we may end up
858 * with our BATCH_BUFFER_END in another BO.
859 */
860 cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
861 assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);
862
863 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END, bbe);
864
865 /* Round batch up to an even number of dwords. */
866 if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
867 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);
868
869 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
870 } else {
871 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
872 /* If this is a secondary command buffer, we need to determine the
873 * mode in which it will be executed with vkCmdExecuteCommands. We
874 * determine this statically here so that this stays in sync with the
875 * actual ExecuteCommands implementation.
876 */
877 const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
878 if (!cmd_buffer->device->can_chain_batches) {
879 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
880 } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
881 (length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
882 /* If the secondary has exactly one batch buffer in its list *and*
883 * that batch buffer is less than half of the maximum size, we're
884 * probably better off simply copying it into our batch.
885 */
886 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
887 } else if (!(cmd_buffer->usage_flags &
888 VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
889 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
890
891 /* In order to chain, we need this command buffer to contain an
892 * MI_BATCH_BUFFER_START which will jump back to the calling batch.
893 * It doesn't matter where it points now so long as it has a valid
894 * relocation. We'll adjust it later as part of the chaining
895 * process.
896 *
897 * We set the end of the batch a little short so we would be sure we
898 * have room for the chaining command. Since we're about to emit the
899 * chaining command, let's set it back where it should go.
900 */
901 cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
902 assert(cmd_buffer->batch.start == batch_bo->bo.map);
903 assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);
904
905 emit_batch_buffer_start(cmd_buffer, &batch_bo->bo, 0);
906 assert(cmd_buffer->batch.start == batch_bo->bo.map);
907 } else {
908 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
909 }
910 }
911
912 anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
913 }
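/* Summary of the exec_mode selection above for secondary command buffers:
 *
 *    !can_chain_batches                               -> GROW_AND_EMIT
 *    one batch bo, length < ANV_CMD_BUFFER_BATCH_SIZE / 2
 *                                                     -> EMIT (copied into primary)
 *    !VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT    -> CHAIN (BB_START patched later)
 *    otherwise                                        -> COPY_AND_CHAIN
 */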
914
915 static VkResult
916 anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
917 struct list_head *list)
918 {
919 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
920 struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
921 if (bbo_ptr == NULL)
922 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
923
924 *bbo_ptr = bbo;
925 }
926
927 return VK_SUCCESS;
928 }
929
930 void
931 anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
932 struct anv_cmd_buffer *secondary)
933 {
934 switch (secondary->exec_mode) {
935 case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
936 anv_batch_emit_batch(&primary->batch, &secondary->batch);
937 break;
938 case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
939 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
940 unsigned length = secondary->batch.end - secondary->batch.start;
941 anv_batch_bo_grow(primary, bbo, &primary->batch, length,
942 GEN8_MI_BATCH_BUFFER_START_length * 4);
943 anv_batch_emit_batch(&primary->batch, &secondary->batch);
944 break;
945 }
946 case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
947 struct anv_batch_bo *first_bbo =
948 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
949 struct anv_batch_bo *last_bbo =
950 list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
951
952 emit_batch_buffer_start(primary, &first_bbo->bo, 0);
953
954 struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
955 assert(primary->batch.start == this_bbo->bo.map);
956 uint32_t offset = primary->batch.next - primary->batch.start;
957
958 /* Make the tail of the secondary point back to right after the
959 * MI_BATCH_BUFFER_START in the primary batch.
960 */
961 anv_batch_bo_link(primary, last_bbo, this_bbo, offset);
962
963 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
964 break;
965 }
966 case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
967 struct list_head copy_list;
968 VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
969 secondary,
970 &copy_list);
971 if (result != VK_SUCCESS)
972 return; /* FIXME */
973
974 anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
975
976 struct anv_batch_bo *first_bbo =
977 list_first_entry(&copy_list, struct anv_batch_bo, link);
978 struct anv_batch_bo *last_bbo =
979 list_last_entry(&copy_list, struct anv_batch_bo, link);
980
981 cmd_buffer_chain_to_batch_bo(primary, first_bbo);
982
983 list_splicetail(&copy_list, &primary->batch_bos);
984
985 anv_batch_bo_continue(last_bbo, &primary->batch,
986 GEN8_MI_BATCH_BUFFER_START_length * 4);
987 break;
988 }
989 default:
990 assert(!"Invalid execution mode");
991 }
992
993 anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
994 &secondary->surface_relocs, 0);
995 }
996
997 struct anv_execbuf {
998 struct drm_i915_gem_execbuffer2 execbuf;
999
1000 struct drm_i915_gem_exec_object2 * objects;
1001 uint32_t bo_count;
1002 struct anv_bo ** bos;
1003
1004 /* Allocated length of the 'objects' and 'bos' arrays */
1005 uint32_t array_length;
1006
1007 bool has_relocs;
1008
1009 uint32_t fence_count;
1010 uint32_t fence_array_length;
1011 struct drm_i915_gem_exec_fence * fences;
1012 struct anv_syncobj ** syncobjs;
1013 };
1014
1015 static void
1016 anv_execbuf_init(struct anv_execbuf *exec)
1017 {
1018 memset(exec, 0, sizeof(*exec));
1019 }
1020
1021 static void
1022 anv_execbuf_finish(struct anv_execbuf *exec,
1023 const VkAllocationCallbacks *alloc)
1024 {
1025 vk_free(alloc, exec->objects);
1026 vk_free(alloc, exec->bos);
1027 vk_free(alloc, exec->fences);
1028 vk_free(alloc, exec->syncobjs);
1029 }
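/* Condensed sketch of the anv_execbuf lifecycle as used further down in
 * this file (error handling omitted; anv_gem_execbuffer() is the thin
 * execbuffer2 ioctl wrapper from anv_gem.c):
 *
 *    struct anv_execbuf execbuf;
 *    anv_execbuf_init(&execbuf);
 *    ... anv_execbuf_add_bo() / anv_execbuf_add_syncobj() as needed ...
 *    ... fill in execbuf.execbuf and submit with anv_gem_execbuffer() ...
 *    anv_execbuf_finish(&execbuf, &device->alloc);
 */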
1030
1031 static int
1032 _compare_bo_handles(const void *_bo1, const void *_bo2)
1033 {
1034 struct anv_bo * const *bo1 = _bo1;
1035 struct anv_bo * const *bo2 = _bo2;
1036
1037 return (*bo1)->gem_handle - (*bo2)->gem_handle;
1038 }
1039
1040 static VkResult
1041 anv_execbuf_add_bo_set(struct anv_execbuf *exec,
1042 struct set *deps,
1043 uint32_t extra_flags,
1044 const VkAllocationCallbacks *alloc);
1045
1046 static VkResult
1047 anv_execbuf_add_bo(struct anv_execbuf *exec,
1048 struct anv_bo *bo,
1049 struct anv_reloc_list *relocs,
1050 uint32_t extra_flags,
1051 const VkAllocationCallbacks *alloc)
1052 {
1053 struct drm_i915_gem_exec_object2 *obj = NULL;
1054
1055 if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
1056 obj = &exec->objects[bo->index];
1057
1058 if (obj == NULL) {
1059 /* We've never seen this one before. Add it to the list and assign
1060 * an id that we can use later.
1061 */
1062 if (exec->bo_count >= exec->array_length) {
1063 uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
1064
1065 struct drm_i915_gem_exec_object2 *new_objects =
1066 vk_alloc(alloc, new_len * sizeof(*new_objects),
1067 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1068 if (new_objects == NULL)
1069 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1070
1071 struct anv_bo **new_bos =
1072 vk_alloc(alloc, new_len * sizeof(*new_bos),
1073 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1074 if (new_bos == NULL) {
1075 vk_free(alloc, new_objects);
1076 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1077 }
1078
1079 if (exec->objects) {
1080 memcpy(new_objects, exec->objects,
1081 exec->bo_count * sizeof(*new_objects));
1082 memcpy(new_bos, exec->bos,
1083 exec->bo_count * sizeof(*new_bos));
1084 }
1085
1086 vk_free(alloc, exec->objects);
1087 vk_free(alloc, exec->bos);
1088
1089 exec->objects = new_objects;
1090 exec->bos = new_bos;
1091 exec->array_length = new_len;
1092 }
1093
1094 assert(exec->bo_count < exec->array_length);
1095
1096 bo->index = exec->bo_count++;
1097 obj = &exec->objects[bo->index];
1098 exec->bos[bo->index] = bo;
1099
1100 obj->handle = bo->gem_handle;
1101 obj->relocation_count = 0;
1102 obj->relocs_ptr = 0;
1103 obj->alignment = 0;
1104 obj->offset = bo->offset;
1105 obj->flags = (bo->flags & ~ANV_BO_FLAG_MASK) | extra_flags;
1106 obj->rsvd1 = 0;
1107 obj->rsvd2 = 0;
1108 }
1109
1110 if (relocs != NULL) {
1111 assert(obj->relocation_count == 0);
1112
1113 if (relocs->num_relocs > 0) {
1114 /* This is the first time we've ever seen a list of relocations for
1115 * this BO. Go ahead and set the relocations and then walk the list
1116 * of relocations and add them all.
1117 */
1118 exec->has_relocs = true;
1119 obj->relocation_count = relocs->num_relocs;
1120 obj->relocs_ptr = (uintptr_t) relocs->relocs;
1121
1122 for (size_t i = 0; i < relocs->num_relocs; i++) {
1123 VkResult result;
1124
1125 /* A quick sanity check on relocations */
1126 assert(relocs->relocs[i].offset < bo->size);
1127 result = anv_execbuf_add_bo(exec, relocs->reloc_bos[i], NULL,
1128 extra_flags, alloc);
1129
1130 if (result != VK_SUCCESS)
1131 return result;
1132 }
1133 }
1134
1135 return anv_execbuf_add_bo_set(exec, relocs->deps, extra_flags, alloc);
1136 }
1137
1138 return VK_SUCCESS;
1139 }
1140
1141 /* Add BO dependencies to execbuf */
1142 static VkResult
1143 anv_execbuf_add_bo_set(struct anv_execbuf *exec,
1144 struct set *deps,
1145 uint32_t extra_flags,
1146 const VkAllocationCallbacks *alloc)
1147 {
1148 if (!deps || deps->entries <= 0)
1149 return VK_SUCCESS;
1150
1151 const uint32_t entries = deps->entries;
1152 struct anv_bo **bos =
1153 vk_alloc(alloc, entries * sizeof(*bos),
1154 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1155 if (bos == NULL)
1156 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1157
1158 struct anv_bo **bo = bos;
1159 set_foreach(deps, entry) {
1160 *bo++ = (void *)entry->key;
1161 }
1162
1163 qsort(bos, entries, sizeof(struct anv_bo*), _compare_bo_handles);
1164
1165 VkResult result = VK_SUCCESS;
1166 for (bo = bos; bo < bos + entries; bo++) {
1167 result = anv_execbuf_add_bo(exec, *bo, NULL, extra_flags, alloc);
1168 if (result != VK_SUCCESS)
1169 break;
1170 }
1171
1172 vk_free(alloc, bos);
1173
1174 return result;
1175 }
1176
1177 static VkResult
1178 anv_execbuf_add_syncobj(struct anv_execbuf *exec,
1179 uint32_t handle, uint32_t flags,
1180 const VkAllocationCallbacks *alloc)
1181 {
1182 assert(flags != 0);
1183
1184 if (exec->fence_count >= exec->fence_array_length) {
1185 uint32_t new_len = MAX2(exec->fence_array_length * 2, 64);
1186
1187 exec->fences = vk_realloc(alloc, exec->fences,
1188 new_len * sizeof(*exec->fences),
1189 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1190 if (exec->fences == NULL)
1191 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1192
1193 exec->fence_array_length = new_len;
1194 }
1195
1196 exec->fences[exec->fence_count] = (struct drm_i915_gem_exec_fence) {
1197 .handle = handle,
1198 .flags = flags,
1199 };
1200
1201 exec->fence_count++;
1202
1203 return VK_SUCCESS;
1204 }
1205
1206 static void
1207 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
1208 struct anv_reloc_list *list)
1209 {
1210 for (size_t i = 0; i < list->num_relocs; i++)
1211 list->relocs[i].target_handle = list->reloc_bos[i]->index;
1212 }
1213
1214 static void
1215 adjust_relocations_from_state_pool(struct anv_state_pool *pool,
1216 struct anv_reloc_list *relocs,
1217 uint32_t last_pool_center_bo_offset)
1218 {
1219 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1220 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1221
1222 for (size_t i = 0; i < relocs->num_relocs; i++) {
1223 /* All of the relocations from this block pool to other BO's should
1224 * have been emitted relative to the surface block pool center. We
1225 * need to add the center offset to make them relative to the
1226 * beginning of the actual GEM bo.
1227 */
1228 relocs->relocs[i].offset += delta;
1229 }
1230 }
1231
1232 static void
1233 adjust_relocations_to_state_pool(struct anv_state_pool *pool,
1234 struct anv_bo *from_bo,
1235 struct anv_reloc_list *relocs,
1236 uint32_t last_pool_center_bo_offset)
1237 {
1238 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1239 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1240
1241 /* When we initially emit relocations into a block pool, we don't
1242 * actually know what the final center_bo_offset will be so we just emit
1243 * it as if center_bo_offset == 0. Now that we know what the center
1244 * offset is, we need to walk the list of relocations and adjust any
1245 * relocations that point to the pool bo with the correct offset.
1246 */
1247 for (size_t i = 0; i < relocs->num_relocs; i++) {
1248 if (relocs->reloc_bos[i] == pool->block_pool.bo) {
1249 /* Adjust the delta value in the relocation to correctly
1250 * correspond to the new delta. Initially, this value may have
1251 * been negative (if treated as unsigned), but we trust in
1252 * uint32_t roll-over to fix that for us at this point.
1253 */
1254 relocs->relocs[i].delta += delta;
1255
1256 /* Since the delta has changed, we need to update the actual
1257 * relocated value with the new presumed value. This function
1258 * should only be called on batch buffers, so we know it isn't in
1259 * use by the GPU at the moment.
1260 */
1261 assert(relocs->relocs[i].offset < from_bo->size);
1262 write_reloc(pool->block_pool.device,
1263 from_bo->map + relocs->relocs[i].offset,
1264 relocs->relocs[i].presumed_offset +
1265 relocs->relocs[i].delta, false);
1266 }
1267 }
1268 }
1269
1270 static void
1271 anv_reloc_list_apply(struct anv_device *device,
1272 struct anv_reloc_list *list,
1273 struct anv_bo *bo,
1274 bool always_relocate)
1275 {
1276 for (size_t i = 0; i < list->num_relocs; i++) {
1277 struct anv_bo *target_bo = list->reloc_bos[i];
1278 if (list->relocs[i].presumed_offset == target_bo->offset &&
1279 !always_relocate)
1280 continue;
1281
1282 void *p = bo->map + list->relocs[i].offset;
1283 write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
1284 list->relocs[i].presumed_offset = target_bo->offset;
1285 }
1286 }
1287
1288 /**
1289 * This function applies the relocation for a command buffer and writes the
1290 * actual addresses into the buffers as per what we were told by the kernel on
1291 * the previous execbuf2 call. This should be safe to do because, for each
1292 * relocated address, we have two cases:
1293 *
1294 * 1) The target BO is inactive (as seen by the kernel). In this case, it is
1295 * not in use by the GPU so updating the address is 100% ok. It won't be
1296 * in-use by the GPU (from our context) again until the next execbuf2
1297 * happens. If the kernel decides to move it in the next execbuf2, it
1298 * will have to do the relocations itself, but that's ok because it should
1299 * have all of the information needed to do so.
1300 *
1301 * 2) The target BO is active (as seen by the kernel). In this case, it
1302 * hasn't moved since the last execbuffer2 call because GTT shuffling
1303 * *only* happens when the BO is idle. (From our perspective, it only
1304 * happens inside the execbuffer2 ioctl, but the shuffling may be
1305 * triggered by another ioctl, with full-ppgtt this is limited to only
1306 * execbuffer2 ioctls on the same context, or memory pressure.) Since the
1307 * target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
1308 * address and the relocated value we are writing into the BO will be the
1309 * same as the value that is already there.
1310 *
1311 * There is also a possibility that the target BO is active but the exact
1312 * RENDER_SURFACE_STATE object we are writing the relocation into isn't in
1313 * use. In this case, the address currently in the RENDER_SURFACE_STATE
1314 * may be stale but it's still safe to write the relocation because that
1315 * particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
1316 * won't be until the next execbuf2 call.
1317 *
1318 * By doing relocations on the CPU, we can tell the kernel that it doesn't
1319 * need to bother. We want to do this because the surface state buffer is
1320 * used by every command buffer so, if the kernel does the relocations, it
1321 * will always be busy and the kernel will always stall. This is also
1322 * probably the fastest mechanism for doing relocations since the kernel would
1323 * have to make a full copy of all the relocation lists.
1324 */
1325 static bool
1326 relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
1327 struct anv_execbuf *exec)
1328 {
1329 if (!exec->has_relocs)
1330 return true;
1331
1332 static int userspace_relocs = -1;
1333 if (userspace_relocs < 0)
1334 userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
1335 if (!userspace_relocs)
1336 return false;
1337
1338 /* First, we have to check to see whether or not we can even do the
1339 * relocation. New buffers which have never been submitted to the kernel
1340 * don't have a valid offset so we need to let the kernel do relocations so
1341 * that we can get offsets for them. On future execbuf2 calls, those
1342 * buffers will have offsets and we will be able to skip relocating.
1343 * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
1344 */
1345 for (uint32_t i = 0; i < exec->bo_count; i++) {
1346 if (exec->bos[i]->offset == (uint64_t)-1)
1347 return false;
1348 }
1349
1350 /* Since surface states are shared between command buffers and we don't
1351 * know what order they will be submitted to the kernel, we don't know
1352 * what address is actually written in the surface state object at any
1353 * given time. The only option is to always relocate them.
1354 */
1355 anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
1356 cmd_buffer->device->surface_state_pool.block_pool.bo,
1357 true /* always relocate surface states */);
1358
1359 /* Since we own all of the batch buffers, we know what values are stored
1360 * in the relocated addresses and only have to update them if the offsets
1361 * have changed.
1362 */
1363 struct anv_batch_bo **bbo;
1364 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1365 anv_reloc_list_apply(cmd_buffer->device,
1366 &(*bbo)->relocs, &(*bbo)->bo, false);
1367 }
1368
1369 for (uint32_t i = 0; i < exec->bo_count; i++)
1370 exec->objects[i].offset = exec->bos[i]->offset;
1371
1372 return true;
1373 }
1374
1375 static VkResult
1376 setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
1377 struct anv_cmd_buffer *cmd_buffer)
1378 {
1379 struct anv_batch *batch = &cmd_buffer->batch;
1380 struct anv_state_pool *ss_pool =
1381 &cmd_buffer->device->surface_state_pool;
1382
1383 adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
1384 cmd_buffer->last_ss_pool_center);
1385 VkResult result;
1386 struct anv_bo *bo;
1387 if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
1388 anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
1389 result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
1390 &cmd_buffer->device->alloc);
1391 if (result != VK_SUCCESS)
1392 return result;
1393 }
1394 /* Add surface dependencies (BOs) to the execbuf */
1395 anv_execbuf_add_bo_set(execbuf, cmd_buffer->surface_relocs.deps, 0,
1396 &cmd_buffer->device->alloc);
1397
1398 /* Add the BOs for all the pinned buffers */
1399 if (cmd_buffer->device->pinned_buffers->entries) {
1400 struct set *pinned_bos = _mesa_pointer_set_create(NULL);
1401 if (pinned_bos == NULL)
1402 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
1403 set_foreach(cmd_buffer->device->pinned_buffers, entry) {
1404 const struct anv_buffer *buffer = entry->key;
1405 _mesa_set_add(pinned_bos, buffer->address.bo);
1406 }
1407 anv_execbuf_add_bo_set(execbuf, pinned_bos, 0,
1408 &cmd_buffer->device->alloc);
1409 _mesa_set_destroy(pinned_bos, NULL);
1410 }
1411
1412 struct anv_block_pool *pool;
1413 pool = &cmd_buffer->device->dynamic_state_pool.block_pool;
1414 anv_block_pool_foreach_bo(bo, pool) {
1415 result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
1416 &cmd_buffer->device->alloc);
1417 if (result != VK_SUCCESS)
1418 return result;
1419 }
1420
1421 pool = &cmd_buffer->device->instruction_state_pool.block_pool;
1422 anv_block_pool_foreach_bo(bo, pool) {
1423 result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
1424 &cmd_buffer->device->alloc);
1425 if (result != VK_SUCCESS)
1426 return result;
1427 }
1428
1429 pool = &cmd_buffer->device->binding_table_pool.block_pool;
1430 anv_block_pool_foreach_bo(bo, pool) {
1431 result = anv_execbuf_add_bo(execbuf, bo, NULL, 0,
1432 &cmd_buffer->device->alloc);
1433 if (result != VK_SUCCESS)
1434 return result;
1435 }
1436 } else {
1437 /* Since we aren't in the softpin case, all of our STATE_BASE_ADDRESS BOs
1438 * will get added automatically by processing relocations on the batch
1439 * buffer. We have to add the surface state BO manually because it has
1440 * relocations of its own that we need to be sure are processed.
1441 */
1442 result = anv_execbuf_add_bo(execbuf, ss_pool->block_pool.bo,
1443 &cmd_buffer->surface_relocs, 0,
1444 &cmd_buffer->device->alloc);
1445 if (result != VK_SUCCESS)
1446 return result;
1447 }
1448
1449 /* First, we walk over all of the bos we've seen and add them and their
1450 * relocations to the validate list.
1451 */
1452 struct anv_batch_bo **bbo;
1453 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1454 adjust_relocations_to_state_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
1455 cmd_buffer->last_ss_pool_center);
1456
1457 result = anv_execbuf_add_bo(execbuf, &(*bbo)->bo, &(*bbo)->relocs, 0,
1458 &cmd_buffer->device->alloc);
1459 if (result != VK_SUCCESS)
1460 return result;
1461 }
1462
1463 /* Now that we've adjusted all of the surface state relocations, we need to
1464 * record the surface state pool center so future executions of the command
1465 * buffer can adjust correctly.
1466 */
1467 cmd_buffer->last_ss_pool_center = ss_pool->block_pool.center_bo_offset;
1468
1469 struct anv_batch_bo *first_batch_bo =
1470 list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
1471
1472 /* The kernel requires that the last entry in the validation list be the
1473 * batch buffer to execute. We can simply swap the element
1474 * corresponding to the first batch_bo in the chain with the last
1475 * element in the list.
1476 */
1477 if (first_batch_bo->bo.index != execbuf->bo_count - 1) {
1478 uint32_t idx = first_batch_bo->bo.index;
1479 uint32_t last_idx = execbuf->bo_count - 1;
1480
1481 struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
1482 assert(execbuf->bos[idx] == &first_batch_bo->bo);
1483
1484 execbuf->objects[idx] = execbuf->objects[last_idx];
1485 execbuf->bos[idx] = execbuf->bos[last_idx];
1486 execbuf->bos[idx]->index = idx;
1487
1488 execbuf->objects[last_idx] = tmp_obj;
1489 execbuf->bos[last_idx] = &first_batch_bo->bo;
1490 first_batch_bo->bo.index = last_idx;
1491 }
1492
1493 /* If we are pinning our BOs, we shouldn't have to relocate anything */
1494 if (cmd_buffer->device->instance->physicalDevice.use_softpin)
1495 assert(!execbuf->has_relocs);
1496
1497 /* Now we go through and fixup all of the relocation lists to point to
1498 * the correct indices in the object array. We have to do this after we
1499 * reorder the list above as some of the indices may have changed.
1500 */
1501 if (execbuf->has_relocs) {
1502 u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
1503 anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
1504
1505 anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
1506 }
1507
1508 if (!cmd_buffer->device->info.has_llc) {
1509 __builtin_ia32_mfence();
1510 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1511 for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
1512 __builtin_ia32_clflush((*bbo)->bo.map + i);
1513 }
1514 }
1515
1516 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1517 .buffers_ptr = (uintptr_t) execbuf->objects,
1518 .buffer_count = execbuf->bo_count,
1519 .batch_start_offset = 0,
1520 .batch_len = batch->next - batch->start,
1521 .cliprects_ptr = 0,
1522 .num_cliprects = 0,
1523 .DR1 = 0,
1524 .DR4 = 0,
1525 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
1526 .rsvd1 = cmd_buffer->device->context_id,
1527 .rsvd2 = 0,
1528 };
1529
1530 if (relocate_cmd_buffer(cmd_buffer, execbuf)) {
1531 /* If we were able to successfully relocate everything, tell the kernel
1532 * that it can skip doing relocations. The requirement for using
1533 * NO_RELOC is:
1534 *
1535 * 1) The addresses written in the objects must match the corresponding
1536 * reloc.presumed_offset which in turn must match the corresponding
1537 * execobject.offset.
1538 *
1539 * 2) To avoid stalling, execobject.offset should match the current
1540 * address of that object within the active context.
1541 *
1542 * In order to satisfy all of the invariants that make userspace
1543 * relocations to be safe (see relocate_cmd_buffer()), we need to
1544 * further ensure that the addresses we use match those used by the
1545 * kernel for the most recent execbuf2.
1546 *
1547 * The kernel may still choose to do relocations anyway if something has
1548 * moved in the GTT. In this case, the relocation list still needs to be
1549 * valid. All relocations on the batch buffers are already valid and
1550 * kept up-to-date. For surface state relocations, by applying the
1551 * relocations in relocate_cmd_buffer, we ensured that the address in
1552 * the RENDER_SURFACE_STATE matches presumed_offset, so it should be
1553 * safe for the kernel to relocate them as needed.
1554 */
1555 execbuf->execbuf.flags |= I915_EXEC_NO_RELOC;
1556 } else {
1557 /* In the case where we fall back to doing kernel relocations, we need
1558 * to ensure that the relocation list is valid. All relocations on the
1559 * batch buffers are already valid and kept up-to-date. Since surface
1560 * states are shared between command buffers and we don't know what
1561 * order they will be submitted to the kernel, we don't know what
1562 * address is actually written in the surface state object at any given
1563 * time. The only option is to set a bogus presumed offset and let the
1564 * kernel relocate them.
1565 */
1566 for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
1567 cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
1568 }
1569
1570 return VK_SUCCESS;
1571 }
1572
1573 static VkResult
1574 setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_device *device)
1575 {
1576 VkResult result = anv_execbuf_add_bo(execbuf, &device->trivial_batch_bo,
1577 NULL, 0, &device->alloc);
1578 if (result != VK_SUCCESS)
1579 return result;
1580
1581 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1582 .buffers_ptr = (uintptr_t) execbuf->objects,
1583 .buffer_count = execbuf->bo_count,
1584 .batch_start_offset = 0,
1585 .batch_len = 8, /* GEN7_MI_BATCH_BUFFER_END and NOOP */
1586 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
1587 .rsvd1 = device->context_id,
1588 .rsvd2 = 0,
1589 };
1590
1591 return VK_SUCCESS;
1592 }
1593
1594 /* Find the batch BO containing a given GPU address, for batch decoding */
1595 static struct gen_batch_decode_bo
1596 decode_get_bo(void *v_batch, uint64_t address)
1597 {
1598 struct anv_cmd_buffer *cmd_buffer = v_batch;
1599 struct anv_batch_bo *bo;
1600
1601 u_vector_foreach(bo, &cmd_buffer->seen_bbos) {
1602 /* The decoder zeroes out the top 16 bits, so we need to as well */
1603 uint64_t bo_address = bo->bo.offset & (~0ull >> 16);
1604
1605 if (address >= bo_address && address < bo_address + bo->bo.size) {
1606 return (struct gen_batch_decode_bo) {
1607 .addr = address,
1608 .size = bo->bo.size,
1609 .map = bo->bo.map,
1610 };
1611 }
1612 }
1613
1614 return (struct gen_batch_decode_bo) { };
1615 }
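/* The decoder uses the callback above to resolve graphics addresses it finds
 * while walking the batch (chained batches, indirect state, and so on).  A
 * zero-initialized gen_batch_decode_bo (NULL map) tells it the address does
 * not fall inside any BO this command buffer has seen, so it must not try to
 * dereference it.
 */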
1616
1617 static void
1618 decode_batch(struct anv_cmd_buffer *cmd_buffer)
1619 {
1620 struct gen_batch_decode_ctx ctx;
1621 struct anv_batch_bo *bo = u_vector_head(&cmd_buffer->seen_bbos);
1622 const unsigned decode_flags =
1623 GEN_BATCH_DECODE_FULL |
1624 ((INTEL_DEBUG & DEBUG_COLOR) ? GEN_BATCH_DECODE_IN_COLOR : 0) |
1625 GEN_BATCH_DECODE_OFFSETS |
1626 GEN_BATCH_DECODE_FLOATS;
1627
1628 gen_batch_decode_ctx_init(&ctx,
1629 &cmd_buffer->device->instance->physicalDevice.info,
1630 stderr, decode_flags, NULL,
1631 decode_get_bo, NULL, cmd_buffer);
1632
1633 gen_print_batch(&ctx, bo->bo.map, bo->bo.size, bo->bo.offset);
1634
1635 gen_batch_decode_ctx_finish(&ctx);
1636 }
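/* decode_batch() only runs when batch dumping is enabled through the
 * INTEL_DEBUG environment variable, e.g.
 *
 *    INTEL_DEBUG=bat ./my_vulkan_app
 *
 * ("bat" sets the DEBUG_BATCH bit tested in anv_cmd_buffer_execbuf() below;
 * the application name here is just a placeholder).
 */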
1637
1638 VkResult
1639 anv_cmd_buffer_execbuf(struct anv_device *device,
1640 struct anv_cmd_buffer *cmd_buffer,
1641 const VkSemaphore *in_semaphores,
1642 uint32_t num_in_semaphores,
1643 const VkSemaphore *out_semaphores,
1644 uint32_t num_out_semaphores,
1645 VkFence _fence)
1646 {
1647 ANV_FROM_HANDLE(anv_fence, fence, _fence);
1648
1649 struct anv_execbuf execbuf;
1650 anv_execbuf_init(&execbuf);
1651
1652 int in_fence = -1;
1653 VkResult result = VK_SUCCESS;
1654 for (uint32_t i = 0; i < num_in_semaphores; i++) {
1655 ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
1656 struct anv_semaphore_impl *impl =
1657 semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
1658 &semaphore->temporary : &semaphore->permanent;
1659
1660 switch (impl->type) {
1661 case ANV_SEMAPHORE_TYPE_BO:
1662 result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
1663 0, &device->alloc);
1664 if (result != VK_SUCCESS)
1665 return result;
1666 break;
1667
1668 case ANV_SEMAPHORE_TYPE_SYNC_FILE:
1669 if (in_fence == -1) {
1670 in_fence = impl->fd;
1671 } else {
1672 int merge = anv_gem_sync_file_merge(device, in_fence, impl->fd);
1673 if (merge == -1)
1674 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
1675
1676 close(impl->fd);
1677 close(in_fence);
1678 in_fence = merge;
1679 }
1680
1681 impl->fd = -1;
1682 break;
1683
1684 case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
1685 result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
1686 I915_EXEC_FENCE_WAIT,
1687 &device->alloc);
1688 if (result != VK_SUCCESS)
1689 return result;
1690 break;
1691
1692 default:
1693 break;
1694 }
1695 }
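   /* All sync-file waits above were folded into the single in_fence fd
    * (merging as needed via anv_gem_sync_file_merge) because execbuf2 only
    * accepts one fence-in file descriptor; it is passed in the low 32 bits
    * of rsvd2 further down.
    */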
1696
1697 bool need_out_fence = false;
1698 for (uint32_t i = 0; i < num_out_semaphores; i++) {
1699 ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
1700
1701 /* Under most circumstances, out semaphores won't be temporary. However,
1702 * the spec does allow it for opaque_fd. From the Vulkan 1.0.53 spec:
1703 *
1704 * "If the import is temporary, the implementation must restore the
1705 * semaphore to its prior permanent state after submitting the next
1706 * semaphore wait operation."
1707 *
1708 * The spec says nothing whatsoever about signal operations on
1709 * temporarily imported semaphores so it appears they are allowed.
1710 * There are also CTS tests that require this to work.
1711 */
1712 struct anv_semaphore_impl *impl =
1713 semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
1714 &semaphore->temporary : &semaphore->permanent;
1715
1716 switch (impl->type) {
1717 case ANV_SEMAPHORE_TYPE_BO:
1718 result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
1719 EXEC_OBJECT_WRITE, &device->alloc);
1720 if (result != VK_SUCCESS)
1721 return result;
1722 break;
1723
1724 case ANV_SEMAPHORE_TYPE_SYNC_FILE:
1725 need_out_fence = true;
1726 break;
1727
1728 case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
1729 result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
1730 I915_EXEC_FENCE_SIGNAL,
1731 &device->alloc);
1732 if (result != VK_SUCCESS)
1733 return result;
1734 break;
1735
1736 default:
1737 break;
1738 }
1739 }
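   /* Sync-file out semaphores only set need_out_fence here; the kernel can
    * return at most one fence-out fd per execbuf, so a single fd is requested
    * and then dup()ed into each such semaphore after the submission below.
    */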
1740
1741 if (fence) {
1742 /* As with the out semaphores above, the fence payload is usually the
1743 * permanent one, but the spec does allow a temporary payload to be
1744 * imported (e.g. through an opaque_fd handle).
1745 *
1746 * If a temporary payload has been imported, it is the payload that
1747 * this submission must signal, so it takes precedence over the
1748 * permanent payload here.
1749 *
1750 * The temporary payload is dropped and the permanent payload is
1751 * restored the next time the fence is reset, outside of this function.
1752 */
1753 struct anv_fence_impl *impl =
1754 fence->temporary.type != ANV_FENCE_TYPE_NONE ?
1755 &fence->temporary : &fence->permanent;
1756
1757 switch (impl->type) {
1758 case ANV_FENCE_TYPE_BO:
1759 result = anv_execbuf_add_bo(&execbuf, &impl->bo.bo, NULL,
1760 EXEC_OBJECT_WRITE, &device->alloc);
1761 if (result != VK_SUCCESS)
1762 return result;
1763 break;
1764
1765 case ANV_FENCE_TYPE_SYNCOBJ:
1766 result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
1767 I915_EXEC_FENCE_SIGNAL,
1768 &device->alloc);
1769 if (result != VK_SUCCESS)
1770 return result;
1771 break;
1772
1773 default:
1774 unreachable("Invalid fence type");
1775 }
1776 }
1777
1778 if (cmd_buffer)
1779 result = setup_execbuf_for_cmd_buffer(&execbuf, cmd_buffer);
1780 else
1781 result = setup_empty_execbuf(&execbuf, device);
1782
1783 if (result != VK_SUCCESS)
1784 return result;
1785
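   /* I915_EXEC_FENCE_ARRAY reuses the otherwise-unused cliprect fields:
    * cliprects_ptr points at an array of struct drm_i915_gem_exec_fence and
    * num_cliprects holds its length.
    */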
1786 if (execbuf.fence_count > 0) {
1787 assert(device->instance->physicalDevice.has_syncobj);
1788 execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
1789 execbuf.execbuf.num_cliprects = execbuf.fence_count;
1790 execbuf.execbuf.cliprects_ptr = (uintptr_t) execbuf.fences;
1791 }
1792
1793 if (in_fence != -1) {
1794 execbuf.execbuf.flags |= I915_EXEC_FENCE_IN;
1795 execbuf.execbuf.rsvd2 |= (uint32_t)in_fence;
1796 }
1797
1798 if (need_out_fence)
1799 execbuf.execbuf.flags |= I915_EXEC_FENCE_OUT;
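   /* For sync-file in/out fences, rsvd2 is split in two: the low 32 bits
    * carry the fd the kernel should wait on (I915_EXEC_FENCE_IN) and the
    * high 32 bits return the fd it signals (I915_EXEC_FENCE_OUT), read back
    * after the execbuf below.
    */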
1800
1801 if (unlikely(cmd_buffer && (INTEL_DEBUG & DEBUG_BATCH)))
1802 decode_batch(cmd_buffer);
1803
1804 result = anv_device_execbuf(device, &execbuf.execbuf, execbuf.bos);
1805
1806 /* Execbuf does not consume the in_fence. It's our job to close it. */
1807 if (in_fence != -1)
1808 close(in_fence);
1809
1810 for (uint32_t i = 0; i < num_in_semaphores; i++) {
1811 ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
1812 /* From the Vulkan 1.0.53 spec:
1813 *
1814 * "If the import is temporary, the implementation must restore the
1815 * semaphore to its prior permanent state after submitting the next
1816 * semaphore wait operation."
1817 *
1818 * This has to happen after the execbuf in case we close any syncobjs in
1819 * the process.
1820 */
1821 anv_semaphore_reset_temporary(device, semaphore);
1822 }
1823
1824 if (fence && fence->permanent.type == ANV_FENCE_TYPE_BO) {
1825 /* BO fences can't be shared, so they can't be temporary. */
1826 assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
1827
1828 /* Once the execbuf has returned, we need to set the fence state to
1829 * SUBMITTED. We can't do this before calling execbuf because
1830 * anv_GetFenceStatus does not take the global device lock before checking
1831 * fence->state and could otherwise observe SUBMITTED too early.
1832 *
1833 * We set the fence state to SUBMITTED regardless of whether or not the
1834 * execbuf succeeds because we need to ensure that vkWaitForFences() and
1835 * vkGetFenceStatus() return a valid result (VK_ERROR_DEVICE_LOST or
1836 * VK_SUCCESS) in a finite amount of time even if execbuf fails.
1837 */
1838 fence->permanent.bo.state = ANV_BO_FENCE_STATE_SUBMITTED;
1839 }
1840
1841 if (result == VK_SUCCESS && need_out_fence) {
1842 int out_fence = execbuf.execbuf.rsvd2 >> 32;
1843 for (uint32_t i = 0; i < num_out_semaphores; i++) {
1844 ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
1845 /* Out fences can't have temporary state because that would imply
1846 * that we imported a sync file and are trying to signal it.
1847 */
1848 assert(semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE);
1849 struct anv_semaphore_impl *impl = &semaphore->permanent;
1850
1851 if (impl->type == ANV_SEMAPHORE_TYPE_SYNC_FILE) {
1852 assert(impl->fd == -1);
1853 impl->fd = dup(out_fence);
1854 }
1855 }
1856 close(out_fence);
1857 }
1858
1859 anv_execbuf_finish(&execbuf, &device->alloc);
1860
1861 return result;
1862 }
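/* For context, a rough sketch (not a verbatim copy of the caller) of how
 * anv_QueueSubmit() is expected to drive this function; cmd_buffer may be
 * NULL for submissions that only signal fences/semaphores, in which case the
 * trivial batch from setup_empty_execbuf() is used:
 *
 *    result = anv_cmd_buffer_execbuf(device, cmd_buffer,
 *                                    pSubmits[i].pWaitSemaphores,
 *                                    pSubmits[i].waitSemaphoreCount,
 *                                    pSubmits[i].pSignalSemaphores,
 *                                    pSubmits[i].signalSemaphoreCount,
 *                                    fence);
 */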