util: use C99 declaration in the for-loop set_foreach() macro
[mesa.git] / src / intel / vulkan / anv_batch_chain.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "genxml/gen8_pack.h"
33
34 #include "util/debug.h"
35
36 /** \file anv_batch_chain.c
37 *
38 * This file contains functions related to anv_cmd_buffer as a data
39 * structure. This involves everything required to create and destroy
40 * the actual batch buffers as well as link them together and handle
41 * relocations and surface state. It specifically does *not* contain any
42 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
43 */
44
45 /*-----------------------------------------------------------------------*
46 * Functions related to anv_reloc_list
47 *-----------------------------------------------------------------------*/
48
49 static VkResult
50 anv_reloc_list_init_clone(struct anv_reloc_list *list,
51 const VkAllocationCallbacks *alloc,
52 const struct anv_reloc_list *other_list)
53 {
54 if (other_list) {
55 list->num_relocs = other_list->num_relocs;
56 list->array_length = other_list->array_length;
57 } else {
58 list->num_relocs = 0;
59 list->array_length = 256;
60 }
61
62 list->relocs =
63 vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
64 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
65
66 if (list->relocs == NULL)
67 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
68
69 list->reloc_bos =
70 vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
71 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
72
73 if (list->reloc_bos == NULL) {
74 vk_free(alloc, list->relocs);
75 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
76 }
77
78 list->deps = _mesa_set_create(NULL, _mesa_hash_pointer,
79 _mesa_key_pointer_equal);
80
81 if (!list->deps) {
82 vk_free(alloc, list->relocs);
83 vk_free(alloc, list->reloc_bos);
84 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
85 }
86
87 if (other_list) {
88 memcpy(list->relocs, other_list->relocs,
89 list->array_length * sizeof(*list->relocs));
90 memcpy(list->reloc_bos, other_list->reloc_bos,
91 list->array_length * sizeof(*list->reloc_bos));
92 set_foreach(other_list->deps, entry) {
93 _mesa_set_add_pre_hashed(list->deps, entry->hash, entry->key);
94 }
95 }
96
97 return VK_SUCCESS;
98 }
99
100 VkResult
101 anv_reloc_list_init(struct anv_reloc_list *list,
102 const VkAllocationCallbacks *alloc)
103 {
104 return anv_reloc_list_init_clone(list, alloc, NULL);
105 }
106
107 void
108 anv_reloc_list_finish(struct anv_reloc_list *list,
109 const VkAllocationCallbacks *alloc)
110 {
111 vk_free(alloc, list->relocs);
112 vk_free(alloc, list->reloc_bos);
113 _mesa_set_destroy(list->deps, NULL);
114 }
115
116 static VkResult
117 anv_reloc_list_grow(struct anv_reloc_list *list,
118 const VkAllocationCallbacks *alloc,
119 size_t num_additional_relocs)
120 {
121 if (list->num_relocs + num_additional_relocs <= list->array_length)
122 return VK_SUCCESS;
123
124 size_t new_length = list->array_length * 2;
125 while (new_length < list->num_relocs + num_additional_relocs)
126 new_length *= 2;
127
128 struct drm_i915_gem_relocation_entry *new_relocs =
129 vk_alloc(alloc, new_length * sizeof(*list->relocs), 8,
130 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
131 if (new_relocs == NULL)
132 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
133
134 struct anv_bo **new_reloc_bos =
135 vk_alloc(alloc, new_length * sizeof(*list->reloc_bos), 8,
136 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
137 if (new_reloc_bos == NULL) {
138 vk_free(alloc, new_relocs);
139 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
140 }
141
142 memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
143 memcpy(new_reloc_bos, list->reloc_bos,
144 list->num_relocs * sizeof(*list->reloc_bos));
145
146 vk_free(alloc, list->relocs);
147 vk_free(alloc, list->reloc_bos);
148
149 list->array_length = new_length;
150 list->relocs = new_relocs;
151 list->reloc_bos = new_reloc_bos;
152
153 return VK_SUCCESS;
154 }
155
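/* Record a relocation at 'offset' within the list's owning buffer that points
 * at target_bo + delta.  Pinned (softpin) BOs don't need real relocation
 * entries, so they are simply added to the dependency set that later feeds
 * the execbuf validation list.
 */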
156 VkResult
157 anv_reloc_list_add(struct anv_reloc_list *list,
158 const VkAllocationCallbacks *alloc,
159 uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
160 {
161 struct drm_i915_gem_relocation_entry *entry;
162 int index;
163
164 if (target_bo->flags & EXEC_OBJECT_PINNED) {
165 _mesa_set_add(list->deps, target_bo);
166 return VK_SUCCESS;
167 }
168
169 VkResult result = anv_reloc_list_grow(list, alloc, 1);
170 if (result != VK_SUCCESS)
171 return result;
172
173 /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
174 index = list->num_relocs++;
175 list->reloc_bos[index] = target_bo;
176 entry = &list->relocs[index];
177 entry->target_handle = target_bo->gem_handle;
178 entry->delta = delta;
179 entry->offset = offset;
180 entry->presumed_offset = target_bo->offset;
181 entry->read_domains = 0;
182 entry->write_domain = 0;
183 VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
184
185 return VK_SUCCESS;
186 }
187
188 static VkResult
189 anv_reloc_list_append(struct anv_reloc_list *list,
190 const VkAllocationCallbacks *alloc,
191 struct anv_reloc_list *other, uint32_t offset)
192 {
193 VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
194 if (result != VK_SUCCESS)
195 return result;
196
197 memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
198 other->num_relocs * sizeof(other->relocs[0]));
199 memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
200 other->num_relocs * sizeof(other->reloc_bos[0]));
201
202 for (uint32_t i = 0; i < other->num_relocs; i++)
203 list->relocs[i + list->num_relocs].offset += offset;
204
205 list->num_relocs += other->num_relocs;
206
207 set_foreach(other->deps, entry) {
208 _mesa_set_add_pre_hashed(list->deps, entry->hash, entry->key);
209 }
210
211 return VK_SUCCESS;
212 }
213
214 /*-----------------------------------------------------------------------*
215 * Functions related to anv_batch
216 *-----------------------------------------------------------------------*/
217
218 void *
219 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
220 {
221 if (batch->next + num_dwords * 4 > batch->end) {
222 VkResult result = batch->extend_cb(batch, batch->user_data);
223 if (result != VK_SUCCESS) {
224 anv_batch_set_error(batch, result);
225 return NULL;
226 }
227 }
228
229 void *p = batch->next;
230
231 batch->next += num_dwords * 4;
232 assert(batch->next <= batch->end);
233
234 return p;
235 }
236
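/* Record a relocation for the address field at 'location' within the batch
 * and return the presumed 64-bit address (target BO offset plus delta) that
 * the caller packs into the command.  If the relocation list cannot grow,
 * the batch is put into an error state and 0 is returned.
 */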
237 uint64_t
238 anv_batch_emit_reloc(struct anv_batch *batch,
239 void *location, struct anv_bo *bo, uint32_t delta)
240 {
241 VkResult result = anv_reloc_list_add(batch->relocs, batch->alloc,
242 location - batch->start, bo, delta);
243 if (result != VK_SUCCESS) {
244 anv_batch_set_error(batch, result);
245 return 0;
246 }
247
248 return bo->offset + delta;
249 }
250
251 void
252 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
253 {
254 uint32_t size, offset;
255
256 size = other->next - other->start;
257 assert(size % 4 == 0);
258
259 if (batch->next + size > batch->end) {
260 VkResult result = batch->extend_cb(batch, batch->user_data);
261 if (result != VK_SUCCESS) {
262 anv_batch_set_error(batch, result);
263 return;
264 }
265 }
266
267 assert(batch->next + size <= batch->end);
268
269 VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
270 memcpy(batch->next, other->start, size);
271
272 offset = batch->next - batch->start;
273 VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
274 other->relocs, offset);
275 if (result != VK_SUCCESS) {
276 anv_batch_set_error(batch, result);
277 return;
278 }
279
280 batch->next += size;
281 }
282
283 /*-----------------------------------------------------------------------*
284 * Functions related to anv_batch_bo
285 *-----------------------------------------------------------------------*/
286
287 static VkResult
288 anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
289 struct anv_batch_bo **bbo_out)
290 {
291 VkResult result;
292
293 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
294 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
295 if (bbo == NULL)
296 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
297
298 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
299 ANV_CMD_BUFFER_BATCH_SIZE);
300 if (result != VK_SUCCESS)
301 goto fail_alloc;
302
303 result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
304 if (result != VK_SUCCESS)
305 goto fail_bo_alloc;
306
307 *bbo_out = bbo;
308
309 return VK_SUCCESS;
310
311 fail_bo_alloc:
312 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
313 fail_alloc:
314 vk_free(&cmd_buffer->pool->alloc, bbo);
315
316 return result;
317 }
318
319 static VkResult
320 anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
321 const struct anv_batch_bo *other_bbo,
322 struct anv_batch_bo **bbo_out)
323 {
324 VkResult result;
325
326 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
327 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
328 if (bbo == NULL)
329 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
330
331 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
332 other_bbo->bo.size);
333 if (result != VK_SUCCESS)
334 goto fail_alloc;
335
336 result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
337 &other_bbo->relocs);
338 if (result != VK_SUCCESS)
339 goto fail_bo_alloc;
340
341 bbo->length = other_bbo->length;
342 memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);
343
344 *bbo_out = bbo;
345
346 return VK_SUCCESS;
347
348 fail_bo_alloc:
349 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
350 fail_alloc:
351 vk_free(&cmd_buffer->pool->alloc, bbo);
352
353 return result;
354 }
355
356 static void
357 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
358 size_t batch_padding)
359 {
360 batch->next = batch->start = bbo->bo.map;
361 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
362 batch->relocs = &bbo->relocs;
363 bbo->relocs.num_relocs = 0;
364 _mesa_set_clear(bbo->relocs.deps, NULL);
365 }
366
367 static void
368 anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
369 size_t batch_padding)
370 {
371 batch->start = bbo->bo.map;
372 batch->next = bbo->bo.map + bbo->length;
373 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
374 batch->relocs = &bbo->relocs;
375 }
376
377 static void
378 anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
379 {
380 assert(batch->start == bbo->bo.map);
381 bbo->length = batch->next - batch->start;
382 VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
383 }
384
385 static VkResult
386 anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
387                   struct anv_batch *batch, size_t additional,
388 size_t batch_padding)
389 {
390 assert(batch->start == bbo->bo.map);
391 bbo->length = batch->next - batch->start;
392
393 size_t new_size = bbo->bo.size;
394    while (new_size <= bbo->length + additional + batch_padding)
395 new_size *= 2;
396
397 if (new_size == bbo->bo.size)
398 return VK_SUCCESS;
399
400 struct anv_bo new_bo;
401 VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
402 &new_bo, new_size);
403 if (result != VK_SUCCESS)
404 return result;
405
406 memcpy(new_bo.map, bbo->bo.map, bbo->length);
407
408 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
409
410 bbo->bo = new_bo;
411 anv_batch_bo_continue(bbo, batch, batch_padding);
412
413 return VK_SUCCESS;
414 }
415
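/* Patch the MI_BATCH_BUFFER_START at the end of prev_bbo so that it jumps
 * into next_bbo at next_bbo_offset.  With softpin we can write the final
 * address directly; otherwise we retarget the existing relocation entry and
 * give it a bogus presumed offset to force the kernel to process it.
 */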
416 static void
417 anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
418 struct anv_batch_bo *prev_bbo,
419 struct anv_batch_bo *next_bbo,
420 uint32_t next_bbo_offset)
421 {
422 MAYBE_UNUSED const uint32_t bb_start_offset =
423 prev_bbo->length - GEN8_MI_BATCH_BUFFER_START_length * 4;
424 MAYBE_UNUSED const uint32_t *bb_start = prev_bbo->bo.map + bb_start_offset;
425
426 /* Make sure we're looking at a MI_BATCH_BUFFER_START */
427 assert(((*bb_start >> 29) & 0x07) == 0);
428 assert(((*bb_start >> 23) & 0x3f) == 49);
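   /* (Bits 31:29 of the header DWord are the command type, 0 for MI
    * commands, and bits 28:23 are the MI opcode; 0x31 == 49 is
    * MI_BATCH_BUFFER_START, which is what the two asserts above check.)
    */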
429
430 if (cmd_buffer->device->instance->physicalDevice.use_softpin) {
431 assert(prev_bbo->bo.flags & EXEC_OBJECT_PINNED);
432 assert(next_bbo->bo.flags & EXEC_OBJECT_PINNED);
433
434 write_reloc(cmd_buffer->device,
435 prev_bbo->bo.map + bb_start_offset + 4,
436 next_bbo->bo.offset + next_bbo_offset, true);
437 } else {
438 uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
439 assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);
440
441 prev_bbo->relocs.reloc_bos[reloc_idx] = &next_bbo->bo;
442 prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;
443
444 /* Use a bogus presumed offset to force a relocation */
445 prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
446 }
447 }
448
449 static void
450 anv_batch_bo_destroy(struct anv_batch_bo *bbo,
451 struct anv_cmd_buffer *cmd_buffer)
452 {
453 anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
454 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
455 vk_free(&cmd_buffer->pool->alloc, bbo);
456 }
457
458 static VkResult
459 anv_batch_bo_list_clone(const struct list_head *list,
460 struct anv_cmd_buffer *cmd_buffer,
461 struct list_head *new_list)
462 {
463 VkResult result = VK_SUCCESS;
464
465 list_inithead(new_list);
466
467 struct anv_batch_bo *prev_bbo = NULL;
468 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
469 struct anv_batch_bo *new_bbo = NULL;
470 result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
471 if (result != VK_SUCCESS)
472 break;
473 list_addtail(&new_bbo->link, new_list);
474
475 if (prev_bbo)
476 anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);
477
478 prev_bbo = new_bbo;
479 }
480
481 if (result != VK_SUCCESS) {
482 list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
483 anv_batch_bo_destroy(bbo, cmd_buffer);
484 }
485
486 return result;
487 }
488
489 /*-----------------------------------------------------------------------*
490  * Functions related to anv_cmd_buffer
491 *-----------------------------------------------------------------------*/
492
493 static struct anv_batch_bo *
494 anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
495 {
496 return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
497 }
498
499 struct anv_address
500 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
501 {
502 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
503 return (struct anv_address) {
504 .bo = &anv_binding_table_pool(cmd_buffer->device)->block_pool.bo,
505 .offset = bt_block->offset,
506 };
507 }
508
509 static void
510 emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
511 struct anv_bo *bo, uint32_t offset)
512 {
513    /* In gen8+ the address field grew to two dwords to accommodate 48 bit
514 * offsets. The high 16 bits are in the last dword, so we can use the gen8
515 * version in either case, as long as we set the instruction length in the
516 * header accordingly. This means that we always emit three dwords here
517 * and all the padding and adjustment we do in this file works for all
518 * gens.
519 */
520
521 #define GEN7_MI_BATCH_BUFFER_START_length 2
522 #define GEN7_MI_BATCH_BUFFER_START_length_bias 2
523
524 const uint32_t gen7_length =
525 GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
526 const uint32_t gen8_length =
527 GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;
528
529 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
530 bbs.DWordLength = cmd_buffer->device->info.gen < 8 ?
531 gen7_length : gen8_length;
532 bbs.SecondLevelBatchBuffer = Firstlevelbatch;
533 bbs.AddressSpaceIndicator = ASI_PPGTT;
534 bbs.BatchBufferStartAddress = (struct anv_address) { bo, offset };
535 }
536 }
537
538 static void
539 cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
540 struct anv_batch_bo *bbo)
541 {
542 struct anv_batch *batch = &cmd_buffer->batch;
543 struct anv_batch_bo *current_bbo =
544 anv_cmd_buffer_current_batch_bo(cmd_buffer);
545
546 /* We set the end of the batch a little short so we would be sure we
547 * have room for the chaining command. Since we're about to emit the
548 * chaining command, let's set it back where it should go.
549 */
550 batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
551 assert(batch->end == current_bbo->bo.map + current_bbo->bo.size);
552
553 emit_batch_buffer_start(cmd_buffer, &bbo->bo, 0);
554
555 anv_batch_bo_finish(current_bbo, batch);
556 }
557
558 static VkResult
559 anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
560 {
561 struct anv_cmd_buffer *cmd_buffer = _data;
562 struct anv_batch_bo *new_bbo;
563
564 VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
565 if (result != VK_SUCCESS)
566 return result;
567
568 struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
569 if (seen_bbo == NULL) {
570 anv_batch_bo_destroy(new_bbo, cmd_buffer);
571 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
572 }
573 *seen_bbo = new_bbo;
574
575 cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
576
577 list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
578
579 anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);
580
581 return VK_SUCCESS;
582 }
583
584 static VkResult
585 anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
586 {
587 struct anv_cmd_buffer *cmd_buffer = _data;
588 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
589
590 anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
591 GEN8_MI_BATCH_BUFFER_START_length * 4);
592
593 return VK_SUCCESS;
594 }
595
596 /** Allocate a binding table
597 *
598 * This function allocates a binding table. This is a bit more complicated
599 * than one would think due to a combination of Vulkan driver design and some
600 * unfortunate hardware restrictions.
601 *
602 * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
603 * the binding table pointer which means that all binding tables need to live
604 * in the bottom 64k of surface state base address. The way the GL driver has
605 * classically dealt with this restriction is to emit all surface states
606 * on-the-fly into the batch and have a batch buffer smaller than 64k. This
607 * isn't really an option in Vulkan for a couple of reasons:
608 *
609 * 1) In Vulkan, we have growing (or chaining) batches so surface states have
610 * to live in their own buffer and we have to be able to re-emit
611 * STATE_BASE_ADDRESS as needed which requires a full pipeline stall. In
612 * order to avoid emitting STATE_BASE_ADDRESS any more often than needed
613 * (it's not that hard to hit 64k of just binding tables), we allocate
614 * surface state objects up-front when VkImageView is created. In order
615 * for this to work, surface state objects need to be allocated from a
616 * global buffer.
617 *
618 * 2) We tried to design the surface state system in such a way that it's
619 * already ready for bindless texturing. The way bindless texturing works
620 * on our hardware is that you have a big pool of surface state objects
621 * (with its own state base address) and the bindless handles are simply
622 * offsets into that pool. With the architecture we chose, we already
623 * have that pool and it's exactly the same pool that we use for regular
624 * surface states so we should already be ready for bindless.
625 *
626 * 3) For render targets, we need to be able to fill out the surface states
627 * later in vkBeginRenderPass so that we can assign clear colors
628 * correctly. One way to do this would be to just create the surface
629 * state data and then repeatedly copy it into the surface state BO every
630 * time we have to re-emit STATE_BASE_ADDRESS. While this works, it's
631  *    rather annoying; simply allocating them up-front and re-using them
632  *    for the entire render pass is much cleaner.
633 *
634 * While none of these are technically blockers for emitting state on the fly
635  * like we do in GL, the ability to have a single surface state pool
636 * simplifies things greatly. Unfortunately, it comes at a cost...
637 *
638 * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
639 * place the binding tables just anywhere in surface state base address.
640 * Because 64k isn't a whole lot of space, we can't simply restrict the
641 * surface state buffer to 64k, we have to be more clever. The solution we've
642 * chosen is to have a block pool with a maximum size of 2G that starts at
643  * surface state buffer to 64k; we have to be more clever.  The solution we've
644 * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
645 * binding tables from the bottom of the pool (negative offsets). Every time
646 * we allocate a new binding table block, we set surface state base address to
647 * point to the bottom of the binding table block. This way all of the
648 * binding tables in the block are in the bottom 64k of surface state base
649 * address. When we fill out the binding table, we add the distance between
650 * the bottom of our binding table block and zero of the block pool to the
651  * surface state offsets so that they are correct relative to our new surface
652 * state base address at the bottom of the binding table block.
653 *
654  * \see adjust_relocations_from_state_pool()
655  * \see adjust_relocations_to_state_pool()
656 *
657 * \param[in] entries The number of surface state entries the binding
658 * table should be able to hold.
659 *
660  * \param[out] state_offset The offset from surface state base address
661 * where the surface states live. This must be
662 * added to the surface state offset when it is
663 * written into the binding table entry.
664 *
665 * \return An anv_state representing the binding table
666 */
667 struct anv_state
668 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
669 uint32_t entries, uint32_t *state_offset)
670 {
671 struct anv_device *device = cmd_buffer->device;
672 struct anv_state_pool *state_pool = &device->surface_state_pool;
673 struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
674 struct anv_state state;
675
676 state.alloc_size = align_u32(entries * 4, 32);
677
678 if (cmd_buffer->bt_next + state.alloc_size > state_pool->block_size)
679 return (struct anv_state) { 0 };
680
681 state.offset = cmd_buffer->bt_next;
682 state.map = anv_binding_table_pool(device)->block_pool.map +
683 bt_block->offset + state.offset;
684
685 cmd_buffer->bt_next += state.alloc_size;
686
687 if (device->instance->physicalDevice.use_softpin) {
688 assert(bt_block->offset >= 0);
689 *state_offset = device->surface_state_pool.block_pool.start_address -
690 device->binding_table_pool.block_pool.start_address - bt_block->offset;
691 } else {
692 assert(bt_block->offset < 0);
693 *state_offset = -bt_block->offset;
694 }
695
696 return state;
697 }
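/* A purely illustrative sketch of the offset math above -- the numbers and
 * the variable names are made up, not taken from the driver.  In the
 * relocation (non-softpin) path, suppose the current binding table block was
 * allocated at bt_block->offset == -12288 (binding table blocks grow downward
 * from the pool's center) and a surface state lives at +4096 relative to the
 * pool center.  Then *state_offset is 12288 and a caller writes
 *
 *    uint32_t entry = surface_state_offset + state_offset;   // 4096 + 12288
 *
 * i.e. 16384, which is exactly the surface state's distance from the bottom
 * of the binding table block -- the point that surface state base address was
 * programmed to, keeping the binding table itself within its bottom 64k.
 */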
698
699 struct anv_state
700 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
701 {
702 struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
703 return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
704 isl_dev->ss.size, isl_dev->ss.align);
705 }
706
707 struct anv_state
708 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
709 uint32_t size, uint32_t alignment)
710 {
711 return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
712 size, alignment);
713 }
714
715 VkResult
716 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
717 {
718 struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
719 if (bt_block == NULL) {
720 anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
721 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
722 }
723
724 *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
725 cmd_buffer->bt_next = 0;
726
727 return VK_SUCCESS;
728 }
729
730 VkResult
731 anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
732 {
733 struct anv_batch_bo *batch_bo;
734 VkResult result;
735
736 list_inithead(&cmd_buffer->batch_bos);
737
738 result = anv_batch_bo_create(cmd_buffer, &batch_bo);
739 if (result != VK_SUCCESS)
740 return result;
741
742 list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
743
744 cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
745 cmd_buffer->batch.user_data = cmd_buffer;
746
747 if (cmd_buffer->device->can_chain_batches) {
748 cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
749 } else {
750 cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
751 }
752
753 anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
754 GEN8_MI_BATCH_BUFFER_START_length * 4);
755
756 int success = u_vector_init(&cmd_buffer->seen_bbos,
757 sizeof(struct anv_bo *),
758 8 * sizeof(struct anv_bo *));
759 if (!success)
760 goto fail_batch_bo;
761
762 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
763
764 /* u_vector requires power-of-two size elements */
765 unsigned pow2_state_size = util_next_power_of_two(sizeof(struct anv_state));
766 success = u_vector_init(&cmd_buffer->bt_block_states,
767 pow2_state_size, 8 * pow2_state_size);
768 if (!success)
769 goto fail_seen_bbos;
770
771 result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
772 &cmd_buffer->pool->alloc);
773 if (result != VK_SUCCESS)
774 goto fail_bt_blocks;
775 cmd_buffer->last_ss_pool_center = 0;
776
777 result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
778 if (result != VK_SUCCESS)
779 goto fail_bt_blocks;
780
781 return VK_SUCCESS;
782
783 fail_bt_blocks:
784 u_vector_finish(&cmd_buffer->bt_block_states);
785 fail_seen_bbos:
786 u_vector_finish(&cmd_buffer->seen_bbos);
787 fail_batch_bo:
788 anv_batch_bo_destroy(batch_bo, cmd_buffer);
789
790 return result;
791 }
792
793 void
794 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
795 {
796 struct anv_state *bt_block;
797 u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
798 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
799 u_vector_finish(&cmd_buffer->bt_block_states);
800
801 anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
802
803 u_vector_finish(&cmd_buffer->seen_bbos);
804
805 /* Destroy all of the batch buffers */
806 list_for_each_entry_safe(struct anv_batch_bo, bbo,
807 &cmd_buffer->batch_bos, link) {
808 anv_batch_bo_destroy(bbo, cmd_buffer);
809 }
810 }
811
812 void
813 anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
814 {
815 /* Delete all but the first batch bo */
816 assert(!list_empty(&cmd_buffer->batch_bos));
817 while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
818 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
819 list_del(&bbo->link);
820 anv_batch_bo_destroy(bbo, cmd_buffer);
821 }
822 assert(!list_empty(&cmd_buffer->batch_bos));
823
824 anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
825 &cmd_buffer->batch,
826 GEN8_MI_BATCH_BUFFER_START_length * 4);
827
828 while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
829 struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
830 anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
831 }
832 assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
833 cmd_buffer->bt_next = 0;
834
835 cmd_buffer->surface_relocs.num_relocs = 0;
836 _mesa_set_clear(cmd_buffer->surface_relocs.deps, NULL);
837 cmd_buffer->last_ss_pool_center = 0;
838
839 /* Reset the list of seen buffers */
840 cmd_buffer->seen_bbos.head = 0;
841 cmd_buffer->seen_bbos.tail = 0;
842
843 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) =
844 anv_cmd_buffer_current_batch_bo(cmd_buffer);
845 }
846
847 void
848 anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
849 {
850 struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
851
852 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
853 /* When we start a batch buffer, we subtract a certain amount of
854 * padding from the end to ensure that we always have room to emit a
855 * BATCH_BUFFER_START to chain to the next BO. We need to remove
856 * that padding before we end the batch; otherwise, we may end up
857 * with our BATCH_BUFFER_END in another BO.
858 */
859 cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
860 assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);
861
862 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END, bbe);
863
864 /* Round batch up to an even number of dwords. */
865 if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
866 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);
867
868 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
869 } else {
870 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
871 /* If this is a secondary command buffer, we need to determine the
872       * mode in which it will be executed with vkCmdExecuteCommands.  We
873 * determine this statically here so that this stays in sync with the
874 * actual ExecuteCommands implementation.
875 */
876 const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
877 if (!cmd_buffer->device->can_chain_batches) {
878 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
879 } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
880 (length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
881 /* If the secondary has exactly one batch buffer in its list *and*
882 * that batch buffer is less than half of the maximum size, we're
883          * probably better off simply copying it into our batch.
884 */
885 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
886 } else if (!(cmd_buffer->usage_flags &
887 VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
888 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
889
890 /* In order to chain, we need this command buffer to contain an
891 * MI_BATCH_BUFFER_START which will jump back to the calling batch.
892       * It doesn't matter where it points now so long as it has a valid
893 * relocation. We'll adjust it later as part of the chaining
894 * process.
895 *
896 * We set the end of the batch a little short so we would be sure we
897 * have room for the chaining command. Since we're about to emit the
898 * chaining command, let's set it back where it should go.
899 */
900 cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
901 assert(cmd_buffer->batch.start == batch_bo->bo.map);
902 assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);
903
904 emit_batch_buffer_start(cmd_buffer, &batch_bo->bo, 0);
905 assert(cmd_buffer->batch.start == batch_bo->bo.map);
906 } else {
907 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
908 }
909 }
910
911 anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
912 }
913
914 static VkResult
915 anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
916 struct list_head *list)
917 {
918 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
919 struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
920 if (bbo_ptr == NULL)
921 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
922
923 *bbo_ptr = bbo;
924 }
925
926 return VK_SUCCESS;
927 }
928
929 void
930 anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
931 struct anv_cmd_buffer *secondary)
932 {
933 switch (secondary->exec_mode) {
934 case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
935 anv_batch_emit_batch(&primary->batch, &secondary->batch);
936 break;
937 case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
938 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
939 unsigned length = secondary->batch.end - secondary->batch.start;
940 anv_batch_bo_grow(primary, bbo, &primary->batch, length,
941 GEN8_MI_BATCH_BUFFER_START_length * 4);
942 anv_batch_emit_batch(&primary->batch, &secondary->batch);
943 break;
944 }
945 case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
946 struct anv_batch_bo *first_bbo =
947 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
948 struct anv_batch_bo *last_bbo =
949 list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
950
951 emit_batch_buffer_start(primary, &first_bbo->bo, 0);
952
953 struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
954 assert(primary->batch.start == this_bbo->bo.map);
955 uint32_t offset = primary->batch.next - primary->batch.start;
956
957 /* Make the tail of the secondary point back to right after the
958 * MI_BATCH_BUFFER_START in the primary batch.
959 */
960 anv_batch_bo_link(primary, last_bbo, this_bbo, offset);
961
962 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
963 break;
964 }
965 case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
966 struct list_head copy_list;
967 VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
968 secondary,
969 &copy_list);
970 if (result != VK_SUCCESS)
971 return; /* FIXME */
972
973 anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
974
975 struct anv_batch_bo *first_bbo =
976 list_first_entry(&copy_list, struct anv_batch_bo, link);
977 struct anv_batch_bo *last_bbo =
978 list_last_entry(&copy_list, struct anv_batch_bo, link);
979
980 cmd_buffer_chain_to_batch_bo(primary, first_bbo);
981
982 list_splicetail(&copy_list, &primary->batch_bos);
983
984 anv_batch_bo_continue(last_bbo, &primary->batch,
985 GEN8_MI_BATCH_BUFFER_START_length * 4);
986 break;
987 }
988 default:
989 assert(!"Invalid execution mode");
990 }
991
992 anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
993 &secondary->surface_relocs, 0);
994 }
995
996 struct anv_execbuf {
997 struct drm_i915_gem_execbuffer2 execbuf;
998
999 struct drm_i915_gem_exec_object2 * objects;
1000 uint32_t bo_count;
1001 struct anv_bo ** bos;
1002
1003 /* Allocated length of the 'objects' and 'bos' arrays */
1004 uint32_t array_length;
1005
1006 bool has_relocs;
1007
1008 uint32_t fence_count;
1009 uint32_t fence_array_length;
1010 struct drm_i915_gem_exec_fence * fences;
1011 struct anv_syncobj ** syncobjs;
1012 };
1013
1014 static void
1015 anv_execbuf_init(struct anv_execbuf *exec)
1016 {
1017 memset(exec, 0, sizeof(*exec));
1018 }
1019
1020 static void
1021 anv_execbuf_finish(struct anv_execbuf *exec,
1022 const VkAllocationCallbacks *alloc)
1023 {
1024 vk_free(alloc, exec->objects);
1025 vk_free(alloc, exec->bos);
1026 vk_free(alloc, exec->fences);
1027 vk_free(alloc, exec->syncobjs);
1028 }
1029
1030 static int
1031 _compare_bo_handles(const void *_bo1, const void *_bo2)
1032 {
1033 struct anv_bo * const *bo1 = _bo1;
1034 struct anv_bo * const *bo2 = _bo2;
1035
1036 return (*bo1)->gem_handle - (*bo2)->gem_handle;
1037 }
1038
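/* Add 'bo' to the execbuf's validation list, growing the 'objects'/'bos'
 * arrays as needed and deduplicating via anv_bo::index, then recursively add
 * every BO referenced by its relocation list and every pinned BO in its
 * dependency set (sorted by GEM handle for a stable order).
 */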
1039 static VkResult
1040 anv_execbuf_add_bo(struct anv_execbuf *exec,
1041 struct anv_bo *bo,
1042 struct anv_reloc_list *relocs,
1043 uint32_t extra_flags,
1044 const VkAllocationCallbacks *alloc)
1045 {
1046 struct drm_i915_gem_exec_object2 *obj = NULL;
1047
1048 if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
1049 obj = &exec->objects[bo->index];
1050
1051 if (obj == NULL) {
1052 /* We've never seen this one before. Add it to the list and assign
1053 * an id that we can use later.
1054 */
1055 if (exec->bo_count >= exec->array_length) {
1056 uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
1057
1058 struct drm_i915_gem_exec_object2 *new_objects =
1059 vk_alloc(alloc, new_len * sizeof(*new_objects),
1060 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1061 if (new_objects == NULL)
1062 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1063
1064 struct anv_bo **new_bos =
1065 vk_alloc(alloc, new_len * sizeof(*new_bos),
1066 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1067 if (new_bos == NULL) {
1068 vk_free(alloc, new_objects);
1069 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1070 }
1071
1072 if (exec->objects) {
1073 memcpy(new_objects, exec->objects,
1074 exec->bo_count * sizeof(*new_objects));
1075 memcpy(new_bos, exec->bos,
1076 exec->bo_count * sizeof(*new_bos));
1077 }
1078
1079 vk_free(alloc, exec->objects);
1080 vk_free(alloc, exec->bos);
1081
1082 exec->objects = new_objects;
1083 exec->bos = new_bos;
1084 exec->array_length = new_len;
1085 }
1086
1087 assert(exec->bo_count < exec->array_length);
1088
1089 bo->index = exec->bo_count++;
1090 obj = &exec->objects[bo->index];
1091 exec->bos[bo->index] = bo;
1092
1093 obj->handle = bo->gem_handle;
1094 obj->relocation_count = 0;
1095 obj->relocs_ptr = 0;
1096 obj->alignment = 0;
1097 obj->offset = bo->offset;
1098 obj->flags = (bo->flags & ~ANV_BO_FLAG_MASK) | extra_flags;
1099 obj->rsvd1 = 0;
1100 obj->rsvd2 = 0;
1101 }
1102
1103 if (relocs != NULL) {
1104 assert(obj->relocation_count == 0);
1105
1106 if (relocs->num_relocs > 0) {
1107 /* This is the first time we've ever seen a list of relocations for
1108 * this BO. Go ahead and set the relocations and then walk the list
1109 * of relocations and add them all.
1110 */
1111 exec->has_relocs = true;
1112 obj->relocation_count = relocs->num_relocs;
1113 obj->relocs_ptr = (uintptr_t) relocs->relocs;
1114
1115 for (size_t i = 0; i < relocs->num_relocs; i++) {
1116 VkResult result;
1117
1118 /* A quick sanity check on relocations */
1119 assert(relocs->relocs[i].offset < bo->size);
1120 result = anv_execbuf_add_bo(exec, relocs->reloc_bos[i], NULL,
1121 extra_flags, alloc);
1122
1123 if (result != VK_SUCCESS)
1124 return result;
1125 }
1126 }
1127
1128 if (relocs->deps && relocs->deps->entries > 0) {
1129 const uint32_t entries = relocs->deps->entries;
1130 struct anv_bo **bos =
1131 vk_alloc(alloc, entries * sizeof(*bos),
1132 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1133 if (bos == NULL)
1134 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1135
1136 struct anv_bo **bo = bos;
1137 set_foreach(relocs->deps, entry) {
1138 *bo++ = (void *)entry->key;
1139 }
1140
1141 qsort(bos, entries, sizeof(struct anv_bo*), _compare_bo_handles);
1142
1143 VkResult result = VK_SUCCESS;
1144 for (bo = bos; bo < bos + entries; bo++) {
1145 result = anv_execbuf_add_bo(exec, *bo, NULL, extra_flags, alloc);
1146 if (result != VK_SUCCESS)
1147 break;
1148 }
1149
1150 vk_free(alloc, bos);
1151
1152 if (result != VK_SUCCESS)
1153 return result;
1154 }
1155 }
1156
1157 return VK_SUCCESS;
1158 }
1159
1160 static VkResult
1161 anv_execbuf_add_syncobj(struct anv_execbuf *exec,
1162 uint32_t handle, uint32_t flags,
1163 const VkAllocationCallbacks *alloc)
1164 {
1165 assert(flags != 0);
1166
1167 if (exec->fence_count >= exec->fence_array_length) {
1168 uint32_t new_len = MAX2(exec->fence_array_length * 2, 64);
1169
1170 exec->fences = vk_realloc(alloc, exec->fences,
1171 new_len * sizeof(*exec->fences),
1172 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1173 if (exec->fences == NULL)
1174 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1175
1176 exec->fence_array_length = new_len;
1177 }
1178
1179 exec->fences[exec->fence_count] = (struct drm_i915_gem_exec_fence) {
1180 .handle = handle,
1181 .flags = flags,
1182 };
1183
1184 exec->fence_count++;
1185
1186 return VK_SUCCESS;
1187 }
1188
1189 static void
1190 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
1191 struct anv_reloc_list *list)
1192 {
1193 for (size_t i = 0; i < list->num_relocs; i++)
1194 list->relocs[i].target_handle = list->reloc_bos[i]->index;
1195 }
1196
1197 static void
1198 adjust_relocations_from_state_pool(struct anv_state_pool *pool,
1199 struct anv_reloc_list *relocs,
1200 uint32_t last_pool_center_bo_offset)
1201 {
1202 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1203 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1204
1205 for (size_t i = 0; i < relocs->num_relocs; i++) {
1206 /* All of the relocations from this block pool to other BO's should
1207 * have been emitted relative to the surface block pool center. We
1208 * need to add the center offset to make them relative to the
1209 * beginning of the actual GEM bo.
1210 */
1211 relocs->relocs[i].offset += delta;
1212 }
1213 }
1214
1215 static void
1216 adjust_relocations_to_state_pool(struct anv_state_pool *pool,
1217 struct anv_bo *from_bo,
1218 struct anv_reloc_list *relocs,
1219 uint32_t last_pool_center_bo_offset)
1220 {
1221 assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1222 uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1223
1224 /* When we initially emit relocations into a block pool, we don't
1225 * actually know what the final center_bo_offset will be so we just emit
1226 * it as if center_bo_offset == 0. Now that we know what the center
1227 * offset is, we need to walk the list of relocations and adjust any
1228 * relocations that point to the pool bo with the correct offset.
1229 */
1230 for (size_t i = 0; i < relocs->num_relocs; i++) {
1231 if (relocs->reloc_bos[i] == &pool->block_pool.bo) {
1232 /* Adjust the delta value in the relocation to correctly
1233 * correspond to the new delta. Initially, this value may have
1234 * been negative (if treated as unsigned), but we trust in
1235 * uint32_t roll-over to fix that for us at this point.
1236 */
1237 relocs->relocs[i].delta += delta;
1238
1239 /* Since the delta has changed, we need to update the actual
1240 * relocated value with the new presumed value. This function
1241 * should only be called on batch buffers, so we know it isn't in
1242 * use by the GPU at the moment.
1243 */
1244 assert(relocs->relocs[i].offset < from_bo->size);
1245 write_reloc(pool->block_pool.device,
1246 from_bo->map + relocs->relocs[i].offset,
1247 relocs->relocs[i].presumed_offset +
1248 relocs->relocs[i].delta, false);
1249 }
1250 }
1251 }
1252
1253 static void
1254 anv_reloc_list_apply(struct anv_device *device,
1255 struct anv_reloc_list *list,
1256 struct anv_bo *bo,
1257 bool always_relocate)
1258 {
1259 for (size_t i = 0; i < list->num_relocs; i++) {
1260 struct anv_bo *target_bo = list->reloc_bos[i];
1261 if (list->relocs[i].presumed_offset == target_bo->offset &&
1262 !always_relocate)
1263 continue;
1264
1265 void *p = bo->map + list->relocs[i].offset;
1266 write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
1267 list->relocs[i].presumed_offset = target_bo->offset;
1268 }
1269 }
1270
1271 /**
1272 * This function applies the relocation for a command buffer and writes the
1273 * actual addresses into the buffers as per what we were told by the kernel on
1274 * the previous execbuf2 call. This should be safe to do because, for each
1275 * relocated address, we have two cases:
1276 *
1277 * 1) The target BO is inactive (as seen by the kernel). In this case, it is
1278 * not in use by the GPU so updating the address is 100% ok. It won't be
1279 * in-use by the GPU (from our context) again until the next execbuf2
1280 * happens. If the kernel decides to move it in the next execbuf2, it
1281 * will have to do the relocations itself, but that's ok because it should
1282 * have all of the information needed to do so.
1283 *
1284 * 2) The target BO is active (as seen by the kernel). In this case, it
1285 * hasn't moved since the last execbuffer2 call because GTT shuffling
1286 * *only* happens when the BO is idle. (From our perspective, it only
1287 * happens inside the execbuffer2 ioctl, but the shuffling may be
1288 * triggered by another ioctl, with full-ppgtt this is limited to only
1289 * execbuffer2 ioctls on the same context, or memory pressure.) Since the
1290 * target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
1291 * address and the relocated value we are writing into the BO will be the
1292 * same as the value that is already there.
1293 *
1294 * There is also a possibility that the target BO is active but the exact
1295 * RENDER_SURFACE_STATE object we are writing the relocation into isn't in
1296 * use. In this case, the address currently in the RENDER_SURFACE_STATE
1297 * may be stale but it's still safe to write the relocation because that
1298 * particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
1299 * won't be until the next execbuf2 call.
1300 *
1301 * By doing relocations on the CPU, we can tell the kernel that it doesn't
1302 * need to bother. We want to do this because the surface state buffer is
1303 * used by every command buffer so, if the kernel does the relocations, it
1304 * will always be busy and the kernel will always stall. This is also
1305 * probably the fastest mechanism for doing relocations since the kernel would
1306  * have to make a full copy of all the relocation lists.
1307 */
1308 static bool
1309 relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
1310 struct anv_execbuf *exec)
1311 {
1312 if (!exec->has_relocs)
1313 return true;
1314
1315 static int userspace_relocs = -1;
1316 if (userspace_relocs < 0)
1317 userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
1318 if (!userspace_relocs)
1319 return false;
1320
1321 /* First, we have to check to see whether or not we can even do the
1322 * relocation. New buffers which have never been submitted to the kernel
1323 * don't have a valid offset so we need to let the kernel do relocations so
1324 * that we can get offsets for them. On future execbuf2 calls, those
1325 * buffers will have offsets and we will be able to skip relocating.
1326 * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
1327 */
1328 for (uint32_t i = 0; i < exec->bo_count; i++) {
1329 if (exec->bos[i]->offset == (uint64_t)-1)
1330 return false;
1331 }
1332
1333 /* Since surface states are shared between command buffers and we don't
1334 * know what order they will be submitted to the kernel, we don't know
1335 * what address is actually written in the surface state object at any
1336 * given time. The only option is to always relocate them.
1337 */
1338 anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
1339 &cmd_buffer->device->surface_state_pool.block_pool.bo,
1340 true /* always relocate surface states */);
1341
1342 /* Since we own all of the batch buffers, we know what values are stored
1343 * in the relocated addresses and only have to update them if the offsets
1344 * have changed.
1345 */
1346 struct anv_batch_bo **bbo;
1347 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1348 anv_reloc_list_apply(cmd_buffer->device,
1349 &(*bbo)->relocs, &(*bbo)->bo, false);
1350 }
1351
1352 for (uint32_t i = 0; i < exec->bo_count; i++)
1353 exec->objects[i].offset = exec->bos[i]->offset;
1354
1355 return true;
1356 }
1357
1358 static VkResult
1359 setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
1360 struct anv_cmd_buffer *cmd_buffer)
1361 {
1362 struct anv_batch *batch = &cmd_buffer->batch;
1363 struct anv_state_pool *ss_pool =
1364 &cmd_buffer->device->surface_state_pool;
1365
1366 adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
1367 cmd_buffer->last_ss_pool_center);
1368 VkResult result = anv_execbuf_add_bo(execbuf, &ss_pool->block_pool.bo,
1369 &cmd_buffer->surface_relocs, 0,
1370 &cmd_buffer->device->alloc);
1371 if (result != VK_SUCCESS)
1372 return result;
1373
1374 /* First, we walk over all of the bos we've seen and add them and their
1375 * relocations to the validate list.
1376 */
1377 struct anv_batch_bo **bbo;
1378 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1379 adjust_relocations_to_state_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
1380 cmd_buffer->last_ss_pool_center);
1381
1382 result = anv_execbuf_add_bo(execbuf, &(*bbo)->bo, &(*bbo)->relocs, 0,
1383 &cmd_buffer->device->alloc);
1384 if (result != VK_SUCCESS)
1385 return result;
1386 }
1387
1388 /* Now that we've adjusted all of the surface state relocations, we need to
1389 * record the surface state pool center so future executions of the command
1390 * buffer can adjust correctly.
1391 */
1392 cmd_buffer->last_ss_pool_center = ss_pool->block_pool.center_bo_offset;
1393
1394 struct anv_batch_bo *first_batch_bo =
1395 list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
1396
1397 /* The kernel requires that the last entry in the validation list be the
1398 * batch buffer to execute. We can simply swap the element
1399 * corresponding to the first batch_bo in the chain with the last
1400 * element in the list.
1401 */
1402 if (first_batch_bo->bo.index != execbuf->bo_count - 1) {
1403 uint32_t idx = first_batch_bo->bo.index;
1404 uint32_t last_idx = execbuf->bo_count - 1;
1405
1406 struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
1407 assert(execbuf->bos[idx] == &first_batch_bo->bo);
1408
1409 execbuf->objects[idx] = execbuf->objects[last_idx];
1410 execbuf->bos[idx] = execbuf->bos[last_idx];
1411 execbuf->bos[idx]->index = idx;
1412
1413 execbuf->objects[last_idx] = tmp_obj;
1414 execbuf->bos[last_idx] = &first_batch_bo->bo;
1415 first_batch_bo->bo.index = last_idx;
1416 }
1417
1418 /* If we are pinning our BOs, we shouldn't have to relocate anything */
1419 if (cmd_buffer->device->instance->physicalDevice.use_softpin)
1420 assert(!execbuf->has_relocs);
1421
1422 /* Now we go through and fixup all of the relocation lists to point to
1423 * the correct indices in the object array. We have to do this after we
1424 * reorder the list above as some of the indices may have changed.
1425 */
1426 if (execbuf->has_relocs) {
1427 u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
1428 anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
1429
1430 anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
1431 }
1432
1433 if (!cmd_buffer->device->info.has_llc) {
1434 __builtin_ia32_mfence();
1435 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1436 for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
1437 __builtin_ia32_clflush((*bbo)->bo.map + i);
1438 }
1439 }
1440
1441 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1442 .buffers_ptr = (uintptr_t) execbuf->objects,
1443 .buffer_count = execbuf->bo_count,
1444 .batch_start_offset = 0,
1445 .batch_len = batch->next - batch->start,
1446 .cliprects_ptr = 0,
1447 .num_cliprects = 0,
1448 .DR1 = 0,
1449 .DR4 = 0,
1450 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
1451 .rsvd1 = cmd_buffer->device->context_id,
1452 .rsvd2 = 0,
1453 };
1454
1455 if (relocate_cmd_buffer(cmd_buffer, execbuf)) {
1456 /* If we were able to successfully relocate everything, tell the kernel
1457 * that it can skip doing relocations. The requirement for using
1458 * NO_RELOC is:
1459 *
1460 * 1) The addresses written in the objects must match the corresponding
1461 * reloc.presumed_offset which in turn must match the corresponding
1462 * execobject.offset.
1463 *
1464 * 2) To avoid stalling, execobject.offset should match the current
1465 * address of that object within the active context.
1466 *
1467 * In order to satisfy all of the invariants that make userspace
1468  * relocations safe (see relocate_cmd_buffer()), we need to
1469 * further ensure that the addresses we use match those used by the
1470 * kernel for the most recent execbuf2.
1471 *
1472 * The kernel may still choose to do relocations anyway if something has
1473 * moved in the GTT. In this case, the relocation list still needs to be
1474 * valid. All relocations on the batch buffers are already valid and
1475 * kept up-to-date. For surface state relocations, by applying the
1476 * relocations in relocate_cmd_buffer, we ensured that the address in
1477 * the RENDER_SURFACE_STATE matches presumed_offset, so it should be
1478 * safe for the kernel to relocate them as needed.
1479 */
1480 execbuf->execbuf.flags |= I915_EXEC_NO_RELOC;
1481 } else {
1482 /* In the case where we fall back to doing kernel relocations, we need
1483 * to ensure that the relocation list is valid. All relocations on the
1484 * batch buffers are already valid and kept up-to-date. Since surface
1485 * states are shared between command buffers and we don't know what
1486 * order they will be submitted to the kernel, we don't know what
1487 * address is actually written in the surface state object at any given
1488 * time. The only option is to set a bogus presumed offset and let the
1489 * kernel relocate them.
1490 */
1491 for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
1492 cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
1493 }
1494
1495 return VK_SUCCESS;
1496 }
1497
1498 static VkResult
1499 setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_device *device)
1500 {
1501 VkResult result = anv_execbuf_add_bo(execbuf, &device->trivial_batch_bo,
1502 NULL, 0, &device->alloc);
1503 if (result != VK_SUCCESS)
1504 return result;
1505
1506 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1507 .buffers_ptr = (uintptr_t) execbuf->objects,
1508 .buffer_count = execbuf->bo_count,
1509 .batch_start_offset = 0,
1510 .batch_len = 8, /* GEN7_MI_BATCH_BUFFER_END and NOOP */
1511 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
1512 .rsvd1 = device->context_id,
1513 .rsvd2 = 0,
1514 };
1515
1516 return VK_SUCCESS;
1517 }
1518
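/* Build and submit the execbuf for a command buffer, or for the trivial empty
 * batch when cmd_buffer is NULL.  Wait/signal semaphores and the fence are
 * added as BOs, syncobjs, or sync-file in/out fences depending on their type;
 * the ioctl is then issued and temporary semaphore state is restored
 * afterwards.
 */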
1519 VkResult
1520 anv_cmd_buffer_execbuf(struct anv_device *device,
1521 struct anv_cmd_buffer *cmd_buffer,
1522 const VkSemaphore *in_semaphores,
1523 uint32_t num_in_semaphores,
1524 const VkSemaphore *out_semaphores,
1525 uint32_t num_out_semaphores,
1526 VkFence _fence)
1527 {
1528 ANV_FROM_HANDLE(anv_fence, fence, _fence);
1529
1530 struct anv_execbuf execbuf;
1531 anv_execbuf_init(&execbuf);
1532
1533 int in_fence = -1;
1534 VkResult result = VK_SUCCESS;
1535 for (uint32_t i = 0; i < num_in_semaphores; i++) {
1536 ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
1537 struct anv_semaphore_impl *impl =
1538 semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
1539 &semaphore->temporary : &semaphore->permanent;
1540
1541 switch (impl->type) {
1542 case ANV_SEMAPHORE_TYPE_BO:
1543 result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
1544 0, &device->alloc);
1545 if (result != VK_SUCCESS)
1546 return result;
1547 break;
1548
1549 case ANV_SEMAPHORE_TYPE_SYNC_FILE:
1550 if (in_fence == -1) {
1551 in_fence = impl->fd;
1552 } else {
1553 int merge = anv_gem_sync_file_merge(device, in_fence, impl->fd);
1554 if (merge == -1)
1555 return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
1556
1557 close(impl->fd);
1558 close(in_fence);
1559 in_fence = merge;
1560 }
1561
1562 impl->fd = -1;
1563 break;
1564
1565 case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
1566 result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
1567 I915_EXEC_FENCE_WAIT,
1568 &device->alloc);
1569 if (result != VK_SUCCESS)
1570 return result;
1571 break;
1572
1573 default:
1574 break;
1575 }
1576 }
1577
1578 bool need_out_fence = false;
1579 for (uint32_t i = 0; i < num_out_semaphores; i++) {
1580 ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
1581
1582 /* Under most circumstances, out fences won't be temporary. However,
1583 * the spec does allow it for opaque_fd. From the Vulkan 1.0.53 spec:
1584 *
1585 * "If the import is temporary, the implementation must restore the
1586 * semaphore to its prior permanent state after submitting the next
1587 * semaphore wait operation."
1588 *
1589 * The spec says nothing whatsoever about signal operations on
1590 * temporarily imported semaphores so it appears they are allowed.
1591 * There are also CTS tests that require this to work.
1592 */
1593 struct anv_semaphore_impl *impl =
1594 semaphore->temporary.type != ANV_SEMAPHORE_TYPE_NONE ?
1595 &semaphore->temporary : &semaphore->permanent;
1596
1597 switch (impl->type) {
1598 case ANV_SEMAPHORE_TYPE_BO:
1599 result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
1600 EXEC_OBJECT_WRITE, &device->alloc);
1601 if (result != VK_SUCCESS)
1602 return result;
1603 break;
1604
1605 case ANV_SEMAPHORE_TYPE_SYNC_FILE:
1606 need_out_fence = true;
1607 break;
1608
1609 case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
1610 result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
1611 I915_EXEC_FENCE_SIGNAL,
1612 &device->alloc);
1613 if (result != VK_SUCCESS)
1614 return result;
1615 break;
1616
1617 default:
1618 break;
1619 }
1620 }
1621
1622 if (fence) {
1623 /* Under most circumstances, out fences won't be temporary. However,
1624 * the spec does allow it for opaque_fd. From the Vulkan 1.0.53 spec:
1625 *
1626 * "If the import is temporary, the implementation must restore the
1627 * semaphore to its prior permanent state after submitting the next
1628 * semaphore wait operation."
1629 *
1630 * The spec says nothing whatsoever about signal operations on
1631 * temporarily imported semaphores so it appears they are allowed.
1632 * There are also CTS tests that require this to work.
1633 */
1634 struct anv_fence_impl *impl =
1635 fence->temporary.type != ANV_FENCE_TYPE_NONE ?
1636 &fence->temporary : &fence->permanent;
1637
1638 switch (impl->type) {
1639 case ANV_FENCE_TYPE_BO:
1640 result = anv_execbuf_add_bo(&execbuf, &impl->bo.bo, NULL,
1641 EXEC_OBJECT_WRITE, &device->alloc);
1642 if (result != VK_SUCCESS)
1643 return result;
1644 break;
1645
1646 case ANV_FENCE_TYPE_SYNCOBJ:
1647 result = anv_execbuf_add_syncobj(&execbuf, impl->syncobj,
1648 I915_EXEC_FENCE_SIGNAL,
1649 &device->alloc);
1650 if (result != VK_SUCCESS)
1651 return result;
1652 break;
1653
1654 default:
1655 unreachable("Invalid fence type");
1656 }
1657 }
1658
1659 if (cmd_buffer)
1660 result = setup_execbuf_for_cmd_buffer(&execbuf, cmd_buffer);
1661 else
1662 result = setup_empty_execbuf(&execbuf, device);
1663
1664 if (result != VK_SUCCESS)
1665 return result;
1666
1667 if (execbuf.fence_count > 0) {
1668 assert(device->instance->physicalDevice.has_syncobj);
1669 execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
1670 execbuf.execbuf.num_cliprects = execbuf.fence_count;
1671 execbuf.execbuf.cliprects_ptr = (uintptr_t) execbuf.fences;
1672 }
1673
1674 if (in_fence != -1) {
1675 execbuf.execbuf.flags |= I915_EXEC_FENCE_IN;
1676 execbuf.execbuf.rsvd2 |= (uint32_t)in_fence;
1677 }
1678
1679 if (need_out_fence)
1680 execbuf.execbuf.flags |= I915_EXEC_FENCE_OUT;
1681
1682 result = anv_device_execbuf(device, &execbuf.execbuf, execbuf.bos);
1683
1684 /* Execbuf does not consume the in_fence. It's our job to close it. */
1685 if (in_fence != -1)
1686 close(in_fence);
1687
1688 for (uint32_t i = 0; i < num_in_semaphores; i++) {
1689 ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
1690 /* From the Vulkan 1.0.53 spec:
1691 *
1692 * "If the import is temporary, the implementation must restore the
1693 * semaphore to its prior permanent state after submitting the next
1694 * semaphore wait operation."
1695 *
1696 * This has to happen after the execbuf in case we close any syncobjs in
1697 * the process.
1698 */
1699 anv_semaphore_reset_temporary(device, semaphore);
1700 }
1701
1702 if (fence && fence->permanent.type == ANV_FENCE_TYPE_BO) {
1703 /* BO fences can't be shared, so they can't be temporary. */
1704 assert(fence->temporary.type == ANV_FENCE_TYPE_NONE);
1705
1706 /* Once the execbuf has returned, we need to set the fence state to
1707 * SUBMITTED. We can't do this before calling execbuf because
1708 * anv_GetFenceStatus does take the global device lock before checking
1709 * fence->state.
1710 *
1711 * We set the fence state to SUBMITTED regardless of whether or not the
1712 * execbuf succeeds because we need to ensure that vkWaitForFences() and
1713 * vkGetFenceStatus() return a valid result (VK_ERROR_DEVICE_LOST or
1714 * VK_SUCCESS) in a finite amount of time even if execbuf fails.
1715 */
1716 fence->permanent.bo.state = ANV_BO_FENCE_STATE_SUBMITTED;
1717 }
1718
1719 if (result == VK_SUCCESS && need_out_fence) {
1720 int out_fence = execbuf.execbuf.rsvd2 >> 32;
1721 for (uint32_t i = 0; i < num_out_semaphores; i++) {
1722 ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
1723 /* Out fences can't have temporary state because that would imply
1724 * that we imported a sync file and are trying to signal it.
1725 */
1726 assert(semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE);
1727 struct anv_semaphore_impl *impl = &semaphore->permanent;
1728
1729 if (impl->type == ANV_SEMAPHORE_TYPE_SYNC_FILE) {
1730 assert(impl->fd == -1);
1731 impl->fd = dup(out_fence);
1732 }
1733 }
1734 close(out_fence);
1735 }
1736
1737 anv_execbuf_finish(&execbuf, &device->alloc);
1738
1739 return result;
1740 }