anv/allocator: Drop the block_size field from block_pool
[mesa.git] src/intel/vulkan/anv_batch_chain.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "genxml/gen8_pack.h"
33
34 #include "util/debug.h"
35
36 /** \file anv_batch_chain.c
37 *
38 * This file contains functions related to anv_cmd_buffer as a data
39 * structure. This involves everything required to create and destroy
40 * the actual batch buffers as well as link them together and handle
41 * relocations and surface state. It specifically does *not* contain any
42 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
43 */
44
45 /*-----------------------------------------------------------------------*
46 * Functions related to anv_reloc_list
47 *-----------------------------------------------------------------------*/
48
49 static VkResult
50 anv_reloc_list_init_clone(struct anv_reloc_list *list,
51 const VkAllocationCallbacks *alloc,
52 const struct anv_reloc_list *other_list)
53 {
54 if (other_list) {
55 list->num_relocs = other_list->num_relocs;
56 list->array_length = other_list->array_length;
57 } else {
58 list->num_relocs = 0;
59 list->array_length = 256;
60 }
61
62 list->relocs =
63 vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
64 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
65
66 if (list->relocs == NULL)
67 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
68
69 list->reloc_bos =
70 vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
71 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
72
73 if (list->reloc_bos == NULL) {
74 vk_free(alloc, list->relocs);
75 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
76 }
77
78 if (other_list) {
79 memcpy(list->relocs, other_list->relocs,
80 list->array_length * sizeof(*list->relocs));
81 memcpy(list->reloc_bos, other_list->reloc_bos,
82 list->array_length * sizeof(*list->reloc_bos));
83 }
84
85 return VK_SUCCESS;
86 }
87
88 VkResult
89 anv_reloc_list_init(struct anv_reloc_list *list,
90 const VkAllocationCallbacks *alloc)
91 {
92 return anv_reloc_list_init_clone(list, alloc, NULL);
93 }
94
95 void
96 anv_reloc_list_finish(struct anv_reloc_list *list,
97 const VkAllocationCallbacks *alloc)
98 {
99 vk_free(alloc, list->relocs);
100 vk_free(alloc, list->reloc_bos);
101 }
102
103 static VkResult
104 anv_reloc_list_grow(struct anv_reloc_list *list,
105 const VkAllocationCallbacks *alloc,
106 size_t num_additional_relocs)
107 {
108 if (list->num_relocs + num_additional_relocs <= list->array_length)
109 return VK_SUCCESS;
110
111 size_t new_length = list->array_length * 2;
112 while (new_length < list->num_relocs + num_additional_relocs)
113 new_length *= 2;
114
115 struct drm_i915_gem_relocation_entry *new_relocs =
116 vk_alloc(alloc, new_length * sizeof(*list->relocs), 8,
117 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
118 if (new_relocs == NULL)
119 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
120
121 struct anv_bo **new_reloc_bos =
122 vk_alloc(alloc, new_length * sizeof(*list->reloc_bos), 8,
123 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
124 if (new_reloc_bos == NULL) {
125 vk_free(alloc, new_relocs);
126 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
127 }
128
129 memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
130 memcpy(new_reloc_bos, list->reloc_bos,
131 list->num_relocs * sizeof(*list->reloc_bos));
132
133 vk_free(alloc, list->relocs);
134 vk_free(alloc, list->reloc_bos);
135
136 list->array_length = new_length;
137 list->relocs = new_relocs;
138 list->reloc_bos = new_reloc_bos;
139
140 return VK_SUCCESS;
141 }
142
143 VkResult
144 anv_reloc_list_add(struct anv_reloc_list *list,
145 const VkAllocationCallbacks *alloc,
146 uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
147 {
148 struct drm_i915_gem_relocation_entry *entry;
149 int index;
150
151 const uint32_t domain =
152 (target_bo->flags & EXEC_OBJECT_WRITE) ? I915_GEM_DOMAIN_RENDER : 0;
153
154 VkResult result = anv_reloc_list_grow(list, alloc, 1);
155 if (result != VK_SUCCESS)
156 return result;
157
158 /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
159 index = list->num_relocs++;
160 list->reloc_bos[index] = target_bo;
161 entry = &list->relocs[index];
162 entry->target_handle = target_bo->gem_handle;
163 entry->delta = delta;
164 entry->offset = offset;
165 entry->presumed_offset = target_bo->offset;
166 entry->read_domains = domain;
167 entry->write_domain = domain;
168 VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
169
170 return VK_SUCCESS;
171 }
172
173 static VkResult
174 anv_reloc_list_append(struct anv_reloc_list *list,
175 const VkAllocationCallbacks *alloc,
176 struct anv_reloc_list *other, uint32_t offset)
177 {
178 VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
179 if (result != VK_SUCCESS)
180 return result;
181
182 memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
183 other->num_relocs * sizeof(other->relocs[0]));
184 memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
185 other->num_relocs * sizeof(other->reloc_bos[0]));
186
187 for (uint32_t i = 0; i < other->num_relocs; i++)
188 list->relocs[i + list->num_relocs].offset += offset;
189
190 list->num_relocs += other->num_relocs;
191 return VK_SUCCESS;
192 }
193
194 /*-----------------------------------------------------------------------*
195 * Functions related to anv_batch
196 *-----------------------------------------------------------------------*/
197
198 void *
199 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
200 {
201 if (batch->next + num_dwords * 4 > batch->end) {
202 VkResult result = batch->extend_cb(batch, batch->user_data);
203 if (result != VK_SUCCESS) {
204 anv_batch_set_error(batch, result);
205 return NULL;
206 }
207 }
208
209 void *p = batch->next;
210
211 batch->next += num_dwords * 4;
212 assert(batch->next <= batch->end);
213
214 return p;
215 }
216
217 uint64_t
218 anv_batch_emit_reloc(struct anv_batch *batch,
219 void *location, struct anv_bo *bo, uint32_t delta)
220 {
221 VkResult result = anv_reloc_list_add(batch->relocs, batch->alloc,
222 location - batch->start, bo, delta);
223 if (result != VK_SUCCESS) {
224 anv_batch_set_error(batch, result);
225 return 0;
226 }
227
228 return bo->offset + delta;
229 }
230
231 void
232 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
233 {
234 uint32_t size, offset;
235
236 size = other->next - other->start;
237 assert(size % 4 == 0);
238
239 if (batch->next + size > batch->end) {
240 VkResult result = batch->extend_cb(batch, batch->user_data);
241 if (result != VK_SUCCESS) {
242 anv_batch_set_error(batch, result);
243 return;
244 }
245 }
246
247 assert(batch->next + size <= batch->end);
248
249 VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
250 memcpy(batch->next, other->start, size);
251
252 offset = batch->next - batch->start;
253 VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
254 other->relocs, offset);
255 if (result != VK_SUCCESS) {
256 anv_batch_set_error(batch, result);
257 return;
258 }
259
260 batch->next += size;
261 }
262
263 /*-----------------------------------------------------------------------*
264 * Functions related to anv_batch_bo
265 *-----------------------------------------------------------------------*/
266
267 static VkResult
268 anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
269 struct anv_batch_bo **bbo_out)
270 {
271 VkResult result;
272
273 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
274 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
275 if (bbo == NULL)
276 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
277
278 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
279 ANV_CMD_BUFFER_BATCH_SIZE);
280 if (result != VK_SUCCESS)
281 goto fail_alloc;
282
283 result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
284 if (result != VK_SUCCESS)
285 goto fail_bo_alloc;
286
287 *bbo_out = bbo;
288
289 return VK_SUCCESS;
290
291 fail_bo_alloc:
292 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
293 fail_alloc:
294 vk_free(&cmd_buffer->pool->alloc, bbo);
295
296 return result;
297 }
298
299 static VkResult
300 anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
301 const struct anv_batch_bo *other_bbo,
302 struct anv_batch_bo **bbo_out)
303 {
304 VkResult result;
305
306 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
307 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
308 if (bbo == NULL)
309 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
310
311 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
312 other_bbo->bo.size);
313 if (result != VK_SUCCESS)
314 goto fail_alloc;
315
316 result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
317 &other_bbo->relocs);
318 if (result != VK_SUCCESS)
319 goto fail_bo_alloc;
320
321 bbo->length = other_bbo->length;
322 memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);
323
324 *bbo_out = bbo;
325
326 return VK_SUCCESS;
327
328 fail_bo_alloc:
329 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
330 fail_alloc:
331 vk_free(&cmd_buffer->pool->alloc, bbo);
332
333 return result;
334 }
335
336 static void
337 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
338 size_t batch_padding)
339 {
340 batch->next = batch->start = bbo->bo.map;
341 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
342 batch->relocs = &bbo->relocs;
343 bbo->relocs.num_relocs = 0;
344 }
345
346 static void
347 anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
348 size_t batch_padding)
349 {
350 batch->start = bbo->bo.map;
351 batch->next = bbo->bo.map + bbo->length;
352 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
353 batch->relocs = &bbo->relocs;
354 }
355
356 static void
357 anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
358 {
359 assert(batch->start == bbo->bo.map);
360 bbo->length = batch->next - batch->start;
361 VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
362 }
363
364 static VkResult
365 anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
366 struct anv_batch *batch, size_t additional,
367 size_t batch_padding)
368 {
369 assert(batch->start == bbo->bo.map);
370 bbo->length = batch->next - batch->start;
371
372 size_t new_size = bbo->bo.size;
373 while (new_size <= bbo->length + additional + batch_padding)
374 new_size *= 2;
375
376 if (new_size == bbo->bo.size)
377 return VK_SUCCESS;
378
379 struct anv_bo new_bo;
380 VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
381 &new_bo, new_size);
382 if (result != VK_SUCCESS)
383 return result;
384
385 memcpy(new_bo.map, bbo->bo.map, bbo->length);
386
387 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
388
389 bbo->bo = new_bo;
390 anv_batch_bo_continue(bbo, batch, batch_padding);
391
392 return VK_SUCCESS;
393 }
394
395 static void
396 anv_batch_bo_destroy(struct anv_batch_bo *bbo,
397 struct anv_cmd_buffer *cmd_buffer)
398 {
399 anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
400 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
401 vk_free(&cmd_buffer->pool->alloc, bbo);
402 }
403
404 static VkResult
405 anv_batch_bo_list_clone(const struct list_head *list,
406 struct anv_cmd_buffer *cmd_buffer,
407 struct list_head *new_list)
408 {
409 VkResult result = VK_SUCCESS;
410
411 list_inithead(new_list);
412
413 struct anv_batch_bo *prev_bbo = NULL;
414 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
415 struct anv_batch_bo *new_bbo = NULL;
416 result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
417 if (result != VK_SUCCESS)
418 break;
419 list_addtail(&new_bbo->link, new_list);
420
421 if (prev_bbo) {
422 /* As we clone this list of batch_bo's, they chain one to the
423 * other using MI_BATCH_BUFFER_START commands. We need to fix up
424 * those relocations as we go. Fortunately, this is pretty easy
425 * as it will always be the last relocation in the list.
426 */
427 uint32_t last_idx = prev_bbo->relocs.num_relocs - 1;
428 assert(prev_bbo->relocs.reloc_bos[last_idx] == &bbo->bo);
429 prev_bbo->relocs.reloc_bos[last_idx] = &new_bbo->bo;
430 }
431
432 prev_bbo = new_bbo;
433 }
434
435 if (result != VK_SUCCESS) {
436 list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
437 anv_batch_bo_destroy(bbo, cmd_buffer);
438 }
439
440 return result;
441 }
442
443 /*-----------------------------------------------------------------------*
444 * Functions related to anv_cmd_buffer
445 *-----------------------------------------------------------------------*/
446
447 static inline struct anv_batch_bo *
448 anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
449 {
450 return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
451 }
452
453 struct anv_address
454 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
455 {
456 return (struct anv_address) {
457 .bo = &cmd_buffer->device->surface_state_block_pool.bo,
458 .offset = *(int32_t *)u_vector_head(&cmd_buffer->bt_blocks),
459 };
460 }
461
462 static void
463 emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
464 struct anv_bo *bo, uint32_t offset)
465 {
466 /* In gen8+ the address field grew to two dwords to accommodate 48 bit
467 * offsets. The high 16 bits are in the last dword, so we can use the gen8
468 * version in either case, as long as we set the instruction length in the
469 * header accordingly. This means that we always emit three dwords here
470 * and all the padding and adjustment we do in this file works for all
471 * gens.
472 */
473
474 #define GEN7_MI_BATCH_BUFFER_START_length 2
475 #define GEN7_MI_BATCH_BUFFER_START_length_bias 2
476
477 const uint32_t gen7_length =
478 GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
479 const uint32_t gen8_length =
480 GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;
481
482 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
483 bbs.DWordLength = cmd_buffer->device->info.gen < 8 ?
484 gen7_length : gen8_length;
485 bbs._2ndLevelBatchBuffer = _1stlevelbatch;
486 bbs.AddressSpaceIndicator = ASI_PPGTT;
487 bbs.BatchBufferStartAddress = (struct anv_address) { bo, offset };
488 }
489 }
490
491 static void
492 cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
493 struct anv_batch_bo *bbo)
494 {
495 struct anv_batch *batch = &cmd_buffer->batch;
496 struct anv_batch_bo *current_bbo =
497 anv_cmd_buffer_current_batch_bo(cmd_buffer);
498
499 /* We set the end of the batch a little short so we would be sure we
500 * have room for the chaining command. Since we're about to emit the
501 * chaining command, let's set it back where it should go.
502 */
503 batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
504 assert(batch->end == current_bbo->bo.map + current_bbo->bo.size);
505
506 emit_batch_buffer_start(cmd_buffer, &bbo->bo, 0);
507
508 anv_batch_bo_finish(current_bbo, batch);
509 }
510
511 static VkResult
512 anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
513 {
514 struct anv_cmd_buffer *cmd_buffer = _data;
515 struct anv_batch_bo *new_bbo;
516
517 VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
518 if (result != VK_SUCCESS)
519 return result;
520
521 struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
522 if (seen_bbo == NULL) {
523 anv_batch_bo_destroy(new_bbo, cmd_buffer);
524 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
525 }
526 *seen_bbo = new_bbo;
527
528 cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
529
530 list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
531
532 anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);
533
534 return VK_SUCCESS;
535 }
536
537 static VkResult
538 anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
539 {
540 struct anv_cmd_buffer *cmd_buffer = _data;
541 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
542
543 VkResult result = anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch,
544 4096, GEN8_MI_BATCH_BUFFER_START_length * 4);
545
546 return result;
547 }
548
549 /** Allocate a binding table
550 *
551 * This function allocates a binding table. This is a bit more complicated
552 * than one would think due to a combination of Vulkan driver design and some
553 * unfortunate hardware restrictions.
554 *
555 * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
556 * the binding table pointer which means that all binding tables need to live
557 * in the bottom 64k of surface state base address. The way the GL driver has
558 * classically dealt with this restriction is to emit all surface states
559 * on-the-fly into the batch and have a batch buffer smaller than 64k. This
560 * isn't really an option in Vulkan for a couple of reasons:
561 *
562 * 1) In Vulkan, we have growing (or chaining) batches so surface states have
563 * to live in their own buffer and we have to be able to re-emit
564 * STATE_BASE_ADDRESS as needed which requires a full pipeline stall. In
565 * order to avoid emitting STATE_BASE_ADDRESS any more often than needed
566 * (it's not that hard to hit 64k of just binding tables), we allocate
567 * surface state objects up-front when VkImageView is created. In order
568 * for this to work, surface state objects need to be allocated from a
569 * global buffer.
570 *
571 * 2) We tried to design the surface state system in such a way that it's
572 * already ready for bindless texturing. The way bindless texturing works
573 * on our hardware is that you have a big pool of surface state objects
574 * (with its own state base address) and the bindless handles are simply
575 * offsets into that pool. With the architecture we chose, we already
576 * have that pool and it's exactly the same pool that we use for regular
577 * surface states so we should already be ready for bindless.
578 *
579 * 3) For render targets, we need to be able to fill out the surface states
580 * later in vkBeginRenderPass so that we can assign clear colors
581 * correctly. One way to do this would be to just create the surface
582 * state data and then repeatedly copy it into the surface state BO every
583 * time we have to re-emit STATE_BASE_ADDRESS. While this works, it's
584 * rather annoying, and it's much simpler to just allocate them up-front
585 * and re-use them for the entire render pass.
586 *
587 * While none of these are technically blockers for emitting state on the fly
588 * like we do in GL, the ability to have a single surface state pool
589 * greatly simplifies things. Unfortunately, it comes at a cost...
590 *
591 * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
592 * place the binding tables just anywhere in surface state base address.
593 * Because 64k isn't a whole lot of space, we can't simply restrict the
594 * surface state buffer to 64k; we have to be more clever. The solution we've
595 * chosen is to have a block pool with a maximum size of 2G that starts at
596 * zero and grows in both directions. All surface states are allocated from
597 * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
598 * binding tables from the bottom of the pool (negative offsets). Every time
599 * we allocate a new binding table block, we set surface state base address to
600 * point to the bottom of the binding table block. This way all of the
601 * binding tables in the block are in the bottom 64k of surface state base
602 * address. When we fill out the binding table, we add the distance between
603 * the bottom of our binding table block and zero of the block pool to the
604 * surface state offsets so that they are correct relative to our new surface
605 * state base address at the bottom of the binding table block.
606 *
607 * \see adjust_relocations_from_state_pool()
608 * \see adjust_relocations_to_state_pool()
609 *
610 * \param[in] entries The number of surface state entries the binding
611 * table should be able to hold.
612 *
613 * \param[out] state_offset The offset from surface state base address
614 * where the surface states live. This must be
615 * added to the surface state offset when it is
616 * written into the binding table entry.
617 *
618 * \return An anv_state representing the binding table
619 */
620 struct anv_state
621 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
622 uint32_t entries, uint32_t *state_offset)
623 {
624 struct anv_block_pool *block_pool =
625 &cmd_buffer->device->surface_state_block_pool;
626 struct anv_state_pool *state_pool = &cmd_buffer->device->surface_state_pool;
627 int32_t *bt_block = u_vector_head(&cmd_buffer->bt_blocks);
628 struct anv_state state;
629
630 state.alloc_size = align_u32(entries * 4, 32);
631
632 if (cmd_buffer->bt_next + state.alloc_size > state_pool->block_size)
633 return (struct anv_state) { 0 };
634
635 state.offset = cmd_buffer->bt_next;
636 state.map = block_pool->map + *bt_block + state.offset;
637
638 cmd_buffer->bt_next += state.alloc_size;
639
640 assert(*bt_block < 0);
641 *state_offset = -(*bt_block);
642
643 return state;
644 }
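
/* Editor's note: the sketch below is not part of the original file.  It
 * shows the intended consumption pattern for
 * anv_cmd_buffer_alloc_binding_table(): each binding table entry stores a
 * surface state offset *plus* the returned state_offset, which is what makes
 * the entry correct relative to the surface state base address programmed at
 * the bottom of the current binding table block (see the comment above).
 * The helper name and the caller-provided surface_states array are
 * illustrative assumptions; on failure a real caller would allocate a new
 * block with anv_cmd_buffer_new_binding_table_block() and retry.
 */
static inline VkResult
example_fill_binding_table(struct anv_cmd_buffer *cmd_buffer,
                           const struct anv_state *surface_states,
                           uint32_t count)
{
   uint32_t state_offset;
   struct anv_state bt =
      anv_cmd_buffer_alloc_binding_table(cmd_buffer, count, &state_offset);
   if (bt.map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   /* Each entry is the surface state offset biased by state_offset. */
   uint32_t *bt_map = bt.map;
   for (uint32_t s = 0; s < count; s++)
      bt_map[s] = surface_states[s].offset + state_offset;

   return VK_SUCCESS;
}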
645
646 struct anv_state
647 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
648 {
649 struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
650 return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
651 isl_dev->ss.size, isl_dev->ss.align);
652 }
653
654 struct anv_state
655 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
656 uint32_t size, uint32_t alignment)
657 {
658 return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
659 size, alignment);
660 }
661
662 VkResult
663 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
664 {
665 struct anv_block_pool *block_pool =
666 &cmd_buffer->device->surface_state_block_pool;
667 struct anv_state_pool *state_pool = &cmd_buffer->device->surface_state_pool;
668
669 int32_t *offset = u_vector_add(&cmd_buffer->bt_blocks);
670 if (offset == NULL) {
671 anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
672 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
673 }
674
675 *offset = anv_block_pool_alloc_back(block_pool, state_pool->block_size);
676 cmd_buffer->bt_next = 0;
677
678 return VK_SUCCESS;
679 }
680
681 VkResult
682 anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
683 {
684 struct anv_batch_bo *batch_bo;
685 VkResult result;
686
687 list_inithead(&cmd_buffer->batch_bos);
688
689 result = anv_batch_bo_create(cmd_buffer, &batch_bo);
690 if (result != VK_SUCCESS)
691 return result;
692
693 list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
694
695 cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
696 cmd_buffer->batch.user_data = cmd_buffer;
697
698 if (cmd_buffer->device->can_chain_batches) {
699 cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
700 } else {
701 cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
702 }
703
704 anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
705 GEN8_MI_BATCH_BUFFER_START_length * 4);
706
707 int success = u_vector_init(&cmd_buffer->seen_bbos,
708 sizeof(struct anv_bo *),
709 8 * sizeof(struct anv_bo *));
710 if (!success)
711 goto fail_batch_bo;
712
713 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
714
715 success = u_vector_init(&cmd_buffer->bt_blocks, sizeof(int32_t),
716 8 * sizeof(int32_t));
717 if (!success)
718 goto fail_seen_bbos;
719
720 result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
721 &cmd_buffer->pool->alloc);
722 if (result != VK_SUCCESS)
723 goto fail_bt_blocks;
724 cmd_buffer->last_ss_pool_center = 0;
725
726 result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
727 if (result != VK_SUCCESS)
728 goto fail_bt_blocks;
729
730 return VK_SUCCESS;
731
732 fail_bt_blocks:
733 u_vector_finish(&cmd_buffer->bt_blocks);
734 fail_seen_bbos:
735 u_vector_finish(&cmd_buffer->seen_bbos);
736 fail_batch_bo:
737 anv_batch_bo_destroy(batch_bo, cmd_buffer);
738
739 return result;
740 }
741
742 void
743 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
744 {
745 int32_t *bt_block;
746 u_vector_foreach(bt_block, &cmd_buffer->bt_blocks) {
747 anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
748 *bt_block);
749 }
750 u_vector_finish(&cmd_buffer->bt_blocks);
751
752 anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
753
754 u_vector_finish(&cmd_buffer->seen_bbos);
755
756 /* Destroy all of the batch buffers */
757 list_for_each_entry_safe(struct anv_batch_bo, bbo,
758 &cmd_buffer->batch_bos, link) {
759 anv_batch_bo_destroy(bbo, cmd_buffer);
760 }
761 }
762
763 void
764 anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
765 {
766 /* Delete all but the first batch bo */
767 assert(!list_empty(&cmd_buffer->batch_bos));
768 while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
769 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
770 list_del(&bbo->link);
771 anv_batch_bo_destroy(bbo, cmd_buffer);
772 }
773 assert(!list_empty(&cmd_buffer->batch_bos));
774
775 anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
776 &cmd_buffer->batch,
777 GEN8_MI_BATCH_BUFFER_START_length * 4);
778
779 while (u_vector_length(&cmd_buffer->bt_blocks) > 1) {
780 int32_t *bt_block = u_vector_remove(&cmd_buffer->bt_blocks);
781 anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
782 *bt_block);
783 }
784 assert(u_vector_length(&cmd_buffer->bt_blocks) == 1);
785 cmd_buffer->bt_next = 0;
786
787 cmd_buffer->surface_relocs.num_relocs = 0;
788 cmd_buffer->last_ss_pool_center = 0;
789
790 /* Reset the list of seen buffers */
791 cmd_buffer->seen_bbos.head = 0;
792 cmd_buffer->seen_bbos.tail = 0;
793
794 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) =
795 anv_cmd_buffer_current_batch_bo(cmd_buffer);
796 }
797
798 void
799 anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
800 {
801 struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
802
803 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
804 /* When we start a batch buffer, we subtract a certain amount of
805 * padding from the end to ensure that we always have room to emit a
806 * BATCH_BUFFER_START to chain to the next BO. We need to remove
807 * that padding before we end the batch; otherwise, we may end up
808 * with our BATCH_BUFFER_END in another BO.
809 */
810 cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
811 assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);
812
813 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END, bbe);
814
815 /* Round batch up to an even number of dwords. */
816 if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
817 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP, noop);
818
819 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
820 }
821
822 anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
823
824 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
825 /* If this is a secondary command buffer, we need to determine the
826 * mode in which it will be executed with vkExecuteCommands. We
827 * determine this statically here so that this stays in sync with the
828 * actual ExecuteCommands implementation.
829 */
830 if (!cmd_buffer->device->can_chain_batches) {
831 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
832 } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
833 (batch_bo->length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
834 /* If the secondary has exactly one batch buffer in its list *and*
835 * that batch buffer is less than half of the maximum size, we're
836 * probably better off simply copying it into our batch.
837 */
838 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
839 } else if (!(cmd_buffer->usage_flags &
840 VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
841 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
842
843 /* When we chain, we need to add an MI_BATCH_BUFFER_START command
844 * with its relocation. In order to handle this we'll increment here
845 * so we can unconditionally decrement right before adding the
846 * MI_BATCH_BUFFER_START command.
847 */
848 batch_bo->relocs.num_relocs++;
849 cmd_buffer->batch.next += GEN8_MI_BATCH_BUFFER_START_length * 4;
850 } else {
851 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
852 }
853 }
854 }
855
856 static inline VkResult
857 anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
858 struct list_head *list)
859 {
860 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
861 struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
862 if (bbo_ptr == NULL)
863 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
864
865 *bbo_ptr = bbo;
866 }
867
868 return VK_SUCCESS;
869 }
870
871 void
872 anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
873 struct anv_cmd_buffer *secondary)
874 {
875 switch (secondary->exec_mode) {
876 case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
877 anv_batch_emit_batch(&primary->batch, &secondary->batch);
878 break;
879 case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
880 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
881 unsigned length = secondary->batch.end - secondary->batch.start;
882 anv_batch_bo_grow(primary, bbo, &primary->batch, length,
883 GEN8_MI_BATCH_BUFFER_START_length * 4);
884 anv_batch_emit_batch(&primary->batch, &secondary->batch);
885 break;
886 }
887 case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
888 struct anv_batch_bo *first_bbo =
889 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
890 struct anv_batch_bo *last_bbo =
891 list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
892
893 emit_batch_buffer_start(primary, &first_bbo->bo, 0);
894
895 struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
896 assert(primary->batch.start == this_bbo->bo.map);
897 uint32_t offset = primary->batch.next - primary->batch.start;
898 const uint32_t inst_size = GEN8_MI_BATCH_BUFFER_START_length * 4;
899
900 /* Roll back the previous MI_BATCH_BUFFER_START and its relocation so we
901 * can emit a new command and relocation for the current splice. In
902 * order to handle the initial-use case, we incremented next and
903 * num_relocs in end_batch_buffer() so we can always just subtract
904 * here.
905 */
906 last_bbo->relocs.num_relocs--;
907 secondary->batch.next -= inst_size;
908 emit_batch_buffer_start(secondary, &this_bbo->bo, offset);
909 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
910
911 /* After patching up the secondary buffer, we need to clflush the
912 * modified instruction in case we're on a !llc platform. We use a
913 * little loop to handle the case where the instruction crosses a cache
914 * line boundary.
915 */
916 if (!primary->device->info.has_llc) {
917 void *inst = secondary->batch.next - inst_size;
918 void *p = (void *) (((uintptr_t) inst) & ~CACHELINE_MASK);
919 __builtin_ia32_mfence();
920 while (p < secondary->batch.next) {
921 __builtin_ia32_clflush(p);
922 p += CACHELINE_SIZE;
923 }
924 }
925 break;
926 }
927 case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
928 struct list_head copy_list;
929 VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
930 secondary,
931 &copy_list);
932 if (result != VK_SUCCESS)
933 return; /* FIXME */
934
935 anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
936
937 struct anv_batch_bo *first_bbo =
938 list_first_entry(&copy_list, struct anv_batch_bo, link);
939 struct anv_batch_bo *last_bbo =
940 list_last_entry(&copy_list, struct anv_batch_bo, link);
941
942 cmd_buffer_chain_to_batch_bo(primary, first_bbo);
943
944 list_splicetail(&copy_list, &primary->batch_bos);
945
946 anv_batch_bo_continue(last_bbo, &primary->batch,
947 GEN8_MI_BATCH_BUFFER_START_length * 4);
948 break;
949 }
950 default:
951 assert(!"Invalid execution mode");
952 }
953
954 anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
955 &secondary->surface_relocs, 0);
956 }
957
958 struct anv_execbuf {
959 struct drm_i915_gem_execbuffer2 execbuf;
960
961 struct drm_i915_gem_exec_object2 * objects;
962 uint32_t bo_count;
963 struct anv_bo ** bos;
964
965 /* Allocated length of the 'objects' and 'bos' arrays */
966 uint32_t array_length;
967 };
968
969 static void
970 anv_execbuf_init(struct anv_execbuf *exec)
971 {
972 memset(exec, 0, sizeof(*exec));
973 }
974
975 static void
976 anv_execbuf_finish(struct anv_execbuf *exec,
977 const VkAllocationCallbacks *alloc)
978 {
979 vk_free(alloc, exec->objects);
980 vk_free(alloc, exec->bos);
981 }
982
983 static VkResult
984 anv_execbuf_add_bo(struct anv_execbuf *exec,
985 struct anv_bo *bo,
986 struct anv_reloc_list *relocs,
987 uint32_t extra_flags,
988 const VkAllocationCallbacks *alloc)
989 {
990 struct drm_i915_gem_exec_object2 *obj = NULL;
991
992 if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
993 obj = &exec->objects[bo->index];
994
995 if (obj == NULL) {
996 /* We've never seen this one before. Add it to the list and assign
997 * an id that we can use later.
998 */
999 if (exec->bo_count >= exec->array_length) {
1000 uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
1001
1002 struct drm_i915_gem_exec_object2 *new_objects =
1003 vk_alloc(alloc, new_len * sizeof(*new_objects),
1004 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1005 if (new_objects == NULL)
1006 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1007
1008 struct anv_bo **new_bos =
1009 vk_alloc(alloc, new_len * sizeof(*new_bos),
1010 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
1011 if (new_bos == NULL) {
1012 vk_free(alloc, new_objects);
1013 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1014 }
1015
1016 if (exec->objects) {
1017 memcpy(new_objects, exec->objects,
1018 exec->bo_count * sizeof(*new_objects));
1019 memcpy(new_bos, exec->bos,
1020 exec->bo_count * sizeof(*new_bos));
1021 }
1022
1023 vk_free(alloc, exec->objects);
1024 vk_free(alloc, exec->bos);
1025
1026 exec->objects = new_objects;
1027 exec->bos = new_bos;
1028 exec->array_length = new_len;
1029 }
1030
1031 assert(exec->bo_count < exec->array_length);
1032
1033 bo->index = exec->bo_count++;
1034 obj = &exec->objects[bo->index];
1035 exec->bos[bo->index] = bo;
1036
1037 obj->handle = bo->gem_handle;
1038 obj->relocation_count = 0;
1039 obj->relocs_ptr = 0;
1040 obj->alignment = 0;
1041 obj->offset = bo->offset;
1042 obj->flags = bo->flags | extra_flags;
1043 obj->rsvd1 = 0;
1044 obj->rsvd2 = 0;
1045 }
1046
1047 if (relocs != NULL && obj->relocation_count == 0) {
1048 /* This is the first time we've ever seen a list of relocations for
1049 * this BO. Go ahead and set the relocations and then walk the list
1050 * of relocations and add them all.
1051 */
1052 obj->relocation_count = relocs->num_relocs;
1053 obj->relocs_ptr = (uintptr_t) relocs->relocs;
1054
1055 for (size_t i = 0; i < relocs->num_relocs; i++) {
1056 /* A quick sanity check on relocations */
1057 assert(relocs->relocs[i].offset < bo->size);
1058 anv_execbuf_add_bo(exec, relocs->reloc_bos[i], NULL,
1059 extra_flags, alloc);
1060 }
1061 }
1062
1063 return VK_SUCCESS;
1064 }
1065
1066 static void
1067 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
1068 struct anv_reloc_list *list)
1069 {
1070 for (size_t i = 0; i < list->num_relocs; i++)
1071 list->relocs[i].target_handle = list->reloc_bos[i]->index;
1072 }
1073
1074 static void
1075 write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush)
1076 {
1077 unsigned reloc_size = 0;
1078 if (device->info.gen >= 8) {
1079 /* From the Broadwell PRM Vol. 2a, MI_LOAD_REGISTER_MEM::MemoryAddress:
1080 *
1081 * "This field specifies the address of the memory location where the
1082 * register value specified in the DWord above will read from. The
1083 * address specifies the DWord location of the data. Range =
1084 * GraphicsVirtualAddress[63:2] for a DWord register GraphicsAddress
1085 * [63:48] are ignored by the HW and assumed to be in correct
1086 * canonical form [63:48] == [47]."
1087 */
1088 const int shift = 63 - 47;
1089 reloc_size = sizeof(uint64_t);
1090 *(uint64_t *)p = (((int64_t)v) << shift) >> shift;
1091 } else {
1092 reloc_size = sizeof(uint32_t);
1093 *(uint32_t *)p = v;
1094 }
1095
1096 if (flush && !device->info.has_llc)
1097 anv_flush_range(p, reloc_size);
1098 }
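
/* Editor's note: a small illustrative helper, not part of the original file,
 * spelling out the sign-extension used above for gen8+.  Shifting the 48-bit
 * address up by 16 and arithmetically back down copies bit 47 into bits
 * [63:48], producing the canonical form the hardware expects.  For example,
 * 0x0000800000001000 (bit 47 set) becomes 0xffff800000001000, while addresses
 * with bit 47 clear are unchanged.
 */
static inline uint64_t
example_canonical_address(uint64_t v)
{
   const int shift = 63 - 47;
   return (uint64_t)(((int64_t)v << shift) >> shift);
}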
1099
1100 static void
1101 adjust_relocations_from_state_pool(struct anv_block_pool *pool,
1102 struct anv_reloc_list *relocs,
1103 uint32_t last_pool_center_bo_offset)
1104 {
1105 assert(last_pool_center_bo_offset <= pool->center_bo_offset);
1106 uint32_t delta = pool->center_bo_offset - last_pool_center_bo_offset;
1107
1108 for (size_t i = 0; i < relocs->num_relocs; i++) {
1109 /* All of the relocations from this block pool to other BO's should
1110 * have been emitted relative to the surface block pool center. We
1111 * need to add the center offset to make them relative to the
1112 * beginning of the actual GEM bo.
1113 */
1114 relocs->relocs[i].offset += delta;
1115 }
1116 }
1117
1118 static void
1119 adjust_relocations_to_state_pool(struct anv_block_pool *pool,
1120 struct anv_bo *from_bo,
1121 struct anv_reloc_list *relocs,
1122 uint32_t last_pool_center_bo_offset)
1123 {
1124 assert(last_pool_center_bo_offset <= pool->center_bo_offset);
1125 uint32_t delta = pool->center_bo_offset - last_pool_center_bo_offset;
1126
1127 /* When we initially emit relocations into a block pool, we don't
1128 * actually know what the final center_bo_offset will be so we just emit
1129 * it as if center_bo_offset == 0. Now that we know what the center
1130 * offset is, we need to walk the list of relocations and adjust any
1131 * relocations that point to the pool bo with the correct offset.
1132 */
1133 for (size_t i = 0; i < relocs->num_relocs; i++) {
1134 if (relocs->reloc_bos[i] == &pool->bo) {
1135 /* Adjust the delta value in the relocation to correctly
1136 * correspond to the new delta. Initially, this value may have
1137 * been negative (if treated as unsigned), but we trust in
1138 * uint32_t roll-over to fix that for us at this point.
1139 */
1140 relocs->relocs[i].delta += delta;
1141
1142 /* Since the delta has changed, we need to update the actual
1143 * relocated value with the new presumed value. This function
1144 * should only be called on batch buffers, so we know it isn't in
1145 * use by the GPU at the moment.
1146 */
1147 assert(relocs->relocs[i].offset < from_bo->size);
1148 write_reloc(pool->device, from_bo->map + relocs->relocs[i].offset,
1149 relocs->relocs[i].presumed_offset +
1150 relocs->relocs[i].delta, false);
1151 }
1152 }
1153 }
1154
1155 static void
1156 anv_reloc_list_apply(struct anv_device *device,
1157 struct anv_reloc_list *list,
1158 struct anv_bo *bo,
1159 bool always_relocate)
1160 {
1161 for (size_t i = 0; i < list->num_relocs; i++) {
1162 struct anv_bo *target_bo = list->reloc_bos[i];
1163 if (list->relocs[i].presumed_offset == target_bo->offset &&
1164 !always_relocate)
1165 continue;
1166
1167 void *p = bo->map + list->relocs[i].offset;
1168 write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
1169 list->relocs[i].presumed_offset = target_bo->offset;
1170 }
1171 }
1172
1173 /**
1174 * This function applies the relocations for a command buffer and writes the
1175 * actual addresses into the buffers as per what we were told by the kernel on
1176 * the previous execbuf2 call. This should be safe to do because, for each
1177 * relocated address, we have two cases:
1178 *
1179 * 1) The target BO is inactive (as seen by the kernel). In this case, it is
1180 * not in use by the GPU so updating the address is 100% ok. It won't be
1181 * in-use by the GPU (from our context) again until the next execbuf2
1182 * happens. If the kernel decides to move it in the next execbuf2, it
1183 * will have to do the relocations itself, but that's ok because it should
1184 * have all of the information needed to do so.
1185 *
1186 * 2) The target BO is active (as seen by the kernel). In this case, it
1187 * hasn't moved since the last execbuffer2 call because GTT shuffling
1188 * *only* happens when the BO is idle. (From our perspective, it only
1189 * happens inside the execbuffer2 ioctl, but the shuffling may be
1190 * triggered by another ioctl, with full-ppgtt this is limited to only
1191 * execbuffer2 ioctls on the same context, or memory pressure.) Since the
1192 * target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
1193 * address and the relocated value we are writing into the BO will be the
1194 * same as the value that is already there.
1195 *
1196 * There is also a possibility that the target BO is active but the exact
1197 * RENDER_SURFACE_STATE object we are writing the relocation into isn't in
1198 * use. In this case, the address currently in the RENDER_SURFACE_STATE
1199 * may be stale but it's still safe to write the relocation because that
1200 * particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
1201 * won't be until the next execbuf2 call.
1202 *
1203 * By doing relocations on the CPU, we can tell the kernel that it doesn't
1204 * need to bother. We want to do this because the surface state buffer is
1205 * used by every command buffer so, if the kernel does the relocations, it
1206 * will always be busy and the kernel will always stall. This is also
1207 * probably the fastest mechanism for doing relocations since the kernel would
1208 * have to make a full copy of all the relocation lists.
1209 */
1210 static bool
1211 relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
1212 struct anv_execbuf *exec)
1213 {
1214 static int userspace_relocs = -1;
1215 if (userspace_relocs < 0)
1216 userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
1217 if (!userspace_relocs)
1218 return false;
1219
1220 /* First, we have to check to see whether or not we can even do the
1221 * relocation. New buffers which have never been submitted to the kernel
1222 * don't have a valid offset so we need to let the kernel do relocations so
1223 * that we can get offsets for them. On future execbuf2 calls, those
1224 * buffers will have offsets and we will be able to skip relocating.
1225 * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
1226 */
1227 for (uint32_t i = 0; i < exec->bo_count; i++) {
1228 if (exec->bos[i]->offset == (uint64_t)-1)
1229 return false;
1230 }
1231
1232 /* Since surface states are shared between command buffers and we don't
1233 * know what order they will be submitted to the kernel, we don't know
1234 * what address is actually written in the surface state object at any
1235 * given time. The only option is to always relocate them.
1236 */
1237 anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
1238 &cmd_buffer->device->surface_state_block_pool.bo,
1239 true /* always relocate surface states */);
1240
1241 /* Since we own all of the batch buffers, we know what values are stored
1242 * in the relocated addresses and only have to update them if the offsets
1243 * have changed.
1244 */
1245 struct anv_batch_bo **bbo;
1246 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1247 anv_reloc_list_apply(cmd_buffer->device,
1248 &(*bbo)->relocs, &(*bbo)->bo, false);
1249 }
1250
1251 for (uint32_t i = 0; i < exec->bo_count; i++)
1252 exec->objects[i].offset = exec->bos[i]->offset;
1253
1254 return true;
1255 }
1256
1257 static VkResult
1258 setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
1259 struct anv_cmd_buffer *cmd_buffer)
1260 {
1261 struct anv_batch *batch = &cmd_buffer->batch;
1262 struct anv_block_pool *ss_pool =
1263 &cmd_buffer->device->surface_state_block_pool;
1264
1265 adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
1266 cmd_buffer->last_ss_pool_center);
1267 VkResult result =
1268 anv_execbuf_add_bo(execbuf, &ss_pool->bo, &cmd_buffer->surface_relocs, 0,
1269 &cmd_buffer->device->alloc);
1270 if (result != VK_SUCCESS)
1271 return result;
1272
1273 /* First, we walk over all of the bos we've seen and add them and their
1274 * relocations to the validate list.
1275 */
1276 struct anv_batch_bo **bbo;
1277 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1278 adjust_relocations_to_state_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
1279 cmd_buffer->last_ss_pool_center);
1280
1281 result = anv_execbuf_add_bo(execbuf, &(*bbo)->bo, &(*bbo)->relocs, 0,
1282 &cmd_buffer->device->alloc);
1283 if (result != VK_SUCCESS)
1284 return result;
1285 }
1286
1287 /* Now that we've adjusted all of the surface state relocations, we need to
1288 * record the surface state pool center so future executions of the command
1289 * buffer can adjust correctly.
1290 */
1291 cmd_buffer->last_ss_pool_center = ss_pool->center_bo_offset;
1292
1293 struct anv_batch_bo *first_batch_bo =
1294 list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
1295
1296 /* The kernel requires that the last entry in the validation list be the
1297 * batch buffer to execute. We can simply swap the element
1298 * corresponding to the first batch_bo in the chain with the last
1299 * element in the list.
1300 */
1301 if (first_batch_bo->bo.index != execbuf->bo_count - 1) {
1302 uint32_t idx = first_batch_bo->bo.index;
1303 uint32_t last_idx = execbuf->bo_count - 1;
1304
1305 struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
1306 assert(execbuf->bos[idx] == &first_batch_bo->bo);
1307
1308 execbuf->objects[idx] = execbuf->objects[last_idx];
1309 execbuf->bos[idx] = execbuf->bos[last_idx];
1310 execbuf->bos[idx]->index = idx;
1311
1312 execbuf->objects[last_idx] = tmp_obj;
1313 execbuf->bos[last_idx] = &first_batch_bo->bo;
1314 first_batch_bo->bo.index = last_idx;
1315 }
1316
1317 /* Now we go through and fixup all of the relocation lists to point to
1318 * the correct indices in the object array. We have to do this after we
1319 * reorder the list above as some of the indices may have changed.
1320 */
1321 u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
1322 anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
1323
1324 anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
1325
1326 if (!cmd_buffer->device->info.has_llc) {
1327 __builtin_ia32_mfence();
1328 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1329 for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
1330 __builtin_ia32_clflush((*bbo)->bo.map + i);
1331 }
1332 }
1333
1334 execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1335 .buffers_ptr = (uintptr_t) execbuf->objects,
1336 .buffer_count = execbuf->bo_count,
1337 .batch_start_offset = 0,
1338 .batch_len = batch->next - batch->start,
1339 .cliprects_ptr = 0,
1340 .num_cliprects = 0,
1341 .DR1 = 0,
1342 .DR4 = 0,
1343 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER |
1344 I915_EXEC_CONSTANTS_REL_GENERAL,
1345 .rsvd1 = cmd_buffer->device->context_id,
1346 .rsvd2 = 0,
1347 };
1348
1349 if (relocate_cmd_buffer(cmd_buffer, execbuf)) {
1350 /* If we were able to successfully relocate everything, tell the kernel
1351 * that it can skip doing relocations. The requirement for using
1352 * NO_RELOC is:
1353 *
1354 * 1) The addresses written in the objects must match the corresponding
1355 * reloc.presumed_offset which in turn must match the corresponding
1356 * execobject.offset.
1357 *
1358 * 2) To avoid stalling, execobject.offset should match the current
1359 * address of that object within the active context.
1360 *
1361 * In order to satisfy all of the invariants that make userspace
1362 * relocations safe (see relocate_cmd_buffer()), we need to
1363 * further ensure that the addresses we use match those used by the
1364 * kernel for the most recent execbuf2.
1365 *
1366 * The kernel may still choose to do relocations anyway if something has
1367 * moved in the GTT. In this case, the relocation list still needs to be
1368 * valid. All relocations on the batch buffers are already valid and
1369 * kept up-to-date. For surface state relocations, by applying the
1370 * relocations in relocate_cmd_buffer, we ensured that the address in
1371 * the RENDER_SURFACE_STATE matches presumed_offset, so it should be
1372 * safe for the kernel to relocate them as needed.
1373 */
1374 execbuf->execbuf.flags |= I915_EXEC_NO_RELOC;
1375 } else {
1376 /* In the case where we fall back to doing kernel relocations, we need
1377 * to ensure that the relocation list is valid. All relocations on the
1378 * batch buffers are already valid and kept up-to-date. Since surface
1379 * states are shared between command buffers and we don't know what
1380 * order they will be submitted to the kernel, we don't know what
1381 * address is actually written in the surface state object at any given
1382 * time. The only option is to set a bogus presumed offset and let the
1383 * kernel relocate them.
1384 */
1385 for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
1386 cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
1387 }
1388
1389 return VK_SUCCESS;
1390 }
1391
1392 VkResult
1393 anv_cmd_buffer_execbuf(struct anv_device *device,
1394 struct anv_cmd_buffer *cmd_buffer,
1395 const VkSemaphore *in_semaphores,
1396 uint32_t num_in_semaphores,
1397 const VkSemaphore *out_semaphores,
1398 uint32_t num_out_semaphores)
1399 {
1400 struct anv_execbuf execbuf;
1401 anv_execbuf_init(&execbuf);
1402
1403 VkResult result = VK_SUCCESS;
1404 for (uint32_t i = 0; i < num_in_semaphores; i++) {
1405 ANV_FROM_HANDLE(anv_semaphore, semaphore, in_semaphores[i]);
1406 assert(semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE);
1407 struct anv_semaphore_impl *impl = &semaphore->permanent;
1408
1409 switch (impl->type) {
1410 case ANV_SEMAPHORE_TYPE_BO:
1411 result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
1412 0, &device->alloc);
1413 if (result != VK_SUCCESS)
1414 return result;
1415 break;
1416 default:
1417 break;
1418 }
1419 }
1420
1421 for (uint32_t i = 0; i < num_out_semaphores; i++) {
1422 ANV_FROM_HANDLE(anv_semaphore, semaphore, out_semaphores[i]);
1423 assert(semaphore->temporary.type == ANV_SEMAPHORE_TYPE_NONE);
1424 struct anv_semaphore_impl *impl = &semaphore->permanent;
1425
1426 switch (impl->type) {
1427 case ANV_SEMAPHORE_TYPE_BO:
1428 result = anv_execbuf_add_bo(&execbuf, impl->bo, NULL,
1429 EXEC_OBJECT_WRITE, &device->alloc);
1430 if (result != VK_SUCCESS)
1431 return result;
1432 break;
1433 default:
1434 break;
1435 }
1436 }
1437
1438 result = setup_execbuf_for_cmd_buffer(&execbuf, cmd_buffer);
1439 if (result != VK_SUCCESS)
1440 return result;
1441
1442 result = anv_device_execbuf(device, &execbuf.execbuf, execbuf.bos);
1443
1444 anv_execbuf_finish(&execbuf, &device->alloc);
1445
1446 return result;
1447 }