/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "genxml/gen7_pack.h"
#include "genxml/gen8_pack.h"

/** \file anv_batch_chain.c
 *
 * This file contains functions related to anv_cmd_buffer as a data
 * structure.  This involves everything required to create and destroy
 * the actual batch buffers as well as link them together and handle
 * relocations and surface state.  It specifically does *not* contain any
 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
 */
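
/* Roughly, the flow implemented below is: anv_cmd_buffer_init_batch_bo_chain()
 * allocates the first anv_batch_bo and points cmd_buffer->batch at it.  When
 * the batch runs out of space, the extend callback either chains to a fresh
 * BO with MI_BATCH_BUFFER_START or grows the current BO in place.  At the end
 * of recording, anv_cmd_buffer_end_batch_buffer() terminates the batch, and
 * anv_cmd_buffer_prepare_execbuf() flattens everything into the execbuf2
 * object and relocation lists that the queue-submit path hands to the kernel.
 */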

/*-----------------------------------------------------------------------*
 * Functions related to anv_reloc_list
 *-----------------------------------------------------------------------*/

static VkResult
anv_reloc_list_init_clone(struct anv_reloc_list *list,
                          const VkAllocationCallbacks *alloc,
                          const struct anv_reloc_list *other_list)
{
   if (other_list) {
      list->num_relocs = other_list->num_relocs;
      list->array_length = other_list->array_length;
   } else {
      list->num_relocs = 0;
      list->array_length = 256;
   }

   list->relocs =
      anv_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (list->relocs == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   list->reloc_bos =
      anv_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);

   if (list->reloc_bos == NULL) {
      anv_free(alloc, list->relocs);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   if (other_list) {
      memcpy(list->relocs, other_list->relocs,
             list->array_length * sizeof(*list->relocs));
      memcpy(list->reloc_bos, other_list->reloc_bos,
             list->array_length * sizeof(*list->reloc_bos));
   }

   return VK_SUCCESS;
}

VkResult
anv_reloc_list_init(struct anv_reloc_list *list,
                    const VkAllocationCallbacks *alloc)
{
   return anv_reloc_list_init_clone(list, alloc, NULL);
}

void
anv_reloc_list_finish(struct anv_reloc_list *list,
                      const VkAllocationCallbacks *alloc)
{
   anv_free(alloc, list->relocs);
   anv_free(alloc, list->reloc_bos);
}

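/* Ensure the list has room for at least num_additional_relocs more entries,
 * doubling the backing arrays as needed.  Callers below currently ignore
 * the result (see the TODO comments at the call sites).
 */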
static VkResult
anv_reloc_list_grow(struct anv_reloc_list *list,
                    const VkAllocationCallbacks *alloc,
                    size_t num_additional_relocs)
{
   if (list->num_relocs + num_additional_relocs <= list->array_length)
      return VK_SUCCESS;

   size_t new_length = list->array_length * 2;
   while (new_length < list->num_relocs + num_additional_relocs)
      new_length *= 2;

   struct drm_i915_gem_relocation_entry *new_relocs =
      anv_alloc(alloc, new_length * sizeof(*list->relocs), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_relocs == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   struct anv_bo **new_reloc_bos =
      anv_alloc(alloc, new_length * sizeof(*list->reloc_bos), 8,
                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (new_reloc_bos == NULL) {
      anv_free(alloc, new_relocs);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
   memcpy(new_reloc_bos, list->reloc_bos,
          list->num_relocs * sizeof(*list->reloc_bos));

   anv_free(alloc, list->relocs);
   anv_free(alloc, list->reloc_bos);

   list->array_length = new_length;
   list->relocs = new_relocs;
   list->reloc_bos = new_reloc_bos;

   return VK_SUCCESS;
}

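/* Record a relocation at the given offset within the batch that points at
 * target_bo + delta, and return the presumed 64-bit address so the caller
 * can write it into the batch now.  The kernel patches the value later if
 * the target BO ends up somewhere else.
 */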
uint64_t
anv_reloc_list_add(struct anv_reloc_list *list,
                   const VkAllocationCallbacks *alloc,
                   uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
{
   struct drm_i915_gem_relocation_entry *entry;
   int index;

   const uint32_t domain =
      target_bo->is_winsys_bo ? I915_GEM_DOMAIN_RENDER : 0;

   anv_reloc_list_grow(list, alloc, 1);
   /* TODO: Handle failure */

   /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
   index = list->num_relocs++;
   list->reloc_bos[index] = target_bo;
   entry = &list->relocs[index];
   entry->target_handle = target_bo->gem_handle;
   entry->delta = delta;
   entry->offset = offset;
   entry->presumed_offset = target_bo->offset;
   entry->read_domains = domain;
   entry->write_domain = domain;
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));

   return target_bo->offset + delta;
}

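/* Append all of other's relocations onto list, rebasing each relocation's
 * batch offset by `offset` (the position at which other's batch contents
 * were copied into the destination batch).
 */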
static void
anv_reloc_list_append(struct anv_reloc_list *list,
                      const VkAllocationCallbacks *alloc,
                      struct anv_reloc_list *other, uint32_t offset)
{
   anv_reloc_list_grow(list, alloc, other->num_relocs);
   /* TODO: Handle failure */

   memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
          other->num_relocs * sizeof(other->relocs[0]));
   memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
          other->num_relocs * sizeof(other->reloc_bos[0]));

   for (uint32_t i = 0; i < other->num_relocs; i++)
      list->relocs[i + list->num_relocs].offset += offset;

   list->num_relocs += other->num_relocs;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_batch
 *-----------------------------------------------------------------------*/

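/* Reserve num_dwords worth of space in the batch, calling the extend
 * callback first if the request does not fit, and return a pointer for the
 * caller to pack its command into.
 */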
void *
anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
{
   if (batch->next + num_dwords * 4 > batch->end)
      batch->extend_cb(batch, batch->user_data);

   void *p = batch->next;

   batch->next += num_dwords * 4;
   assert(batch->next <= batch->end);

   return p;
}

uint64_t
anv_batch_emit_reloc(struct anv_batch *batch,
                     void *location, struct anv_bo *bo, uint32_t delta)
{
   return anv_reloc_list_add(batch->relocs, batch->alloc,
                             location - batch->start, bo, delta);
}

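/* Copy the contents of other into batch, growing batch if needed, and pull
 * other's relocations across with their offsets rebased to where the copy
 * landed.  This backs the "emit" secondary execution modes further below.
 */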
void
anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
{
   uint32_t size, offset;

   size = other->next - other->start;
   assert(size % 4 == 0);

   if (batch->next + size > batch->end)
      batch->extend_cb(batch, batch->user_data);

   assert(batch->next + size <= batch->end);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
   memcpy(batch->next, other->start, size);

   offset = batch->next - batch->start;
   anv_reloc_list_append(batch->relocs, batch->alloc,
                         other->relocs, offset);

   batch->next += size;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_batch_bo
 *-----------------------------------------------------------------------*/

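/* Allocate a fresh batch BO from the device's batch BO pool along with an
 * empty relocation list.  anv_batch_bo_clone() below does the same but also
 * copies another batch BO's contents and relocations, which is how
 * COPY_AND_CHAIN secondaries are made safely re-executable.
 */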
static VkResult
anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
                    struct anv_batch_bo **bbo_out)
{
   VkResult result;

   struct anv_batch_bo *bbo = anv_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
                                        8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (bbo == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
                              ANV_CMD_BUFFER_BATCH_SIZE);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
   if (result != VK_SUCCESS)
      goto fail_bo_alloc;

   *bbo_out = bbo;

   return VK_SUCCESS;

 fail_bo_alloc:
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
 fail_alloc:
   anv_free(&cmd_buffer->pool->alloc, bbo);

   return result;
}

static VkResult
anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
                   const struct anv_batch_bo *other_bbo,
                   struct anv_batch_bo **bbo_out)
{
   VkResult result;

   struct anv_batch_bo *bbo = anv_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
                                        8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (bbo == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
                              other_bbo->bo.size);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
                                      &other_bbo->relocs);
   if (result != VK_SUCCESS)
      goto fail_bo_alloc;

   bbo->length = other_bbo->length;
   memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);

   bbo->last_ss_pool_bo_offset = other_bbo->last_ss_pool_bo_offset;

   *bbo_out = bbo;

   return VK_SUCCESS;

 fail_bo_alloc:
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
 fail_alloc:
   anv_free(&cmd_buffer->pool->alloc, bbo);

   return result;
}

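/* The next three helpers keep an anv_batch and its current anv_batch_bo in
 * sync: _start points the batch at an empty BO, _continue resumes writing at
 * bbo->length, and _finish records how many bytes were actually written.
 * batch_padding is subtracted from the usable size so there is always room
 * to emit a chaining MI_BATCH_BUFFER_START at the end.
 */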
static void
anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
                   size_t batch_padding)
{
   batch->next = batch->start = bbo->bo.map;
   batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
   batch->relocs = &bbo->relocs;
   bbo->last_ss_pool_bo_offset = 0;
   bbo->relocs.num_relocs = 0;
}

static void
anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
                      size_t batch_padding)
{
   batch->start = bbo->bo.map;
   batch->next = bbo->bo.map + bbo->length;
   batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
   batch->relocs = &bbo->relocs;
}

static void
anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
{
   assert(batch->start == bbo->bo.map);
   bbo->length = batch->next - batch->start;
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
}

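/* Replace bbo's backing BO with one large enough to hold at least
 * `additional` more bytes (plus padding), copying over what has been written
 * so far.  Used on platforms that cannot chain batch buffers.
 */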
static VkResult
anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
                  struct anv_batch *batch, size_t additional,
                  size_t batch_padding)
{
   assert(batch->start == bbo->bo.map);
   bbo->length = batch->next - batch->start;

   size_t new_size = bbo->bo.size;
   while (new_size <= bbo->length + additional + batch_padding)
      new_size *= 2;

   if (new_size == bbo->bo.size)
      return VK_SUCCESS;

   struct anv_bo new_bo;
   VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
                                       &new_bo, new_size);
   if (result != VK_SUCCESS)
      return result;

   memcpy(new_bo.map, bbo->bo.map, bbo->length);

   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);

   bbo->bo = new_bo;
   anv_batch_bo_continue(bbo, batch, batch_padding);

   return VK_SUCCESS;
}

static void
anv_batch_bo_destroy(struct anv_batch_bo *bbo,
                     struct anv_cmd_buffer *cmd_buffer)
{
   anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
   anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
   anv_free(&cmd_buffer->pool->alloc, bbo);
}

static VkResult
anv_batch_bo_list_clone(const struct list_head *list,
                        struct anv_cmd_buffer *cmd_buffer,
                        struct list_head *new_list)
{
   VkResult result = VK_SUCCESS;

   list_inithead(new_list);

   struct anv_batch_bo *prev_bbo = NULL;
   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
      struct anv_batch_bo *new_bbo = NULL;
      result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
      if (result != VK_SUCCESS)
         break;
      list_addtail(&new_bbo->link, new_list);

      if (prev_bbo) {
         /* As we clone this list of batch_bo's, they chain one to the
          * other using MI_BATCH_BUFFER_START commands.  We need to fix up
          * those relocations as we go.  Fortunately, this is pretty easy
          * as it will always be the last relocation in the list.
          */
         uint32_t last_idx = prev_bbo->relocs.num_relocs - 1;
         assert(prev_bbo->relocs.reloc_bos[last_idx] == &bbo->bo);
         prev_bbo->relocs.reloc_bos[last_idx] = &new_bbo->bo;
      }

      prev_bbo = new_bbo;
   }

   if (result != VK_SUCCESS) {
      list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
         anv_batch_bo_destroy(bbo, cmd_buffer);
   }

   return result;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_cmd_buffer
 *-----------------------------------------------------------------------*/

static inline struct anv_batch_bo *
anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
{
   return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
}

struct anv_address
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   return (struct anv_address) {
      .bo = &cmd_buffer->device->surface_state_block_pool.bo,
      .offset = *(int32_t *)anv_vector_head(&cmd_buffer->bt_blocks),
   };
}

static void
emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_bo *bo, uint32_t offset)
{
   /* In gen8+ the address field grew to two dwords to accommodate 48 bit
    * offsets.  The high 16 bits are in the last dword, so we can use the gen8
    * version in either case, as long as we set the instruction length in the
    * header accordingly.  This means that we always emit three dwords here
    * and all the padding and adjustment we do in this file works for all
    * gens.
    */

   const uint32_t gen7_length =
      GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
   const uint32_t gen8_length =
      GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;

   anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START,
                  .DWordLength = cmd_buffer->device->info.gen < 8 ?
                                 gen7_length : gen8_length,
                  ._2ndLevelBatchBuffer = _1stlevelbatch,
                  .AddressSpaceIndicator = ASI_PPGTT,
                  .BatchBufferStartAddress = { bo, offset });
}

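/* Emit an MI_BATCH_BUFFER_START in the current batch BO that jumps to the
 * start of bbo, then mark the current BO as finished.  The caller is
 * responsible for pointing the batch at the new BO afterwards.
 */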
static void
cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_batch_bo *bbo)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_batch_bo *current_bbo =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);

   /* We set the end of the batch a little short so we would be sure we
    * have room for the chaining command.  Since we're about to emit the
    * chaining command, let's set it back where it should go.
    */
   batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
   assert(batch->end == current_bbo->bo.map + current_bbo->bo.size);

   emit_batch_buffer_start(cmd_buffer, &bbo->bo, 0);

   anv_batch_bo_finish(current_bbo, batch);
}

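/* Batch extension callback used when the device can chain batch buffers:
 * allocate a new batch BO, record it in seen_bbos, chain the current BO to
 * it, and point the batch at the new BO.
 */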
static VkResult
anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
{
   struct anv_cmd_buffer *cmd_buffer = _data;
   struct anv_batch_bo *new_bbo;

   VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
   if (result != VK_SUCCESS)
      return result;

   struct anv_batch_bo **seen_bbo = anv_vector_add(&cmd_buffer->seen_bbos);
   if (seen_bbo == NULL) {
      anv_batch_bo_destroy(new_bbo, cmd_buffer);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   *seen_bbo = new_bbo;

   cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);

   list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);

   anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);

   return VK_SUCCESS;
}

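/* Batch extension callback for platforms that cannot chain batch buffers:
 * grow the single batch BO in place by at least another 4096 bytes.
 */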
static VkResult
anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
{
   struct anv_cmd_buffer *cmd_buffer = _data;
   struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);

   anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
                     GEN8_MI_BATCH_BUFFER_START_length * 4);

   return VK_SUCCESS;
}

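/* Allocate space for a binding table of `entries` surface state pointers
 * from the current binding table block.  Binding table blocks are allocated
 * from the back of the surface state block pool, so their offsets are
 * negative; *state_offset receives the (positive) amount the caller must add
 * to the surface state offsets it writes into this table.  Returns a zeroed
 * anv_state when the current block is full, signalling that a new block is
 * needed (see anv_cmd_buffer_new_binding_table_block() below).
 */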
struct anv_state
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t entries, uint32_t *state_offset)
{
   struct anv_block_pool *block_pool =
      &cmd_buffer->device->surface_state_block_pool;
   int32_t *bt_block = anv_vector_head(&cmd_buffer->bt_blocks);
   struct anv_state state;

   state.alloc_size = align_u32(entries * 4, 32);

   if (cmd_buffer->bt_next + state.alloc_size > block_pool->block_size)
      return (struct anv_state) { 0 };

   state.offset = cmd_buffer->bt_next;
   state.map = block_pool->map + *bt_block + state.offset;

   cmd_buffer->bt_next += state.alloc_size;

   assert(*bt_block < 0);
   *state_offset = -(*bt_block);

   return state;
}

struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
{
   return anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
}

struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment)
{
   return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
                                 size, alignment);
}

VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_block_pool *block_pool =
      &cmd_buffer->device->surface_state_block_pool;

   int32_t *offset = anv_vector_add(&cmd_buffer->bt_blocks);
   if (offset == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   *offset = anv_block_pool_alloc_back(block_pool);
   cmd_buffer->bt_next = 0;

   return VK_SUCCESS;
}

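/* Set up everything this file owns for a freshly created command buffer:
 * the first batch BO, the batch itself (with the chaining or growing extend
 * callback, depending on whether the device can chain batches), the list of
 * seen batch BOs, the binding table blocks, and the surface relocation list.
 */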
VkResult
anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo;
   VkResult result;

   list_inithead(&cmd_buffer->batch_bos);

   result = anv_batch_bo_create(cmd_buffer, &batch_bo);
   if (result != VK_SUCCESS)
      return result;

   list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);

   cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
   cmd_buffer->batch.user_data = cmd_buffer;

   if (cmd_buffer->device->can_chain_batches) {
      cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
   } else {
      cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
   }

   anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);

   int success = anv_vector_init(&cmd_buffer->seen_bbos,
                                 sizeof(struct anv_bo *),
                                 8 * sizeof(struct anv_bo *));
   if (!success)
      goto fail_batch_bo;

   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) = batch_bo;

   success = anv_vector_init(&cmd_buffer->bt_blocks, sizeof(int32_t),
                             8 * sizeof(int32_t));
   if (!success)
      goto fail_seen_bbos;

   result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
                                &cmd_buffer->pool->alloc);
   if (result != VK_SUCCESS)
      goto fail_bt_blocks;

   anv_cmd_buffer_new_binding_table_block(cmd_buffer);

   cmd_buffer->execbuf2.objects = NULL;
   cmd_buffer->execbuf2.bos = NULL;
   cmd_buffer->execbuf2.array_length = 0;

   return VK_SUCCESS;

 fail_bt_blocks:
   anv_vector_finish(&cmd_buffer->bt_blocks);
 fail_seen_bbos:
   anv_vector_finish(&cmd_buffer->seen_bbos);
 fail_batch_bo:
   anv_batch_bo_destroy(batch_bo, cmd_buffer);

   return result;
}

void
anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   int32_t *bt_block;
   anv_vector_foreach(bt_block, &cmd_buffer->bt_blocks) {
      anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
                          *bt_block);
   }
   anv_vector_finish(&cmd_buffer->bt_blocks);

   anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);

   anv_vector_finish(&cmd_buffer->seen_bbos);

   /* Destroy all of the batch buffers */
   list_for_each_entry_safe(struct anv_batch_bo, bbo,
                            &cmd_buffer->batch_bos, link) {
      anv_batch_bo_destroy(bbo, cmd_buffer);
   }

   anv_free(&cmd_buffer->pool->alloc, cmd_buffer->execbuf2.objects);
   anv_free(&cmd_buffer->pool->alloc, cmd_buffer->execbuf2.bos);
}

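/* Return the command buffer to its just-initialized state without freeing
 * and reallocating everything: keep the first batch BO and binding table
 * block, destroy the rest, and clear the seen-BO and relocation bookkeeping.
 */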
void
anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   /* Delete all but the first batch bo */
   assert(!list_empty(&cmd_buffer->batch_bos));
   while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
      list_del(&bbo->link);
      anv_batch_bo_destroy(bbo, cmd_buffer);
   }
   assert(!list_empty(&cmd_buffer->batch_bos));

   anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
                      &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);

   while (anv_vector_length(&cmd_buffer->bt_blocks) > 1) {
      int32_t *bt_block = anv_vector_remove(&cmd_buffer->bt_blocks);
      anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
                          *bt_block);
   }
   assert(anv_vector_length(&cmd_buffer->bt_blocks) == 1);
   cmd_buffer->bt_next = 0;

   cmd_buffer->surface_relocs.num_relocs = 0;

   /* Reset the list of seen buffers */
   cmd_buffer->seen_bbos.head = 0;
   cmd_buffer->seen_bbos.tail = 0;

   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);
}

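/* Finish recording: terminate a primary batch with MI_BATCH_BUFFER_END (and
 * a NOOP if needed to keep the batch an even number of dwords), or, for a
 * secondary, decide up front which execution mode vkCmdExecuteCommands will
 * use to splice it into a primary later.
 */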
void
anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      /* When we start a batch buffer, we subtract a certain amount of
       * padding from the end to ensure that we always have room to emit a
       * BATCH_BUFFER_START to chain to the next BO.  We need to remove
       * that padding before we end the batch; otherwise, we may end up
       * with our BATCH_BUFFER_END in another BO.
       */
      cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
      assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);

      anv_batch_emit(&cmd_buffer->batch, GEN7_MI_BATCH_BUFFER_END);

      /* Round batch up to an even number of dwords. */
      if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
         anv_batch_emit(&cmd_buffer->batch, GEN7_MI_NOOP);

      cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
   }

   anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
      /* If this is a secondary command buffer, we need to determine the
       * mode in which it will be executed with vkExecuteCommands.  We
       * determine this statically here so that this stays in sync with the
       * actual ExecuteCommands implementation.
       */
      if (!cmd_buffer->device->can_chain_batches) {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
      } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
                 (batch_bo->length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
         /* If the secondary has exactly one batch buffer in its list *and*
          * that batch buffer is less than half of the maximum size, we're
          * probably better off simply copying it into our batch.
          */
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
      } else if (!(cmd_buffer->usage_flags &
                   VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;

         /* When we chain, we need to add an MI_BATCH_BUFFER_START command
          * with its relocation.  In order to handle this we'll increment here
          * so we can unconditionally decrement right before adding the
          * MI_BATCH_BUFFER_START command.
          */
         batch_bo->relocs.num_relocs++;
         cmd_buffer->batch.next += GEN8_MI_BATCH_BUFFER_START_length * 4;
      } else {
         cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
      }
   }
}

static inline VkResult
anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
                             struct list_head *list)
{
   list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
      struct anv_batch_bo **bbo_ptr = anv_vector_add(&cmd_buffer->seen_bbos);
      if (bbo_ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      *bbo_ptr = bbo;
   }

   return VK_SUCCESS;
}

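/* Splice a secondary command buffer into the primary using whichever
 * exec_mode anv_cmd_buffer_end_batch_buffer() picked for it: copy it inline
 * (EMIT / GROW_AND_EMIT), jump into the secondary's BOs and patch its final
 * MI_BATCH_BUFFER_START to return to the primary (CHAIN), or clone its whole
 * BO list first so it can safely be executed more than once
 * (COPY_AND_CHAIN).
 */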
void
anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                             struct anv_cmd_buffer *secondary)
{
   switch (secondary->exec_mode) {
   case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
      anv_batch_emit_batch(&primary->batch, &secondary->batch);
      anv_cmd_buffer_emit_state_base_address(primary);
      break;
   case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
      unsigned length = secondary->batch.end - secondary->batch.start;
      anv_batch_bo_grow(primary, bbo, &primary->batch, length,
                        GEN8_MI_BATCH_BUFFER_START_length * 4);
      anv_batch_emit_batch(&primary->batch, &secondary->batch);
      anv_cmd_buffer_emit_state_base_address(primary);
      break;
   }
   case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
      struct anv_batch_bo *first_bbo =
         list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
      struct anv_batch_bo *last_bbo =
         list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);

      emit_batch_buffer_start(primary, &first_bbo->bo, 0);

      struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
      assert(primary->batch.start == this_bbo->bo.map);
      uint32_t offset = primary->batch.next - primary->batch.start;
      const uint32_t inst_size = GEN8_MI_BATCH_BUFFER_START_length * 4;

      /* Roll back the previous MI_BATCH_BUFFER_START and its relocation so we
       * can emit a new command and relocation for the current splice.  In
       * order to handle the initial-use case, we incremented next and
       * num_relocs in end_batch_buffer() so we can always just subtract
       * here.
       */
      last_bbo->relocs.num_relocs--;
      secondary->batch.next -= inst_size;
      emit_batch_buffer_start(secondary, &this_bbo->bo, offset);
      anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);

      /* After patching up the secondary buffer, we need to clflush the
       * modified instruction in case we're on a !llc platform.  We use a
       * little loop to handle the case where the instruction crosses a cache
       * line boundary.
       */
      if (!primary->device->info.has_llc) {
         void *inst = secondary->batch.next - inst_size;
         void *p = (void *) (((uintptr_t) inst) & ~CACHELINE_MASK);
         __builtin_ia32_mfence();
         while (p < secondary->batch.next) {
            __builtin_ia32_clflush(p);
            p += CACHELINE_SIZE;
         }
      }

      anv_cmd_buffer_emit_state_base_address(primary);
      break;
   }
   case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
      struct list_head copy_list;
      VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
                                                secondary,
                                                &copy_list);
      if (result != VK_SUCCESS)
         return; /* FIXME */

      anv_cmd_buffer_add_seen_bbos(primary, &copy_list);

      struct anv_batch_bo *first_bbo =
         list_first_entry(&copy_list, struct anv_batch_bo, link);
      struct anv_batch_bo *last_bbo =
         list_last_entry(&copy_list, struct anv_batch_bo, link);

      cmd_buffer_chain_to_batch_bo(primary, first_bbo);

      list_splicetail(&copy_list, &primary->batch_bos);

      anv_batch_bo_continue(last_bbo, &primary->batch,
                            GEN8_MI_BATCH_BUFFER_START_length * 4);

      anv_cmd_buffer_emit_state_base_address(primary);
      break;
   }
   default:
      assert(!"Invalid execution mode");
   }

   anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
                         &secondary->surface_relocs, 0);
}

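/* Add a BO (and, the first time we see a non-NULL relocation list for it,
 * its relocations and their target BOs) to the execbuf2 validation list,
 * growing the objects/bos arrays as needed.  bo->index is used to detect
 * BOs that have already been added.
 */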
static VkResult
anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
                      struct anv_bo *bo,
                      struct anv_reloc_list *relocs)
{
   struct drm_i915_gem_exec_object2 *obj = NULL;

   if (bo->index < cmd_buffer->execbuf2.bo_count &&
       cmd_buffer->execbuf2.bos[bo->index] == bo)
      obj = &cmd_buffer->execbuf2.objects[bo->index];

   if (obj == NULL) {
      /* We've never seen this one before.  Add it to the list and assign
       * an id that we can use later.
       */
      if (cmd_buffer->execbuf2.bo_count >= cmd_buffer->execbuf2.array_length) {
         uint32_t new_len = cmd_buffer->execbuf2.objects ?
                            cmd_buffer->execbuf2.array_length * 2 : 64;

         struct drm_i915_gem_exec_object2 *new_objects =
            anv_alloc(&cmd_buffer->pool->alloc, new_len * sizeof(*new_objects),
                      8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
         if (new_objects == NULL)
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

         struct anv_bo **new_bos =
            anv_alloc(&cmd_buffer->pool->alloc, new_len * sizeof(*new_bos),
                      8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
         if (new_bos == NULL) {
            anv_free(&cmd_buffer->pool->alloc, new_objects);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }

         if (cmd_buffer->execbuf2.objects) {
            memcpy(new_objects, cmd_buffer->execbuf2.objects,
                   cmd_buffer->execbuf2.bo_count * sizeof(*new_objects));
            memcpy(new_bos, cmd_buffer->execbuf2.bos,
                   cmd_buffer->execbuf2.bo_count * sizeof(*new_bos));
         }

         cmd_buffer->execbuf2.objects = new_objects;
         cmd_buffer->execbuf2.bos = new_bos;
         cmd_buffer->execbuf2.array_length = new_len;
      }

      assert(cmd_buffer->execbuf2.bo_count < cmd_buffer->execbuf2.array_length);

      bo->index = cmd_buffer->execbuf2.bo_count++;
      obj = &cmd_buffer->execbuf2.objects[bo->index];
      cmd_buffer->execbuf2.bos[bo->index] = bo;

      obj->handle = bo->gem_handle;
      obj->relocation_count = 0;
      obj->relocs_ptr = 0;
      obj->alignment = 0;
      obj->offset = bo->offset;
      obj->flags = bo->is_winsys_bo ? EXEC_OBJECT_WRITE : 0;
      obj->rsvd1 = 0;
      obj->rsvd2 = 0;
   }

   if (relocs != NULL && obj->relocation_count == 0) {
      /* This is the first time we've ever seen a list of relocations for
       * this BO.  Go ahead and set the relocations and then walk the list
       * of relocations and add them all.
       */
      obj->relocation_count = relocs->num_relocs;
      obj->relocs_ptr = (uintptr_t) relocs->relocs;

      for (size_t i = 0; i < relocs->num_relocs; i++) {
         /* A quick sanity check on relocations */
         assert(relocs->relocs[i].offset < bo->size);
         anv_cmd_buffer_add_bo(cmd_buffer, relocs->reloc_bos[i], NULL);
      }
   }

   return VK_SUCCESS;
}

static void
anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_reloc_list *list)
{
   struct anv_bo *bo;

   /* If the kernel supports I915_EXEC_NO_RELOC, it will compare the offset in
    * struct drm_i915_gem_exec_object2 against the BO's current offset and, if
    * none of the BOs have moved, it will skip relocation processing
    * altogether.  If I915_EXEC_NO_RELOC is not supported, the kernel ignores
    * the incoming value of offset so we can set it either way.  For that to
    * work we need to make sure all relocs use the same presumed offset.
    */

   for (size_t i = 0; i < list->num_relocs; i++) {
      bo = list->reloc_bos[i];
      if (bo->offset != list->relocs[i].presumed_offset)
         cmd_buffer->execbuf2.need_reloc = true;

      list->relocs[i].target_handle = bo->index;
   }
}

static uint64_t
read_reloc(const struct anv_device *device, const void *p)
{
   if (device->info.gen >= 8)
      return *(uint64_t *)p;
   else
      return *(uint32_t *)p;
}

static void
write_reloc(const struct anv_device *device, void *p, uint64_t v)
{
   if (device->info.gen >= 8)
      *(uint64_t *)p = v;
   else
      *(uint32_t *)p = v;
}

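/* The surface state block pool grows from its center, so both relocations
 * stored in the pool (surface_relocs) and relocations in batch BOs that
 * point into the pool are emitted relative to the pool center.  The two
 * helpers below rewrite them in terms of the pool's actual GEM BO right
 * before submission: the "from" variant fixes the offsets and presumed
 * values of surface_relocs, while the "to" variant fixes the deltas of batch
 * relocations targeting the pool BO.
 */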
static void
adjust_relocations_from_block_pool(struct anv_block_pool *pool,
                                   struct anv_reloc_list *relocs)
{
   for (size_t i = 0; i < relocs->num_relocs; i++) {
      /* In general, we don't know how stale the relocated value is.  It
       * may have been used last time or it may not.  Since we don't want
       * to stomp it while the GPU may be accessing it, we haven't updated
       * it anywhere else in the code.  Instead, we just set the presumed
       * offset to what it is now based on the delta and the data in the
       * block pool.  Then the kernel will update it for us if needed.
       */
      assert(relocs->relocs[i].offset < pool->state.end);
      const void *p = pool->map + relocs->relocs[i].offset;

      /* We're reading back the relocated value from potentially incoherent
       * memory here.  However, any change to the value will be from the
       * kernel writing out relocations, which will keep the CPU cache up to
       * date.
       */
      relocs->relocs[i].presumed_offset =
         read_reloc(pool->device, p) - relocs->relocs[i].delta;

      /* All of the relocations from this block pool to other BO's should
       * have been emitted relative to the surface block pool center.  We
       * need to add the center offset to make them relative to the
       * beginning of the actual GEM bo.
       */
      relocs->relocs[i].offset += pool->center_bo_offset;
   }
}

static void
adjust_relocations_to_block_pool(struct anv_block_pool *pool,
                                 struct anv_bo *from_bo,
                                 struct anv_reloc_list *relocs,
                                 uint32_t *last_pool_center_bo_offset)
{
   assert(*last_pool_center_bo_offset <= pool->center_bo_offset);
   uint32_t delta = pool->center_bo_offset - *last_pool_center_bo_offset;

   /* When we initially emit relocations into a block pool, we don't
    * actually know what the final center_bo_offset will be so we just emit
    * it as if center_bo_offset == 0.  Now that we know what the center
    * offset is, we need to walk the list of relocations and adjust any
    * relocations that point to the pool bo with the correct offset.
    */
   for (size_t i = 0; i < relocs->num_relocs; i++) {
      if (relocs->reloc_bos[i] == &pool->bo) {
         /* Adjust the delta value in the relocation to correctly
          * correspond to the new delta.  Initially, this value may have
          * been negative (if treated as unsigned), but we trust in
          * uint32_t roll-over to fix that for us at this point.
          */
         relocs->relocs[i].delta += delta;

         /* Since the delta has changed, we need to update the actual
          * relocated value with the new presumed value.  This function
          * should only be called on batch buffers, so we know it isn't in
          * use by the GPU at the moment.
          */
         assert(relocs->relocs[i].offset < from_bo->size);
         write_reloc(pool->device, from_bo->map + relocs->relocs[i].offset,
                     relocs->relocs[i].presumed_offset +
                     relocs->relocs[i].delta);
      }
   }

   *last_pool_center_bo_offset = pool->center_bo_offset;
}

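/* Build the final drm_i915_gem_execbuffer2 for this command buffer: fix up
 * block pool relocations, add every BO we have touched to the validation
 * list (with the first batch BO last, as the kernel requires), rewrite
 * relocation target handles to validation-list indices, flush CPU caches on
 * non-LLC platforms, and fill out the execbuf struct itself.
 */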
void
anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch *batch = &cmd_buffer->batch;
   struct anv_block_pool *ss_pool =
      &cmd_buffer->device->surface_state_block_pool;

   cmd_buffer->execbuf2.bo_count = 0;
   cmd_buffer->execbuf2.need_reloc = false;

   adjust_relocations_from_block_pool(ss_pool, &cmd_buffer->surface_relocs);
   anv_cmd_buffer_add_bo(cmd_buffer, &ss_pool->bo, &cmd_buffer->surface_relocs);

   /* First, we walk over all of the bos we've seen and add them and their
    * relocations to the validate list.
    */
   struct anv_batch_bo **bbo;
   anv_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
      adjust_relocations_to_block_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
                                       &(*bbo)->last_ss_pool_bo_offset);

      anv_cmd_buffer_add_bo(cmd_buffer, &(*bbo)->bo, &(*bbo)->relocs);
   }

   struct anv_batch_bo *first_batch_bo =
      list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);

   /* The kernel requires that the last entry in the validation list be the
    * batch buffer to execute.  We can simply swap the element
    * corresponding to the first batch_bo in the chain with the last
    * element in the list.
    */
   if (first_batch_bo->bo.index != cmd_buffer->execbuf2.bo_count - 1) {
      uint32_t idx = first_batch_bo->bo.index;
      uint32_t last_idx = cmd_buffer->execbuf2.bo_count - 1;

      struct drm_i915_gem_exec_object2 tmp_obj =
         cmd_buffer->execbuf2.objects[idx];
      assert(cmd_buffer->execbuf2.bos[idx] == &first_batch_bo->bo);

      cmd_buffer->execbuf2.objects[idx] = cmd_buffer->execbuf2.objects[last_idx];
      cmd_buffer->execbuf2.bos[idx] = cmd_buffer->execbuf2.bos[last_idx];
      cmd_buffer->execbuf2.bos[idx]->index = idx;

      cmd_buffer->execbuf2.objects[last_idx] = tmp_obj;
      cmd_buffer->execbuf2.bos[last_idx] = &first_batch_bo->bo;
      first_batch_bo->bo.index = last_idx;
   }

   /* Now we go through and fixup all of the relocation lists to point to
    * the correct indices in the object array.  We have to do this after we
    * reorder the list above as some of the indices may have changed.
    */
   anv_vector_foreach(bbo, &cmd_buffer->seen_bbos)
      anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);

   anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);

   if (!cmd_buffer->device->info.has_llc) {
      __builtin_ia32_mfence();
      anv_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
         for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
            __builtin_ia32_clflush((*bbo)->bo.map + i);
      }
   }

   cmd_buffer->execbuf2.execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) cmd_buffer->execbuf2.objects,
      .buffer_count = cmd_buffer->execbuf2.bo_count,
      .batch_start_offset = 0,
      .batch_len = batch->next - batch->start,
      .cliprects_ptr = 0,
      .num_cliprects = 0,
      .DR1 = 0,
      .DR4 = 0,
      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER |
               I915_EXEC_CONSTANTS_REL_GENERAL,
      .rsvd1 = cmd_buffer->device->context_id,
      .rsvd2 = 0,
   };

   if (!cmd_buffer->execbuf2.need_reloc)
      cmd_buffer->execbuf2.execbuf.flags |= I915_EXEC_NO_RELOC;
}
1138 }