1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "gen7_pack.h"
33 #include "gen8_pack.h"
34
35 /** \file anv_batch_chain.c
36 *
37 * This file contains functions related to anv_cmd_buffer as a data
38 * structure. This involves everything required to create and destroy
39 * the actual batch buffers as well as link them together and handle
40 * relocations and surface state. It specifically does *not* contain any
41 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
42 */
43
44 /*-----------------------------------------------------------------------*
45 * Functions related to anv_reloc_list
46 *-----------------------------------------------------------------------*/
47
48 static VkResult
49 anv_reloc_list_init_clone(struct anv_reloc_list *list,
50 struct anv_device *device,
51 const struct anv_reloc_list *other_list)
52 {
53 if (other_list) {
54 list->num_relocs = other_list->num_relocs;
55 list->array_length = other_list->array_length;
56 } else {
57 list->num_relocs = 0;
58 list->array_length = 256;
59 }
60
61 list->relocs =
62 anv_device_alloc(device, list->array_length * sizeof(*list->relocs), 8,
63 VK_SYSTEM_ALLOC_TYPE_INTERNAL);
64
65 if (list->relocs == NULL)
66 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
67
68 list->reloc_bos =
69 anv_device_alloc(device, list->array_length * sizeof(*list->reloc_bos), 8,
70 VK_SYSTEM_ALLOC_TYPE_INTERNAL);
71
72 if (list->reloc_bos == NULL) {
73 anv_device_free(device, list->relocs);
74 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
75 }
76
77 if (other_list) {
78 memcpy(list->relocs, other_list->relocs,
79 list->array_length * sizeof(*list->relocs));
80 memcpy(list->reloc_bos, other_list->reloc_bos,
81 list->array_length * sizeof(*list->reloc_bos));
82 }
83
84 return VK_SUCCESS;
85 }
86
87 VkResult
88 anv_reloc_list_init(struct anv_reloc_list *list, struct anv_device *device)
89 {
90 return anv_reloc_list_init_clone(list, device, NULL);
91 }
92
93 void
94 anv_reloc_list_finish(struct anv_reloc_list *list, struct anv_device *device)
95 {
96 anv_device_free(device, list->relocs);
97 anv_device_free(device, list->reloc_bos);
98 }
99
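/* Ensure the relocation list has room for at least num_additional_relocs
 * more entries, doubling the array length until it fits.
 */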
100 static VkResult
101 anv_reloc_list_grow(struct anv_reloc_list *list, struct anv_device *device,
102 size_t num_additional_relocs)
103 {
104 if (list->num_relocs + num_additional_relocs <= list->array_length)
105 return VK_SUCCESS;
106
107 size_t new_length = list->array_length * 2;
108 while (new_length < list->num_relocs + num_additional_relocs)
109 new_length *= 2;
110
111 struct drm_i915_gem_relocation_entry *new_relocs =
112 anv_device_alloc(device, new_length * sizeof(*list->relocs), 8,
113 VK_SYSTEM_ALLOC_TYPE_INTERNAL);
114 if (new_relocs == NULL)
115 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
116
117 struct anv_bo **new_reloc_bos =
118 anv_device_alloc(device, new_length * sizeof(*list->reloc_bos), 8,
119 VK_SYSTEM_ALLOC_TYPE_INTERNAL);
120 if (new_reloc_bos == NULL) {
121 anv_device_free(device, new_relocs);
122 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
123 }
124
125 memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
126 memcpy(new_reloc_bos, list->reloc_bos,
127 list->num_relocs * sizeof(*list->reloc_bos));
128
129 anv_device_free(device, list->relocs);
130 anv_device_free(device, list->reloc_bos);
131
132 list->array_length = new_length;
133 list->relocs = new_relocs;
134 list->reloc_bos = new_reloc_bos;
135
136 return VK_SUCCESS;
137 }
138
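/* Record a relocation at the given batch offset pointing at target_bo plus
 * delta, and return the presumed address of the target (bo offset + delta).
 */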
139 uint64_t
140 anv_reloc_list_add(struct anv_reloc_list *list, struct anv_device *device,
141 uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
142 {
143 struct drm_i915_gem_relocation_entry *entry;
144 int index;
145
146 anv_reloc_list_grow(list, device, 1);
147 /* TODO: Handle failure */
148
149 /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
150 index = list->num_relocs++;
151 list->reloc_bos[index] = target_bo;
152 entry = &list->relocs[index];
153 entry->target_handle = target_bo->gem_handle;
154 entry->delta = delta;
155 entry->offset = offset;
156 entry->presumed_offset = target_bo->offset;
157 entry->read_domains = 0;
158 entry->write_domain = 0;
159
160 return target_bo->offset + delta;
161 }
162
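/* Append all of the relocations from 'other' onto 'list', adding 'offset'
 * to each copied relocation's batch offset (the location at which the other
 * batch was spliced in).
 */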
163 static void
164 anv_reloc_list_append(struct anv_reloc_list *list, struct anv_device *device,
165 struct anv_reloc_list *other, uint32_t offset)
166 {
167 anv_reloc_list_grow(list, device, other->num_relocs);
168 /* TODO: Handle failure */
169
170 memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
171 other->num_relocs * sizeof(other->relocs[0]));
172 memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
173 other->num_relocs * sizeof(other->reloc_bos[0]));
174
175 for (uint32_t i = 0; i < other->num_relocs; i++)
176 list->relocs[i + list->num_relocs].offset += offset;
177
178 list->num_relocs += other->num_relocs;
179 }
180
181 /*-----------------------------------------------------------------------*
182 * Functions related to anv_batch
183 *-----------------------------------------------------------------------*/
184
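/* Reserve space for num_dwords dwords in the batch, growing it through the
 * extend callback if needed, and return a pointer to the reserved space.
 */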
185 void *
186 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
187 {
188 if (batch->next + num_dwords * 4 > batch->end)
189 batch->extend_cb(batch, batch->user_data);
190
191 void *p = batch->next;
192
193 batch->next += num_dwords * 4;
194 assert(batch->next <= batch->end);
195
196 return p;
197 }
198
199 uint64_t
200 anv_batch_emit_reloc(struct anv_batch *batch,
201 void *location, struct anv_bo *bo, uint32_t delta)
202 {
203 return anv_reloc_list_add(batch->relocs, batch->device,
204 location - batch->start, bo, delta);
205 }
206
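/* Copy the contents of 'other' into 'batch' along with its relocations,
 * rebased to the location of the copy. This is what the EMIT secondary
 * execution mode uses.
 */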
207 void
208 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
209 {
210 uint32_t size, offset;
211
212 size = other->next - other->start;
213 assert(size % 4 == 0);
214
215 if (batch->next + size > batch->end)
216 batch->extend_cb(batch, batch->user_data);
217
218 assert(batch->next + size <= batch->end);
219
220 VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
221 memcpy(batch->next, other->start, size);
222
223 offset = batch->next - batch->start;
224 anv_reloc_list_append(batch->relocs, batch->device,
225 other->relocs, offset);
226
227 batch->next += size;
228 }
229
230 /*-----------------------------------------------------------------------*
231 * Functions related to anv_batch_bo
232 *-----------------------------------------------------------------------*/
233
234 static VkResult
235 anv_batch_bo_create(struct anv_device *device, struct anv_batch_bo **bbo_out)
236 {
237 VkResult result;
238
239 struct anv_batch_bo *bbo =
240 anv_device_alloc(device, sizeof(*bbo), 8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
241 if (bbo == NULL)
242 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
243
244 result = anv_bo_pool_alloc(&device->batch_bo_pool, &bbo->bo);
245 if (result != VK_SUCCESS)
246 goto fail_alloc;
247
248 result = anv_reloc_list_init(&bbo->relocs, device);
249 if (result != VK_SUCCESS)
250 goto fail_bo_alloc;
251
252 *bbo_out = bbo;
253
254 return VK_SUCCESS;
255
256 fail_bo_alloc:
257 anv_bo_pool_free(&device->batch_bo_pool, &bbo->bo);
258 fail_alloc:
259 anv_device_free(device, bbo);
260
261 return result;
262 }
263
264 static VkResult
265 anv_batch_bo_clone(struct anv_device *device,
266 const struct anv_batch_bo *other_bbo,
267 struct anv_batch_bo **bbo_out)
268 {
269 VkResult result;
270
271 struct anv_batch_bo *bbo =
272 anv_device_alloc(device, sizeof(*bbo), 8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
273 if (bbo == NULL)
274 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
275
276 result = anv_bo_pool_alloc(&device->batch_bo_pool, &bbo->bo);
277 if (result != VK_SUCCESS)
278 goto fail_alloc;
279
280 result = anv_reloc_list_init_clone(&bbo->relocs, device, &other_bbo->relocs);
281 if (result != VK_SUCCESS)
282 goto fail_bo_alloc;
283
284 bbo->length = other_bbo->length;
285 memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);
286
287 bbo->last_ss_pool_bo_offset = other_bbo->last_ss_pool_bo_offset;
288
289 *bbo_out = bbo;
290
291 return VK_SUCCESS;
292
293 fail_bo_alloc:
294 anv_bo_pool_free(&device->batch_bo_pool, &bbo->bo);
295 fail_alloc:
296 anv_device_free(device, bbo);
297
298 return result;
299 }
300
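/* Point 'batch' at the beginning of bbo's bo. batch_padding bytes are held
 * back at the end so there is always room for a chaining command.
 */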
301 static void
302 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
303 size_t batch_padding)
304 {
305 batch->next = batch->start = bbo->bo.map;
306 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
307 batch->relocs = &bbo->relocs;
308 bbo->last_ss_pool_bo_offset = 0;
309 bbo->relocs.num_relocs = 0;
310 }
311
312 static void
313 anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
314 size_t batch_padding)
315 {
316 batch->start = bbo->bo.map;
317 batch->next = bbo->bo.map + bbo->length;
318 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
319 batch->relocs = &bbo->relocs;
320 }
321
322 static void
323 anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
324 {
325 assert(batch->start == bbo->bo.map);
326 bbo->length = batch->next - batch->start;
327 VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
328 }
329
330 static void
331 anv_batch_bo_destroy(struct anv_batch_bo *bbo, struct anv_device *device)
332 {
333 anv_reloc_list_finish(&bbo->relocs, device);
334 anv_bo_pool_free(&device->batch_bo_pool, &bbo->bo);
335 anv_device_free(device, bbo);
336 }
337
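/* Deep-copy a chain of batch_bos, fixing up the MI_BATCH_BUFFER_START
 * relocation in each cloned batch_bo so that it points at the next clone
 * rather than at the original batch_bo it was copied from.
 */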
338 static VkResult
339 anv_batch_bo_list_clone(const struct list_head *list, struct anv_device *device,
340 struct list_head *new_list)
341 {
342 VkResult result = VK_SUCCESS;
343
344 list_inithead(new_list);
345
346 struct anv_batch_bo *prev_bbo = NULL;
347 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
348 struct anv_batch_bo *new_bbo;
349 result = anv_batch_bo_clone(device, bbo, &new_bbo);
350 if (result != VK_SUCCESS)
351 break;
352 list_addtail(&new_bbo->link, new_list);
353
354 if (prev_bbo) {
355 /* As we clone this list of batch_bos, they chain one to the
356 * other using MI_BATCH_BUFFER_START commands. We need to fix up
357 * those relocations as we go. Fortunately, this is pretty easy
358 * as it will always be the last relocation in the list.
359 */
360 uint32_t last_idx = prev_bbo->relocs.num_relocs - 1;
361 assert(prev_bbo->relocs.reloc_bos[last_idx] == &bbo->bo);
362 prev_bbo->relocs.reloc_bos[last_idx] = &new_bbo->bo;
363 }
364
365 prev_bbo = new_bbo;
366 }
367
368 if (result != VK_SUCCESS) {
369 list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
370 anv_batch_bo_destroy(bbo, device);
371 }
372
373 return result;
374 }
375
376 /*-----------------------------------------------------------------------*
377 * Functions related to anv_cmd_buffer
378 *-----------------------------------------------------------------------*/
379
380 static inline struct anv_batch_bo *
381 anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
382 {
383 return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
384 }
385
386 struct anv_address
387 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
388 {
389 return (struct anv_address) {
390 .bo = &cmd_buffer->device->surface_state_block_pool.bo,
391 .offset = *(int32_t *)anv_vector_head(&cmd_buffer->bt_blocks),
392 };
393 }
394
395 static void
396 emit_batch_buffer_start(struct anv_batch *batch, struct anv_bo *bo, uint32_t offset)
397 {
398 /* In gen8+ the address field grew to two dwords to accommodate 48-bit
399 * offsets. The high 16 bits are in the last dword, so we can use the gen8
400 * version in either case, as long as we set the instruction length in the
401 * header accordingly. This means that we always emit three dwords here
402 * and all the padding and adjustment we do in this file works for all
403 * gens.
404 */
405
406 const uint32_t gen7_length =
407 GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
408 const uint32_t gen8_length =
409 GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;
410
411 anv_batch_emit(batch, GEN8_MI_BATCH_BUFFER_START,
412 .DwordLength = batch->device->info.gen < 8 ? gen7_length : gen8_length,
413 ._2ndLevelBatchBuffer = _1stlevelbatch,
414 .AddressSpaceIndicator = ASI_PPGTT,
415 .BatchBufferStartAddress = { bo, offset });
416 }
417
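/* Emit an MI_BATCH_BUFFER_START at the end of the current batch_bo that
 * jumps to the start of 'bbo', then mark the current batch_bo finished.
 */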
418 static void
419 cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
420 struct anv_batch_bo *bbo)
421 {
422 struct anv_batch *batch = &cmd_buffer->batch;
423 struct anv_batch_bo *current_bbo =
424 anv_cmd_buffer_current_batch_bo(cmd_buffer);
425
426 /* We set the end of the batch a little short so we would be sure we
427 * have room for the chaining command. Since we're about to emit the
428 * chaining command, let's set it back where it should go.
429 */
430 batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
431 assert(batch->end == current_bbo->bo.map + current_bbo->bo.size);
432
433 emit_batch_buffer_start(batch, &bbo->bo, 0);
434
435 anv_batch_bo_finish(current_bbo, batch);
436 }
437
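/* Batch extend callback: allocate a fresh batch_bo, add it to the
 * seen_bbos list, chain the current batch_bo into it, and point the
 * batch at the new bo.
 */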
438 static VkResult
439 anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
440 {
441 struct anv_cmd_buffer *cmd_buffer = _data;
442 struct anv_batch_bo *new_bbo;
443
444 VkResult result = anv_batch_bo_create(cmd_buffer->device, &new_bbo);
445 if (result != VK_SUCCESS)
446 return result;
447
448 struct anv_batch_bo **seen_bbo = anv_vector_add(&cmd_buffer->seen_bbos);
449 if (seen_bbo == NULL) {
450 anv_batch_bo_destroy(new_bbo, cmd_buffer->device);
451 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
452 }
453 *seen_bbo = new_bbo;
454
455 cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
456
457 list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
458
459 anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);
460
461 return VK_SUCCESS;
462 }
463
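/* Allocate space for 'entries' binding table entries out of the current
 * binding table block. Returns a state with alloc_size == 0 if the block
 * is full. *state_offset is set to the negated offset of the current
 * bt_block within the surface state block pool.
 */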
464 struct anv_state
465 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
466 uint32_t entries, uint32_t *state_offset)
467 {
468 struct anv_block_pool *block_pool =
469 &cmd_buffer->device->surface_state_block_pool;
470 int32_t *bt_block = anv_vector_head(&cmd_buffer->bt_blocks);
471 struct anv_state state;
472
473 state.alloc_size = align_u32(entries * 4, 32);
474
475 if (cmd_buffer->bt_next + state.alloc_size > block_pool->block_size)
476 return (struct anv_state) { 0 };
477
478 state.offset = cmd_buffer->bt_next;
479 state.map = block_pool->map + *bt_block + state.offset;
480
481 cmd_buffer->bt_next += state.alloc_size;
482
483 assert(*bt_block < 0);
484 *state_offset = -(*bt_block);
485
486 return state;
487 }
488
489 struct anv_state
490 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
491 {
492 return anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
493 }
494
495 struct anv_state
496 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
497 uint32_t size, uint32_t alignment)
498 {
499 return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
500 size, alignment);
501 }
502
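/* Grab a fresh block from the back of the surface state block pool to hold
 * binding tables and reset bt_next to the start of that block.
 */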
503 VkResult
504 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
505 {
506 struct anv_block_pool *block_pool =
507 &cmd_buffer->device->surface_state_block_pool;
508
509 int32_t *offset = anv_vector_add(&cmd_buffer->bt_blocks);
510 if (offset == NULL)
511 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
512
513 *offset = anv_block_pool_alloc_back(block_pool);
514 cmd_buffer->bt_next = 0;
515
516 return VK_SUCCESS;
517 }
518
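/* Set up all of the batch-chaining state for a newly created command
 * buffer: the first batch_bo, the seen_bbos and bt_blocks vectors, the
 * surface relocation list, and the initial binding table block.
 */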
519 VkResult
520 anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
521 {
522 struct anv_batch_bo *batch_bo;
523 struct anv_device *device = cmd_buffer->device;
524 VkResult result;
525
526 list_inithead(&cmd_buffer->batch_bos);
527
528 result = anv_batch_bo_create(device, &batch_bo);
529 if (result != VK_SUCCESS)
530 return result;
531
532 list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
533
534 cmd_buffer->batch.device = device;
535 cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
536 cmd_buffer->batch.user_data = cmd_buffer;
537
538 anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
539 GEN8_MI_BATCH_BUFFER_START_length * 4);
540
541 int success = anv_vector_init(&cmd_buffer->seen_bbos,
542 sizeof(struct anv_bo *),
543 8 * sizeof(struct anv_bo *));
544 if (!success) {
result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
545 goto fail_batch_bo;
}
546
547 *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
548
549 success = anv_vector_init(&cmd_buffer->bt_blocks, sizeof(int32_t),
550 8 * sizeof(int32_t));
551 if (!success) {
result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
552 goto fail_seen_bbos;
}
553
554 result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
555 cmd_buffer->device);
556 if (result != VK_SUCCESS)
557 goto fail_bt_blocks;
558
559 anv_cmd_buffer_new_binding_table_block(cmd_buffer);
560
561 cmd_buffer->execbuf2.objects = NULL;
562 cmd_buffer->execbuf2.bos = NULL;
563 cmd_buffer->execbuf2.array_length = 0;
564
565 return VK_SUCCESS;
566
567 fail_bt_blocks:
568 anv_vector_finish(&cmd_buffer->bt_blocks);
569 fail_seen_bbos:
570 anv_vector_finish(&cmd_buffer->seen_bbos);
571 fail_batch_bo:
572 anv_batch_bo_destroy(batch_bo, device);
573
574 return result;
575 }
576
577 void
578 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
579 {
580 struct anv_device *device = cmd_buffer->device;
581
582 int32_t *bt_block;
583 anv_vector_foreach(bt_block, &cmd_buffer->bt_blocks) {
584 anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
585 *bt_block);
586 }
587 anv_vector_finish(&cmd_buffer->bt_blocks);
588
589 anv_reloc_list_finish(&cmd_buffer->surface_relocs, cmd_buffer->device);
590
591 anv_vector_finish(&cmd_buffer->seen_bbos);
592
593 /* Destroy all of the batch buffers */
594 list_for_each_entry_safe(struct anv_batch_bo, bbo,
595 &cmd_buffer->batch_bos, link) {
596 anv_batch_bo_destroy(bbo, device);
597 }
598
599 anv_device_free(device, cmd_buffer->execbuf2.objects);
600 anv_device_free(device, cmd_buffer->execbuf2.bos);
601 }
602
603 void
604 anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
605 {
606 struct anv_device *device = cmd_buffer->device;
607
608 /* Delete all but the first batch bo */
609 assert(!list_empty(&cmd_buffer->batch_bos));
610 while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
611 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
612 list_del(&bbo->link);
613 anv_batch_bo_destroy(bbo, device);
614 }
615 assert(!list_empty(&cmd_buffer->batch_bos));
616
617 anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
618 &cmd_buffer->batch,
619 GEN8_MI_BATCH_BUFFER_START_length * 4);
620
621 while (anv_vector_length(&cmd_buffer->bt_blocks) > 1) {
622 int32_t *bt_block = anv_vector_remove(&cmd_buffer->bt_blocks);
623 anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
624 *bt_block);
625 }
626 assert(anv_vector_length(&cmd_buffer->bt_blocks) == 1);
627 cmd_buffer->bt_next = 0;
628
629 cmd_buffer->surface_relocs.num_relocs = 0;
630
631 /* Reset the list of seen buffers */
632 cmd_buffer->seen_bbos.head = 0;
633 cmd_buffer->seen_bbos.tail = 0;
634
635 *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) =
636 anv_cmd_buffer_current_batch_bo(cmd_buffer);
637 }
638
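/* Finish off the current batch. Primaries get MI_BATCH_BUFFER_END (padded
 * to an even number of dwords); secondaries instead have their execution
 * mode chosen for later use by anv_cmd_buffer_add_secondary().
 */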
639 void
640 anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
641 {
642 struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
643
644 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
645 anv_batch_emit(&cmd_buffer->batch, GEN7_MI_BATCH_BUFFER_END);
646
647 /* Round batch up to an even number of dwords. */
648 if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
649 anv_batch_emit(&cmd_buffer->batch, GEN7_MI_NOOP);
650
651 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
652 }
653
654 anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
655
656 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
657 /* If this is a secondary command buffer, we need to determine the
658 * mode in which it will be executed with vkCmdExecuteCommands. We
659 * determine this statically here so that this stays in sync with the
660 * actual ExecuteCommands implementation.
661 */
662 if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
663 (anv_cmd_buffer_current_batch_bo(cmd_buffer)->length <
664 ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
665 /* If the secondary has exactly one batch buffer in its list *and*
666 * that batch buffer is less than half of the maximum size, we're
667 * probably better off simply copying it into our batch.
668 */
669 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
670 } else if (!(cmd_buffer->usage_flags &
671 VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
672 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
673
674 /* When we chain, we need to add an MI_BATCH_BUFFER_START command
675 * with its relocation. In order to handle this we'll increment here
676 * so we can unconditionally decrement right before adding the
677 * MI_BATCH_BUFFER_START command.
678 */
679 anv_cmd_buffer_current_batch_bo(cmd_buffer)->relocs.num_relocs++;
680 cmd_buffer->batch.next += GEN8_MI_BATCH_BUFFER_START_length * 4;
681 } else {
682 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
683 }
684 }
685 }
686
687 static inline VkResult
688 anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
689 struct list_head *list)
690 {
691 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
692 struct anv_batch_bo **bbo_ptr = anv_vector_add(&cmd_buffer->seen_bbos);
693 if (bbo_ptr == NULL)
694 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
695
696 *bbo_ptr = bbo;
697 }
698
699 return VK_SUCCESS;
700 }
701
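/* Splice a recorded secondary command buffer into the primary according to
 * the execution mode chosen in anv_cmd_buffer_end_batch_buffer().
 */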
702 void
703 anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
704 struct anv_cmd_buffer *secondary)
705 {
706 switch (secondary->exec_mode) {
707 case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
708 anv_batch_emit_batch(&primary->batch, &secondary->batch);
709 break;
710 case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
711 struct anv_batch_bo *first_bbo =
712 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
713 struct anv_batch_bo *last_bbo =
714 list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
715
716 emit_batch_buffer_start(&primary->batch, &first_bbo->bo, 0);
717
718 struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
719 assert(primary->batch.start == this_bbo->bo.map);
720 uint32_t offset = primary->batch.next - primary->batch.start;
721
722 /* Roll back the previous MI_BATCH_BUFFER_START and its relocation so we
723 * can emit a new command and relocation for the current splice. In
724 * order to handle the initial-use case, we incremented next and
725 * num_relocs in end_batch_buffer() so we can always just subtract
726 * here.
727 */
728 last_bbo->relocs.num_relocs--;
729 secondary->batch.next -= GEN8_MI_BATCH_BUFFER_START_length * 4;
730 emit_batch_buffer_start(&secondary->batch, &this_bbo->bo, offset);
731 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
732 break;
733 }
734 case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
735 struct list_head copy_list;
736 VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
737 secondary->device,
738 &copy_list);
739 if (result != VK_SUCCESS)
740 return; /* FIXME */
741
742 anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
743
744 struct anv_batch_bo *first_bbo =
745 list_first_entry(&copy_list, struct anv_batch_bo, link);
746 struct anv_batch_bo *last_bbo =
747 list_last_entry(&copy_list, struct anv_batch_bo, link);
748
749 cmd_buffer_chain_to_batch_bo(primary, first_bbo);
750
751 list_splicetail(&copy_list, &primary->batch_bos);
752
753 anv_batch_bo_continue(last_bbo, &primary->batch,
754 GEN8_MI_BATCH_BUFFER_START_length * 4);
755
756 anv_cmd_buffer_emit_state_base_address(primary);
757 break;
758 }
759 default:
760 assert(!"Invalid execution mode");
761 }
762
763 anv_reloc_list_append(&primary->surface_relocs, primary->device,
764 &secondary->surface_relocs, 0);
765 }
766
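/* Add a BO to the execbuf2 validation list if it isn't there already. The
 * first time a relocation list is provided for a BO, its relocations are
 * attached and all of their target BOs are added recursively.
 */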
767 static VkResult
768 anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
769 struct anv_bo *bo,
770 struct anv_reloc_list *relocs)
771 {
772 struct drm_i915_gem_exec_object2 *obj = NULL;
773
774 if (bo->index < cmd_buffer->execbuf2.bo_count &&
775 cmd_buffer->execbuf2.bos[bo->index] == bo)
776 obj = &cmd_buffer->execbuf2.objects[bo->index];
777
778 if (obj == NULL) {
779 /* We've never seen this one before. Add it to the list and assign
780 * an id that we can use later.
781 */
782 if (cmd_buffer->execbuf2.bo_count >= cmd_buffer->execbuf2.array_length) {
783 uint32_t new_len = cmd_buffer->execbuf2.objects ?
784 cmd_buffer->execbuf2.array_length * 2 : 64;
785
786 struct drm_i915_gem_exec_object2 *new_objects =
787 anv_device_alloc(cmd_buffer->device, new_len * sizeof(*new_objects),
788 8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
789 if (new_objects == NULL)
790 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
791
792 struct anv_bo **new_bos =
793 anv_device_alloc(cmd_buffer->device, new_len * sizeof(*new_bos),
794 8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
795 if (new_bos == NULL) {
796 anv_device_free(cmd_buffer->device, new_objects);
797 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
798 }
799
800 if (cmd_buffer->execbuf2.objects) {
801 memcpy(new_objects, cmd_buffer->execbuf2.objects,
802 cmd_buffer->execbuf2.bo_count * sizeof(*new_objects));
803 memcpy(new_bos, cmd_buffer->execbuf2.bos,
804 cmd_buffer->execbuf2.bo_count * sizeof(*new_bos));
805 }
806
807 cmd_buffer->execbuf2.objects = new_objects;
808 cmd_buffer->execbuf2.bos = new_bos;
809 cmd_buffer->execbuf2.array_length = new_len;
810 }
811
812 assert(cmd_buffer->execbuf2.bo_count < cmd_buffer->execbuf2.array_length);
813
814 bo->index = cmd_buffer->execbuf2.bo_count++;
815 obj = &cmd_buffer->execbuf2.objects[bo->index];
816 cmd_buffer->execbuf2.bos[bo->index] = bo;
817
818 obj->handle = bo->gem_handle;
819 obj->relocation_count = 0;
820 obj->relocs_ptr = 0;
821 obj->alignment = 0;
822 obj->offset = bo->offset;
823 obj->flags = 0;
824 obj->rsvd1 = 0;
825 obj->rsvd2 = 0;
826 }
827
828 if (relocs != NULL && obj->relocation_count == 0) {
829 /* This is the first time we've ever seen a list of relocations for
830 * this BO. Go ahead and set the relocations and then walk the list
831 * of relocations and add them all.
832 */
833 obj->relocation_count = relocs->num_relocs;
834 obj->relocs_ptr = (uintptr_t) relocs->relocs;
835
836 for (size_t i = 0; i < relocs->num_relocs; i++) {
837 /* A quick sanity check on relocations */
838 assert(relocs->relocs[i].offset < bo->size);
839 anv_cmd_buffer_add_bo(cmd_buffer, relocs->reloc_bos[i], NULL);
840 }
841 }
842
843 return VK_SUCCESS;
844 }
845
846 static void
847 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
848 struct anv_reloc_list *list)
849 {
850 struct anv_bo *bo;
851
852 /* If the kernel supports I915_EXEC_NO_RELOC, it will compare the offset in
853 * struct drm_i915_gem_exec_object2 against the bo's current offset and, if
854 * none of the bos have moved, it will skip relocation processing altogether.
855 * If I915_EXEC_NO_RELOC is not supported, the kernel ignores the incoming
856 * value of offset so we can set it either way. For that to work we need
857 * to make sure all relocs use the same presumed offset.
858 */
859
860 for (size_t i = 0; i < list->num_relocs; i++) {
861 bo = list->reloc_bos[i];
862 if (bo->offset != list->relocs[i].presumed_offset)
863 cmd_buffer->execbuf2.need_reloc = true;
864
865 list->relocs[i].target_handle = bo->index;
866 }
867 }
868
869 static void
870 adjust_relocations_from_block_pool(struct anv_block_pool *pool,
871 struct anv_reloc_list *relocs)
872 {
873 for (size_t i = 0; i < relocs->num_relocs; i++) {
874 /* In general, we don't know how stale the relocated value is. It
875 * may have been used last time or it may not. Since we don't want
876 * to stomp it while the GPU may be accessing it, we haven't updated
877 * it anywhere else in the code. Instead, we just set the presumed
878 * offset to what it is now based on the delta and the data in the
879 * block pool. Then the kernel will update it for us if needed.
880 */
881 assert(relocs->relocs[i].offset < pool->state.end);
882 uint32_t *reloc_data = pool->map + relocs->relocs[i].offset;
883 relocs->relocs[i].presumed_offset = *reloc_data - relocs->relocs[i].delta;
884
885 /* All of the relocations from this block pool to other BOs should
886 * have been emitted relative to the surface block pool center. We
887 * need to add the center offset to make them relative to the
888 * beginning of the actual GEM bo.
889 */
890 relocs->relocs[i].offset += pool->center_bo_offset;
891 }
892 }
893
894 static void
895 adjust_relocations_to_block_pool(struct anv_block_pool *pool,
896 struct anv_bo *from_bo,
897 struct anv_reloc_list *relocs,
898 uint32_t *last_pool_center_bo_offset)
899 {
900 assert(*last_pool_center_bo_offset <= pool->center_bo_offset);
901 uint32_t delta = pool->center_bo_offset - *last_pool_center_bo_offset;
902
903 /* When we initially emit relocations into a block pool, we don't
904 * actually know what the final center_bo_offset will be so we just emit
905 * it as if center_bo_offset == 0. Now that we know what the center
906 * offset is, we need to walk the list of relocations and adjust any
907 * relocations that point to the pool bo with the correct offset.
908 */
909 for (size_t i = 0; i < relocs->num_relocs; i++) {
910 if (relocs->reloc_bos[i] == &pool->bo) {
911 /* Adjust the delta value in the relocation to correctly
912 * correspond to the new delta. Initially, this value may have
913 * been negative (if treated as signed), but we trust in
914 * uint32_t roll-over to fix that for us at this point.
915 */
916 relocs->relocs[i].delta += delta;
917
918 /* Since the delta has changed, we need to update the actual
919 * relocated value with the new presumed value. This function
920 * should only be called on batch buffers, so we know it isn't in
921 * use by the GPU at the moment.
922 */
923 assert(relocs->relocs[i].offset < from_bo->size);
924 uint32_t *reloc_data = from_bo->map + relocs->relocs[i].offset;
925 *reloc_data = relocs->relocs[i].presumed_offset +
926 relocs->relocs[i].delta;
927 }
928 }
929
930 *last_pool_center_bo_offset = pool->center_bo_offset;
931 }
932
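/* Build the execbuf2 validation and relocation lists for submission:
 * adjust the block pool relocations, add every BO we have seen, move the
 * first batch_bo to the end of the list (the kernel executes the last
 * entry), and fill out the drm_i915_gem_execbuffer2 struct.
 */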
933 void
934 anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer)
935 {
936 struct anv_batch *batch = &cmd_buffer->batch;
937 struct anv_block_pool *ss_pool =
938 &cmd_buffer->device->surface_state_block_pool;
939
940 cmd_buffer->execbuf2.bo_count = 0;
941 cmd_buffer->execbuf2.need_reloc = false;
942
943 adjust_relocations_from_block_pool(ss_pool, &cmd_buffer->surface_relocs);
944 anv_cmd_buffer_add_bo(cmd_buffer, &ss_pool->bo, &cmd_buffer->surface_relocs);
945
946 /* First, we walk over all of the bos we've seen and add them and their
947 * relocations to the validate list.
948 */
949 struct anv_batch_bo **bbo;
950 anv_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
951 adjust_relocations_to_block_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
952 &(*bbo)->last_ss_pool_bo_offset);
953
954 anv_cmd_buffer_add_bo(cmd_buffer, &(*bbo)->bo, &(*bbo)->relocs);
955 }
956
957 struct anv_batch_bo *first_batch_bo =
958 list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
959
960 /* The kernel requires that the last entry in the validation list be the
961 * batch buffer to execute. We can simply swap the element
962 * corresponding to the first batch_bo in the chain with the last
963 * element in the list.
964 */
965 if (first_batch_bo->bo.index != cmd_buffer->execbuf2.bo_count - 1) {
966 uint32_t idx = first_batch_bo->bo.index;
967 uint32_t last_idx = cmd_buffer->execbuf2.bo_count - 1;
968
969 struct drm_i915_gem_exec_object2 tmp_obj =
970 cmd_buffer->execbuf2.objects[idx];
971 assert(cmd_buffer->execbuf2.bos[idx] == &first_batch_bo->bo);
972
973 cmd_buffer->execbuf2.objects[idx] = cmd_buffer->execbuf2.objects[last_idx];
974 cmd_buffer->execbuf2.bos[idx] = cmd_buffer->execbuf2.bos[last_idx];
975 cmd_buffer->execbuf2.bos[idx]->index = idx;
976
977 cmd_buffer->execbuf2.objects[last_idx] = tmp_obj;
978 cmd_buffer->execbuf2.bos[last_idx] = &first_batch_bo->bo;
979 first_batch_bo->bo.index = last_idx;
980 }
981
982 /* Now we go through and fixup all of the relocation lists to point to
983 * the correct indices in the object array. We have to do this after we
984 * reorder the list above as some of the indices may have changed.
985 */
986 anv_vector_foreach(bbo, &cmd_buffer->seen_bbos)
987 anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
988
989 anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
990
991 cmd_buffer->execbuf2.execbuf = (struct drm_i915_gem_execbuffer2) {
992 .buffers_ptr = (uintptr_t) cmd_buffer->execbuf2.objects,
993 .buffer_count = cmd_buffer->execbuf2.bo_count,
994 .batch_start_offset = 0,
995 .batch_len = batch->next - batch->start,
996 .cliprects_ptr = 0,
997 .num_cliprects = 0,
998 .DR1 = 0,
999 .DR4 = 0,
1000 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER |
1001 I915_EXEC_CONSTANTS_REL_GENERAL,
1002 .rsvd1 = cmd_buffer->device->context_id,
1003 .rsvd2 = 0,
1004 };
1005
1006 if (!cmd_buffer->execbuf2.need_reloc)
1007 cmd_buffer->execbuf2.execbuf.flags |= I915_EXEC_NO_RELOC;
1008 }