anv/batch_chain: Add a _alloc_binding_table function
[mesa.git] / src / vulkan / anv_batch_chain.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 /** \file anv_batch_chain.c
33 *
34 * This file contains functions related to anv_cmd_buffer as a data
35 * structure. This involves everything required to create and destroy
36 * the actual batch buffers as well as link them together and handle
37 * relocations and surface state. It specifically does *not* contain any
38 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
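 *
 * The entry points here are typically used in roughly this order:
 *
 *    anv_cmd_buffer_init_batch_bo_chain()  - create the initial batch and
 *                                            surface state BOs
 *    ... vkCmd* calls emit into cmd_buffer->batch, chaining to fresh BOs
 *        via the batch's extend_cb as they fill up ...
 *    anv_cmd_buffer_end_batch_buffer()     - cap the batch and choose an
 *                                            execution mode
 *    anv_cmd_buffer_prepare_execbuf()      - build the execbuf2 object and
 *                                            relocation lists for submission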
39 */
40
41 /*-----------------------------------------------------------------------*
42 * Functions related to anv_reloc_list
43 *-----------------------------------------------------------------------*/
44
45 static VkResult
46 anv_reloc_list_init_clone(struct anv_reloc_list *list,
47 struct anv_device *device,
48 const struct anv_reloc_list *other_list)
49 {
50 if (other_list) {
51 list->num_relocs = other_list->num_relocs;
52 list->array_length = other_list->array_length;
53 } else {
54 list->num_relocs = 0;
55 list->array_length = 256;
56 }
57
58 list->relocs =
59 anv_device_alloc(device, list->array_length * sizeof(*list->relocs), 8,
60 VK_SYSTEM_ALLOC_TYPE_INTERNAL);
61
62 if (list->relocs == NULL)
63 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
64
65 list->reloc_bos =
66 anv_device_alloc(device, list->array_length * sizeof(*list->reloc_bos), 8,
67 VK_SYSTEM_ALLOC_TYPE_INTERNAL);
68
69 if (list->reloc_bos == NULL) {
70 anv_device_free(device, list->relocs);
71 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
72 }
73
74 if (other_list) {
75 memcpy(list->relocs, other_list->relocs,
76 list->array_length * sizeof(*list->relocs));
77 memcpy(list->reloc_bos, other_list->reloc_bos,
78 list->array_length * sizeof(*list->reloc_bos));
79 }
80
81 return VK_SUCCESS;
82 }
83
84 VkResult
85 anv_reloc_list_init(struct anv_reloc_list *list, struct anv_device *device)
86 {
87 return anv_reloc_list_init_clone(list, device, NULL);
88 }
89
90 void
91 anv_reloc_list_finish(struct anv_reloc_list *list, struct anv_device *device)
92 {
93 anv_device_free(device, list->relocs);
94 anv_device_free(device, list->reloc_bos);
95 }
96
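/* Ensure the relocation list has room for at least num_additional_relocs
 * more entries, doubling the backing arrays and copying over the existing
 * entries if necessary.
 */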
97 static VkResult
98 anv_reloc_list_grow(struct anv_reloc_list *list, struct anv_device *device,
99 size_t num_additional_relocs)
100 {
101 if (list->num_relocs + num_additional_relocs <= list->array_length)
102 return VK_SUCCESS;
103
104 size_t new_length = list->array_length * 2;
105 while (new_length < list->num_relocs + num_additional_relocs)
106 new_length *= 2;
107
108 struct drm_i915_gem_relocation_entry *new_relocs =
109 anv_device_alloc(device, new_length * sizeof(*list->relocs), 8,
110 VK_SYSTEM_ALLOC_TYPE_INTERNAL);
111 if (new_relocs == NULL)
112 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
113
114 struct anv_bo **new_reloc_bos =
115 anv_device_alloc(device, new_length * sizeof(*list->reloc_bos), 8,
116 VK_SYSTEM_ALLOC_TYPE_INTERNAL);
117 if (new_reloc_bos == NULL) {
118 anv_device_free(device, new_relocs);
119 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
120 }
121
122 memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
123 memcpy(new_reloc_bos, list->reloc_bos,
124 list->num_relocs * sizeof(*list->reloc_bos));
125
126 anv_device_free(device, list->relocs);
127 anv_device_free(device, list->reloc_bos);
128
129 list->array_length = new_length;
130 list->relocs = new_relocs;
131 list->reloc_bos = new_reloc_bos;
132
133 return VK_SUCCESS;
134 }
135
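/* Record a relocation at `offset` bytes into the buffer this list tracks,
 * pointing at target_bo + delta.  Returns the presumed 64-bit address so
 * the caller can write it into the command stream.
 */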
136 uint64_t
137 anv_reloc_list_add(struct anv_reloc_list *list, struct anv_device *device,
138 uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
139 {
140 struct drm_i915_gem_relocation_entry *entry;
141 int index;
142
143 anv_reloc_list_grow(list, device, 1);
144 /* TODO: Handle failure */
145
146 /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
147 index = list->num_relocs++;
148 list->reloc_bos[index] = target_bo;
149 entry = &list->relocs[index];
150 entry->target_handle = target_bo->gem_handle;
151 entry->delta = delta;
152 entry->offset = offset;
153 entry->presumed_offset = target_bo->offset;
154 entry->read_domains = 0;
155 entry->write_domain = 0;
156
157 return target_bo->offset + delta;
158 }
159
160 static void
161 anv_reloc_list_append(struct anv_reloc_list *list, struct anv_device *device,
162 struct anv_reloc_list *other, uint32_t offset)
163 {
164 anv_reloc_list_grow(list, device, other->num_relocs);
165 /* TODO: Handle failure */
166
167 memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
168 other->num_relocs * sizeof(other->relocs[0]));
169 memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
170 other->num_relocs * sizeof(other->reloc_bos[0]));
171
172 for (uint32_t i = 0; i < other->num_relocs; i++)
173 list->relocs[i + list->num_relocs].offset += offset;
174
175 list->num_relocs += other->num_relocs;
176 }
177
178 /*-----------------------------------------------------------------------*
179 * Functions related to anv_batch
180 *-----------------------------------------------------------------------*/
181
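/* Reserve space for num_dwords dwords in the batch, calling extend_cb to
 * chain to a new batch BO if there is not enough room, and return a
 * pointer at which the caller can write them.
 */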
182 void *
183 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
184 {
185 if (batch->next + num_dwords * 4 > batch->end)
186 batch->extend_cb(batch, batch->user_data);
187
188 void *p = batch->next;
189
190 batch->next += num_dwords * 4;
191 assert(batch->next <= batch->end);
192
193 return p;
194 }
195
196 uint64_t
197 anv_batch_emit_reloc(struct anv_batch *batch,
198 void *location, struct anv_bo *bo, uint32_t delta)
199 {
200 return anv_reloc_list_add(batch->relocs, batch->device,
201 location - batch->start, bo, delta);
202 }
203
204 void
205 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
206 {
207 uint32_t size, offset;
208
209 size = other->next - other->start;
210 assert(size % 4 == 0);
211
212 if (batch->next + size > batch->end)
213 batch->extend_cb(batch, batch->user_data);
214
215 assert(batch->next + size <= batch->end);
216
217 VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
218 memcpy(batch->next, other->start, size);
219
220 offset = batch->next - batch->start;
221 anv_reloc_list_append(batch->relocs, batch->device,
222 other->relocs, offset);
223
224 batch->next += size;
225 }
226
227 /*-----------------------------------------------------------------------*
228 * Functions related to anv_batch_bo
229 *-----------------------------------------------------------------------*/
230
231 static VkResult
232 anv_batch_bo_create(struct anv_device *device, struct anv_batch_bo **bbo_out)
233 {
234 VkResult result;
235
236 struct anv_batch_bo *bbo =
237 anv_device_alloc(device, sizeof(*bbo), 8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
238 if (bbo == NULL)
239 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
240
241 result = anv_bo_pool_alloc(&device->batch_bo_pool, &bbo->bo);
242 if (result != VK_SUCCESS)
243 goto fail_alloc;
244
245 result = anv_reloc_list_init(&bbo->relocs, device);
246 if (result != VK_SUCCESS)
247 goto fail_bo_alloc;
248
249 *bbo_out = bbo;
250
251 return VK_SUCCESS;
252
253 fail_bo_alloc:
254 anv_bo_pool_free(&device->batch_bo_pool, &bbo->bo);
255 fail_alloc:
256 anv_device_free(device, bbo);
257
258 return result;
259 }
260
261 static VkResult
262 anv_batch_bo_clone(struct anv_device *device,
263 const struct anv_batch_bo *other_bbo,
264 struct anv_batch_bo **bbo_out)
265 {
266 VkResult result;
267
268 struct anv_batch_bo *bbo =
269 anv_device_alloc(device, sizeof(*bbo), 8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
270 if (bbo == NULL)
271 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
272
273 result = anv_bo_pool_alloc(&device->batch_bo_pool, &bbo->bo);
274 if (result != VK_SUCCESS)
275 goto fail_alloc;
276
277 result = anv_reloc_list_init_clone(&bbo->relocs, device, &other_bbo->relocs);
278 if (result != VK_SUCCESS)
279 goto fail_bo_alloc;
280
281 bbo->length = other_bbo->length;
282 memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);
283
284 *bbo_out = bbo;
285
286 return VK_SUCCESS;
287
288 fail_bo_alloc:
289 anv_bo_pool_free(&device->batch_bo_pool, &bbo->bo);
290 fail_alloc:
291 anv_device_free(device, bbo);
292
293 return result;
294 }
295
296 static void
297 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
298 size_t batch_padding)
299 {
300 batch->next = batch->start = bbo->bo.map;
301 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
302 batch->relocs = &bbo->relocs;
303 bbo->relocs.num_relocs = 0;
304 }
305
306 static void
307 anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
308 size_t batch_padding)
309 {
310 batch->start = bbo->bo.map;
311 batch->next = bbo->bo.map + bbo->length;
312 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
313 batch->relocs = &bbo->relocs;
314 }
315
316 static void
317 anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
318 {
319 assert(batch->start == bbo->bo.map);
320 bbo->length = batch->next - batch->start;
321 VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
322 }
323
324 static void
325 anv_batch_bo_destroy(struct anv_batch_bo *bbo, struct anv_device *device)
326 {
327 anv_reloc_list_finish(&bbo->relocs, device);
328 anv_bo_pool_free(&device->batch_bo_pool, &bbo->bo);
329 anv_device_free(device, bbo);
330 }
331
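/* Deep-copy a chain of batch_bos.  The MI_BATCH_BUFFER_START relocation at
 * the end of each cloned BO is redirected so that the clones chain to each
 * other rather than back into the original list.
 */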
332 static VkResult
333 anv_batch_bo_list_clone(const struct list_head *list, struct anv_device *device,
334 struct list_head *new_list)
335 {
336 VkResult result = VK_SUCCESS;
337
338 list_inithead(new_list);
339
340 struct anv_batch_bo *prev_bbo = NULL;
341 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
342 struct anv_batch_bo *new_bbo;
343 result = anv_batch_bo_clone(device, bbo, &new_bbo);
344 if (result != VK_SUCCESS)
345 break;
346 list_addtail(&new_bbo->link, new_list);
347
348 if (prev_bbo) {
349 /* As we clone this list of batch_bo's, they chain one to the
350 * other using MI_BATCH_BUFFER_START commands. We need to fix up
351 * those relocations as we go. Fortunately, this is pretty easy
352 * as it will always be the last relocation in the list.
353 */
354 uint32_t last_idx = prev_bbo->relocs.num_relocs - 1;
355 assert(prev_bbo->relocs.reloc_bos[last_idx] == &bbo->bo);
356 prev_bbo->relocs.reloc_bos[last_idx] = &new_bbo->bo;
357 }
358
359 prev_bbo = new_bbo;
360 }
361
362 if (result != VK_SUCCESS) {
363 list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
364 anv_batch_bo_destroy(bbo, device);
365 }
366
367 return result;
368 }
369
370 /*-----------------------------------------------------------------------*
371 * Functions related to anv_cmd_buffer
372 *-----------------------------------------------------------------------*/
373
374 static inline struct anv_batch_bo *
375 anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
376 {
377 return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
378 }
379
380 static inline struct anv_batch_bo *
381 anv_cmd_buffer_current_surface_bbo(struct anv_cmd_buffer *cmd_buffer)
382 {
383 return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->surface_bos.prev, link);
384 }
385
386 struct anv_reloc_list *
387 anv_cmd_buffer_current_surface_relocs(struct anv_cmd_buffer *cmd_buffer)
388 {
389 return &anv_cmd_buffer_current_surface_bbo(cmd_buffer)->relocs;
390 }
391
392 struct anv_address
393 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
394 {
395 return (struct anv_address) {
396 .bo = &anv_cmd_buffer_current_surface_bbo(cmd_buffer)->bo,
397 .offset = 0,
398 };
399 }
400
401 static void
402 emit_batch_buffer_start(struct anv_batch *batch, struct anv_bo *bo, uint32_t offset)
403 {
404 /* In gen8+ the address field grew to two dwords to accommodate 48-bit
405 * offsets. The high 16 bits are in the last dword, so we can use the gen8
406 * version in either case, as long as we set the instruction length in the
407 * header accordingly. This means that we always emit three dwords here
408 * and all the padding and adjustment we do in this file works for all
409 * gens.
410 */
411
412 const uint32_t gen7_length =
413 GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
414 const uint32_t gen8_length =
415 GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;
416
417 anv_batch_emit(batch, GEN8_MI_BATCH_BUFFER_START,
418 .DwordLength = batch->device->info.gen < 8 ? gen7_length : gen8_length,
419 ._2ndLevelBatchBuffer = _1stlevelbatch,
420 .AddressSpaceIndicator = ASI_PPGTT,
421 .BatchBufferStartAddress = { bo, offset });
422 }
423
424 static void
425 cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
426 struct anv_batch_bo *bbo)
427 {
428 struct anv_batch *batch = &cmd_buffer->batch;
429 struct anv_batch_bo *current_bbo =
430 anv_cmd_buffer_current_batch_bo(cmd_buffer);
431
432 /* We set the end of the batch a little short so that we're sure we
433 * have room for the chaining command. Since we're about to emit the
434 * chaining command, let's set it back where it should go.
435 */
436 batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
437 assert(batch->end == current_bbo->bo.map + current_bbo->bo.size);
438
439 emit_batch_buffer_start(batch, &bbo->bo, 0);
440
441 anv_batch_bo_finish(current_bbo, batch);
442 }
443
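/* Batch extend callback (installed as cmd_buffer->batch.extend_cb):
 * allocate a fresh batch BO, record it in seen_bbos, emit an
 * MI_BATCH_BUFFER_START chaining the current BO to it, and point the
 * batch at the new BO.
 */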
444 static VkResult
445 anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
446 {
447 struct anv_cmd_buffer *cmd_buffer = _data;
448 struct anv_batch_bo *new_bbo;
449
450 VkResult result = anv_batch_bo_create(cmd_buffer->device, &new_bbo);
451 if (result != VK_SUCCESS)
452 return result;
453
454 struct anv_batch_bo **seen_bbo = anv_vector_add(&cmd_buffer->seen_bbos);
455 if (seen_bbo == NULL) {
456 anv_batch_bo_destroy(new_bbo, cmd_buffer->device);
457 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
458 }
459 *seen_bbo = new_bbo;
460
461 cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
462
463 list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
464
465 anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);
466
467 return VK_SUCCESS;
468 }
469
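/* Allocate `size` bytes of surface state from the current surface state
 * BO.  Returns a zeroed anv_state if the BO is full; since surface_next
 * starts at 1, offset 0 is never a valid allocation and callers can use
 * it to detect the failure.
 */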
470 struct anv_state
471 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer,
472 uint32_t size, uint32_t alignment)
473 {
474 struct anv_bo *surface_bo =
475 &anv_cmd_buffer_current_surface_bbo(cmd_buffer)->bo;
476 struct anv_state state;
477
478 state.offset = align_u32(cmd_buffer->surface_next, alignment);
479 if (state.offset + size > surface_bo->size)
480 return (struct anv_state) { 0 };
481
482 state.map = surface_bo->map + state.offset;
483 state.alloc_size = size;
484 cmd_buffer->surface_next = state.offset + size;
485
486 assert(state.offset + size <= surface_bo->size);
487
488 return state;
489 }
490
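/* Allocate a binding table with the given number of entries.  Each entry
 * is a 32-bit offset into the surface state BO (hence entries * 4 bytes),
 * and the table itself is aligned to 32 bytes.
 */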
491 struct anv_state
492 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
493 uint32_t entries)
494 {
495 return anv_cmd_buffer_alloc_surface_state(cmd_buffer, entries * 4, 32);
496 }
497
498 struct anv_state
499 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
500 uint32_t size, uint32_t alignment)
501 {
502 return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
503 size, alignment);
504 }
505
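/* Retire the current surface state BO and start allocating from a fresh
 * one.  surface_next is reset to 1 so that offset 0 remains invalid in
 * the new BO as well.
 */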
506 VkResult
507 anv_cmd_buffer_new_surface_state_bo(struct anv_cmd_buffer *cmd_buffer)
508 {
509 struct anv_batch_bo *new_bbo, *old_bbo =
510 anv_cmd_buffer_current_surface_bbo(cmd_buffer);
511
512 /* Finish off the old buffer */
513 old_bbo->length = cmd_buffer->surface_next;
514
515 VkResult result = anv_batch_bo_create(cmd_buffer->device, &new_bbo);
516 if (result != VK_SUCCESS)
517 return result;
518
519 struct anv_batch_bo **seen_bbo = anv_vector_add(&cmd_buffer->seen_bbos);
520 if (seen_bbo == NULL) {
521 anv_batch_bo_destroy(new_bbo, cmd_buffer->device);
522 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
523 }
524 *seen_bbo = new_bbo;
525
526 cmd_buffer->surface_next = 1;
527
528 list_addtail(&new_bbo->link, &cmd_buffer->surface_bos);
529
530 return VK_SUCCESS;
531 }
532
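/* Set up the batch and surface state BO lists and the seen_bbos vector,
 * and point cmd_buffer->batch at the first batch BO, leaving room at the
 * end for a chaining MI_BATCH_BUFFER_START.
 */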
533 VkResult
534 anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
535 {
536 struct anv_batch_bo *batch_bo, *surface_bbo;
537 struct anv_device *device = cmd_buffer->device;
538 VkResult result;
539
540 list_inithead(&cmd_buffer->batch_bos);
541 list_inithead(&cmd_buffer->surface_bos);
542
543 result = anv_batch_bo_create(device, &batch_bo);
544 if (result != VK_SUCCESS)
545 return result;
546
547 list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
548
549 cmd_buffer->batch.device = device;
550 cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
551 cmd_buffer->batch.user_data = cmd_buffer;
552
553 anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
554 GEN8_MI_BATCH_BUFFER_START_length * 4);
555
556 result = anv_batch_bo_create(device, &surface_bbo);
557 if (result != VK_SUCCESS)
558 goto fail_batch_bo;
559
560 list_addtail(&surface_bbo->link, &cmd_buffer->surface_bos);
561
562 int success = anv_vector_init(&cmd_buffer->seen_bbos,
563 sizeof(struct anv_batch_bo *),
564 8 * sizeof(struct anv_batch_bo *));
565 if (!success)
566 goto fail_surface_bo;
567
568 *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
569 *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) = surface_bbo;
570
571 /* Start surface_next at 1 so surface offset 0 is invalid. */
572 cmd_buffer->surface_next = 1;
573
574 cmd_buffer->execbuf2.objects = NULL;
575 cmd_buffer->execbuf2.bos = NULL;
576 cmd_buffer->execbuf2.array_length = 0;
577
578 return VK_SUCCESS;
579
580 fail_surface_bo:
581 anv_batch_bo_destroy(surface_bbo, device);
582 fail_batch_bo:
583 anv_batch_bo_destroy(batch_bo, device);
584
585 return result;
586 }
587
588 void
589 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
590 {
591 struct anv_device *device = cmd_buffer->device;
592
593 anv_vector_finish(&cmd_buffer->seen_bbos);
594
595 /* Destroy all of the batch buffers */
596 list_for_each_entry_safe(struct anv_batch_bo, bbo,
597 &cmd_buffer->batch_bos, link) {
598 anv_batch_bo_destroy(bbo, device);
599 }
600
601 /* Destroy all of the surface state buffers */
602 list_for_each_entry_safe(struct anv_batch_bo, bbo,
603 &cmd_buffer->surface_bos, link) {
604 anv_batch_bo_destroy(bbo, device);
605 }
606
607 anv_device_free(device, cmd_buffer->execbuf2.objects);
608 anv_device_free(device, cmd_buffer->execbuf2.bos);
609 }
610
611 void
612 anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
613 {
614 struct anv_device *device = cmd_buffer->device;
615
616 /* Delete all but the first batch bo */
617 assert(!list_empty(&cmd_buffer->batch_bos));
618 while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
619 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
620 list_del(&bbo->link);
621 anv_batch_bo_destroy(bbo, device);
622 }
623 assert(!list_empty(&cmd_buffer->batch_bos));
624
625 anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
626 &cmd_buffer->batch,
627 GEN8_MI_BATCH_BUFFER_START_length * 4);
628
629 /* Delete all but the first surface state bo */
630 assert(!list_empty(&cmd_buffer->surface_bos));
631 while (cmd_buffer->surface_bos.next != cmd_buffer->surface_bos.prev) {
632 struct anv_batch_bo *bbo = anv_cmd_buffer_current_surface_bbo(cmd_buffer);
633 list_del(&bbo->link);
634 anv_batch_bo_destroy(bbo, device);
635 }
636 assert(!list_empty(&cmd_buffer->surface_bos));
637
638 anv_cmd_buffer_current_surface_bbo(cmd_buffer)->relocs.num_relocs = 0;
639
640 cmd_buffer->surface_next = 1;
641
642 /* Reset the list of seen buffers */
643 cmd_buffer->seen_bbos.head = 0;
644 cmd_buffer->seen_bbos.tail = 0;
645
646 *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) =
647 anv_cmd_buffer_current_batch_bo(cmd_buffer);
648 *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) =
649 anv_cmd_buffer_current_surface_bbo(cmd_buffer);
650 }
651
652 void
653 anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
654 {
655 struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
656 struct anv_batch_bo *surface_bbo =
657 anv_cmd_buffer_current_surface_bbo(cmd_buffer);
658
659 if (cmd_buffer->level == VK_CMD_BUFFER_LEVEL_PRIMARY) {
660 anv_batch_emit(&cmd_buffer->batch, GEN7_MI_BATCH_BUFFER_END);
661
662 /* Round batch up to an even number of dwords. */
663 if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
664 anv_batch_emit(&cmd_buffer->batch, GEN7_MI_NOOP);
665
666 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
667 }
668
669 anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
670
671 surface_bbo->length = cmd_buffer->surface_next;
672
673 if (cmd_buffer->level == VK_CMD_BUFFER_LEVEL_SECONDARY) {
674 /* If this is a secondary command buffer, we need to determine the
675 * mode in which it will be executed with vkCmdExecuteCommands. We
676 * determine this statically here so that this stays in sync with the
677 * actual ExecuteCommands implementation.
678 */
679 if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
680 (anv_cmd_buffer_current_batch_bo(cmd_buffer)->length <
681 ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
682 /* If the secondary has exactly one batch buffer in its list *and*
683 * that batch buffer is less than half of the maximum size, we're
684 * probably better off simply copying it into our batch.
685 */
686 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
687 } else if (cmd_buffer->opt_flags &
688 VK_CMD_BUFFER_OPTIMIZE_NO_SIMULTANEOUS_USE_BIT) {
689 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
690
691 /* When we chain, we need to add an MI_BATCH_BUFFER_START command
692 * with its relocation. In order to handle this we'll increment here
693 * so we can unconditionally decrement right before adding the
694 * MI_BATCH_BUFFER_START command.
695 */
696 anv_cmd_buffer_current_batch_bo(cmd_buffer)->relocs.num_relocs++;
697 cmd_buffer->batch.next += GEN8_MI_BATCH_BUFFER_START_length * 4;
698 } else {
699 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
700 }
701 }
702 }
703
704 static inline VkResult
705 anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
706 struct list_head *list)
707 {
708 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
709 struct anv_batch_bo **bbo_ptr = anv_vector_add(&cmd_buffer->seen_bbos);
710 if (bbo_ptr == NULL)
711 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
712
713 *bbo_ptr = bbo;
714 }
715
716 return VK_SUCCESS;
717 }
718
719 void
720 anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
721 struct anv_cmd_buffer *secondary)
722 {
723 switch (secondary->exec_mode) {
724 case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
725 anv_batch_emit_batch(&primary->batch, &secondary->batch);
726 break;
727 case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
728 struct anv_batch_bo *first_bbo =
729 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
730 struct anv_batch_bo *last_bbo =
731 list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
732
733 emit_batch_buffer_start(&primary->batch, &first_bbo->bo, 0);
734
735 struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
736 assert(primary->batch.start == this_bbo->bo.map);
737 uint32_t offset = primary->batch.next - primary->batch.start;
738
739 /* Roll back the previous MI_BATCH_BUFFER_START and its relocation so we
740 * can emit a new command and relocation for the current splice. In
741 * order to handle the initial-use case, we incremented next and
742 * num_relocs in end_batch_buffer() so we can always just subtract
743 * here.
744 */
745 last_bbo->relocs.num_relocs--;
746 secondary->batch.next -= GEN8_MI_BATCH_BUFFER_START_length * 4;
747 emit_batch_buffer_start(&secondary->batch, &this_bbo->bo, offset);
748 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
749 break;
750 }
751 case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
752 struct list_head copy_list;
753 VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
754 secondary->device,
755 &copy_list);
756 if (result != VK_SUCCESS)
757 return; /* FIXME */
758
759 anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
760
761 struct anv_batch_bo *first_bbo =
762 list_first_entry(&copy_list, struct anv_batch_bo, link);
763 struct anv_batch_bo *last_bbo =
764 list_last_entry(&copy_list, struct anv_batch_bo, link);
765
766 cmd_buffer_chain_to_batch_bo(primary, first_bbo);
767
768 list_splicetail(&copy_list, &primary->batch_bos);
769
770 anv_batch_bo_continue(last_bbo, &primary->batch,
771 GEN8_MI_BATCH_BUFFER_START_length * 4);
772
773 anv_cmd_buffer_emit_state_base_address(primary);
774 break;
775 }
776 default:
777 assert(!"Invalid execution mode");
778 }
779
780 /* Mark the surface buffer from the secondary as seen */
781 anv_cmd_buffer_add_seen_bbos(primary, &secondary->surface_bos);
782 }
783
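/* Add a BO to the execbuf2 validation list, growing the objects/bos arrays
 * as needed.  If a relocation list is given and this is the first time we
 * attach relocations to this BO, the relocations are attached and their
 * target BOs are added recursively.  bo->index caches the BO's position in
 * the list.
 */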
784 static VkResult
785 anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
786 struct anv_bo *bo,
787 struct anv_reloc_list *relocs)
788 {
789 struct drm_i915_gem_exec_object2 *obj = NULL;
790
791 if (bo->index < cmd_buffer->execbuf2.bo_count &&
792 cmd_buffer->execbuf2.bos[bo->index] == bo)
793 obj = &cmd_buffer->execbuf2.objects[bo->index];
794
795 if (obj == NULL) {
796 /* We've never seen this one before. Add it to the list and assign
797 * an id that we can use later.
798 */
799 if (cmd_buffer->execbuf2.bo_count >= cmd_buffer->execbuf2.array_length) {
800 uint32_t new_len = cmd_buffer->execbuf2.objects ?
801 cmd_buffer->execbuf2.array_length * 2 : 64;
802
803 struct drm_i915_gem_exec_object2 *new_objects =
804 anv_device_alloc(cmd_buffer->device, new_len * sizeof(*new_objects),
805 8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
806 if (new_objects == NULL)
807 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
808
809 struct anv_bo **new_bos =
810 anv_device_alloc(cmd_buffer->device, new_len * sizeof(*new_bos),
811 8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
812 if (new_bos == NULL) {
813 anv_device_free(cmd_buffer->device, new_objects);
814 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
815 }
816
817 if (cmd_buffer->execbuf2.objects) {
818 memcpy(new_objects, cmd_buffer->execbuf2.objects,
819 cmd_buffer->execbuf2.bo_count * sizeof(*new_objects));
820 memcpy(new_bos, cmd_buffer->execbuf2.bos,
821 cmd_buffer->execbuf2.bo_count * sizeof(*new_bos));
822 }
823
824 cmd_buffer->execbuf2.objects = new_objects;
825 cmd_buffer->execbuf2.bos = new_bos;
826 cmd_buffer->execbuf2.array_length = new_len;
827 }
828
829 assert(cmd_buffer->execbuf2.bo_count < cmd_buffer->execbuf2.array_length);
830
831 bo->index = cmd_buffer->execbuf2.bo_count++;
832 obj = &cmd_buffer->execbuf2.objects[bo->index];
833 cmd_buffer->execbuf2.bos[bo->index] = bo;
834
835 obj->handle = bo->gem_handle;
836 obj->relocation_count = 0;
837 obj->relocs_ptr = 0;
838 obj->alignment = 0;
839 obj->offset = bo->offset;
840 obj->flags = 0;
841 obj->rsvd1 = 0;
842 obj->rsvd2 = 0;
843 }
844
845 if (relocs != NULL && obj->relocation_count == 0) {
846 /* This is the first time we've ever seen a list of relocations for
847 * this BO. Go ahead and set the relocations and then walk the list
848 * of relocations and add them all.
849 */
850 obj->relocation_count = relocs->num_relocs;
851 obj->relocs_ptr = (uintptr_t) relocs->relocs;
852
853 for (size_t i = 0; i < relocs->num_relocs; i++)
854 anv_cmd_buffer_add_bo(cmd_buffer, relocs->reloc_bos[i], NULL);
855 }
856
857 return VK_SUCCESS;
858 }
859
860 static void
861 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
862 struct anv_reloc_list *list)
863 {
864 struct anv_bo *bo;
865
866 /* If the kernel supports I915_EXEC_NO_RELOC, it will compare offset in
867 * struct drm_i915_gem_exec_object2 against the bo's current offset and,
868 * if none of the bos have moved, it will skip relocation processing altogether.
869 * If I915_EXEC_NO_RELOC is not supported, the kernel ignores the incoming
870 * value of offset so we can set it either way. For that to work we need
871 * to make sure all relocs use the same presumed offset.
872 */
873
874 for (size_t i = 0; i < list->num_relocs; i++) {
875 bo = list->reloc_bos[i];
876 if (bo->offset != list->relocs[i].presumed_offset)
877 cmd_buffer->execbuf2.need_reloc = true;
878
879 list->relocs[i].target_handle = bo->index;
880 }
881 }
882
883 void
884 anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer)
885 {
886 struct anv_batch *batch = &cmd_buffer->batch;
887
888 cmd_buffer->execbuf2.bo_count = 0;
889 cmd_buffer->execbuf2.need_reloc = false;
890
891 /* First, we walk over all of the bos we've seen and add them and their
892 * relocations to the validate list.
893 */
894 struct anv_batch_bo **bbo;
895 anv_vector_foreach(bbo, &cmd_buffer->seen_bbos)
896 anv_cmd_buffer_add_bo(cmd_buffer, &(*bbo)->bo, &(*bbo)->relocs);
897
898 struct anv_batch_bo *first_batch_bo =
899 list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
900
901 /* The kernel requires that the last entry in the validation list be the
902 * batch buffer to execute. We can simply swap the element
903 * corresponding to the first batch_bo in the chain with the last
904 * element in the list.
905 */
906 if (first_batch_bo->bo.index != cmd_buffer->execbuf2.bo_count - 1) {
907 uint32_t idx = first_batch_bo->bo.index;
908 uint32_t last_idx = cmd_buffer->execbuf2.bo_count - 1;
909
910 struct drm_i915_gem_exec_object2 tmp_obj =
911 cmd_buffer->execbuf2.objects[idx];
912 assert(cmd_buffer->execbuf2.bos[idx] == &first_batch_bo->bo);
913
914 cmd_buffer->execbuf2.objects[idx] = cmd_buffer->execbuf2.objects[last_idx];
915 cmd_buffer->execbuf2.bos[idx] = cmd_buffer->execbuf2.bos[last_idx];
916 cmd_buffer->execbuf2.bos[idx]->index = idx;
917
918 cmd_buffer->execbuf2.objects[last_idx] = tmp_obj;
919 cmd_buffer->execbuf2.bos[last_idx] = &first_batch_bo->bo;
920 first_batch_bo->bo.index = last_idx;
921 }
922
923 /* Now we go through and fixup all of the relocation lists to point to
924 * the correct indices in the object array. We have to do this after we
925 * reorder the list above as some of the indices may have changed.
926 */
927 anv_vector_foreach(bbo, &cmd_buffer->seen_bbos)
928 anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
929
930 cmd_buffer->execbuf2.execbuf = (struct drm_i915_gem_execbuffer2) {
931 .buffers_ptr = (uintptr_t) cmd_buffer->execbuf2.objects,
932 .buffer_count = cmd_buffer->execbuf2.bo_count,
933 .batch_start_offset = 0,
934 .batch_len = batch->next - batch->start,
935 .cliprects_ptr = 0,
936 .num_cliprects = 0,
937 .DR1 = 0,
938 .DR4 = 0,
939 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER |
940 I915_EXEC_CONSTANTS_REL_GENERAL,
941 .rsvd1 = cmd_buffer->device->context_id,
942 .rsvd2 = 0,
943 };
944
945 if (!cmd_buffer->execbuf2.need_reloc)
946 cmd_buffer->execbuf2.execbuf.flags |= I915_EXEC_NO_RELOC;
947 }