vk/cmd_buffer: Rename emit_batch_buffer_end to end_batch_buffer
src/vulkan/anv_cmd_buffer.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"
/** \file anv_cmd_buffer.c
 *
 * This file contains functions related to anv_cmd_buffer as a data
 * structure.  This involves everything required to create and destroy
 * the actual batch buffers as well as link them together and handle
 * relocations and surface state.  It specifically does *not* contain any
 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
 */

/*-----------------------------------------------------------------------*
 * Functions related to anv_reloc_list
 *-----------------------------------------------------------------------*/

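/* Initialize a relocation list with an initial capacity of 256 entries.
 * The relocs and reloc_bos arrays are allocated in lockstep so that
 * relocs[i] always refers to reloc_bos[i].
 */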
VkResult
anv_reloc_list_init(struct anv_reloc_list *list, struct anv_device *device)
{
   list->num_relocs = 0;
   list->array_length = 256;
   list->relocs =
      anv_device_alloc(device, list->array_length * sizeof(*list->relocs), 8,
                       VK_SYSTEM_ALLOC_TYPE_INTERNAL);

   if (list->relocs == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   list->reloc_bos =
      anv_device_alloc(device, list->array_length * sizeof(*list->reloc_bos), 8,
                       VK_SYSTEM_ALLOC_TYPE_INTERNAL);

   if (list->reloc_bos == NULL) {
      anv_device_free(device, list->relocs);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   return VK_SUCCESS;
}

void
anv_reloc_list_finish(struct anv_reloc_list *list, struct anv_device *device)
{
   anv_device_free(device, list->relocs);
   anv_device_free(device, list->reloc_bos);
}

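/* Grow the relocation list so it can hold at least num_additional_relocs
 * more entries, doubling the capacity until it fits and preserving the
 * existing entries.
 */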
static VkResult
anv_reloc_list_grow(struct anv_reloc_list *list, struct anv_device *device,
                    size_t num_additional_relocs)
{
   if (list->num_relocs + num_additional_relocs <= list->array_length)
      return VK_SUCCESS;

   size_t new_length = list->array_length * 2;
   while (new_length < list->num_relocs + num_additional_relocs)
      new_length *= 2;

   struct drm_i915_gem_relocation_entry *new_relocs =
      anv_device_alloc(device, new_length * sizeof(*list->relocs), 8,
                       VK_SYSTEM_ALLOC_TYPE_INTERNAL);
   if (new_relocs == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   struct anv_bo **new_reloc_bos =
      anv_device_alloc(device, new_length * sizeof(*list->reloc_bos), 8,
                       VK_SYSTEM_ALLOC_TYPE_INTERNAL);
   if (new_reloc_bos == NULL) {
      anv_device_free(device, new_relocs);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }

   memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
   memcpy(new_reloc_bos, list->reloc_bos,
          list->num_relocs * sizeof(*list->reloc_bos));

   anv_device_free(device, list->relocs);
   anv_device_free(device, list->reloc_bos);

   list->relocs = new_relocs;
   list->reloc_bos = new_reloc_bos;

   return VK_SUCCESS;
}

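/* Add a single relocation for the dword at the given batch offset,
 * pointing at target_bo + delta.  Returns the presumed 64-bit address
 * (target_bo->offset + delta) for the caller to write into the batch.
 */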
uint64_t
anv_reloc_list_add(struct anv_reloc_list *list, struct anv_device *device,
                   uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
{
   struct drm_i915_gem_relocation_entry *entry;
   int index;

   anv_reloc_list_grow(list, device, 1);
   /* TODO: Handle failure */

   /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
   index = list->num_relocs++;
   list->reloc_bos[index] = target_bo;
   entry = &list->relocs[index];
   entry->target_handle = target_bo->gem_handle;
   entry->delta = delta;
   entry->offset = offset;
   entry->presumed_offset = target_bo->offset;
   entry->read_domains = 0;
   entry->write_domain = 0;

   return target_bo->offset + delta;
}

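/* Append every relocation in `other` to `list`, biasing each copied
 * entry's batch offset by `offset` (the location at which the other
 * batch's contents were copied into this one).
 */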
static void
anv_reloc_list_append(struct anv_reloc_list *list, struct anv_device *device,
                      struct anv_reloc_list *other, uint32_t offset)
{
   anv_reloc_list_grow(list, device, other->num_relocs);
   /* TODO: Handle failure */

   memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
          other->num_relocs * sizeof(other->relocs[0]));
   memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
          other->num_relocs * sizeof(other->reloc_bos[0]));

   for (uint32_t i = 0; i < other->num_relocs; i++)
      list->relocs[i + list->num_relocs].offset += offset;

   list->num_relocs += other->num_relocs;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_batch
 *-----------------------------------------------------------------------*/

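/* Reserve num_dwords worth of space in the batch, growing it through
 * extend_cb if needed, and return a pointer for the caller to write the
 * dwords to.
 */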
void *
anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
{
   if (batch->next + num_dwords * 4 > batch->end)
      batch->extend_cb(batch, batch->user_data);

   void *p = batch->next;

   batch->next += num_dwords * 4;
   assert(batch->next <= batch->end);

   return p;
}

uint64_t
anv_batch_emit_reloc(struct anv_batch *batch,
                     void *location, struct anv_bo *bo, uint32_t delta)
{
   return anv_reloc_list_add(batch->relocs, batch->device,
                             location - batch->start, bo, delta);
}

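/* Copy the contents of one batch into another, appending the source
 * batch's relocations adjusted to their new location.  This is the
 * mechanism behind vkCmdExecuteCommands.
 */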
void
anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
{
   uint32_t size, offset;

   size = other->next - other->start;
   assert(size % 4 == 0);

   if (batch->next + size > batch->end)
      batch->extend_cb(batch, batch->user_data);

   assert(batch->next + size <= batch->end);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
   memcpy(batch->next, other->start, size);

   offset = batch->next - batch->start;
   anv_reloc_list_append(batch->relocs, batch->device,
                         other->relocs, offset);

   batch->next += size;
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_batch_bo
 *-----------------------------------------------------------------------*/

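/* Allocate a new batch_bo: the struct from the device allocator, the
 * backing bo from the device's batch_bo_pool, and an empty reloc list.
 */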
static VkResult
anv_batch_bo_create(struct anv_device *device, struct anv_batch_bo **bbo_out)
{
   VkResult result;

   struct anv_batch_bo *bbo =
      anv_device_alloc(device, sizeof(*bbo), 8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
   if (bbo == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   result = anv_bo_pool_alloc(&device->batch_bo_pool, &bbo->bo);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   result = anv_reloc_list_init(&bbo->relocs, device);
   if (result != VK_SUCCESS)
      goto fail_bo_alloc;

   *bbo_out = bbo;

   return VK_SUCCESS;

 fail_bo_alloc:
   anv_bo_pool_free(&device->batch_bo_pool, &bbo->bo);
 fail_alloc:
   anv_device_free(device, bbo);

   return result;
}

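/* Point a batch at a batch_bo's mapping.  batch_padding bytes are held
 * back from the end so there is always room left to emit the chaining
 * MI_BATCH_BUFFER_START when the buffer fills up.
 */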
static void
anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
                   size_t batch_padding)
{
   batch->next = batch->start = bbo->bo.map;
   batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
   batch->relocs = &bbo->relocs;
   bbo->relocs.num_relocs = 0;
}

static void
anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
{
   assert(batch->start == bbo->bo.map);
   bbo->length = batch->next - batch->start;
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
}

static void
anv_batch_bo_destroy(struct anv_batch_bo *bbo, struct anv_device *device)
{
   anv_reloc_list_finish(&bbo->relocs, device);
   anv_bo_pool_free(&device->batch_bo_pool, &bbo->bo);
   anv_device_free(device, bbo);
}

/*-----------------------------------------------------------------------*
 * Functions related to anv_cmd_buffer
 *-----------------------------------------------------------------------*/

static inline struct anv_batch_bo *
anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
{
   return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
}

static inline struct anv_batch_bo *
anv_cmd_buffer_current_surface_bbo(struct anv_cmd_buffer *cmd_buffer)
{
   return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->surface_bos.prev, link);
}

struct anv_bo *
anv_cmd_buffer_current_surface_bo(struct anv_cmd_buffer *cmd_buffer)
{
   return &anv_cmd_buffer_current_surface_bbo(cmd_buffer)->bo;
}

struct anv_reloc_list *
anv_cmd_buffer_current_surface_relocs(struct anv_cmd_buffer *cmd_buffer)
{
   return &anv_cmd_buffer_current_surface_bbo(cmd_buffer)->relocs;
}

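/* Batch extend_cb: called when the current batch_bo runs out of space.
 * Creates a fresh batch_bo, emits an MI_BATCH_BUFFER_START chaining the
 * old buffer to the new one, and restarts the batch in the new buffer.
 */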
static VkResult
anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
{
   struct anv_cmd_buffer *cmd_buffer = _data;
   struct anv_batch_bo *new_bbo, *old_bbo =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);

   VkResult result = anv_batch_bo_create(cmd_buffer->device, &new_bbo);
   if (result != VK_SUCCESS)
      return result;

   struct anv_batch_bo **seen_bbo = anv_vector_add(&cmd_buffer->seen_bbos);
   if (seen_bbo == NULL) {
      anv_batch_bo_destroy(new_bbo, cmd_buffer->device);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   *seen_bbo = new_bbo;

   /* We set the end of the batch a little short so we can be sure we have
    * room for the chaining command.  Since we're about to emit the
    * chaining command, let's set it back where it should go.
    */
   batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
   assert(batch->end == old_bbo->bo.map + old_bbo->bo.size);

   anv_batch_emit(batch, GEN8_MI_BATCH_BUFFER_START,
                  GEN8_MI_BATCH_BUFFER_START_header,
                  ._2ndLevelBatchBuffer = _1stlevelbatch,
                  .AddressSpaceIndicator = ASI_PPGTT,
                  .BatchBufferStartAddress = { &new_bbo->bo, 0 },
   );

   anv_batch_bo_finish(old_bbo, batch);

   list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);

   anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);

   return VK_SUCCESS;
}

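/* Allocate surface state space out of the current surface state bo.
 * Returns a zeroed anv_state when the bo is full; since surface_next
 * starts at 1, offset 0 is never valid and callers can use it to detect
 * that a new surface state bo is needed.
 */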
struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment)
{
   struct anv_bo *surface_bo =
      anv_cmd_buffer_current_surface_bo(cmd_buffer);
   struct anv_state state;

   state.offset = align_u32(cmd_buffer->surface_next, alignment);
   if (state.offset + size > surface_bo->size)
      return (struct anv_state) { 0 };

   state.map = surface_bo->map + state.offset;
   state.alloc_size = size;
   cmd_buffer->surface_next = state.offset + size;

   assert(state.offset + size <= surface_bo->size);

   return state;
}

struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment)
{
   return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
                                 size, alignment);
}

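/* Retire the current surface state bo and make a freshly allocated one
 * current.  The old bo's length is finalized first, and surface_next
 * restarts at 1 so offset 0 stays invalid in the new bo.
 */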
VkResult
anv_cmd_buffer_new_surface_state_bo(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *new_bbo, *old_bbo =
      anv_cmd_buffer_current_surface_bbo(cmd_buffer);

   /* Finish off the old buffer */
   old_bbo->length = cmd_buffer->surface_next;

   VkResult result = anv_batch_bo_create(cmd_buffer->device, &new_bbo);
   if (result != VK_SUCCESS)
      return result;

   struct anv_batch_bo **seen_bbo = anv_vector_add(&cmd_buffer->seen_bbos);
   if (seen_bbo == NULL) {
      anv_batch_bo_destroy(new_bbo, cmd_buffer->device);
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   *seen_bbo = new_bbo;

   cmd_buffer->surface_next = 1;

   list_addtail(&new_bbo->link, &cmd_buffer->surface_bos);

   return VK_SUCCESS;
}

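/* One-time setup for a new command buffer: create the first batch bo and
 * surface state bo, point the batch at the former, and initialize the
 * vector of seen batch_bos and the (empty) execbuf2 arrays.
 */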
VkResult
anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo, *surface_bbo;
   struct anv_device *device = cmd_buffer->device;
   VkResult result;

   list_inithead(&cmd_buffer->batch_bos);
   list_inithead(&cmd_buffer->surface_bos);

   result = anv_batch_bo_create(device, &batch_bo);
   if (result != VK_SUCCESS)
      return result;

   list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);

   cmd_buffer->batch.device = device;
   cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
   cmd_buffer->batch.user_data = cmd_buffer;

   anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);

   result = anv_batch_bo_create(device, &surface_bbo);
   if (result != VK_SUCCESS)
      goto fail_batch_bo;

   list_addtail(&surface_bbo->link, &cmd_buffer->surface_bos);

   int success = anv_vector_init(&cmd_buffer->seen_bbos,
                                 sizeof(struct anv_bo *),
                                 8 * sizeof(struct anv_bo *));
   if (!success) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail_surface_bo;
   }

   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) = surface_bbo;

   /* Start surface_next at 1 so surface offset 0 is invalid. */
   cmd_buffer->surface_next = 1;

   cmd_buffer->execbuf2.objects = NULL;
   cmd_buffer->execbuf2.bos = NULL;
   cmd_buffer->execbuf2.array_length = 0;

   return VK_SUCCESS;

 fail_surface_bo:
   anv_batch_bo_destroy(surface_bbo, device);
 fail_batch_bo:
   anv_batch_bo_destroy(batch_bo, device);

   return result;
}

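/* Tear down everything the batch bo chain owns: the seen-bbo vector,
 * every batch and surface state bo, and the execbuf2 arrays.
 */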
void
anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;

   anv_vector_finish(&cmd_buffer->seen_bbos);

   /* Destroy all of the batch buffers */
   list_for_each_entry_safe(struct anv_batch_bo, bbo,
                            &cmd_buffer->batch_bos, link) {
      anv_batch_bo_destroy(bbo, device);
   }

   /* Destroy all of the surface state buffers */
   list_for_each_entry_safe(struct anv_batch_bo, bbo,
                            &cmd_buffer->surface_bos, link) {
      anv_batch_bo_destroy(bbo, device);
   }

   anv_device_free(device, cmd_buffer->execbuf2.objects);
   anv_device_free(device, cmd_buffer->execbuf2.bos);
}

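/* Return the command buffer to its freshly initialized state, keeping the
 * first batch bo and surface state bo so re-recording does not have to
 * reallocate them.
 */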
void
anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;

   /* Delete all but the first batch bo */
   assert(!list_empty(&cmd_buffer->batch_bos));
   while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
      list_del(&bbo->link);
      anv_batch_bo_destroy(bbo, device);
   }
   assert(!list_empty(&cmd_buffer->batch_bos));

   anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
                      &cmd_buffer->batch,
                      GEN8_MI_BATCH_BUFFER_START_length * 4);

   /* Delete all but the first surface state bo */
   assert(!list_empty(&cmd_buffer->surface_bos));
   while (cmd_buffer->surface_bos.next != cmd_buffer->surface_bos.prev) {
      struct anv_batch_bo *bbo = anv_cmd_buffer_current_surface_bbo(cmd_buffer);
      list_del(&bbo->link);
      anv_batch_bo_destroy(bbo, device);
   }
   assert(!list_empty(&cmd_buffer->surface_bos));

   anv_cmd_buffer_current_surface_bbo(cmd_buffer)->relocs.num_relocs = 0;

   cmd_buffer->surface_next = 1;

   /* Reset the list of seen buffers */
   cmd_buffer->seen_bbos.head = 0;
   cmd_buffer->seen_bbos.tail = 0;

   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) =
      anv_cmd_buffer_current_batch_bo(cmd_buffer);
   *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) =
      anv_cmd_buffer_current_surface_bbo(cmd_buffer);
}

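/* Finish recording: emit MI_BATCH_BUFFER_END, pad the batch to an even
 * number of dwords with MI_NOOP, and record the final lengths of the
 * current batch and surface state bos.
 */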
void
anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
   struct anv_batch_bo *surface_bbo =
      anv_cmd_buffer_current_surface_bbo(cmd_buffer);

   anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_END);

   /* Round batch up to an even number of dwords. */
   if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
      anv_batch_emit(&cmd_buffer->batch, GEN8_MI_NOOP);

   anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);

   surface_bbo->length = cmd_buffer->surface_next;
}

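/* Add a bo to the execbuf2 validation list if it is not already present,
 * growing the object and bo arrays as needed.  When a reloc list is given,
 * it is attached to the object and each relocation target is added
 * recursively.
 */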
static VkResult
anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
                      struct anv_bo *bo,
                      struct anv_reloc_list *relocs)
{
   struct drm_i915_gem_exec_object2 *obj = NULL;

   if (bo->index < cmd_buffer->execbuf2.bo_count &&
       cmd_buffer->execbuf2.bos[bo->index] == bo)
      obj = &cmd_buffer->execbuf2.objects[bo->index];

   if (obj == NULL) {
      /* We've never seen this one before.  Add it to the list and assign
       * an id that we can use later.
       */
      if (cmd_buffer->execbuf2.bo_count >= cmd_buffer->execbuf2.array_length) {
         uint32_t new_len = cmd_buffer->execbuf2.objects ?
                            cmd_buffer->execbuf2.array_length * 2 : 64;

         struct drm_i915_gem_exec_object2 *new_objects =
            anv_device_alloc(cmd_buffer->device, new_len * sizeof(*new_objects),
                             8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
         if (new_objects == NULL)
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

         struct anv_bo **new_bos =
            anv_device_alloc(cmd_buffer->device, new_len * sizeof(*new_bos),
                             8, VK_SYSTEM_ALLOC_TYPE_INTERNAL);
         if (new_bos == NULL) {
            anv_device_free(cmd_buffer->device, new_objects);
            return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
         }

         if (cmd_buffer->execbuf2.objects) {
            memcpy(new_objects, cmd_buffer->execbuf2.objects,
                   cmd_buffer->execbuf2.bo_count * sizeof(*new_objects));
            memcpy(new_bos, cmd_buffer->execbuf2.bos,
                   cmd_buffer->execbuf2.bo_count * sizeof(*new_bos));
         }

         cmd_buffer->execbuf2.objects = new_objects;
         cmd_buffer->execbuf2.bos = new_bos;
         cmd_buffer->execbuf2.array_length = new_len;
      }

      assert(cmd_buffer->execbuf2.bo_count < cmd_buffer->execbuf2.array_length);

      bo->index = cmd_buffer->execbuf2.bo_count++;
      obj = &cmd_buffer->execbuf2.objects[bo->index];
      cmd_buffer->execbuf2.bos[bo->index] = bo;

      obj->handle = bo->gem_handle;
      obj->relocation_count = 0;
      obj->relocs_ptr = 0;
      obj->alignment = 0;
      obj->offset = bo->offset;
      obj->flags = 0;
      obj->rsvd1 = 0;
      obj->rsvd2 = 0;
   }

   if (relocs != NULL && obj->relocation_count == 0) {
      /* This is the first time we've ever seen a list of relocations for
       * this BO.  Go ahead and set the relocations and then walk the list
       * of relocations and add them all.
       */
      obj->relocation_count = relocs->num_relocs;
      obj->relocs_ptr = (uintptr_t) relocs->relocs;

      for (size_t i = 0; i < relocs->num_relocs; i++)
         anv_cmd_buffer_add_bo(cmd_buffer, relocs->reloc_bos[i], NULL);
   }

   return VK_SUCCESS;
}

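/* Rewrite each relocation's target_handle from a gem handle to the bo's
 * index in the validation list (as I915_EXEC_HANDLE_LUT expects), and note
 * whether any bo has moved from its presumed offset so the kernel knows
 * relocation processing is required.
 */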
static void
anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_reloc_list *list)
{
   struct anv_bo *bo;

   /* If the kernel supports I915_EXEC_NO_RELOC, it will compare offset in
    * struct drm_i915_gem_exec_object2 against the bo's current offset and,
    * if none of the bos have moved, it will skip relocation processing
    * altogether.  If I915_EXEC_NO_RELOC is not supported, the kernel
    * ignores the incoming value of offset so we can set it either way.
    * For that to work we need to make sure all relocs use the same
    * presumed offset.
    */

   for (size_t i = 0; i < list->num_relocs; i++) {
      bo = list->reloc_bos[i];
      if (bo->offset != list->relocs[i].presumed_offset)
         cmd_buffer->execbuf2.need_reloc = true;

      list->relocs[i].target_handle = bo->index;
   }
}

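/* Build the final drm_i915_gem_execbuffer2 for this command buffer:
 * gather every seen bo into the validation list, swap the first batch bo
 * to the end of the list (the kernel executes the last entry), then fix
 * up all relocation indices to match the final ordering.
 */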
void
anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_batch *batch = &cmd_buffer->batch;

   cmd_buffer->execbuf2.bo_count = 0;
   cmd_buffer->execbuf2.need_reloc = false;

   /* First, we walk over all of the bos we've seen and add them and their
    * relocations to the validate list.
    */
   struct anv_batch_bo **bbo;
   anv_vector_foreach(bbo, &cmd_buffer->seen_bbos)
      anv_cmd_buffer_add_bo(cmd_buffer, &(*bbo)->bo, &(*bbo)->relocs);

   struct anv_batch_bo *first_batch_bo =
      list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);

   /* The kernel requires that the last entry in the validation list be the
    * batch buffer to execute.  We can simply swap the element
    * corresponding to the first batch_bo in the chain with the last
    * element in the list.
    */
   if (first_batch_bo->bo.index != cmd_buffer->execbuf2.bo_count - 1) {
      uint32_t idx = first_batch_bo->bo.index;

      struct drm_i915_gem_exec_object2 tmp_obj =
         cmd_buffer->execbuf2.objects[idx];
      assert(cmd_buffer->execbuf2.bos[idx] == &first_batch_bo->bo);

      cmd_buffer->execbuf2.objects[idx] =
         cmd_buffer->execbuf2.objects[cmd_buffer->execbuf2.bo_count - 1];
      cmd_buffer->execbuf2.bos[idx] =
         cmd_buffer->execbuf2.bos[cmd_buffer->execbuf2.bo_count - 1];
      cmd_buffer->execbuf2.bos[idx]->index = idx;

      cmd_buffer->execbuf2.objects[cmd_buffer->execbuf2.bo_count - 1] = tmp_obj;
      cmd_buffer->execbuf2.bos[cmd_buffer->execbuf2.bo_count - 1] =
         &first_batch_bo->bo;
      first_batch_bo->bo.index = cmd_buffer->execbuf2.bo_count - 1;
   }

   /* Now we go through and fixup all of the relocation lists to point to
    * the correct indices in the object array.  We have to do this after we
    * reorder the list above as some of the indices may have changed.
    */
   anv_vector_foreach(bbo, &cmd_buffer->seen_bbos)
      anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);

   cmd_buffer->execbuf2.execbuf = (struct drm_i915_gem_execbuffer2) {
      .buffers_ptr = (uintptr_t) cmd_buffer->execbuf2.objects,
      .buffer_count = cmd_buffer->execbuf2.bo_count,
      .batch_start_offset = 0,
      .batch_len = batch->next - batch->start,
      .cliprects_ptr = 0,
      .num_cliprects = 0,
      .DR1 = 0,
      .DR4 = 0,
      .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER,
      .rsvd1 = cmd_buffer->device->context_id,
      .rsvd2 = 0,
   };

   if (!cmd_buffer->execbuf2.need_reloc)
      cmd_buffer->execbuf2.execbuf.flags |= I915_EXEC_NO_RELOC;
}