vk: clflush all state for non-LLC GPUs
mesa.git: src/vulkan/anv_batch_chain.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "gen7_pack.h"
33 #include "gen8_pack.h"
34
35 /** \file anv_batch_chain.c
36 *
37 * This file contains functions related to anv_cmd_buffer as a data
38 * structure. This involves everything required to create and destroy
39 * the actual batch buffers as well as link them together and handle
40 * relocations and surface state. It specifically does *not* contain any
41 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
42 */
43
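/* Overview (editor's sketch, derived from the code below): a command buffer
 * owns a list of anv_batch_bo's.  The anv_batch writes into the current
 * batch_bo and, when it runs out of room, the extend_cb
 * (anv_cmd_buffer_chain_batch) allocates a fresh batch_bo and chains the old
 * one to it with MI_BATCH_BUFFER_START.  At submit time,
 * anv_cmd_buffer_prepare_execbuf flattens everything into the execbuf2
 * object list and fixes up relocations.
 */
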
44 /*-----------------------------------------------------------------------*
45 * Functions related to anv_reloc_list
46 *-----------------------------------------------------------------------*/
47
48 static VkResult
49 anv_reloc_list_init_clone(struct anv_reloc_list *list,
50 const VkAllocationCallbacks *alloc,
51 const struct anv_reloc_list *other_list)
52 {
53 if (other_list) {
54 list->num_relocs = other_list->num_relocs;
55 list->array_length = other_list->array_length;
56 } else {
57 list->num_relocs = 0;
58 list->array_length = 256;
59 }
60
61 list->relocs =
62 anv_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
63 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
64
65 if (list->relocs == NULL)
66 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
67
68 list->reloc_bos =
69 anv_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
70 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
71
72 if (list->reloc_bos == NULL) {
73 anv_free(alloc, list->relocs);
74 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
75 }
76
77 if (other_list) {
78 memcpy(list->relocs, other_list->relocs,
79 list->array_length * sizeof(*list->relocs));
80 memcpy(list->reloc_bos, other_list->reloc_bos,
81 list->array_length * sizeof(*list->reloc_bos));
82 }
83
84 return VK_SUCCESS;
85 }
86
87 VkResult
88 anv_reloc_list_init(struct anv_reloc_list *list,
89 const VkAllocationCallbacks *alloc)
90 {
91 return anv_reloc_list_init_clone(list, alloc, NULL);
92 }
93
94 void
95 anv_reloc_list_finish(struct anv_reloc_list *list,
96 const VkAllocationCallbacks *alloc)
97 {
98 anv_free(alloc, list->relocs);
99 anv_free(alloc, list->reloc_bos);
100 }
101
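/* Ensure the list has room for at least num_additional_relocs more entries,
 * doubling array_length as needed.  On success the relocs/reloc_bos arrays
 * may have been reallocated, so callers must not hold pointers into them.
 */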
102 static VkResult
103 anv_reloc_list_grow(struct anv_reloc_list *list,
104 const VkAllocationCallbacks *alloc,
105 size_t num_additional_relocs)
106 {
107 if (list->num_relocs + num_additional_relocs <= list->array_length)
108 return VK_SUCCESS;
109
110 size_t new_length = list->array_length * 2;
111 while (new_length < list->num_relocs + num_additional_relocs)
112 new_length *= 2;
113
114 struct drm_i915_gem_relocation_entry *new_relocs =
115 anv_alloc(alloc, new_length * sizeof(*list->relocs), 8,
116 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
117 if (new_relocs == NULL)
118 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
119
120 struct anv_bo **new_reloc_bos =
121 anv_alloc(alloc, new_length * sizeof(*list->reloc_bos), 8,
122 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
123 if (new_reloc_bos == NULL) {
124 anv_free(alloc, new_relocs);
125 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
126 }
127
128 memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
129 memcpy(new_reloc_bos, list->reloc_bos,
130 list->num_relocs * sizeof(*list->reloc_bos));
131
132 anv_free(alloc, list->relocs);
133 anv_free(alloc, list->reloc_bos);
134
135 list->array_length = new_length;
136 list->relocs = new_relocs;
137 list->reloc_bos = new_reloc_bos;
138
139 return VK_SUCCESS;
140 }
141
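/* Append a relocation for the dword at `offset` (relative to the start of
 * the containing batch bo) pointing at target_bo + delta.  Returns the
 * presumed address (target_bo->offset + delta) that the caller writes into
 * the batch for now; the kernel patches it later if the bo moves.
 */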
142 uint64_t
143 anv_reloc_list_add(struct anv_reloc_list *list,
144 const VkAllocationCallbacks *alloc,
145 uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
146 {
147 struct drm_i915_gem_relocation_entry *entry;
148 int index;
149
150 anv_reloc_list_grow(list, alloc, 1);
151 /* TODO: Handle failure */
152
153 /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
154 index = list->num_relocs++;
155 list->reloc_bos[index] = target_bo;
156 entry = &list->relocs[index];
157 entry->target_handle = target_bo->gem_handle;
158 entry->delta = delta;
159 entry->offset = offset;
160 entry->presumed_offset = target_bo->offset;
161 entry->read_domains = 0;
162 entry->write_domain = 0;
163
164 return target_bo->offset + delta;
165 }
166
167 static void
168 anv_reloc_list_append(struct anv_reloc_list *list,
169 const VkAllocationCallbacks *alloc,
170 struct anv_reloc_list *other, uint32_t offset)
171 {
172 anv_reloc_list_grow(list, alloc, other->num_relocs);
173 /* TODO: Handle failure */
174
175 memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
176 other->num_relocs * sizeof(other->relocs[0]));
177 memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
178 other->num_relocs * sizeof(other->reloc_bos[0]));
179
180 for (uint32_t i = 0; i < other->num_relocs; i++)
181 list->relocs[i + list->num_relocs].offset += offset;
182
183 list->num_relocs += other->num_relocs;
184 }
185
186 /*-----------------------------------------------------------------------*
187 * Functions related to anv_batch
188 *-----------------------------------------------------------------------*/
189
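/* Reserve space for num_dwords dwords in the batch, growing it via
 * extend_cb if necessary, and return a pointer to write them at.
 */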
190 void *
191 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
192 {
193 if (batch->next + num_dwords * 4 > batch->end)
194 batch->extend_cb(batch, batch->user_data);
195
196 void *p = batch->next;
197
198 batch->next += num_dwords * 4;
199 assert(batch->next <= batch->end);
200
201 return p;
202 }
203
204 uint64_t
205 anv_batch_emit_reloc(struct anv_batch *batch,
206 void *location, struct anv_bo *bo, uint32_t delta)
207 {
208 return anv_reloc_list_add(batch->relocs, batch->alloc,
209 location - batch->start, bo, delta);
210 }
211
212 void
213 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
214 {
215 uint32_t size, offset;
216
217 size = other->next - other->start;
218 assert(size % 4 == 0);
219
220 if (batch->next + size > batch->end)
221 batch->extend_cb(batch, batch->user_data);
222
223 assert(batch->next + size <= batch->end);
224
225 VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
226 memcpy(batch->next, other->start, size);
227
228 offset = batch->next - batch->start;
229 anv_reloc_list_append(batch->relocs, batch->alloc,
230 other->relocs, offset);
231
232 batch->next += size;
233 }
234
235 /*-----------------------------------------------------------------------*
236 * Functions related to anv_batch_bo
237 *-----------------------------------------------------------------------*/
238
239 static VkResult
240 anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
241 struct anv_batch_bo **bbo_out)
242 {
243 VkResult result;
244
245 struct anv_batch_bo *bbo = anv_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
246 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
247 if (bbo == NULL)
248 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
249
250 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
251 if (result != VK_SUCCESS)
252 goto fail_alloc;
253
254 result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
255 if (result != VK_SUCCESS)
256 goto fail_bo_alloc;
257
258 *bbo_out = bbo;
259
260 return VK_SUCCESS;
261
262 fail_bo_alloc:
263 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
264 fail_alloc:
265 anv_free(&cmd_buffer->pool->alloc, bbo);
266
267 return result;
268 }
269
270 static VkResult
271 anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
272 const struct anv_batch_bo *other_bbo,
273 struct anv_batch_bo **bbo_out)
274 {
275 VkResult result;
276
277 struct anv_batch_bo *bbo = anv_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
278 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
279 if (bbo == NULL)
280 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
281
282 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
283 if (result != VK_SUCCESS)
284 goto fail_alloc;
285
286 result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
287 &other_bbo->relocs);
288 if (result != VK_SUCCESS)
289 goto fail_bo_alloc;
290
291 bbo->length = other_bbo->length;
292 memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);
293
294 bbo->last_ss_pool_bo_offset = other_bbo->last_ss_pool_bo_offset;
295
296 *bbo_out = bbo;
297
298 return VK_SUCCESS;
299
300 fail_bo_alloc:
301 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
302 fail_alloc:
303 anv_free(&cmd_buffer->pool->alloc, bbo);
304
305 return result;
306 }
307
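/* The three helpers below point an anv_batch at a batch_bo's mapping.
 * "start" begins writing at the top of the bo, "continue" resumes after the
 * bbo->length bytes already written, and "finish" records the final length.
 * batch_padding bytes are reserved at the end of the bo so there is always
 * room for the chaining MI_BATCH_BUFFER_START.
 */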
308 static void
309 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
310 size_t batch_padding)
311 {
312 batch->next = batch->start = bbo->bo.map;
313 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
314 batch->relocs = &bbo->relocs;
315 bbo->last_ss_pool_bo_offset = 0;
316 bbo->relocs.num_relocs = 0;
317 }
318
319 static void
320 anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
321 size_t batch_padding)
322 {
323 batch->start = bbo->bo.map;
324 batch->next = bbo->bo.map + bbo->length;
325 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
326 batch->relocs = &bbo->relocs;
327 }
328
329 static void
330 anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
331 {
332 assert(batch->start == bbo->bo.map);
333 bbo->length = batch->next - batch->start;
334 VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
335 }
336
337 static void
338 anv_batch_bo_destroy(struct anv_batch_bo *bbo,
339 struct anv_cmd_buffer *cmd_buffer)
340 {
341 anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
342 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
343 anv_free(&cmd_buffer->pool->alloc, bbo);
344 }
345
346 static VkResult
347 anv_batch_bo_list_clone(const struct list_head *list,
348 struct anv_cmd_buffer *cmd_buffer,
349 struct list_head *new_list)
350 {
351 VkResult result = VK_SUCCESS;
352
353 list_inithead(new_list);
354
355 struct anv_batch_bo *prev_bbo = NULL;
356 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
357 struct anv_batch_bo *new_bbo;
358 result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
359 if (result != VK_SUCCESS)
360 break;
361 list_addtail(&new_bbo->link, new_list);
362
363 if (prev_bbo) {
364 /* As we clone this list of batch_bo's, they chain one to the
365 * other using MI_BATCH_BUFFER_START commands. We need to fix up
366 * those relocations as we go. Fortunately, this is pretty easy
367 * as it will always be the last relocation in the list.
368 */
369 uint32_t last_idx = prev_bbo->relocs.num_relocs - 1;
370 assert(prev_bbo->relocs.reloc_bos[last_idx] == &bbo->bo);
371 prev_bbo->relocs.reloc_bos[last_idx] = &new_bbo->bo;
372 }
373
374 prev_bbo = new_bbo;
375 }
376
377 if (result != VK_SUCCESS) {
378 list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
379 anv_batch_bo_destroy(bbo, cmd_buffer);
380 }
381
382 return result;
383 }
384
385 /*-----------------------------------------------------------------------*
386 * Functions related to anv_cmd_buffer
387 *-----------------------------------------------------------------------*/
388
389 static inline struct anv_batch_bo *
390 anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
391 {
392 return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
393 }
394
395 struct anv_address
396 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
397 {
398 return (struct anv_address) {
399 .bo = &cmd_buffer->device->surface_state_block_pool.bo,
400 .offset = *(int32_t *)anv_vector_head(&cmd_buffer->bt_blocks),
401 };
402 }
403
404 static void
405 emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
406 struct anv_bo *bo, uint32_t offset)
407 {
408 /* In gen8+ the address field grew to two dwords to accommodate 48-bit
409 * offsets. The high 16 bits are in the last dword, so we can use the gen8
410 * version in either case, as long as we set the instruction length in the
411 * header accordingly. This means that we always emit three dwords here
412 * and all the padding and adjustment we do in this file works for all
413 * gens.
414 */
415
416 const uint32_t gen7_length =
417 GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
418 const uint32_t gen8_length =
419 GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;
420
421 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START,
422 .DwordLength = cmd_buffer->device->info.gen < 8 ?
423 gen7_length : gen8_length,
424 ._2ndLevelBatchBuffer = _1stlevelbatch,
425 .AddressSpaceIndicator = ASI_PPGTT,
426 .BatchBufferStartAddress = { bo, offset });
427 }
428
429 static void
430 cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
431 struct anv_batch_bo *bbo)
432 {
433 struct anv_batch *batch = &cmd_buffer->batch;
434 struct anv_batch_bo *current_bbo =
435 anv_cmd_buffer_current_batch_bo(cmd_buffer);
436
437 /* We set the end of the batch a little short so that we're sure we
438 * have room for the chaining command. Since we're about to emit the
439 * chaining command, set the end back to where it should be.
440 */
441 batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
442 assert(batch->end == current_bbo->bo.map + current_bbo->bo.size);
443
444 emit_batch_buffer_start(cmd_buffer, &bbo->bo, 0);
445
446 anv_batch_bo_finish(current_bbo, batch);
447 }
448
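/* extend_cb for a command buffer's batch: allocate a new batch_bo, record it
 * in seen_bbos, chain the current batch_bo to it with MI_BATCH_BUFFER_START,
 * and point the batch at the new bo.
 */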
449 static VkResult
450 anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
451 {
452 struct anv_cmd_buffer *cmd_buffer = _data;
453 struct anv_batch_bo *new_bbo;
454
455 VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
456 if (result != VK_SUCCESS)
457 return result;
458
459 struct anv_batch_bo **seen_bbo = anv_vector_add(&cmd_buffer->seen_bbos);
460 if (seen_bbo == NULL) {
461 anv_batch_bo_destroy(new_bbo, cmd_buffer);
462 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
463 }
464 *seen_bbo = new_bbo;
465
466 cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
467
468 list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
469
470 anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);
471
472 return VK_SUCCESS;
473 }
474
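/* Allocate `entries` binding table slots out of the command buffer's current
 * binding table block.  Binding table blocks are allocated from the back of
 * the surface state block pool, so *bt_block is negative; *state_offset is
 * set to -(*bt_block), the amount callers add to surface state offsets when
 * filling binding table entries.  Returns a zeroed anv_state if the current
 * block is full.
 */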
475 struct anv_state
476 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
477 uint32_t entries, uint32_t *state_offset)
478 {
479 struct anv_block_pool *block_pool =
480 &cmd_buffer->device->surface_state_block_pool;
481 int32_t *bt_block = anv_vector_head(&cmd_buffer->bt_blocks);
482 struct anv_state state;
483
484 state.alloc_size = align_u32(entries * 4, 32);
485
486 if (cmd_buffer->bt_next + state.alloc_size > block_pool->block_size)
487 return (struct anv_state) { 0 };
488
489 state.offset = cmd_buffer->bt_next;
490 state.map = block_pool->map + *bt_block + state.offset;
491
492 cmd_buffer->bt_next += state.alloc_size;
493
494 assert(*bt_block < 0);
495 *state_offset = -(*bt_block);
496
497 return state;
498 }
499
500 struct anv_state
501 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
502 {
503 return anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
504 }
505
506 struct anv_state
507 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
508 uint32_t size, uint32_t alignment)
509 {
510 return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
511 size, alignment);
512 }
513
514 VkResult
515 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
516 {
517 struct anv_block_pool *block_pool =
518 &cmd_buffer->device->surface_state_block_pool;
519
520 int32_t *offset = anv_vector_add(&cmd_buffer->bt_blocks);
521 if (offset == NULL)
522 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
523
524 *offset = anv_block_pool_alloc_back(block_pool);
525 cmd_buffer->bt_next = 0;
526
527 return VK_SUCCESS;
528 }
529
530 VkResult
531 anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
532 {
533 struct anv_batch_bo *batch_bo;
534 VkResult result;
535
536 list_inithead(&cmd_buffer->batch_bos);
537
538 result = anv_batch_bo_create(cmd_buffer, &batch_bo);
539 if (result != VK_SUCCESS)
540 return result;
541
542 list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
543
544 cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
545 cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
546 cmd_buffer->batch.user_data = cmd_buffer;
547
548 anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
549 GEN8_MI_BATCH_BUFFER_START_length * 4);
550
551 int success = anv_vector_init(&cmd_buffer->seen_bbos,
552 sizeof(struct anv_bo *),
553 8 * sizeof(struct anv_bo *));
554 if (!success)
555 goto fail_batch_bo;
556
557 *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
558
559 success = anv_vector_init(&cmd_buffer->bt_blocks, sizeof(int32_t),
560 8 * sizeof(int32_t));
561 if (!success)
562 goto fail_seen_bbos;
563
564 result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
565 &cmd_buffer->pool->alloc);
566 if (result != VK_SUCCESS)
567 goto fail_bt_blocks;
568
569 anv_cmd_buffer_new_binding_table_block(cmd_buffer);
570
571 cmd_buffer->execbuf2.objects = NULL;
572 cmd_buffer->execbuf2.bos = NULL;
573 cmd_buffer->execbuf2.array_length = 0;
574
575 return VK_SUCCESS;
576
577 fail_bt_blocks:
578 anv_vector_finish(&cmd_buffer->bt_blocks);
579 fail_seen_bbos:
580 anv_vector_finish(&cmd_buffer->seen_bbos);
581 fail_batch_bo:
582 anv_batch_bo_destroy(batch_bo, cmd_buffer);
583
584 return result;
585 }
586
587 void
588 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
589 {
590 int32_t *bt_block;
591 anv_vector_foreach(bt_block, &cmd_buffer->bt_blocks) {
592 anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
593 *bt_block);
594 }
595 anv_vector_finish(&cmd_buffer->bt_blocks);
596
597 anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
598
599 anv_vector_finish(&cmd_buffer->seen_bbos);
600
601 /* Destroy all of the batch buffers */
602 list_for_each_entry_safe(struct anv_batch_bo, bbo,
603 &cmd_buffer->batch_bos, link) {
604 anv_batch_bo_destroy(bbo, cmd_buffer);
605 }
606
607 anv_free(&cmd_buffer->pool->alloc, cmd_buffer->execbuf2.objects);
608 anv_free(&cmd_buffer->pool->alloc, cmd_buffer->execbuf2.bos);
609 }
610
611 void
612 anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
613 {
614 /* Delete all but the first batch bo */
615 assert(!list_empty(&cmd_buffer->batch_bos));
616 while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
617 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
618 list_del(&bbo->link);
619 anv_batch_bo_destroy(bbo, cmd_buffer);
620 }
621 assert(!list_empty(&cmd_buffer->batch_bos));
622
623 anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
624 &cmd_buffer->batch,
625 GEN8_MI_BATCH_BUFFER_START_length * 4);
626
627 while (anv_vector_length(&cmd_buffer->bt_blocks) > 1) {
628 int32_t *bt_block = anv_vector_remove(&cmd_buffer->bt_blocks);
629 anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
630 *bt_block);
631 }
632 assert(anv_vector_length(&cmd_buffer->bt_blocks) == 1);
633 cmd_buffer->bt_next = 0;
634
635 cmd_buffer->surface_relocs.num_relocs = 0;
636
637 /* Reset the list of seen buffers */
638 cmd_buffer->seen_bbos.head = 0;
639 cmd_buffer->seen_bbos.tail = 0;
640
641 *(struct anv_batch_bo **)anv_vector_add(&cmd_buffer->seen_bbos) =
642 anv_cmd_buffer_current_batch_bo(cmd_buffer);
643 }
644
645 void
646 anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
647 {
648 struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
649
650 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
651 anv_batch_emit(&cmd_buffer->batch, GEN7_MI_BATCH_BUFFER_END);
652
653 /* Round batch up to an even number of dwords. */
654 if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
655 anv_batch_emit(&cmd_buffer->batch, GEN7_MI_NOOP);
656
657 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
658 }
659
660 anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
661
662 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
663 /* If this is a secondary command buffer, we need to determine the
664 * mode in which it will be executed with vkCmdExecuteCommands. We
665 * determine this statically here so that this stays in sync with the
666 * actual ExecuteCommands implementation.
667 */
668 if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
669 (anv_cmd_buffer_current_batch_bo(cmd_buffer)->length <
670 ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
671 /* If the secondary has exactly one batch buffer in its list *and*
672 * that batch buffer is less than half of the maximum size, we're
673 * probably better off simply copying it into our batch.
674 */
675 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
676 } else if (!(cmd_buffer->usage_flags &
677 VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
678 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
679
680 /* When we chain, we need to add an MI_BATCH_BUFFER_START command
681 * with its relocation. In order to handle this we'll increment here
682 * so we can unconditionally decrement right before adding the
683 * MI_BATCH_BUFFER_START command.
684 */
685 anv_cmd_buffer_current_batch_bo(cmd_buffer)->relocs.num_relocs++;
686 cmd_buffer->batch.next += GEN8_MI_BATCH_BUFFER_START_length * 4;
687 } else {
688 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
689 }
690 }
691 }
692
693 static inline VkResult
694 anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
695 struct list_head *list)
696 {
697 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
698 struct anv_batch_bo **bbo_ptr = anv_vector_add(&cmd_buffer->seen_bbos);
699 if (bbo_ptr == NULL)
700 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
701
702 *bbo_ptr = bbo;
703 }
704
705 return VK_SUCCESS;
706 }
707
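/* Splice a recorded secondary command buffer into the primary according to
 * the exec_mode chosen in anv_cmd_buffer_end_batch_buffer: EMIT copies the
 * secondary's single small batch_bo directly into the primary's batch, CHAIN
 * jumps into the secondary's batch_bos and patches its final
 * MI_BATCH_BUFFER_START to return to the primary, and COPY_AND_CHAIN clones
 * the secondary's batch_bo list first so the secondary can be used
 * simultaneously.
 */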
708 void
709 anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
710 struct anv_cmd_buffer *secondary)
711 {
712 switch (secondary->exec_mode) {
713 case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
714 anv_batch_emit_batch(&primary->batch, &secondary->batch);
715 break;
716 case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
717 struct anv_batch_bo *first_bbo =
718 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
719 struct anv_batch_bo *last_bbo =
720 list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
721
722 emit_batch_buffer_start(primary, &first_bbo->bo, 0);
723
724 struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
725 assert(primary->batch.start == this_bbo->bo.map);
726 uint32_t offset = primary->batch.next - primary->batch.start;
727 const uint32_t inst_size = GEN8_MI_BATCH_BUFFER_START_length * 4;
728
729 /* Roll back the previous MI_BATCH_BUFFER_START and its relocation so we
730 * can emit a new command and relocation for the current splice. In
731 * order to handle the initial-use case, we incremented next and
732 * num_relocs in end_batch_buffer() so we can always just subtract
733 * here.
734 */
735 last_bbo->relocs.num_relocs--;
736 secondary->batch.next -= inst_size;
737 emit_batch_buffer_start(secondary, &this_bbo->bo, offset);
738 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
739
740 /* After patching up the secondary buffer, we need to clflush the
741 * modified instruction in case we're on a !llc platform. We use a
742 * little loop to handle the case where the instruction crosses a cache
743 * line boundary.
744 */
745 if (!primary->device->info.has_llc) {
746 void *inst = secondary->batch.next - inst_size;
747 void *p = (void *) (((uintptr_t) inst) & ~CACHELINE_MASK);
748 __builtin_ia32_sfence();
749 while (p < secondary->batch.next) {
750 __builtin_ia32_clflush(p);
751 p += CACHELINE_SIZE;
752 }
753 }
754
755 break;
756 }
757 case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
758 struct list_head copy_list;
759 VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
760 secondary,
761 &copy_list);
762 if (result != VK_SUCCESS)
763 return; /* FIXME */
764
765 anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
766
767 struct anv_batch_bo *first_bbo =
768 list_first_entry(&copy_list, struct anv_batch_bo, link);
769 struct anv_batch_bo *last_bbo =
770 list_last_entry(&copy_list, struct anv_batch_bo, link);
771
772 cmd_buffer_chain_to_batch_bo(primary, first_bbo);
773
774 list_splicetail(&copy_list, &primary->batch_bos);
775
776 anv_batch_bo_continue(last_bbo, &primary->batch,
777 GEN8_MI_BATCH_BUFFER_START_length * 4);
778
779 anv_cmd_buffer_emit_state_base_address(primary);
780 break;
781 }
782 default:
783 assert(!"Invalid execution mode");
784 }
785
786 anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
787 &secondary->surface_relocs, 0);
788 }
789
790 static VkResult
791 anv_cmd_buffer_add_bo(struct anv_cmd_buffer *cmd_buffer,
792 struct anv_bo *bo,
793 struct anv_reloc_list *relocs)
794 {
795 struct drm_i915_gem_exec_object2 *obj = NULL;
796
797 if (bo->index < cmd_buffer->execbuf2.bo_count &&
798 cmd_buffer->execbuf2.bos[bo->index] == bo)
799 obj = &cmd_buffer->execbuf2.objects[bo->index];
800
801 if (obj == NULL) {
802 /* We've never seen this one before. Add it to the list and assign
803 * an id that we can use later.
804 */
805 if (cmd_buffer->execbuf2.bo_count >= cmd_buffer->execbuf2.array_length) {
806 uint32_t new_len = cmd_buffer->execbuf2.objects ?
807 cmd_buffer->execbuf2.array_length * 2 : 64;
808
809 struct drm_i915_gem_exec_object2 *new_objects =
810 anv_alloc(&cmd_buffer->pool->alloc, new_len * sizeof(*new_objects),
811 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
812 if (new_objects == NULL)
813 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
814
815 struct anv_bo **new_bos =
816 anv_alloc(&cmd_buffer->pool->alloc, new_len * sizeof(*new_bos),
817 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
818 if (new_bos == NULL) {
819 anv_free(&cmd_buffer->pool->alloc, new_objects);
820 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
821 }
822
823 if (cmd_buffer->execbuf2.objects) {
824 memcpy(new_objects, cmd_buffer->execbuf2.objects,
825 cmd_buffer->execbuf2.bo_count * sizeof(*new_objects));
826 memcpy(new_bos, cmd_buffer->execbuf2.bos,
827 cmd_buffer->execbuf2.bo_count * sizeof(*new_bos));
828 }
829
830 cmd_buffer->execbuf2.objects = new_objects;
831 cmd_buffer->execbuf2.bos = new_bos;
832 cmd_buffer->execbuf2.array_length = new_len;
833 }
834
835 assert(cmd_buffer->execbuf2.bo_count < cmd_buffer->execbuf2.array_length);
836
837 bo->index = cmd_buffer->execbuf2.bo_count++;
838 obj = &cmd_buffer->execbuf2.objects[bo->index];
839 cmd_buffer->execbuf2.bos[bo->index] = bo;
840
841 obj->handle = bo->gem_handle;
842 obj->relocation_count = 0;
843 obj->relocs_ptr = 0;
844 obj->alignment = 0;
845 obj->offset = bo->offset;
846 obj->flags = 0;
847 obj->rsvd1 = 0;
848 obj->rsvd2 = 0;
849 }
850
851 if (relocs != NULL && obj->relocation_count == 0) {
852 /* This is the first time we've ever seen a list of relocations for
853 * this BO. Go ahead and set the relocations and then walk the list
854 * of relocations and add them all.
855 */
856 obj->relocation_count = relocs->num_relocs;
857 obj->relocs_ptr = (uintptr_t) relocs->relocs;
858
859 for (size_t i = 0; i < relocs->num_relocs; i++) {
860 /* A quick sanity check on relocations */
861 assert(relocs->relocs[i].offset < bo->size);
862 anv_cmd_buffer_add_bo(cmd_buffer, relocs->reloc_bos[i], NULL);
863 }
864 }
865
866 return VK_SUCCESS;
867 }
868
869 static void
870 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
871 struct anv_reloc_list *list)
872 {
873 struct anv_bo *bo;
874
875 /* If the kernel supports I915_EXEC_NO_RELOC, it will compare offset in
876 * struct drm_i915_gem_exec_object2 against the bo's current offset and, if
877 * none of the bos have moved, it will skip relocation processing altogether.
878 * If I915_EXEC_NO_RELOC is not supported, the kernel ignores the incoming
879 * value of offset so we can set it either way. For that to work we need
880 * to make sure all relocs use the same presumed offset.
881 */
882
883 for (size_t i = 0; i < list->num_relocs; i++) {
884 bo = list->reloc_bos[i];
885 if (bo->offset != list->relocs[i].presumed_offset)
886 cmd_buffer->execbuf2.need_reloc = true;
887
888 list->relocs[i].target_handle = bo->index;
889 }
890 }
891
892 static void
893 adjust_relocations_from_block_pool(struct anv_block_pool *pool,
894 struct anv_reloc_list *relocs)
895 {
896 for (size_t i = 0; i < relocs->num_relocs; i++) {
897 /* In general, we don't know how stale the relocated value is. It
898 * may have been used last time or it may not. Since we don't want
899 * to stomp it while the GPU may be accessing it, we haven't updated
900 * it anywhere else in the code. Instead, we just set the presumed
901 * offset to what it is now based on the delta and the data in the
902 * block pool. Then the kernel will update it for us if needed.
903 */
904 assert(relocs->relocs[i].offset < pool->state.end);
905 uint32_t *reloc_data = pool->map + relocs->relocs[i].offset;
906
907 /* We're reading back the relocated value from potentially incoherent
908 * memory here. However, any change to the value will be from the kernel
909 * writing out relocations, which will keep the CPU cache up to date.
910 */
911 relocs->relocs[i].presumed_offset = *reloc_data - relocs->relocs[i].delta;
912
913 /* All of the relocations from this block pool to other BO's should
914 * have been emitted relative to the surface block pool center. We
915 * need to add the center offset to make them relative to the
916 * beginning of the actual GEM bo.
917 */
918 relocs->relocs[i].offset += pool->center_bo_offset;
919 }
920 }
921
922 static void
923 adjust_relocations_to_block_pool(struct anv_block_pool *pool,
924 struct anv_bo *from_bo,
925 struct anv_reloc_list *relocs,
926 uint32_t *last_pool_center_bo_offset)
927 {
928 assert(*last_pool_center_bo_offset <= pool->center_bo_offset);
929 uint32_t delta = pool->center_bo_offset - *last_pool_center_bo_offset;
930
931 /* When we initially emit relocations into a block pool, we don't
932 * actually know what the final center_bo_offset will be so we just emit
933 * it as if center_bo_offset == 0. Now that we know what the center
934 * offset is, we need to walk the list of relocations and adjust any
935 * relocations that point to the pool bo with the correct offset.
936 */
937 for (size_t i = 0; i < relocs->num_relocs; i++) {
938 if (relocs->reloc_bos[i] == &pool->bo) {
939 /* Adjust the delta value in the relocation to correctly
940 * correspond to the new delta. Initially, this value may have
941 * been negative (if treated as unsigned), but we trust in
942 * uint32_t roll-over to fix that for us at this point.
943 */
944 relocs->relocs[i].delta += delta;
945
946 /* Since the delta has changed, we need to update the actual
947 * relocated value with the new presumed value. This function
948 * should only be called on batch buffers, so we know it isn't in
949 * use by the GPU at the moment.
950 */
951 assert(relocs->relocs[i].offset < from_bo->size);
952 uint32_t *reloc_data = from_bo->map + relocs->relocs[i].offset;
953 *reloc_data = relocs->relocs[i].presumed_offset +
954 relocs->relocs[i].delta;
955 }
956 }
957
958 *last_pool_center_bo_offset = pool->center_bo_offset;
959 }
960
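/* Build the execbuf2 object/bo arrays for submission: adjust relocations for
 * the surface state block pool's final center offset, add every bo we've
 * seen (plus its relocations) to the validation list, move the first batch
 * bo to the end of the list as the kernel requires, and fill out the
 * drm_i915_gem_execbuffer2 struct.
 */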
961 void
962 anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer)
963 {
964 struct anv_batch *batch = &cmd_buffer->batch;
965 struct anv_block_pool *ss_pool =
966 &cmd_buffer->device->surface_state_block_pool;
967
968 cmd_buffer->execbuf2.bo_count = 0;
969 cmd_buffer->execbuf2.need_reloc = false;
970
971 adjust_relocations_from_block_pool(ss_pool, &cmd_buffer->surface_relocs);
972 anv_cmd_buffer_add_bo(cmd_buffer, &ss_pool->bo, &cmd_buffer->surface_relocs);
973
974 /* First, we walk over all of the bos we've seen and add them and their
975 * relocations to the validate list.
976 */
977 struct anv_batch_bo **bbo;
978 anv_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
979 adjust_relocations_to_block_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
980 &(*bbo)->last_ss_pool_bo_offset);
981
982 anv_cmd_buffer_add_bo(cmd_buffer, &(*bbo)->bo, &(*bbo)->relocs);
983 }
984
985 struct anv_batch_bo *first_batch_bo =
986 list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
987
988 /* The kernel requires that the last entry in the validation list be the
989 * batch buffer to execute. We can simply swap the element
990 * corresponding to the first batch_bo in the chain with the last
991 * element in the list.
992 */
993 if (first_batch_bo->bo.index != cmd_buffer->execbuf2.bo_count - 1) {
994 uint32_t idx = first_batch_bo->bo.index;
995 uint32_t last_idx = cmd_buffer->execbuf2.bo_count - 1;
996
997 struct drm_i915_gem_exec_object2 tmp_obj =
998 cmd_buffer->execbuf2.objects[idx];
999 assert(cmd_buffer->execbuf2.bos[idx] == &first_batch_bo->bo);
1000
1001 cmd_buffer->execbuf2.objects[idx] = cmd_buffer->execbuf2.objects[last_idx];
1002 cmd_buffer->execbuf2.bos[idx] = cmd_buffer->execbuf2.bos[last_idx];
1003 cmd_buffer->execbuf2.bos[idx]->index = idx;
1004
1005 cmd_buffer->execbuf2.objects[last_idx] = tmp_obj;
1006 cmd_buffer->execbuf2.bos[last_idx] = &first_batch_bo->bo;
1007 first_batch_bo->bo.index = last_idx;
1008 }
1009
1010 /* Now we go through and fixup all of the relocation lists to point to
1011 * the correct indices in the object array. We have to do this after we
1012 * reorder the list above as some of the indices may have changed.
1013 */
1014 anv_vector_foreach(bbo, &cmd_buffer->seen_bbos)
1015 anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
1016
1017 anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
1018
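/* On non-LLC platforms the batch buffers are not coherent with the CPU
 * cache, so flush the freshly written batch contents (sfence, then one
 * clflush per cache line) before handing the batch to the kernel.
 */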
1019 if (!cmd_buffer->device->info.has_llc) {
1020 __builtin_ia32_sfence();
1021 anv_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1022 for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
1023 __builtin_ia32_clflush((*bbo)->bo.map + i);
1024 }
1025 }
1026
1027 cmd_buffer->execbuf2.execbuf = (struct drm_i915_gem_execbuffer2) {
1028 .buffers_ptr = (uintptr_t) cmd_buffer->execbuf2.objects,
1029 .buffer_count = cmd_buffer->execbuf2.bo_count,
1030 .batch_start_offset = 0,
1031 .batch_len = batch->next - batch->start,
1032 .cliprects_ptr = 0,
1033 .num_cliprects = 0,
1034 .DR1 = 0,
1035 .DR4 = 0,
1036 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER |
1037 I915_EXEC_CONSTANTS_REL_GENERAL,
1038 .rsvd1 = cmd_buffer->device->context_id,
1039 .rsvd2 = 0,
1040 };
1041
1042 if (!cmd_buffer->execbuf2.need_reloc)
1043 cmd_buffer->execbuf2.execbuf.flags |= I915_EXEC_NO_RELOC;
1044 }