1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "genxml/gen7_pack.h"
33 #include "genxml/gen8_pack.h"
34
35 #include "util/debug.h"
36
37 /** \file anv_batch_chain.c
38 *
39 * This file contains functions related to anv_cmd_buffer as a data
40 * structure. This involves everything required to create and destroy
41 * the actual batch buffers as well as link them together and handle
42 * relocations and surface state. It specifically does *not* contain any
43 * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
44 */
45
46 /*-----------------------------------------------------------------------*
47 * Functions related to anv_reloc_list
48 *-----------------------------------------------------------------------*/
49
50 static VkResult
51 anv_reloc_list_init_clone(struct anv_reloc_list *list,
52 const VkAllocationCallbacks *alloc,
53 const struct anv_reloc_list *other_list)
54 {
55 if (other_list) {
56 list->num_relocs = other_list->num_relocs;
57 list->array_length = other_list->array_length;
58 } else {
59 list->num_relocs = 0;
60 list->array_length = 256;
61 }
62
63 list->relocs =
64 vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
65 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
66
67 if (list->relocs == NULL)
68 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
69
70 list->reloc_bos =
71 vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
72 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
73
74 if (list->reloc_bos == NULL) {
75 vk_free(alloc, list->relocs);
76 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
77 }
78
79 if (other_list) {
80 memcpy(list->relocs, other_list->relocs,
81 list->array_length * sizeof(*list->relocs));
82 memcpy(list->reloc_bos, other_list->reloc_bos,
83 list->array_length * sizeof(*list->reloc_bos));
84 }
85
86 return VK_SUCCESS;
87 }
88
89 VkResult
90 anv_reloc_list_init(struct anv_reloc_list *list,
91 const VkAllocationCallbacks *alloc)
92 {
93 return anv_reloc_list_init_clone(list, alloc, NULL);
94 }
95
96 void
97 anv_reloc_list_finish(struct anv_reloc_list *list,
98 const VkAllocationCallbacks *alloc)
99 {
100 vk_free(alloc, list->relocs);
101 vk_free(alloc, list->reloc_bos);
102 }
103
104 static VkResult
105 anv_reloc_list_grow(struct anv_reloc_list *list,
106 const VkAllocationCallbacks *alloc,
107 size_t num_additional_relocs)
108 {
109 if (list->num_relocs + num_additional_relocs <= list->array_length)
110 return VK_SUCCESS;
111
112 size_t new_length = list->array_length * 2;
113 while (new_length < list->num_relocs + num_additional_relocs)
114 new_length *= 2;
115
116 struct drm_i915_gem_relocation_entry *new_relocs =
117 vk_alloc(alloc, new_length * sizeof(*list->relocs), 8,
118 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
119 if (new_relocs == NULL)
120 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
121
122 struct anv_bo **new_reloc_bos =
123 vk_alloc(alloc, new_length * sizeof(*list->reloc_bos), 8,
124 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
125 if (new_reloc_bos == NULL) {
126 vk_free(alloc, new_relocs);
127 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
128 }
129
130 memcpy(new_relocs, list->relocs, list->num_relocs * sizeof(*list->relocs));
131 memcpy(new_reloc_bos, list->reloc_bos,
132 list->num_relocs * sizeof(*list->reloc_bos));
133
134 vk_free(alloc, list->relocs);
135 vk_free(alloc, list->reloc_bos);
136
137 list->array_length = new_length;
138 list->relocs = new_relocs;
139 list->reloc_bos = new_reloc_bos;
140
141 return VK_SUCCESS;
142 }
143
144 uint64_t
145 anv_reloc_list_add(struct anv_reloc_list *list,
146 const VkAllocationCallbacks *alloc,
147 uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
148 {
149 struct drm_i915_gem_relocation_entry *entry;
150 int index;
151
152 const uint32_t domain =
153 target_bo->is_winsys_bo ? I915_GEM_DOMAIN_RENDER : 0;
154
155 anv_reloc_list_grow(list, alloc, 1);
156 /* TODO: Handle failure */
157
158 /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
159 index = list->num_relocs++;
160 list->reloc_bos[index] = target_bo;
161 entry = &list->relocs[index];
162 entry->target_handle = target_bo->gem_handle;
163 entry->delta = delta;
164 entry->offset = offset;
165 entry->presumed_offset = target_bo->offset;
166 entry->read_domains = domain;
167 entry->write_domain = domain;
168 VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
169
170 return target_bo->offset + delta;
171 }
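/* A minimal usage sketch (illustrative only; `bbo`, `target_bo`, `ptr_offset`
 * and `delta` are hypothetical names, not values the driver computes here):
 * a caller that has just reserved space for a pointer at byte `ptr_offset`
 * in a batch BO would record the relocation and write the presumed address:
 *
 *    uint64_t presumed =
 *       anv_reloc_list_add(&bbo->relocs, &cmd_buffer->pool->alloc,
 *                          ptr_offset, target_bo, delta);
 *    write_reloc(device, bbo->bo.map + ptr_offset, presumed, false);
 *
 * The return value is target_bo->offset + delta, i.e. the address the kernel
 * reported for target_bo on the last execbuf2.  If the BO has not moved by
 * the next submission, the kernel can skip this relocation entirely (see
 * relocate_cmd_buffer() later in this file).
 */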
172
173 static void
174 anv_reloc_list_append(struct anv_reloc_list *list,
175 const VkAllocationCallbacks *alloc,
176 struct anv_reloc_list *other, uint32_t offset)
177 {
178 anv_reloc_list_grow(list, alloc, other->num_relocs);
179 /* TODO: Handle failure */
180
181 memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
182 other->num_relocs * sizeof(other->relocs[0]));
183 memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
184 other->num_relocs * sizeof(other->reloc_bos[0]));
185
186 for (uint32_t i = 0; i < other->num_relocs; i++)
187 list->relocs[i + list->num_relocs].offset += offset;
188
189 list->num_relocs += other->num_relocs;
190 }
191
192 /*-----------------------------------------------------------------------*
193 * Functions related to anv_batch
194 *-----------------------------------------------------------------------*/
195
196 void *
197 anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
198 {
199 if (batch->next + num_dwords * 4 > batch->end)
200 batch->extend_cb(batch, batch->user_data);
201
202 void *p = batch->next;
203
204 batch->next += num_dwords * 4;
205 assert(batch->next <= batch->end);
206
207 return p;
208 }
209
210 uint64_t
211 anv_batch_emit_reloc(struct anv_batch *batch,
212 void *location, struct anv_bo *bo, uint32_t delta)
213 {
214 return anv_reloc_list_add(batch->relocs, batch->alloc,
215 location - batch->start, bo, delta);
216 }
217
218 void
219 anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
220 {
221 uint32_t size, offset;
222
223 size = other->next - other->start;
224 assert(size % 4 == 0);
225
226 if (batch->next + size > batch->end)
227 batch->extend_cb(batch, batch->user_data);
228
229 assert(batch->next + size <= batch->end);
230
231 VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
232 memcpy(batch->next, other->start, size);
233
234 offset = batch->next - batch->start;
235 anv_reloc_list_append(batch->relocs, batch->alloc,
236 other->relocs, offset);
237
238 batch->next += size;
239 }
240
241 /*-----------------------------------------------------------------------*
242 * Functions related to anv_batch_bo
243 *-----------------------------------------------------------------------*/
244
245 static VkResult
246 anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
247 struct anv_batch_bo **bbo_out)
248 {
249 VkResult result;
250
251 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
252 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
253 if (bbo == NULL)
254 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
255
256 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
257 ANV_CMD_BUFFER_BATCH_SIZE);
258 if (result != VK_SUCCESS)
259 goto fail_alloc;
260
261 result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
262 if (result != VK_SUCCESS)
263 goto fail_bo_alloc;
264
265 *bbo_out = bbo;
266
267 return VK_SUCCESS;
268
269 fail_bo_alloc:
270 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
271 fail_alloc:
272 vk_free(&cmd_buffer->pool->alloc, bbo);
273
274 return result;
275 }
276
277 static VkResult
278 anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
279 const struct anv_batch_bo *other_bbo,
280 struct anv_batch_bo **bbo_out)
281 {
282 VkResult result;
283
284 struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
285 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
286 if (bbo == NULL)
287 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
288
289 result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool, &bbo->bo,
290 other_bbo->bo.size);
291 if (result != VK_SUCCESS)
292 goto fail_alloc;
293
294 result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
295 &other_bbo->relocs);
296 if (result != VK_SUCCESS)
297 goto fail_bo_alloc;
298
299 bbo->length = other_bbo->length;
300 memcpy(bbo->bo.map, other_bbo->bo.map, other_bbo->length);
301
302 *bbo_out = bbo;
303
304 return VK_SUCCESS;
305
306 fail_bo_alloc:
307 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
308 fail_alloc:
309 vk_free(&cmd_buffer->pool->alloc, bbo);
310
311 return result;
312 }
313
314 static void
315 anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
316 size_t batch_padding)
317 {
318 batch->next = batch->start = bbo->bo.map;
319 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
320 batch->relocs = &bbo->relocs;
321 bbo->relocs.num_relocs = 0;
322 }
323
324 static void
325 anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
326 size_t batch_padding)
327 {
328 batch->start = bbo->bo.map;
329 batch->next = bbo->bo.map + bbo->length;
330 batch->end = bbo->bo.map + bbo->bo.size - batch_padding;
331 batch->relocs = &bbo->relocs;
332 }
333
334 static void
335 anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
336 {
337 assert(batch->start == bbo->bo.map);
338 bbo->length = batch->next - batch->start;
339 VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
340 }
341
342 static VkResult
343 anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
344 struct anv_batch *batch, size_t additional,
345 size_t batch_padding)
346 {
347 assert(batch->start == bbo->bo.map);
348 bbo->length = batch->next - batch->start;
349
350 size_t new_size = bbo->bo.size;
351 while (new_size <= bbo->length + additional + batch_padding)
352 new_size *= 2;
353
354 if (new_size == bbo->bo.size)
355 return VK_SUCCESS;
356
357 struct anv_bo new_bo;
358 VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
359 &new_bo, new_size);
360 if (result != VK_SUCCESS)
361 return result;
362
363 memcpy(new_bo.map, bbo->bo.map, bbo->length);
364
365 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
366
367 bbo->bo = new_bo;
368 anv_batch_bo_continue(bbo, batch, batch_padding);
369
370 return VK_SUCCESS;
371 }
372
373 static void
374 anv_batch_bo_destroy(struct anv_batch_bo *bbo,
375 struct anv_cmd_buffer *cmd_buffer)
376 {
377 anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
378 anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, &bbo->bo);
379 vk_free(&cmd_buffer->pool->alloc, bbo);
380 }
381
382 static VkResult
383 anv_batch_bo_list_clone(const struct list_head *list,
384 struct anv_cmd_buffer *cmd_buffer,
385 struct list_head *new_list)
386 {
387 VkResult result = VK_SUCCESS;
388
389 list_inithead(new_list);
390
391 struct anv_batch_bo *prev_bbo = NULL;
392 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
393 struct anv_batch_bo *new_bbo = NULL;
394 result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
395 if (result != VK_SUCCESS)
396 break;
397 list_addtail(&new_bbo->link, new_list);
398
399 if (prev_bbo) {
400 /* As we clone this list of batch_bo's, they chain one to the
401 * other using MI_BATCH_BUFFER_START commands. We need to fix up
402 * those relocations as we go. Fortunately, this is pretty easy
403 * as it will always be the last relocation in the list.
404 */
405 uint32_t last_idx = prev_bbo->relocs.num_relocs - 1;
406 assert(prev_bbo->relocs.reloc_bos[last_idx] == &bbo->bo);
407 prev_bbo->relocs.reloc_bos[last_idx] = &new_bbo->bo;
408 }
409
410 prev_bbo = new_bbo;
411 }
412
413 if (result != VK_SUCCESS) {
414 list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
415 anv_batch_bo_destroy(bbo, cmd_buffer);
416 }
417
418 return result;
419 }
420
421 /*-----------------------------------------------------------------------*
422 * Functions related to anv_cmd_buffer
423 *-----------------------------------------------------------------------*/
424
425 static inline struct anv_batch_bo *
426 anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
427 {
428 return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
429 }
430
431 struct anv_address
432 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
433 {
434 return (struct anv_address) {
435 .bo = &cmd_buffer->device->surface_state_block_pool.bo,
436 .offset = *(int32_t *)u_vector_head(&cmd_buffer->bt_blocks),
437 };
438 }
439
440 static void
441 emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
442 struct anv_bo *bo, uint32_t offset)
443 {
444 /* In gen8+ the address field grew to two dwords to accommodate 48-bit
445 * offsets. The high 16 bits are in the last dword, so we can use the gen8
446 * version in either case, as long as we set the instruction length in the
447 * header accordingly. This means that we always emit three dwords here
448 * and all the padding and adjustment we do in this file works for all
449 * gens.
450 */
451
452 const uint32_t gen7_length =
453 GEN7_MI_BATCH_BUFFER_START_length - GEN7_MI_BATCH_BUFFER_START_length_bias;
454 const uint32_t gen8_length =
455 GEN8_MI_BATCH_BUFFER_START_length - GEN8_MI_BATCH_BUFFER_START_length_bias;
456
457 anv_batch_emit(&cmd_buffer->batch, GEN8_MI_BATCH_BUFFER_START, bbs) {
458 bbs.DWordLength = cmd_buffer->device->info.gen < 8 ?
459 gen7_length : gen8_length;
460 bbs._2ndLevelBatchBuffer = _1stlevelbatch;
461 bbs.AddressSpaceIndicator = ASI_PPGTT;
462 bbs.BatchBufferStartAddress = (struct anv_address) { bo, offset };
463 }
464 }
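/* A worked example of the two length values above, assuming the usual genxml
 * definitions (MI_BATCH_BUFFER_START is 2 dwords on gen7 and 3 dwords on
 * gen8+, with an MI command length bias of 2); the numbers are illustrative:
 *
 *    gen7_length = 2 - 2 = 0
 *    gen8_length = 3 - 2 = 1
 *
 * Since a first-level MI_BATCH_BUFFER_START hands control to the new batch
 * and does not return, the trailing third dword we always emit should never
 * be parsed as a command on gen7.
 */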
465
466 static void
467 cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
468 struct anv_batch_bo *bbo)
469 {
470 struct anv_batch *batch = &cmd_buffer->batch;
471 struct anv_batch_bo *current_bbo =
472 anv_cmd_buffer_current_batch_bo(cmd_buffer);
473
474 /* We set the end of the batch a little short so we would be sure we
475 * have room for the chaining command. Since we're about to emit the
476 * chaining command, let's set it back where it should go.
477 */
478 batch->end += GEN8_MI_BATCH_BUFFER_START_length * 4;
479 assert(batch->end == current_bbo->bo.map + current_bbo->bo.size);
480
481 emit_batch_buffer_start(cmd_buffer, &bbo->bo, 0);
482
483 anv_batch_bo_finish(current_bbo, batch);
484 }
485
486 static VkResult
487 anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
488 {
489 struct anv_cmd_buffer *cmd_buffer = _data;
490 struct anv_batch_bo *new_bbo;
491
492 VkResult result = anv_batch_bo_create(cmd_buffer, &new_bbo);
493 if (result != VK_SUCCESS)
494 return result;
495
496 struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
497 if (seen_bbo == NULL) {
498 anv_batch_bo_destroy(new_bbo, cmd_buffer);
499 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
500 }
501 *seen_bbo = new_bbo;
502
503 cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
504
505 list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
506
507 anv_batch_bo_start(new_bbo, batch, GEN8_MI_BATCH_BUFFER_START_length * 4);
508
509 return VK_SUCCESS;
510 }
511
512 static VkResult
513 anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
514 {
515 struct anv_cmd_buffer *cmd_buffer = _data;
516 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
517
518 anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
519 GEN8_MI_BATCH_BUFFER_START_length * 4);
520
521 return VK_SUCCESS;
522 }
523
524 /** Allocate a binding table
525 *
526 * This function allocates a binding table. This is a bit more complicated
527 * than one would think due to a combination of Vulkan driver design and some
528 * unfortunate hardware restrictions.
529 *
530 * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
531 * the binding table pointer which means that all binding tables need to live
532 * in the bottom 64k of surface state base address. The way the GL driver has
533 * classically dealt with this restriction is to emit all surface states
534 * on-the-fly into the batch and have a batch buffer smaller than 64k. This
535 * isn't really an option in Vulkan for a couple of reasons:
536 *
537 * 1) In Vulkan, we have growing (or chaining) batches so surface states have
538 * to live in their own buffer and we have to be able to re-emit
539 * STATE_BASE_ADDRESS as needed which requires a full pipeline stall. In
540 * order to avoid emitting STATE_BASE_ADDRESS any more often than needed
541 * (it's not that hard to hit 64k of just binding tables), we allocate
542 * surface state objects up-front when VkImageView is created. In order
543 * for this to work, surface state objects need to be allocated from a
544 * global buffer.
545 *
546 * 2) We tried to design the surface state system in such a way that it's
547 * already ready for bindless texturing. The way bindless texturing works
548 * on our hardware is that you have a big pool of surface state objects
549 * (with its own state base address) and the bindless handles are simply
550 * offsets into that pool. With the architecture we chose, we already
551 * have that pool and it's exactly the same pool that we use for regular
552 * surface states so we should already be ready for bindless.
553 *
554 * 3) For render targets, we need to be able to fill out the surface states
555 * later in vkCmdBeginRenderPass so that we can assign clear colors
556 * correctly. One way to do this would be to just create the surface
557 * state data and then repeatedly copy it into the surface state BO every
558 * time we have to re-emit STATE_BASE_ADDRESS. While this works, it's
559 * rather annoying, and it's much simpler to just allocate them up-front
560 * and re-use them for the entire render pass.
561 *
562 * While none of these are technically blockers for emitting state on the fly
563 * like we do in GL, the ability to have a single surface state pool
564 * greatly simplifies things. Unfortunately, it comes at a cost...
565 *
566 * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
567 * place the binding tables just anywhere in surface state base address.
568 * Because 64k isn't a whole lot of space, we can't simply restrict the
569 * surface state buffer to 64k; we have to be more clever. The solution we've
570 * chosen is to have a block pool with a maximum size of 2G that starts at
571 * zero and grows in both directions. All surface states are allocated from
572 * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
573 * binding tables from the bottom of the pool (negative offsets). Every time
574 * we allocate a new binding table block, we set surface state base address to
575 * point to the bottom of the binding table block. This way all of the
576 * binding tables in the block are in the bottom 64k of surface state base
577 * address. When we fill out the binding table, we add the distance between
578 * the bottom of our binding table block and zero of the block pool to the
579 * surface state offsets so that they are correct relative to our new surface
580 * state base address at the bottom of the binding table block.
581 *
582 * \see adjust_relocations_from_state_pool()
583 * \see adjust_relocations_to_state_pool()
584 *
585 * \param[in] entries The number of surface state entries the binding
586 * table should be able to hold.
587 *
588 * \param[out] state_offset The offset from surface state base address
589 * where the surface states live. This must be
590 * added to the surface state offset when it is
591 * written into the binding table entry.
592 *
593 * \return An anv_state representing the binding table
594 */
595 struct anv_state
596 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
597 uint32_t entries, uint32_t *state_offset)
598 {
599 struct anv_block_pool *block_pool =
600 &cmd_buffer->device->surface_state_block_pool;
601 int32_t *bt_block = u_vector_head(&cmd_buffer->bt_blocks);
602 struct anv_state state;
603
604 state.alloc_size = align_u32(entries * 4, 32);
605
606 if (cmd_buffer->bt_next + state.alloc_size > block_pool->block_size)
607 return (struct anv_state) { 0 };
608
609 state.offset = cmd_buffer->bt_next;
610 state.map = block_pool->map + *bt_block + state.offset;
611
612 cmd_buffer->bt_next += state.alloc_size;
613
614 assert(*bt_block < 0);
615 *state_offset = -(*bt_block);
616
617 return state;
618 }
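/* A worked example of the offset math above, using hypothetical numbers:
 * suppose the current binding table block was allocated from the back of the
 * pool at *bt_block == -4096 and a surface state lives at +256 relative to
 * the pool's zero point.  Surface state base address points at the bottom of
 * the binding table block, so the value the caller must write into the
 * binding table entry is 256 + *state_offset == 256 + 4096 == 4352, the
 * surface state's distance from the new surface state base address.
 */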
619
620 struct anv_state
621 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
622 {
623 struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
624 return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
625 isl_dev->ss.size, isl_dev->ss.align);
626 }
627
628 struct anv_state
629 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
630 uint32_t size, uint32_t alignment)
631 {
632 return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
633 size, alignment);
634 }
635
636 VkResult
637 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
638 {
639 struct anv_block_pool *block_pool =
640 &cmd_buffer->device->surface_state_block_pool;
641
642 int32_t *offset = u_vector_add(&cmd_buffer->bt_blocks);
643 if (offset == NULL)
644 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
645
646 *offset = anv_block_pool_alloc_back(block_pool);
647 cmd_buffer->bt_next = 0;
648
649 return VK_SUCCESS;
650 }
651
652 VkResult
653 anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
654 {
655 struct anv_batch_bo *batch_bo;
656 VkResult result;
657
658 list_inithead(&cmd_buffer->batch_bos);
659
660 result = anv_batch_bo_create(cmd_buffer, &batch_bo);
661 if (result != VK_SUCCESS)
662 return result;
663
664 list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
665
666 cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
667 cmd_buffer->batch.user_data = cmd_buffer;
668
669 if (cmd_buffer->device->can_chain_batches) {
670 cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
671 } else {
672 cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
673 }
674
675 anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
676 GEN8_MI_BATCH_BUFFER_START_length * 4);
677
678 int success = u_vector_init(&cmd_buffer->seen_bbos,
679 sizeof(struct anv_bo *),
680 8 * sizeof(struct anv_bo *));
681 if (!success)
682 goto fail_batch_bo;
683
684 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
685
686 success = u_vector_init(&cmd_buffer->bt_blocks, sizeof(int32_t),
687 8 * sizeof(int32_t));
688 if (!success)
689 goto fail_seen_bbos;
690
691 result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
692 &cmd_buffer->pool->alloc);
693 if (result != VK_SUCCESS)
694 goto fail_bt_blocks;
695 cmd_buffer->last_ss_pool_center = 0;
696
697 anv_cmd_buffer_new_binding_table_block(cmd_buffer);
698
699 return VK_SUCCESS;
700
701 fail_bt_blocks:
702 u_vector_finish(&cmd_buffer->bt_blocks);
703 fail_seen_bbos:
704 u_vector_finish(&cmd_buffer->seen_bbos);
705 fail_batch_bo:
706 anv_batch_bo_destroy(batch_bo, cmd_buffer);
707
708 return result;
709 }
710
711 void
712 anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
713 {
714 int32_t *bt_block;
715 u_vector_foreach(bt_block, &cmd_buffer->bt_blocks) {
716 anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
717 *bt_block);
718 }
719 u_vector_finish(&cmd_buffer->bt_blocks);
720
721 anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
722
723 u_vector_finish(&cmd_buffer->seen_bbos);
724
725 /* Destroy all of the batch buffers */
726 list_for_each_entry_safe(struct anv_batch_bo, bbo,
727 &cmd_buffer->batch_bos, link) {
728 anv_batch_bo_destroy(bbo, cmd_buffer);
729 }
730 }
731
732 void
733 anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
734 {
735 /* Delete all but the first batch bo */
736 assert(!list_empty(&cmd_buffer->batch_bos));
737 while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
738 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
739 list_del(&bbo->link);
740 anv_batch_bo_destroy(bbo, cmd_buffer);
741 }
742 assert(!list_empty(&cmd_buffer->batch_bos));
743
744 anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
745 &cmd_buffer->batch,
746 GEN8_MI_BATCH_BUFFER_START_length * 4);
747
748 while (u_vector_length(&cmd_buffer->bt_blocks) > 1) {
749 int32_t *bt_block = u_vector_remove(&cmd_buffer->bt_blocks);
750 anv_block_pool_free(&cmd_buffer->device->surface_state_block_pool,
751 *bt_block);
752 }
753 assert(u_vector_length(&cmd_buffer->bt_blocks) == 1);
754 cmd_buffer->bt_next = 0;
755
756 cmd_buffer->surface_relocs.num_relocs = 0;
757 cmd_buffer->last_ss_pool_center = 0;
758
759 /* Reset the list of seen buffers */
760 cmd_buffer->seen_bbos.head = 0;
761 cmd_buffer->seen_bbos.tail = 0;
762
763 *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) =
764 anv_cmd_buffer_current_batch_bo(cmd_buffer);
765 }
766
767 void
768 anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
769 {
770 struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
771
772 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
773 /* When we start a batch buffer, we subtract a certain amount of
774 * padding from the end to ensure that we always have room to emit a
775 * BATCH_BUFFER_START to chain to the next BO. We need to remove
776 * that padding before we end the batch; otherwise, we may end up
777 * with our BATCH_BUFFER_END in another BO.
778 */
779 cmd_buffer->batch.end += GEN8_MI_BATCH_BUFFER_START_length * 4;
780 assert(cmd_buffer->batch.end == batch_bo->bo.map + batch_bo->bo.size);
781
782 anv_batch_emit(&cmd_buffer->batch, GEN7_MI_BATCH_BUFFER_END, bbe);
783
784 /* Round batch up to an even number of dwords. */
785 if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
786 anv_batch_emit(&cmd_buffer->batch, GEN7_MI_NOOP, noop);
787
788 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
789 }
790
791 anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
792
793 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
794 /* If this is a secondary command buffer, we need to determine the
795 * mode in which it will be executed with vkCmdExecuteCommands. We
796 * determine this statically here so that this stays in sync with the
797 * actual ExecuteCommands implementation.
798 */
799 if (!cmd_buffer->device->can_chain_batches) {
800 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
801 } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
802 (batch_bo->length < ANV_CMD_BUFFER_BATCH_SIZE / 2)) {
803 /* If the secondary has exactly one batch buffer in its list *and*
804 * that batch buffer is less than half of the maximum size, we're
805 * probably better off simply copying it into our batch.
806 */
807 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
808 } else if (!(cmd_buffer->usage_flags &
809 VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
810 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
811
812 /* When we chain, we need to add an MI_BATCH_BUFFER_START command
813 * with its relocation. In order to handle this we'll increment here
814 * so we can unconditionally decrement right before adding the
815 * MI_BATCH_BUFFER_START command.
816 */
817 batch_bo->relocs.num_relocs++;
818 cmd_buffer->batch.next += GEN8_MI_BATCH_BUFFER_START_length * 4;
819 } else {
820 cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
821 }
822 }
823 }
824
825 static inline VkResult
826 anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
827 struct list_head *list)
828 {
829 list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
830 struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
831 if (bbo_ptr == NULL)
832 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
833
834 *bbo_ptr = bbo;
835 }
836
837 return VK_SUCCESS;
838 }
839
840 void
841 anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
842 struct anv_cmd_buffer *secondary)
843 {
844 switch (secondary->exec_mode) {
845 case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
846 anv_batch_emit_batch(&primary->batch, &secondary->batch);
847 break;
848 case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
849 struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
850 unsigned length = secondary->batch.end - secondary->batch.start;
851 anv_batch_bo_grow(primary, bbo, &primary->batch, length,
852 GEN8_MI_BATCH_BUFFER_START_length * 4);
853 anv_batch_emit_batch(&primary->batch, &secondary->batch);
854 break;
855 }
856 case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
857 struct anv_batch_bo *first_bbo =
858 list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
859 struct anv_batch_bo *last_bbo =
860 list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
861
862 emit_batch_buffer_start(primary, &first_bbo->bo, 0);
863
864 struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
865 assert(primary->batch.start == this_bbo->bo.map);
866 uint32_t offset = primary->batch.next - primary->batch.start;
867 const uint32_t inst_size = GEN8_MI_BATCH_BUFFER_START_length * 4;
868
869 /* Roll back the previous MI_BATCH_BUFFER_START and its relocation so we
870 * can emit a new command and relocation for the current splice. In
871 * order to handle the initial-use case, we incremented next and
872 * num_relocs in end_batch_buffer() so we can always just subtract
873 * here.
874 */
875 last_bbo->relocs.num_relocs--;
876 secondary->batch.next -= inst_size;
877 emit_batch_buffer_start(secondary, &this_bbo->bo, offset);
878 anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
879
880 /* After patching up the secondary buffer, we need to clflush the
881 * modified instruction in case we're on a !llc platform. We use a
882 * little loop to handle the case where the instruction crosses a cache
883 * line boundary.
884 */
885 if (!primary->device->info.has_llc) {
886 void *inst = secondary->batch.next - inst_size;
887 void *p = (void *) (((uintptr_t) inst) & ~CACHELINE_MASK);
888 __builtin_ia32_mfence();
889 while (p < secondary->batch.next) {
890 __builtin_ia32_clflush(p);
891 p += CACHELINE_SIZE;
892 }
893 }
894 break;
895 }
896 case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
897 struct list_head copy_list;
898 VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
899 secondary,
900 &copy_list);
901 if (result != VK_SUCCESS)
902 return; /* FIXME */
903
904 anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
905
906 struct anv_batch_bo *first_bbo =
907 list_first_entry(&copy_list, struct anv_batch_bo, link);
908 struct anv_batch_bo *last_bbo =
909 list_last_entry(&copy_list, struct anv_batch_bo, link);
910
911 cmd_buffer_chain_to_batch_bo(primary, first_bbo);
912
913 list_splicetail(&copy_list, &primary->batch_bos);
914
915 anv_batch_bo_continue(last_bbo, &primary->batch,
916 GEN8_MI_BATCH_BUFFER_START_length * 4);
917 break;
918 }
919 default:
920 assert(!"Invalid execution mode");
921 }
922
923 anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
924 &secondary->surface_relocs, 0);
925 }
926
927 struct anv_execbuf {
928 struct drm_i915_gem_execbuffer2 execbuf;
929
930 struct drm_i915_gem_exec_object2 * objects;
931 uint32_t bo_count;
932 struct anv_bo ** bos;
933
934 /* Allocated length of the 'objects' and 'bos' arrays */
935 uint32_t array_length;
936 };
937
938 static void
939 anv_execbuf_init(struct anv_execbuf *exec)
940 {
941 memset(exec, 0, sizeof(*exec));
942 }
943
944 static void
945 anv_execbuf_finish(struct anv_execbuf *exec,
946 const VkAllocationCallbacks *alloc)
947 {
948 vk_free(alloc, exec->objects);
949 vk_free(alloc, exec->bos);
950 }
951
952 static VkResult
953 anv_execbuf_add_bo(struct anv_execbuf *exec,
954 struct anv_bo *bo,
955 struct anv_reloc_list *relocs,
956 const VkAllocationCallbacks *alloc)
957 {
958 struct drm_i915_gem_exec_object2 *obj = NULL;
959
960 if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
961 obj = &exec->objects[bo->index];
962
963 if (obj == NULL) {
964 /* We've never seen this one before. Add it to the list and assign
965 * an id that we can use later.
966 */
967 if (exec->bo_count >= exec->array_length) {
968 uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
969
970 struct drm_i915_gem_exec_object2 *new_objects =
971 vk_alloc(alloc, new_len * sizeof(*new_objects),
972 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
973 if (new_objects == NULL)
974 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
975
976 struct anv_bo **new_bos =
977 vk_alloc(alloc, new_len * sizeof(*new_bos),
978 8, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
979 if (new_bos == NULL) {
980 vk_free(alloc, new_objects);
981 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
982 }
983
984 if (exec->objects) {
985 memcpy(new_objects, exec->objects,
986 exec->bo_count * sizeof(*new_objects));
987 memcpy(new_bos, exec->bos,
988 exec->bo_count * sizeof(*new_bos));
989 }
990
991 vk_free(alloc, exec->objects);
992 vk_free(alloc, exec->bos);
993
994 exec->objects = new_objects;
995 exec->bos = new_bos;
996 exec->array_length = new_len;
997 }
998
999 assert(exec->bo_count < exec->array_length);
1000
1001 bo->index = exec->bo_count++;
1002 obj = &exec->objects[bo->index];
1003 exec->bos[bo->index] = bo;
1004
1005 obj->handle = bo->gem_handle;
1006 obj->relocation_count = 0;
1007 obj->relocs_ptr = 0;
1008 obj->alignment = 0;
1009 obj->offset = bo->offset;
1010 obj->flags = bo->is_winsys_bo ? EXEC_OBJECT_WRITE : 0;
1011 obj->rsvd1 = 0;
1012 obj->rsvd2 = 0;
1013 }
1014
1015 if (relocs != NULL && obj->relocation_count == 0) {
1016 /* This is the first time we've ever seen a list of relocations for
1017 * this BO. Go ahead and set the relocations and then walk the list
1018 * of relocations and add them all.
1019 */
1020 obj->relocation_count = relocs->num_relocs;
1021 obj->relocs_ptr = (uintptr_t) relocs->relocs;
1022
1023 for (size_t i = 0; i < relocs->num_relocs; i++) {
1024 /* A quick sanity check on relocations */
1025 assert(relocs->relocs[i].offset < bo->size);
1026 anv_execbuf_add_bo(exec, relocs->reloc_bos[i], NULL, alloc);
1027 }
1028 }
1029
1030 return VK_SUCCESS;
1031 }
1032
1033 static void
1034 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
1035 struct anv_reloc_list *list)
1036 {
1037 for (size_t i = 0; i < list->num_relocs; i++)
1038 list->relocs[i].target_handle = list->reloc_bos[i]->index;
1039 }
1040
1041 static void
1042 write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush)
1043 {
1044 unsigned reloc_size = 0;
1045 if (device->info.gen >= 8) {
1046 /* From the Broadwell PRM Vol. 2a, MI_LOAD_REGISTER_MEM::MemoryAddress:
1047 *
1048 * "This field specifies the address of the memory location where the
1049 * register value specified in the DWord above will read from. The
1050 * address specifies the DWord location of the data. Range =
1051 * GraphicsVirtualAddress[63:2] for a DWord register GraphicsAddress
1052 * [63:48] are ignored by the HW and assumed to be in correct
1053 * canonical form [63:48] == [47]."
1054 */
1055 const int shift = 63 - 47;
1056 reloc_size = sizeof(uint64_t);
1057 *(uint64_t *)p = (((int64_t)v) << shift) >> shift;
1058 } else {
1059 reloc_size = sizeof(uint32_t);
1060 *(uint32_t *)p = v;
1061 }
1062
1063 if (flush && !device->info.has_llc)
1064 anv_clflush_range(p, reloc_size);
1065 }
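/* A worked example of the sign extension above, using a hypothetical 48-bit
 * address (illustrative only):
 *
 *    v                  = 0x00009abc12345000   (bit 47 set)
 *    (int64_t)v << 16   = 0x9abc123450000000
 *    ... >> 16          = 0xffff9abc12345000   (bits 63:48 now copy bit 47)
 *
 * An address with bit 47 clear is written back unchanged, so addresses below
 * 128 TiB are unaffected.
 */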
1066
1067 static void
1068 adjust_relocations_from_state_pool(struct anv_block_pool *pool,
1069 struct anv_reloc_list *relocs,
1070 uint32_t last_pool_center_bo_offset)
1071 {
1072 assert(last_pool_center_bo_offset <= pool->center_bo_offset);
1073 uint32_t delta = pool->center_bo_offset - last_pool_center_bo_offset;
1074
1075 for (size_t i = 0; i < relocs->num_relocs; i++) {
1076 /* All of the relocations from this block pool to other BO's should
1077 * have been emitted relative to the surface block pool center. We
1078 * need to add the center offset to make them relative to the
1079 * beginning of the actual GEM bo.
1080 */
1081 relocs->relocs[i].offset += delta;
1082 }
1083 }
1084
1085 static void
1086 adjust_relocations_to_state_pool(struct anv_block_pool *pool,
1087 struct anv_bo *from_bo,
1088 struct anv_reloc_list *relocs,
1089 uint32_t last_pool_center_bo_offset)
1090 {
1091 assert(last_pool_center_bo_offset <= pool->center_bo_offset);
1092 uint32_t delta = pool->center_bo_offset - last_pool_center_bo_offset;
1093
1094 /* When we initially emit relocations into a block pool, we don't
1095 * actually know what the final center_bo_offset will be so we just emit
1096 * it as if center_bo_offset == 0. Now that we know what the center
1097 * offset is, we need to walk the list of relocations and adjust any
1098 * relocations that point to the pool bo with the correct offset.
1099 */
1100 for (size_t i = 0; i < relocs->num_relocs; i++) {
1101 if (relocs->reloc_bos[i] == &pool->bo) {
1102 /* Adjust the delta value in the relocation to correctly
1103 * correspond to the new delta. Initially, this value may have
1104 * been negative (if treated as unsigned), but we trust in
1105 * uint32_t roll-over to fix that for us at this point.
1106 */
1107 relocs->relocs[i].delta += delta;
1108
1109 /* Since the delta has changed, we need to update the actual
1110 * relocated value with the new presumed value. This function
1111 * should only be called on batch buffers, so we know it isn't in
1112 * use by the GPU at the moment.
1113 */
1114 assert(relocs->relocs[i].offset < from_bo->size);
1115 write_reloc(pool->device, from_bo->map + relocs->relocs[i].offset,
1116 relocs->relocs[i].presumed_offset +
1117 relocs->relocs[i].delta, false);
1118 }
1119 }
1120 }
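/* A small worked example of the adjustment above, with hypothetical numbers:
 * a relocation into the state pool was emitted assuming center_bo_offset == 0
 * with delta == -8192 (stored via uint32_t wrap-around because it points at a
 * binding table block below the center).  If the pool has since grown
 * backwards so that center_bo_offset == 65536 while
 * last_pool_center_bo_offset == 0, then delta += 65536 yields 57344, which is
 * that same block's offset from the start of the re-centered GEM BO, and the
 * batch is patched with presumed_offset + delta to match.
 */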
1121
1122 static void
1123 anv_reloc_list_apply(struct anv_device *device,
1124 struct anv_reloc_list *list,
1125 struct anv_bo *bo,
1126 bool always_relocate)
1127 {
1128 for (size_t i = 0; i < list->num_relocs; i++) {
1129 struct anv_bo *target_bo = list->reloc_bos[i];
1130 if (list->relocs[i].presumed_offset == target_bo->offset &&
1131 !always_relocate)
1132 continue;
1133
1134 void *p = bo->map + list->relocs[i].offset;
1135 write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
1136 list->relocs[i].presumed_offset = target_bo->offset;
1137 }
1138 }
1139
1140 /**
1141 * This function applies the relocations for a command buffer and writes the
1142 * actual addresses into the buffers as per what we were told by the kernel on
1143 * the previous execbuf2 call. This should be safe to do because, for each
1144 * relocated address, we have two cases:
1145 *
1146 * 1) The target BO is inactive (as seen by the kernel). In this case, it is
1147 * not in use by the GPU so updating the address is 100% ok. It won't be
1148 * in-use by the GPU (from our context) again until the next execbuf2
1149 * happens. If the kernel decides to move it in the next execbuf2, it
1150 * will have to do the relocations itself, but that's ok because it should
1151 * have all of the information needed to do so.
1152 *
1153 * 2) The target BO is active (as seen by the kernel). In this case, it
1154 * hasn't moved since the last execbuffer2 call because GTT shuffling
1155 * *only* happens when the BO is idle. (From our perspective, it only
1156 * happens inside the execbuffer2 ioctl, but the shuffling may be
1157 * triggered by another ioctl, with full-ppgtt this is limited to only
1158 * execbuffer2 ioctls on the same context, or memory pressure.) Since the
1159 * target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
1160 * address and the relocated value we are writing into the BO will be the
1161 * same as the value that is already there.
1162 *
1163 * There is also a possibility that the target BO is active but the exact
1164 * RENDER_SURFACE_STATE object we are writing the relocation into isn't in
1165 * use. In this case, the address currently in the RENDER_SURFACE_STATE
1166 * may be stale but it's still safe to write the relocation because that
1167 * particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
1168 * won't be until the next execbuf2 call.
1169 *
1170 * By doing relocations on the CPU, we can tell the kernel that it doesn't
1171 * need to bother. We want to do this because the surface state buffer is
1172 * used by every command buffer so, if the kernel does the relocations, it
1173 * will always be busy and the kernel will always stall. This is also
1174 * probably the fastest mechanism for doing relocations since the kernel would
1175 * have to make a full copy of all the relocation lists.
1176 */
1177 static bool
1178 relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
1179 struct anv_execbuf *exec)
1180 {
1181 static int userspace_relocs = -1;
1182 if (userspace_relocs < 0)
1183 userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
1184 if (!userspace_relocs)
1185 return false;
1186
1187 /* First, we have to check to see whether or not we can even do the
1188 * relocation. New buffers which have never been submitted to the kernel
1189 * don't have a valid offset so we need to let the kernel do relocations so
1190 * that we can get offsets for them. On future execbuf2 calls, those
1191 * buffers will have offsets and we will be able to skip relocating.
1192 * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
1193 */
1194 for (uint32_t i = 0; i < exec->bo_count; i++) {
1195 if (exec->bos[i]->offset == (uint64_t)-1)
1196 return false;
1197 }
1198
1199 /* Since surface states are shared between command buffers and we don't
1200 * know what order they will be submitted to the kernel, we don't know
1201 * what address is actually written in the surface state object at any
1202 * given time. The only option is to always relocate them.
1203 */
1204 anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
1205 &cmd_buffer->device->surface_state_block_pool.bo,
1206 true /* always relocate surface states */);
1207
1208 /* Since we own all of the batch buffers, we know what values are stored
1209 * in the relocated addresses and only have to update them if the offsets
1210 * have changed.
1211 */
1212 struct anv_batch_bo **bbo;
1213 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1214 anv_reloc_list_apply(cmd_buffer->device,
1215 &(*bbo)->relocs, &(*bbo)->bo, false);
1216 }
1217
1218 for (uint32_t i = 0; i < exec->bo_count; i++)
1219 exec->objects[i].offset = exec->bos[i]->offset;
1220
1221 return true;
1222 }
1223
1224 VkResult
1225 anv_cmd_buffer_execbuf(struct anv_device *device,
1226 struct anv_cmd_buffer *cmd_buffer)
1227 {
1228 struct anv_batch *batch = &cmd_buffer->batch;
1229 struct anv_block_pool *ss_pool =
1230 &cmd_buffer->device->surface_state_block_pool;
1231
1232 struct anv_execbuf execbuf;
1233 anv_execbuf_init(&execbuf);
1234
1235 adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
1236 cmd_buffer->last_ss_pool_center);
1237 anv_execbuf_add_bo(&execbuf, &ss_pool->bo, &cmd_buffer->surface_relocs,
1238 &cmd_buffer->pool->alloc);
1239
1240 /* First, we walk over all of the bos we've seen and add them and their
1241 * relocations to the validate list.
1242 */
1243 struct anv_batch_bo **bbo;
1244 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1245 adjust_relocations_to_state_pool(ss_pool, &(*bbo)->bo, &(*bbo)->relocs,
1246 cmd_buffer->last_ss_pool_center);
1247
1248 anv_execbuf_add_bo(&execbuf, &(*bbo)->bo, &(*bbo)->relocs,
1249 &cmd_buffer->pool->alloc);
1250 }
1251
1252 /* Now that we've adjusted all of the surface state relocations, we need to
1253 * record the surface state pool center so future executions of the command
1254 * buffer can adjust correctly.
1255 */
1256 cmd_buffer->last_ss_pool_center = ss_pool->center_bo_offset;
1257
1258 struct anv_batch_bo *first_batch_bo =
1259 list_first_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
1260
1261 /* The kernel requires that the last entry in the validation list be the
1262 * batch buffer to execute. We can simply swap the element
1263 * corresponding to the first batch_bo in the chain with the last
1264 * element in the list.
1265 */
1266 if (first_batch_bo->bo.index != execbuf.bo_count - 1) {
1267 uint32_t idx = first_batch_bo->bo.index;
1268 uint32_t last_idx = execbuf.bo_count - 1;
1269
1270 struct drm_i915_gem_exec_object2 tmp_obj = execbuf.objects[idx];
1271 assert(execbuf.bos[idx] == &first_batch_bo->bo);
1272
1273 execbuf.objects[idx] = execbuf.objects[last_idx];
1274 execbuf.bos[idx] = execbuf.bos[last_idx];
1275 execbuf.bos[idx]->index = idx;
1276
1277 execbuf.objects[last_idx] = tmp_obj;
1278 execbuf.bos[last_idx] = &first_batch_bo->bo;
1279 first_batch_bo->bo.index = last_idx;
1280 }
1281
1282 /* Now we go through and fixup all of the relocation lists to point to
1283 * the correct indices in the object array. We have to do this after we
1284 * reorder the list above as some of the indices may have changed.
1285 */
1286 u_vector_foreach(bbo, &cmd_buffer->seen_bbos)
1287 anv_cmd_buffer_process_relocs(cmd_buffer, &(*bbo)->relocs);
1288
1289 anv_cmd_buffer_process_relocs(cmd_buffer, &cmd_buffer->surface_relocs);
1290
1291 if (!cmd_buffer->device->info.has_llc) {
1292 __builtin_ia32_mfence();
1293 u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1294 for (uint32_t i = 0; i < (*bbo)->length; i += CACHELINE_SIZE)
1295 __builtin_ia32_clflush((*bbo)->bo.map + i);
1296 }
1297 }
1298
1299 execbuf.execbuf = (struct drm_i915_gem_execbuffer2) {
1300 .buffers_ptr = (uintptr_t) execbuf.objects,
1301 .buffer_count = execbuf.bo_count,
1302 .batch_start_offset = 0,
1303 .batch_len = batch->next - batch->start,
1304 .cliprects_ptr = 0,
1305 .num_cliprects = 0,
1306 .DR1 = 0,
1307 .DR4 = 0,
1308 .flags = I915_EXEC_HANDLE_LUT | I915_EXEC_RENDER |
1309 I915_EXEC_CONSTANTS_REL_GENERAL,
1310 .rsvd1 = cmd_buffer->device->context_id,
1311 .rsvd2 = 0,
1312 };
1313
1314 if (relocate_cmd_buffer(cmd_buffer, &execbuf)) {
1315 /* If we were able to successfully relocate everything, tell the kernel
1316 * that it can skip doing relocations. The requirement for using
1317 * NO_RELOC is:
1318 *
1319 * 1) The addresses written in the objects must match the corresponding
1320 * reloc.presumed_offset which in turn must match the corresponding
1321 * execobject.offset.
1322 *
1323 * 2) To avoid stalling, execobject.offset should match the current
1324 * address of that object within the active context.
1325 *
1326 * In order to satisfy all of the invariants that make userspace
1327 * relocations safe (see relocate_cmd_buffer()), we need to
1328 * further ensure that the addresses we use match those used by the
1329 * kernel for the most recent execbuf2.
1330 *
1331 * The kernel may still choose to do relocations anyway if something has
1332 * moved in the GTT. In this case, the relocation list still needs to be
1333 * valid. All relocations on the batch buffers are already valid and
1334 * kept up-to-date. For surface state relocations, by applying the
1335 * relocations in relocate_cmd_buffer, we ensured that the address in
1336 * the RENDER_SURFACE_STATE matches presumed_offset, so it should be
1337 * safe for the kernel to relocate them as needed.
1338 */
1339 execbuf.execbuf.flags |= I915_EXEC_NO_RELOC;
1340 } else {
1341 /* In the case where we fall back to doing kernel relocations, we need
1342 * to ensure that the relocation list is valid. All relocations on the
1343 * batch buffers are already valid and kept up-to-date. Since surface
1344 * states are shared between command buffers and we don't know what
1345 * order they will be submitted to the kernel, we don't know what
1346 * address is actually written in the surface state object at any given
1347 * time. The only option is to set a bogus presumed offset and let the
1348 * kernel relocate them.
1349 */
1350 for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
1351 cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
1352 }
1353
1354 VkResult result = anv_device_execbuf(device, &execbuf.execbuf, execbuf.bos);
1355
1356 anv_execbuf_finish(&execbuf, &cmd_buffer->pool->alloc);
1357
1358 return result;
1359 }