gallium/radeon: clean up (domain, flags) <-> (slab heap) translations
src/gallium/winsys/amdgpu/drm/amdgpu_bo.c (mesa.git)
1 /*
2 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
3 * Copyright © 2015 Advanced Micro Devices, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
16 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
18 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
25 * of the Software.
26 */
27 /*
28 * Authors:
29 * Marek Olšák <maraeo@gmail.com>
30 */
31
32 #include "amdgpu_cs.h"
33
34 #include "os/os_time.h"
35 #include "state_tracker/drm_driver.h"
36 #include <amdgpu_drm.h>
37 #include <xf86drm.h>
38 #include <stdio.h>
39 #include <inttypes.h>
40
41 /* Set to 1 for verbose output showing committed sparse buffer ranges. */
42 #define DEBUG_SPARSE_COMMITS 0
43
44 struct amdgpu_sparse_backing_chunk {
45 uint32_t begin, end;
46 };
47
48 static struct pb_buffer *
49 amdgpu_bo_create(struct radeon_winsys *rws,
50 uint64_t size,
51 unsigned alignment,
52 enum radeon_bo_domain domain,
53 enum radeon_bo_flag flags);
54
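/* Wait until the buffer is idle for the requested usage.
 * A timeout of 0 only polls: it fails if an ioctl using the buffer is still
 * in flight or any attached fence is busy. Shared buffers fall back to the
 * kernel wait, because user fences are only visible within this process.
 */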
55 static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
56 enum radeon_bo_usage usage)
57 {
58 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
59 struct amdgpu_winsys *ws = bo->ws;
60 int64_t abs_timeout;
61
62 if (timeout == 0) {
63 if (p_atomic_read(&bo->num_active_ioctls))
64 return false;
65
66 } else {
67 abs_timeout = os_time_get_absolute_timeout(timeout);
68
69 /* Wait if any ioctl is being submitted with this buffer. */
70 if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
71 return false;
72 }
73
74 if (bo->is_shared) {
75 /* We can't use user fences for shared buffers, because user fences
76 * are local to this process only. If we want to wait for all buffer
77 * uses in all processes, we have to use amdgpu_bo_wait_for_idle.
78 */
79 bool buffer_busy = true;
80 int r;
81
82 r = amdgpu_bo_wait_for_idle(bo->bo, timeout, &buffer_busy);
83 if (r)
84 fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__,
85 r);
86 return !buffer_busy;
87 }
88
89 if (timeout == 0) {
90 unsigned idle_fences;
91 bool buffer_idle;
92
93 mtx_lock(&ws->bo_fence_lock);
94
95 for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
96 if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
97 break;
98 }
99
100 /* Release the idle fences to avoid checking them again later. */
101 for (unsigned i = 0; i < idle_fences; ++i)
102 amdgpu_fence_reference(&bo->fences[i], NULL);
103
104 memmove(&bo->fences[0], &bo->fences[idle_fences],
105 (bo->num_fences - idle_fences) * sizeof(*bo->fences));
106 bo->num_fences -= idle_fences;
107
108 buffer_idle = !bo->num_fences;
109 mtx_unlock(&ws->bo_fence_lock);
110
111 return buffer_idle;
112 } else {
113 bool buffer_idle = true;
114
115 mtx_lock(&ws->bo_fence_lock);
116 while (bo->num_fences && buffer_idle) {
117 struct pipe_fence_handle *fence = NULL;
118 bool fence_idle = false;
119
120 amdgpu_fence_reference(&fence, bo->fences[0]);
121
122 /* Wait for the fence. */
123 mtx_unlock(&ws->bo_fence_lock);
124 if (amdgpu_fence_wait(fence, abs_timeout, true))
125 fence_idle = true;
126 else
127 buffer_idle = false;
128 mtx_lock(&ws->bo_fence_lock);
129
130 /* Release an idle fence to avoid checking it again later, keeping in
131 * mind that the fence array may have been modified by other threads.
132 */
133 if (fence_idle && bo->num_fences && bo->fences[0] == fence) {
134 amdgpu_fence_reference(&bo->fences[0], NULL);
135 memmove(&bo->fences[0], &bo->fences[1],
136 (bo->num_fences - 1) * sizeof(*bo->fences));
137 bo->num_fences--;
138 }
139
140 amdgpu_fence_reference(&fence, NULL);
141 }
142 mtx_unlock(&ws->bo_fence_lock);
143
144 return buffer_idle;
145 }
146 }
147
148 static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
149 struct pb_buffer *buf)
150 {
151 return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
152 }
153
154 static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
155 {
156 for (unsigned i = 0; i < bo->num_fences; ++i)
157 amdgpu_fence_reference(&bo->fences[i], NULL);
158
159 FREE(bo->fences);
160 bo->num_fences = 0;
161 bo->max_fences = 0;
162 }
163
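/* Destroy a real (non-slab, non-sparse) buffer: unlink it from the global
 * BO list, unmap and release its VA range, free the kernel BO, and update
 * the memory accounting.
 */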
164 void amdgpu_bo_destroy(struct pb_buffer *_buf)
165 {
166 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
167
168 assert(bo->bo && "must not be called for slab entries");
169
170 mtx_lock(&bo->ws->global_bo_list_lock);
171 LIST_DEL(&bo->u.real.global_list_item);
172 bo->ws->num_buffers--;
173 mtx_unlock(&bo->ws->global_bo_list_lock);
174
175 amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
176 amdgpu_va_range_free(bo->u.real.va_handle);
177 amdgpu_bo_free(bo->bo);
178
179 amdgpu_bo_remove_fences(bo);
180
181 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
182 bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->info.gart_page_size);
183 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
184 bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->info.gart_page_size);
185
186 if (bo->u.real.map_count >= 1) {
187 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
188 bo->ws->mapped_vram -= bo->base.size;
189 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
190 bo->ws->mapped_gtt -= bo->base.size;
191 bo->ws->num_mapped_buffers--;
192 }
193
194 FREE(bo);
195 }
196
197 static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
198 {
199 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
200
201 assert(bo->bo); /* slab buffers have a separate vtbl */
202
203 if (bo->u.real.use_reusable_pool)
204 pb_cache_add_buffer(&bo->u.real.cache_entry);
205 else
206 amdgpu_bo_destroy(_buf);
207 }
208
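/* CPU-map a buffer. Unless PIPE_TRANSFER_UNSYNCHRONIZED is set, this flushes
 * and/or waits for any command stream that conflicts with the requested
 * access (read-only maps only wait for GPU writes). Slab entries are mapped
 * through their parent buffer, and user-pointer buffers simply return the
 * user pointer.
 */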
209 static void *amdgpu_bo_map(struct pb_buffer *buf,
210 struct radeon_winsys_cs *rcs,
211 enum pipe_transfer_usage usage)
212 {
213 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
214 struct amdgpu_winsys_bo *real;
215 struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
216 int r;
217 void *cpu = NULL;
218 uint64_t offset = 0;
219
220 assert(!bo->sparse);
221
222 /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
223 if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
224 /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
225 if (usage & PIPE_TRANSFER_DONTBLOCK) {
226 if (!(usage & PIPE_TRANSFER_WRITE)) {
227 /* Mapping for read.
228 *
229 * Since we are mapping for read, we don't need to wait
230 * if the GPU is using the buffer for read too
231 * (neither one is changing it).
232 *
233 * Only check whether the buffer is being used for write. */
234 if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
235 RADEON_USAGE_WRITE)) {
236 cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
237 return NULL;
238 }
239
240 if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
241 RADEON_USAGE_WRITE)) {
242 return NULL;
243 }
244 } else {
245 if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
246 cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
247 return NULL;
248 }
249
250 if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
251 RADEON_USAGE_READWRITE)) {
252 return NULL;
253 }
254 }
255 } else {
256 uint64_t time = os_time_get_nano();
257
258 if (!(usage & PIPE_TRANSFER_WRITE)) {
259 /* Mapping for read.
260 *
261 * Since we are mapping for read, we don't need to wait
262 * if the GPU is using the buffer for read too
263 * (neither one is changing it).
264 *
265 * Only check whether the buffer is being used for write. */
266 if (cs) {
267 if (amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
268 RADEON_USAGE_WRITE)) {
269 cs->flush_cs(cs->flush_data, 0, NULL);
270 } else {
271 /* Try to avoid busy-waiting in amdgpu_bo_wait. */
272 if (p_atomic_read(&bo->num_active_ioctls))
273 amdgpu_cs_sync_flush(rcs);
274 }
275 }
276
277 amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
278 RADEON_USAGE_WRITE);
279 } else {
280 /* Mapping for write. */
281 if (cs) {
282 if (amdgpu_bo_is_referenced_by_cs(cs, bo)) {
283 cs->flush_cs(cs->flush_data, 0, NULL);
284 } else {
285 /* Try to avoid busy-waiting in amdgpu_bo_wait. */
286 if (p_atomic_read(&bo->num_active_ioctls))
287 amdgpu_cs_sync_flush(rcs);
288 }
289 }
290
291 amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
292 RADEON_USAGE_READWRITE);
293 }
294
295 bo->ws->buffer_wait_time += os_time_get_nano() - time;
296 }
297 }
298
299 /* If the buffer is created from user memory, return the user pointer. */
300 if (bo->user_ptr)
301 return bo->user_ptr;
302
303 if (bo->bo) {
304 real = bo;
305 } else {
306 real = bo->u.slab.real;
307 offset = bo->va - real->va;
308 }
309
310 r = amdgpu_bo_cpu_map(real->bo, &cpu);
311 if (r) {
312 /* Clear the cache and try again. */
313 pb_cache_release_all_buffers(&real->ws->bo_cache);
314 r = amdgpu_bo_cpu_map(real->bo, &cpu);
315 if (r)
316 return NULL;
317 }
318
319 if (p_atomic_inc_return(&real->u.real.map_count) == 1) {
320 if (real->initial_domain & RADEON_DOMAIN_VRAM)
321 real->ws->mapped_vram += real->base.size;
322 else if (real->initial_domain & RADEON_DOMAIN_GTT)
323 real->ws->mapped_gtt += real->base.size;
324 real->ws->num_mapped_buffers++;
325 }
326 return (uint8_t*)cpu + offset;
327 }
328
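/* Drop one CPU mapping; the mapped-memory statistics are updated only when
 * the last mapping of the underlying real buffer goes away.
 */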
329 static void amdgpu_bo_unmap(struct pb_buffer *buf)
330 {
331 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
332 struct amdgpu_winsys_bo *real;
333
334 assert(!bo->sparse);
335
336 if (bo->user_ptr)
337 return;
338
339 real = bo->bo ? bo : bo->u.slab.real;
340
341 if (p_atomic_dec_zero(&real->u.real.map_count)) {
342 if (real->initial_domain & RADEON_DOMAIN_VRAM)
343 real->ws->mapped_vram -= real->base.size;
344 else if (real->initial_domain & RADEON_DOMAIN_GTT)
345 real->ws->mapped_gtt -= real->base.size;
346 real->ws->num_mapped_buffers--;
347 }
348
349 amdgpu_bo_cpu_unmap(real->bo);
350 }
351
352 static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
353 amdgpu_bo_destroy_or_cache
354 /* other functions are never called */
355 };
356
357 static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
358 {
359 struct amdgpu_winsys *ws = bo->ws;
360
361 assert(bo->bo);
362
363 mtx_lock(&ws->global_bo_list_lock);
364 LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
365 ws->num_buffers++;
366 mtx_unlock(&ws->global_bo_list_lock);
367 }
368
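/* Allocate a fresh kernel BO, assign it a GPU virtual address range and wrap
 * it in an amdgpu_winsys_bo. This is the slow path taken when neither the
 * slab allocator nor the pb_cache can satisfy the request.
 */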
369 static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
370 uint64_t size,
371 unsigned alignment,
372 unsigned usage,
373 enum radeon_bo_domain initial_domain,
374 unsigned flags,
375 unsigned pb_cache_bucket)
376 {
377 struct amdgpu_bo_alloc_request request = {0};
378 amdgpu_bo_handle buf_handle;
379 uint64_t va = 0;
380 struct amdgpu_winsys_bo *bo;
381 amdgpu_va_handle va_handle;
382 unsigned va_gap_size;
383 int r;
384
385 assert(initial_domain & RADEON_DOMAIN_VRAM_GTT);
386 bo = CALLOC_STRUCT(amdgpu_winsys_bo);
387 if (!bo) {
388 return NULL;
389 }
390
391 pb_cache_init_entry(&ws->bo_cache, &bo->u.real.cache_entry, &bo->base,
392 pb_cache_bucket);
393 request.alloc_size = size;
394 request.phys_alignment = alignment;
395
396 if (initial_domain & RADEON_DOMAIN_VRAM)
397 request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
398 if (initial_domain & RADEON_DOMAIN_GTT)
399 request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
400
401 if (flags & RADEON_FLAG_NO_CPU_ACCESS)
402 request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
403 if (flags & RADEON_FLAG_GTT_WC)
404 request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
405
406 r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
407 if (r) {
408 fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
409 fprintf(stderr, "amdgpu: size : %"PRIu64" bytes\n", size);
410 fprintf(stderr, "amdgpu: alignment : %u bytes\n", alignment);
411 fprintf(stderr, "amdgpu: domains : %u\n", initial_domain);
412 goto error_bo_alloc;
413 }
414
415 va_gap_size = ws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
416 if (size > ws->info.pte_fragment_size)
417 alignment = MAX2(alignment, ws->info.pte_fragment_size);
418 r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
419 size + va_gap_size, alignment, 0, &va, &va_handle, 0);
420 if (r)
421 goto error_va_alloc;
422
423 r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
424 if (r)
425 goto error_va_map;
426
427 pipe_reference_init(&bo->base.reference, 1);
428 bo->base.alignment = alignment;
429 bo->base.usage = usage;
430 bo->base.size = size;
431 bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
432 bo->ws = ws;
433 bo->bo = buf_handle;
434 bo->va = va;
435 bo->u.real.va_handle = va_handle;
436 bo->initial_domain = initial_domain;
437 bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
438
439 if (initial_domain & RADEON_DOMAIN_VRAM)
440 ws->allocated_vram += align64(size, ws->info.gart_page_size);
441 else if (initial_domain & RADEON_DOMAIN_GTT)
442 ws->allocated_gtt += align64(size, ws->info.gart_page_size);
443
444 amdgpu_add_buffer_to_global_list(bo);
445
446 return bo;
447
448 error_va_map:
449 amdgpu_va_range_free(va_handle);
450
451 error_va_alloc:
452 amdgpu_bo_free(buf_handle);
453
454 error_bo_alloc:
455 FREE(bo);
456 return NULL;
457 }
458
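/* A buffer can be reclaimed by the cache only if no command stream still
 * references it and it is already idle.
 */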
459 bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf)
460 {
461 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
462
463 if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
464 return false;
465 }
466
467 return amdgpu_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
468 }
469
470 bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
471 {
472 struct amdgpu_winsys_bo *bo = NULL; /* fix container_of */
473 bo = container_of(entry, bo, u.slab.entry);
474
475 return amdgpu_bo_can_reclaim(&bo->base);
476 }
477
478 static void amdgpu_bo_slab_destroy(struct pb_buffer *_buf)
479 {
480 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
481
482 assert(!bo->bo);
483
484 pb_slab_free(&bo->ws->bo_slabs, &bo->u.slab.entry);
485 }
486
487 static const struct pb_vtbl amdgpu_winsys_bo_slab_vtbl = {
488 amdgpu_bo_slab_destroy
489 /* other functions are never called */
490 };
491
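/* Allocate a new 64 KB slab for the given heap. The heap index encodes a
 * (domain, flags) combination; radeon_domain_from_heap() and
 * radeon_flags_from_heap() translate it back before the backing buffer is
 * created.
 */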
492 struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
493 unsigned entry_size,
494 unsigned group_index)
495 {
496 struct amdgpu_winsys *ws = priv;
497 struct amdgpu_slab *slab = CALLOC_STRUCT(amdgpu_slab);
498 enum radeon_bo_domain domains = radeon_domain_from_heap(heap);
499 enum radeon_bo_flag flags = radeon_flags_from_heap(heap);
500 uint32_t base_id;
501
502 if (!slab)
503 return NULL;
504
505 slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(&ws->base,
506 64 * 1024, 64 * 1024,
507 domains, flags));
508 if (!slab->buffer)
509 goto fail;
510
511 assert(slab->buffer->bo);
512
513 slab->base.num_entries = slab->buffer->base.size / entry_size;
514 slab->base.num_free = slab->base.num_entries;
515 slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
516 if (!slab->entries)
517 goto fail_buffer;
518
519 LIST_INITHEAD(&slab->base.free);
520
521 base_id = __sync_fetch_and_add(&ws->next_bo_unique_id, slab->base.num_entries);
522
523 for (unsigned i = 0; i < slab->base.num_entries; ++i) {
524 struct amdgpu_winsys_bo *bo = &slab->entries[i];
525
526 bo->base.alignment = entry_size;
527 bo->base.usage = slab->buffer->base.usage;
528 bo->base.size = entry_size;
529 bo->base.vtbl = &amdgpu_winsys_bo_slab_vtbl;
530 bo->ws = ws;
531 bo->va = slab->buffer->va + i * entry_size;
532 bo->initial_domain = domains;
533 bo->unique_id = base_id + i;
534 bo->u.slab.entry.slab = &slab->base;
535 bo->u.slab.entry.group_index = group_index;
536 bo->u.slab.real = slab->buffer;
537
538 LIST_ADDTAIL(&bo->u.slab.entry.head, &slab->base.free);
539 }
540
541 return &slab->base;
542
543 fail_buffer:
544 amdgpu_winsys_bo_reference(&slab->buffer, NULL);
545 fail:
546 FREE(slab);
547 return NULL;
548 }
549
550 void amdgpu_bo_slab_free(void *priv, struct pb_slab *pslab)
551 {
552 struct amdgpu_slab *slab = amdgpu_slab(pslab);
553
554 for (unsigned i = 0; i < slab->base.num_entries; ++i)
555 amdgpu_bo_remove_fences(&slab->entries[i]);
556
557 FREE(slab->entries);
558 amdgpu_winsys_bo_reference(&slab->buffer, NULL);
559 FREE(slab);
560 }
561
562 #if DEBUG_SPARSE_COMMITS
563 static void
564 sparse_dump(struct amdgpu_winsys_bo *bo, const char *func)
565 {
566 fprintf(stderr, "%s: %p (size=%"PRIu64", num_va_pages=%u) @ %s\n"
567 "Commitments:\n",
568 __func__, bo, bo->base.size, bo->u.sparse.num_va_pages, func);
569
570 struct amdgpu_sparse_backing *span_backing = NULL;
571 uint32_t span_first_backing_page = 0;
572 uint32_t span_first_va_page = 0;
573 uint32_t va_page = 0;
574
575 for (;;) {
576 struct amdgpu_sparse_backing *backing = 0;
577 uint32_t backing_page = 0;
578
579 if (va_page < bo->u.sparse.num_va_pages) {
580 backing = bo->u.sparse.commitments[va_page].backing;
581 backing_page = bo->u.sparse.commitments[va_page].page;
582 }
583
584 if (span_backing &&
585 (backing != span_backing ||
586 backing_page != span_first_backing_page + (va_page - span_first_va_page))) {
587 fprintf(stderr, " %u..%u: backing=%p:%u..%u\n",
588 span_first_va_page, va_page - 1, span_backing,
589 span_first_backing_page,
590 span_first_backing_page + (va_page - span_first_va_page) - 1);
591
592 span_backing = NULL;
593 }
594
595 if (va_page >= bo->u.sparse.num_va_pages)
596 break;
597
598 if (backing && !span_backing) {
599 span_backing = backing;
600 span_first_backing_page = backing_page;
601 span_first_va_page = va_page;
602 }
603
604 va_page++;
605 }
606
607 fprintf(stderr, "Backing:\n");
608
609 list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
610 fprintf(stderr, " %p (size=%"PRIu64")\n", backing, backing->bo->base.size);
611 for (unsigned i = 0; i < backing->num_chunks; ++i)
612 fprintf(stderr, " %u..%u\n", backing->chunks[i].begin, backing->chunks[i].end);
613 }
614 }
615 #endif
616
617 /*
618 * Attempt to allocate the given number of backing pages. Fewer pages may be
619 * allocated (depending on the fragmentation of existing backing buffers),
620 * which will be reflected by a change to *pnum_pages.
621 */
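/* For example, a request for 1024 pages when the largest free chunk holds
 * only 600 returns that chunk and rewrites *pnum_pages to 600; the caller
 * (amdgpu_bo_sparse_commit) loops until the whole span is covered.
 */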
622 static struct amdgpu_sparse_backing *
623 sparse_backing_alloc(struct amdgpu_winsys_bo *bo, uint32_t *pstart_page, uint32_t *pnum_pages)
624 {
625 struct amdgpu_sparse_backing *best_backing;
626 unsigned best_idx;
627 uint32_t best_num_pages;
628
629 best_backing = NULL;
630 best_idx = 0;
631 best_num_pages = 0;
632
633 /* This is a very simple and inefficient best-fit algorithm. */
634 list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
635 for (unsigned idx = 0; idx < backing->num_chunks; ++idx) {
636 uint32_t cur_num_pages = backing->chunks[idx].end - backing->chunks[idx].begin;
637 if ((best_num_pages < *pnum_pages && cur_num_pages > best_num_pages) ||
638 (best_num_pages > *pnum_pages && cur_num_pages < best_num_pages)) {
639 best_backing = backing;
640 best_idx = idx;
641 best_num_pages = cur_num_pages;
642 }
643 }
644 }
645
646 /* Allocate a new backing buffer if necessary. */
647 if (!best_backing) {
648 struct pb_buffer *buf;
649 uint64_t size;
650 uint32_t pages;
651
652 best_backing = CALLOC_STRUCT(amdgpu_sparse_backing);
653 if (!best_backing)
654 return NULL;
655
656 best_backing->max_chunks = 4;
657 best_backing->chunks = CALLOC(best_backing->max_chunks,
658 sizeof(*best_backing->chunks));
659 if (!best_backing->chunks) {
660 FREE(best_backing);
661 return NULL;
662 }
663
664 assert(bo->u.sparse.num_backing_pages < DIV_ROUND_UP(bo->base.size, RADEON_SPARSE_PAGE_SIZE));
665
666 size = MIN3(bo->base.size / 16,
667 8 * 1024 * 1024,
668 bo->base.size - (uint64_t)bo->u.sparse.num_backing_pages * RADEON_SPARSE_PAGE_SIZE);
669 size = MAX2(size, RADEON_SPARSE_PAGE_SIZE);
670
671 buf = amdgpu_bo_create(&bo->ws->base, size, RADEON_SPARSE_PAGE_SIZE,
672 bo->initial_domain,
673 bo->u.sparse.flags | RADEON_FLAG_NO_SUBALLOC);
674 if (!buf) {
675 FREE(best_backing->chunks);
676 FREE(best_backing);
677 return NULL;
678 }
679
680 /* We might have gotten a bigger buffer than requested via caching. */
681 pages = buf->size / RADEON_SPARSE_PAGE_SIZE;
682
683 best_backing->bo = amdgpu_winsys_bo(buf);
684 best_backing->num_chunks = 1;
685 best_backing->chunks[0].begin = 0;
686 best_backing->chunks[0].end = pages;
687
688 list_add(&best_backing->list, &bo->u.sparse.backing);
689 bo->u.sparse.num_backing_pages += pages;
690
691 best_idx = 0;
692 best_num_pages = pages;
693 }
694
695 *pnum_pages = MIN2(*pnum_pages, best_num_pages);
696 *pstart_page = best_backing->chunks[best_idx].begin;
697 best_backing->chunks[best_idx].begin += *pnum_pages;
698
699 if (best_backing->chunks[best_idx].begin >= best_backing->chunks[best_idx].end) {
700 memmove(&best_backing->chunks[best_idx], &best_backing->chunks[best_idx + 1],
701 sizeof(*best_backing->chunks) * (best_backing->num_chunks - best_idx - 1));
702 best_backing->num_chunks--;
703 }
704
705 return best_backing;
706 }
707
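/* Release an entire backing buffer. The sparse buffer's fences are attached
 * to the backing BO first so that it is not destroyed while the GPU may
 * still be using it.
 */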
708 static void
709 sparse_free_backing_buffer(struct amdgpu_winsys_bo *bo,
710 struct amdgpu_sparse_backing *backing)
711 {
712 struct amdgpu_winsys *ws = backing->bo->ws;
713
714 bo->u.sparse.num_backing_pages -= backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE;
715
716 mtx_lock(&ws->bo_fence_lock);
717 amdgpu_add_fences(backing->bo, bo->num_fences, bo->fences);
718 mtx_unlock(&ws->bo_fence_lock);
719
720 list_del(&backing->list);
721 amdgpu_winsys_bo_reference(&backing->bo, NULL);
722 FREE(backing->chunks);
723 FREE(backing);
724 }
725
726 /*
727 * Return a range of pages from the given backing buffer back into the
728 * free structure.
729 */
730 static bool
731 sparse_backing_free(struct amdgpu_winsys_bo *bo,
732 struct amdgpu_sparse_backing *backing,
733 uint32_t start_page, uint32_t num_pages)
734 {
735 uint32_t end_page = start_page + num_pages;
736 unsigned low = 0;
737 unsigned high = backing->num_chunks;
738
739 /* Find the first chunk with begin >= start_page. */
740 while (low < high) {
741 unsigned mid = low + (high - low) / 2;
742
743 if (backing->chunks[mid].begin >= start_page)
744 high = mid;
745 else
746 low = mid + 1;
747 }
748
749 assert(low >= backing->num_chunks || end_page <= backing->chunks[low].begin);
750 assert(low == 0 || backing->chunks[low - 1].end <= start_page);
751
752 if (low > 0 && backing->chunks[low - 1].end == start_page) {
753 backing->chunks[low - 1].end = end_page;
754
755 if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
756 backing->chunks[low - 1].end = backing->chunks[low].end;
757 memmove(&backing->chunks[low], &backing->chunks[low + 1],
758 sizeof(*backing->chunks) * (backing->num_chunks - low - 1));
759 backing->num_chunks--;
760 }
761 } else if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
762 backing->chunks[low].begin = start_page;
763 } else {
764 if (backing->num_chunks >= backing->max_chunks) {
765 unsigned new_max_chunks = 2 * backing->max_chunks;
766 struct amdgpu_sparse_backing_chunk *new_chunks =
767 REALLOC(backing->chunks,
768 sizeof(*backing->chunks) * backing->max_chunks,
769 sizeof(*backing->chunks) * new_max_chunks);
770 if (!new_chunks)
771 return false;
772
773 backing->max_chunks = new_max_chunks;
774 backing->chunks = new_chunks;
775 }
776
777 memmove(&backing->chunks[low + 1], &backing->chunks[low],
778 sizeof(*backing->chunks) * (backing->num_chunks - low));
779 backing->chunks[low].begin = start_page;
780 backing->chunks[low].end = end_page;
781 backing->num_chunks++;
782 }
783
784 if (backing->num_chunks == 1 && backing->chunks[0].begin == 0 &&
785 backing->chunks[0].end == backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE)
786 sparse_free_backing_buffer(bo, backing);
787
788 return true;
789 }
790
791 static void amdgpu_bo_sparse_destroy(struct pb_buffer *_buf)
792 {
793 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
794 int r;
795
796 assert(!bo->bo && bo->sparse);
797
798 r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0,
799 (uint64_t)bo->u.sparse.num_va_pages * RADEON_SPARSE_PAGE_SIZE,
800 bo->va, 0, AMDGPU_VA_OP_CLEAR);
801 if (r) {
802 fprintf(stderr, "amdgpu: clearing PRT VA region on destroy failed (%d)\n", r);
803 }
804
805 while (!list_empty(&bo->u.sparse.backing)) {
806 struct amdgpu_sparse_backing *dummy = NULL;
807 sparse_free_backing_buffer(bo,
808 container_of(bo->u.sparse.backing.next,
809 dummy, list));
810 }
811
812 amdgpu_va_range_free(bo->u.sparse.va_handle);
813 mtx_destroy(&bo->u.sparse.commit_lock);
814 FREE(bo->u.sparse.commitments);
815 FREE(bo);
816 }
817
818 static const struct pb_vtbl amdgpu_winsys_bo_sparse_vtbl = {
819 amdgpu_bo_sparse_destroy
820 /* other functions are never called */
821 };
822
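/* Create a sparse (PRT) buffer. Only a VA range is reserved up front and
 * mapped with AMDGPU_VM_PAGE_PRT, so accesses to uncommitted pages do not
 * fault; physical backing is attached and detached later through
 * amdgpu_bo_sparse_commit.
 */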
823 static struct pb_buffer *
824 amdgpu_bo_sparse_create(struct amdgpu_winsys *ws, uint64_t size,
825 enum radeon_bo_domain domain,
826 enum radeon_bo_flag flags)
827 {
828 struct amdgpu_winsys_bo *bo;
829 uint64_t map_size;
830 uint64_t va_gap_size;
831 int r;
832
833 /* We use 32-bit page numbers; refuse to attempt allocating sparse buffers
834 * that exceed this limit. This is not really a restriction: we don't have
835 * that much virtual address space anyway.
836 */
837 if (size > (uint64_t)INT32_MAX * RADEON_SPARSE_PAGE_SIZE)
838 return NULL;
839
840 bo = CALLOC_STRUCT(amdgpu_winsys_bo);
841 if (!bo)
842 return NULL;
843
844 pipe_reference_init(&bo->base.reference, 1);
845 bo->base.alignment = RADEON_SPARSE_PAGE_SIZE;
846 bo->base.size = size;
847 bo->base.vtbl = &amdgpu_winsys_bo_sparse_vtbl;
848 bo->ws = ws;
849 bo->initial_domain = domain;
850 bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
851 bo->sparse = true;
852 bo->u.sparse.flags = flags & ~RADEON_FLAG_SPARSE;
853
854 bo->u.sparse.num_va_pages = DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
855 bo->u.sparse.commitments = CALLOC(bo->u.sparse.num_va_pages,
856 sizeof(*bo->u.sparse.commitments));
857 if (!bo->u.sparse.commitments)
858 goto error_alloc_commitments;
859
860 mtx_init(&bo->u.sparse.commit_lock, mtx_plain);
861 LIST_INITHEAD(&bo->u.sparse.backing);
862
863 /* For simplicity, we always map a multiple of the page size. */
864 map_size = align64(size, RADEON_SPARSE_PAGE_SIZE);
865 va_gap_size = ws->check_vm ? 4 * RADEON_SPARSE_PAGE_SIZE : 0;
866 r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
867 map_size + va_gap_size, RADEON_SPARSE_PAGE_SIZE,
868 0, &bo->va, &bo->u.sparse.va_handle, 0);
869 if (r)
870 goto error_va_alloc;
871
872 r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0, size, bo->va,
873 AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_MAP);
874 if (r)
875 goto error_va_map;
876
877 return &bo->base;
878
879 error_va_map:
880 amdgpu_va_range_free(bo->u.sparse.va_handle);
881 error_va_alloc:
882 mtx_destroy(&bo->u.sparse.commit_lock);
883 FREE(bo->u.sparse.commitments);
884 error_alloc_commitments:
885 FREE(bo);
886 return NULL;
887 }
888
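/* Commit or decommit a page-aligned range of a sparse buffer. Committing
 * fills uncommitted spans with pages taken from backing buffers and maps
 * them; decommitting remaps the range as PRT-only and returns the pages to
 * their backing buffers, freeing a backing buffer once it is fully unused.
 */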
889 static bool
890 amdgpu_bo_sparse_commit(struct pb_buffer *buf, uint64_t offset, uint64_t size,
891 bool commit)
892 {
893 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buf);
894 struct amdgpu_sparse_commitment *comm;
895 uint32_t va_page, end_va_page;
896 bool ok = true;
897 int r;
898
899 assert(bo->sparse);
900 assert(offset % RADEON_SPARSE_PAGE_SIZE == 0);
901 assert(offset <= bo->base.size);
902 assert(size <= bo->base.size - offset);
903 assert(size % RADEON_SPARSE_PAGE_SIZE == 0 || offset + size == bo->base.size);
904
905 comm = bo->u.sparse.commitments;
906 va_page = offset / RADEON_SPARSE_PAGE_SIZE;
907 end_va_page = va_page + DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
908
909 mtx_lock(&bo->u.sparse.commit_lock);
910
911 #if DEBUG_SPARSE_COMMITS
912 sparse_dump(bo, __func__);
913 #endif
914
915 if (commit) {
916 while (va_page < end_va_page) {
917 uint32_t span_va_page;
918
919 /* Skip pages that are already committed. */
920 if (comm[va_page].backing) {
921 va_page++;
922 continue;
923 }
924
925 /* Determine length of uncommitted span. */
926 span_va_page = va_page;
927 while (va_page < end_va_page && !comm[va_page].backing)
928 va_page++;
929
930 /* Fill the uncommitted span with chunks of backing memory. */
931 while (span_va_page < va_page) {
932 struct amdgpu_sparse_backing *backing;
933 uint32_t backing_start, backing_size;
934
935 backing_size = va_page - span_va_page;
936 backing = sparse_backing_alloc(bo, &backing_start, &backing_size);
937 if (!backing) {
938 ok = false;
939 goto out;
940 }
941
942 r = amdgpu_bo_va_op_raw(bo->ws->dev, backing->bo->bo,
943 (uint64_t)backing_start * RADEON_SPARSE_PAGE_SIZE,
944 (uint64_t)backing_size * RADEON_SPARSE_PAGE_SIZE,
945 bo->va + (uint64_t)span_va_page * RADEON_SPARSE_PAGE_SIZE,
946 AMDGPU_VM_PAGE_READABLE |
947 AMDGPU_VM_PAGE_WRITEABLE |
948 AMDGPU_VM_PAGE_EXECUTABLE,
949 AMDGPU_VA_OP_REPLACE);
950 if (r) {
951 ok = sparse_backing_free(bo, backing, backing_start, backing_size);
952 assert(ok && "sufficient memory should already be allocated");
953
954 ok = false;
955 goto out;
956 }
957
958 while (backing_size) {
959 comm[span_va_page].backing = backing;
960 comm[span_va_page].page = backing_start;
961 span_va_page++;
962 backing_start++;
963 backing_size--;
964 }
965 }
966 }
967 } else {
968 r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0,
969 (uint64_t)(end_va_page - va_page) * RADEON_SPARSE_PAGE_SIZE,
970 bo->va + (uint64_t)va_page * RADEON_SPARSE_PAGE_SIZE,
971 AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_REPLACE);
972 if (r) {
973 ok = false;
974 goto out;
975 }
976
977 while (va_page < end_va_page) {
978 struct amdgpu_sparse_backing *backing;
979 uint32_t backing_start;
980 uint32_t span_pages;
981
982 /* Skip pages that are already uncommitted. */
983 if (!comm[va_page].backing) {
984 va_page++;
985 continue;
986 }
987
988 /* Group contiguous spans of pages. */
989 backing = comm[va_page].backing;
990 backing_start = comm[va_page].page;
991 comm[va_page].backing = NULL;
992
993 span_pages = 1;
994 va_page++;
995
996 while (va_page < end_va_page &&
997 comm[va_page].backing == backing &&
998 comm[va_page].page == backing_start + span_pages) {
999 comm[va_page].backing = NULL;
1000 va_page++;
1001 span_pages++;
1002 }
1003
1004 if (!sparse_backing_free(bo, backing, backing_start, span_pages)) {
1005 /* Couldn't allocate tracking data structures, so we have to leak */
1006 fprintf(stderr, "amdgpu: leaking PRT backing memory\n");
1007 ok = false;
1008 }
1009 }
1010 }
1011 out:
1012
1013 mtx_unlock(&bo->u.sparse.commit_lock);
1014
1015 return ok;
1016 }
1017
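/* Convert the 3-bit TILE_SPLIT field of the tiling flags to a size in bytes,
 * and back again (eg_tile_split_rev).
 */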
1018 static unsigned eg_tile_split(unsigned tile_split)
1019 {
1020 switch (tile_split) {
1021 case 0: tile_split = 64; break;
1022 case 1: tile_split = 128; break;
1023 case 2: tile_split = 256; break;
1024 case 3: tile_split = 512; break;
1025 default:
1026 case 4: tile_split = 1024; break;
1027 case 5: tile_split = 2048; break;
1028 case 6: tile_split = 4096; break;
1029 }
1030 return tile_split;
1031 }
1032
1033 static unsigned eg_tile_split_rev(unsigned eg_tile_split)
1034 {
1035 switch (eg_tile_split) {
1036 case 64: return 0;
1037 case 128: return 1;
1038 case 256: return 2;
1039 case 512: return 3;
1040 default:
1041 case 1024: return 4;
1042 case 2048: return 5;
1043 case 4096: return 6;
1044 }
1045 }
1046
1047 static void amdgpu_buffer_get_metadata(struct pb_buffer *_buf,
1048 struct radeon_bo_metadata *md)
1049 {
1050 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
1051 struct amdgpu_bo_info info = {0};
1052 uint64_t tiling_flags;
1053 int r;
1054
1055 assert(bo->bo && "must not be called for slab entries");
1056
1057 r = amdgpu_bo_query_info(bo->bo, &info);
1058 if (r)
1059 return;
1060
1061 tiling_flags = info.metadata.tiling_info;
1062
1063 if (bo->ws->info.chip_class >= GFX9) {
1064 md->u.gfx9.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
1065 } else {
1066 md->u.legacy.microtile = RADEON_LAYOUT_LINEAR;
1067 md->u.legacy.macrotile = RADEON_LAYOUT_LINEAR;
1068
1069 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */
1070 md->u.legacy.macrotile = RADEON_LAYOUT_TILED;
1071 else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
1072 md->u.legacy.microtile = RADEON_LAYOUT_TILED;
1073
1074 md->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1075 md->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1076 md->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1077 md->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
1078 md->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1079 md->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1080 md->u.legacy.scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
1081 }
1082
1083 md->size_metadata = info.metadata.size_metadata;
1084 memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
1085 }
1086
1087 static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
1088 struct radeon_bo_metadata *md)
1089 {
1090 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
1091 struct amdgpu_bo_metadata metadata = {0};
1092 uint64_t tiling_flags = 0;
1093
1094 assert(bo->bo && "must not be called for slab entries");
1095
1096 if (bo->ws->info.chip_class >= GFX9) {
1097 tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, md->u.gfx9.swizzle_mode);
1098 } else {
1099 if (md->u.legacy.macrotile == RADEON_LAYOUT_TILED)
1100 tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
1101 else if (md->u.legacy.microtile == RADEON_LAYOUT_TILED)
1102 tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
1103 else
1104 tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */
1105
1106 tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, md->u.legacy.pipe_config);
1107 tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(md->u.legacy.bankw));
1108 tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(md->u.legacy.bankh));
1109 if (md->u.legacy.tile_split)
1110 tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(md->u.legacy.tile_split));
1111 tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(md->u.legacy.mtilea));
1112 tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(md->u.legacy.num_banks)-1);
1113
1114 if (md->u.legacy.scanout)
1115 tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
1116 else
1117 tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
1118 }
1119
1120 metadata.tiling_info = tiling_flags;
1121 metadata.size_metadata = md->size_metadata;
1122 memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));
1123
1124 amdgpu_bo_set_metadata(bo->bo, &metadata);
1125 }
1126
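/* Main buffer allocation entry point. Small non-sparse allocations are
 * sub-allocated from slabs, sparse requests go through
 * amdgpu_bo_sparse_create, and everything else is served from the pb_cache
 * or, failing that, by creating a fresh BO.
 */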
1127 static struct pb_buffer *
1128 amdgpu_bo_create(struct radeon_winsys *rws,
1129 uint64_t size,
1130 unsigned alignment,
1131 enum radeon_bo_domain domain,
1132 enum radeon_bo_flag flags)
1133 {
1134 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1135 struct amdgpu_winsys_bo *bo;
1136 unsigned usage = 0, pb_cache_bucket;
1137
1138 /* VRAM implies WC. This is not optional. */
1139 assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
1140
1141 /* Sub-allocate small buffers from slabs. */
1142 if (!(flags & (RADEON_FLAG_NO_SUBALLOC | RADEON_FLAG_SPARSE)) &&
1143 size <= (1 << AMDGPU_SLAB_MAX_SIZE_LOG2) &&
1144 alignment <= MAX2(1 << AMDGPU_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
1145 struct pb_slab_entry *entry;
1146 int heap = radeon_get_heap_index(domain, flags);
1147
1148 if (heap < 0 || heap >= RADEON_MAX_SLAB_HEAPS)
1149 goto no_slab;
1150
1151 entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
1152 if (!entry) {
1153 /* Clear the cache and try again. */
1154 pb_cache_release_all_buffers(&ws->bo_cache);
1155
1156 entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
1157 }
1158 if (!entry)
1159 return NULL;
1160
1161 bo = NULL;
1162 bo = container_of(entry, bo, u.slab.entry);
1163
1164 pipe_reference_init(&bo->base.reference, 1);
1165
1166 return &bo->base;
1167 }
1168 no_slab:
1169
1170 if (flags & RADEON_FLAG_SPARSE) {
1171 assert(RADEON_SPARSE_PAGE_SIZE % alignment == 0);
1172
1173 flags |= RADEON_FLAG_NO_CPU_ACCESS;
1174
1175 return amdgpu_bo_sparse_create(ws, size, domain, flags);
1176 }
1177
1178 /* This flag is irrelevant for the cache. */
1179 flags &= ~RADEON_FLAG_NO_SUBALLOC;
1180
1181 /* Align size to page size. This is the minimum alignment for normal
1182 * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
1183 * like constant/uniform buffers, can benefit from better and more reuse.
1184 */
1185 size = align64(size, ws->info.gart_page_size);
1186 alignment = align(alignment, ws->info.gart_page_size);
1187
1188 /* Only set one usage bit each for domains and flags, or the cache manager
1189 * might consider different sets of domains / flags compatible
1190 */
1191 if (domain == RADEON_DOMAIN_VRAM_GTT)
1192 usage = 1 << 2;
1193 else
1194 usage = domain >> 1;
1195 assert(flags < sizeof(usage) * 8 - 3);
1196 usage |= 1 << (flags + 3);
1197
1198 /* Determine the pb_cache bucket for minimizing pb_cache misses. */
1199 pb_cache_bucket = 0;
1200 if (domain & RADEON_DOMAIN_VRAM) /* VRAM or VRAM+GTT */
1201 pb_cache_bucket += 1;
1202 if (flags == RADEON_FLAG_GTT_WC) /* WC */
1203 pb_cache_bucket += 2;
1204 assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));
1205
1206 /* Get a buffer from the cache. */
1207 bo = (struct amdgpu_winsys_bo*)
1208 pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage,
1209 pb_cache_bucket);
1210 if (bo)
1211 return &bo->base;
1212
1213 /* Create a new one. */
1214 bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
1215 pb_cache_bucket);
1216 if (!bo) {
1217 /* Clear the cache and try again. */
1218 pb_slabs_reclaim(&ws->bo_slabs);
1219 pb_cache_release_all_buffers(&ws->bo_cache);
1220 bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
1221 pb_cache_bucket);
1222 if (!bo)
1223 return NULL;
1224 }
1225
1226 bo->u.real.use_reusable_pool = true;
1227 return &bo->base;
1228 }
1229
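/* Import a buffer shared by another process or device from a GEM flink name
 * or dma-buf fd, map it into the GPU address space and record its initial
 * domain for accounting.
 */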
1230 static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
1231 struct winsys_handle *whandle,
1232 unsigned *stride,
1233 unsigned *offset)
1234 {
1235 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1236 struct amdgpu_winsys_bo *bo;
1237 enum amdgpu_bo_handle_type type;
1238 struct amdgpu_bo_import_result result = {0};
1239 uint64_t va;
1240 amdgpu_va_handle va_handle;
1241 struct amdgpu_bo_info info = {0};
1242 enum radeon_bo_domain initial = 0;
1243 int r;
1244
1245 /* Initialize the structure. */
1246 bo = CALLOC_STRUCT(amdgpu_winsys_bo);
1247 if (!bo) {
1248 return NULL;
1249 }
1250
1251 switch (whandle->type) {
1252 case DRM_API_HANDLE_TYPE_SHARED:
1253 type = amdgpu_bo_handle_type_gem_flink_name;
1254 break;
1255 case DRM_API_HANDLE_TYPE_FD:
1256 type = amdgpu_bo_handle_type_dma_buf_fd;
1257 break;
1258 default:
1259 return NULL;
1260 }
1261
1262 r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
1263 if (r)
1264 goto error;
1265
1266 /* Get initial domains. */
1267 r = amdgpu_bo_query_info(result.buf_handle, &info);
1268 if (r)
1269 goto error_query;
1270
1271 r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
1272 result.alloc_size, 1 << 20, 0, &va, &va_handle, 0);
1273 if (r)
1274 goto error_query;
1275
1276 r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP);
1277 if (r)
1278 goto error_va_map;
1279
1280 if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
1281 initial |= RADEON_DOMAIN_VRAM;
1282 if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
1283 initial |= RADEON_DOMAIN_GTT;
1284
1285
1286 pipe_reference_init(&bo->base.reference, 1);
1287 bo->base.alignment = info.phys_alignment;
1288 bo->bo = result.buf_handle;
1289 bo->base.size = result.alloc_size;
1290 bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
1291 bo->ws = ws;
1292 bo->va = va;
1293 bo->u.real.va_handle = va_handle;
1294 bo->initial_domain = initial;
1295 bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
1296 bo->is_shared = true;
1297
1298 if (stride)
1299 *stride = whandle->stride;
1300 if (offset)
1301 *offset = whandle->offset;
1302
1303 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
1304 ws->allocated_vram += align64(bo->base.size, ws->info.gart_page_size);
1305 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
1306 ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);
1307
1308 amdgpu_add_buffer_to_global_list(bo);
1309
1310 return &bo->base;
1311
1312 error_va_map:
1313 amdgpu_va_range_free(va_handle);
1314
1315 error_query:
1316 amdgpu_bo_free(result.buf_handle);
1317
1318 error:
1319 FREE(bo);
1320 return NULL;
1321 }
1322
1323 static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
1324 unsigned stride, unsigned offset,
1325 unsigned slice_size,
1326 struct winsys_handle *whandle)
1327 {
1328 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
1329 enum amdgpu_bo_handle_type type;
1330 int r;
1331
1332 /* Don't allow exports of slab entries and sparse buffers. */
1333 if (!bo->bo)
1334 return false;
1335
1336 bo->u.real.use_reusable_pool = false;
1337
1338 switch (whandle->type) {
1339 case DRM_API_HANDLE_TYPE_SHARED:
1340 type = amdgpu_bo_handle_type_gem_flink_name;
1341 break;
1342 case DRM_API_HANDLE_TYPE_FD:
1343 type = amdgpu_bo_handle_type_dma_buf_fd;
1344 break;
1345 case DRM_API_HANDLE_TYPE_KMS:
1346 type = amdgpu_bo_handle_type_kms;
1347 break;
1348 default:
1349 return false;
1350 }
1351
1352 r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
1353 if (r)
1354 return false;
1355
1356 whandle->stride = stride;
1357 whandle->offset = offset;
1358 whandle->offset += slice_size * whandle->layer;
1359 bo->is_shared = true;
1360 return true;
1361 }
1362
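/* Wrap anonymous user memory in a GTT buffer (userptr). Mapping such a
 * buffer returns the original pointer.
 */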
1363 static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
1364 void *pointer, uint64_t size)
1365 {
1366 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1367 amdgpu_bo_handle buf_handle;
1368 struct amdgpu_winsys_bo *bo;
1369 uint64_t va;
1370 amdgpu_va_handle va_handle;
1371
1372 bo = CALLOC_STRUCT(amdgpu_winsys_bo);
1373 if (!bo)
1374 return NULL;
1375
1376 if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle))
1377 goto error;
1378
1379 if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
1380 size, 1 << 12, 0, &va, &va_handle, 0))
1381 goto error_va_alloc;
1382
1383 if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP))
1384 goto error_va_map;
1385
1386 /* Initialize it. */
1387 pipe_reference_init(&bo->base.reference, 1);
1388 bo->bo = buf_handle;
1389 bo->base.alignment = 0;
1390 bo->base.size = size;
1391 bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
1392 bo->ws = ws;
1393 bo->user_ptr = pointer;
1394 bo->va = va;
1395 bo->u.real.va_handle = va_handle;
1396 bo->initial_domain = RADEON_DOMAIN_GTT;
1397 bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
1398
1399 ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);
1400
1401 amdgpu_add_buffer_to_global_list(bo);
1402
1403 return (struct pb_buffer*)bo;
1404
1405 error_va_map:
1406 amdgpu_va_range_free(va_handle);
1407
1408 error_va_alloc:
1409 amdgpu_bo_free(buf_handle);
1410
1411 error:
1412 FREE(bo);
1413 return NULL;
1414 }
1415
1416 static bool amdgpu_bo_is_user_ptr(struct pb_buffer *buf)
1417 {
1418 return ((struct amdgpu_winsys_bo*)buf)->user_ptr != NULL;
1419 }
1420
1421 static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
1422 {
1423 return ((struct amdgpu_winsys_bo*)buf)->va;
1424 }
1425
1426 void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
1427 {
1428 ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
1429 ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
1430 ws->base.buffer_map = amdgpu_bo_map;
1431 ws->base.buffer_unmap = amdgpu_bo_unmap;
1432 ws->base.buffer_wait = amdgpu_bo_wait;
1433 ws->base.buffer_create = amdgpu_bo_create;
1434 ws->base.buffer_from_handle = amdgpu_bo_from_handle;
1435 ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
1436 ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
1437 ws->base.buffer_get_handle = amdgpu_bo_get_handle;
1438 ws->base.buffer_commit = amdgpu_bo_sparse_commit;
1439 ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
1440 ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
1441 }