winsys/amdgpu: disable local BOs on Raven
[mesa.git] / src / gallium / winsys / amdgpu / drm / amdgpu_bo.c
1 /*
2 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
3 * Copyright © 2015 Advanced Micro Devices, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
16 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
18 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
25 * of the Software.
26 */
27 /*
28 * Authors:
29 * Marek Olšák <maraeo@gmail.com>
30 */
31
32 #include "amdgpu_cs.h"
33
34 #include "os/os_time.h"
35 #include "state_tracker/drm_driver.h"
36 #include <amdgpu_drm.h>
37 #include <xf86drm.h>
38 #include <stdio.h>
39 #include <inttypes.h>
40
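/* Fallback so the file still builds against older amdgpu_drm.h headers that
 * do not define this flag yet. */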
41 #ifndef AMDGPU_GEM_CREATE_VM_ALWAYS_VALID
42 #define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
43 #endif
44
45 /* Set to 1 for verbose output showing committed sparse buffer ranges. */
46 #define DEBUG_SPARSE_COMMITS 0
47
48 struct amdgpu_sparse_backing_chunk {
49 uint32_t begin, end;
50 };
51
52 static struct pb_buffer *
53 amdgpu_bo_create(struct radeon_winsys *rws,
54 uint64_t size,
55 unsigned alignment,
56 enum radeon_bo_domain domain,
57 enum radeon_bo_flag flags);
58
59 static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
60 enum radeon_bo_usage usage)
61 {
62 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
63 struct amdgpu_winsys *ws = bo->ws;
64 int64_t abs_timeout;
65
66 if (timeout == 0) {
67 if (p_atomic_read(&bo->num_active_ioctls))
68 return false;
69
70 } else {
71 abs_timeout = os_time_get_absolute_timeout(timeout);
72
73 /* Wait if any ioctl is being submitted with this buffer. */
74 if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
75 return false;
76 }
77
78 if (bo->is_shared) {
79 /* We can't use user fences for shared buffers, because user fences
80 * are local to this process only. If we want to wait for all buffer
81 * uses in all processes, we have to use amdgpu_bo_wait_for_idle.
82 */
83 bool buffer_busy = true;
84 int r;
85
86 r = amdgpu_bo_wait_for_idle(bo->bo, timeout, &buffer_busy);
87 if (r)
88 fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__,
89 r);
90 return !buffer_busy;
91 }
92
93 if (timeout == 0) {
94 unsigned idle_fences;
95 bool buffer_idle;
96
97 mtx_lock(&ws->bo_fence_lock);
98
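/* Zero timeout: just poll. Count how many fences at the front of the array
 * have already signalled so they can be released below. */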
99 for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
100 if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
101 break;
102 }
103
104 /* Release the idle fences to avoid checking them again later. */
105 for (unsigned i = 0; i < idle_fences; ++i)
106 amdgpu_fence_reference(&bo->fences[i], NULL);
107
108 memmove(&bo->fences[0], &bo->fences[idle_fences],
109 (bo->num_fences - idle_fences) * sizeof(*bo->fences));
110 bo->num_fences -= idle_fences;
111
112 buffer_idle = !bo->num_fences;
113 mtx_unlock(&ws->bo_fence_lock);
114
115 return buffer_idle;
116 } else {
117 bool buffer_idle = true;
118
119 mtx_lock(&ws->bo_fence_lock);
120 while (bo->num_fences && buffer_idle) {
121 struct pipe_fence_handle *fence = NULL;
122 bool fence_idle = false;
123
124 amdgpu_fence_reference(&fence, bo->fences[0]);
125
126 /* Wait for the fence. */
127 mtx_unlock(&ws->bo_fence_lock);
128 if (amdgpu_fence_wait(fence, abs_timeout, true))
129 fence_idle = true;
130 else
131 buffer_idle = false;
132 mtx_lock(&ws->bo_fence_lock);
133
134 /* Release an idle fence to avoid checking it again later, keeping in
135 * mind that the fence array may have been modified by other threads.
136 */
137 if (fence_idle && bo->num_fences && bo->fences[0] == fence) {
138 amdgpu_fence_reference(&bo->fences[0], NULL);
139 memmove(&bo->fences[0], &bo->fences[1],
140 (bo->num_fences - 1) * sizeof(*bo->fences));
141 bo->num_fences--;
142 }
143
144 amdgpu_fence_reference(&fence, NULL);
145 }
146 mtx_unlock(&ws->bo_fence_lock);
147
148 return buffer_idle;
149 }
150 }
151
152 static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
153 struct pb_buffer *buf)
154 {
155 return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
156 }
157
158 static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
159 {
160 for (unsigned i = 0; i < bo->num_fences; ++i)
161 amdgpu_fence_reference(&bo->fences[i], NULL);
162
163 FREE(bo->fences);
164 bo->num_fences = 0;
165 bo->max_fences = 0;
166 }
167
168 void amdgpu_bo_destroy(struct pb_buffer *_buf)
169 {
170 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
171
172 assert(bo->bo && "must not be called for slab entries");
173
174 if (bo->ws->debug_all_bos) {
175 mtx_lock(&bo->ws->global_bo_list_lock);
176 LIST_DEL(&bo->u.real.global_list_item);
177 bo->ws->num_buffers--;
178 mtx_unlock(&bo->ws->global_bo_list_lock);
179 }
180
181 amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
182 amdgpu_va_range_free(bo->u.real.va_handle);
183 amdgpu_bo_free(bo->bo);
184
185 amdgpu_bo_remove_fences(bo);
186
187 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
188 bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->info.gart_page_size);
189 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
190 bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->info.gart_page_size);
191
192 if (bo->u.real.map_count >= 1) {
193 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
194 bo->ws->mapped_vram -= bo->base.size;
195 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
196 bo->ws->mapped_gtt -= bo->base.size;
197 bo->ws->num_mapped_buffers--;
198 }
199
200 FREE(bo);
201 }
202
203 static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
204 {
205 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
206
207 assert(bo->bo); /* slab buffers have a separate vtbl */
208
209 if (bo->u.real.use_reusable_pool)
210 pb_cache_add_buffer(&bo->u.real.cache_entry);
211 else
212 amdgpu_bo_destroy(_buf);
213 }
214
215 static void *amdgpu_bo_map(struct pb_buffer *buf,
216 struct radeon_winsys_cs *rcs,
217 enum pipe_transfer_usage usage)
218 {
219 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
220 struct amdgpu_winsys_bo *real;
221 struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
222 int r;
223 void *cpu = NULL;
224 uint64_t offset = 0;
225
226 assert(!bo->sparse);
227
228 /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
229 if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
230 /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
231 if (usage & PIPE_TRANSFER_DONTBLOCK) {
232 if (!(usage & PIPE_TRANSFER_WRITE)) {
233 /* Mapping for read.
234 *
235 * Since we are mapping for read, we don't need to wait
236 * if the GPU is using the buffer for read too
237 * (neither one is changing it).
238 *
239 * Only check whether the buffer is being used for write. */
240 if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
241 RADEON_USAGE_WRITE)) {
242 cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
243 return NULL;
244 }
245
246 if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
247 RADEON_USAGE_WRITE)) {
248 return NULL;
249 }
250 } else {
251 if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
252 cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
253 return NULL;
254 }
255
256 if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
257 RADEON_USAGE_READWRITE)) {
258 return NULL;
259 }
260 }
261 } else {
262 uint64_t time = os_time_get_nano();
263
264 if (!(usage & PIPE_TRANSFER_WRITE)) {
265 /* Mapping for read.
266 *
267 * Since we are mapping for read, we don't need to wait
268 * if the GPU is using the buffer for read too
269 * (neither one is changing it).
270 *
271 * Only check whether the buffer is being used for write. */
272 if (cs) {
273 if (amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
274 RADEON_USAGE_WRITE)) {
275 cs->flush_cs(cs->flush_data, 0, NULL);
276 } else {
277 /* Try to avoid busy-waiting in amdgpu_bo_wait. */
278 if (p_atomic_read(&bo->num_active_ioctls))
279 amdgpu_cs_sync_flush(rcs);
280 }
281 }
282
283 amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
284 RADEON_USAGE_WRITE);
285 } else {
286 /* Mapping for write. */
287 if (cs) {
288 if (amdgpu_bo_is_referenced_by_cs(cs, bo)) {
289 cs->flush_cs(cs->flush_data, 0, NULL);
290 } else {
291 /* Try to avoid busy-waiting in amdgpu_bo_wait. */
292 if (p_atomic_read(&bo->num_active_ioctls))
293 amdgpu_cs_sync_flush(rcs);
294 }
295 }
296
297 amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
298 RADEON_USAGE_READWRITE);
299 }
300
301 bo->ws->buffer_wait_time += os_time_get_nano() - time;
302 }
303 }
304
305 /* If the buffer is created from user memory, return the user pointer. */
306 if (bo->user_ptr)
307 return bo->user_ptr;
308
309 if (bo->bo) {
310 real = bo;
311 } else {
312 real = bo->u.slab.real;
313 offset = bo->va - real->va;
314 }
315
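/* CPU-map the real buffer; for a slab entry this is the parent buffer, and
 * the entry's offset within it is added to the returned pointer. */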
316 r = amdgpu_bo_cpu_map(real->bo, &cpu);
317 if (r) {
318 /* Clear the cache and try again. */
319 pb_cache_release_all_buffers(&real->ws->bo_cache);
320 r = amdgpu_bo_cpu_map(real->bo, &cpu);
321 if (r)
322 return NULL;
323 }
324
325 if (p_atomic_inc_return(&real->u.real.map_count) == 1) {
326 if (real->initial_domain & RADEON_DOMAIN_VRAM)
327 real->ws->mapped_vram += real->base.size;
328 else if (real->initial_domain & RADEON_DOMAIN_GTT)
329 real->ws->mapped_gtt += real->base.size;
330 real->ws->num_mapped_buffers++;
331 }
332 return (uint8_t*)cpu + offset;
333 }
334
335 static void amdgpu_bo_unmap(struct pb_buffer *buf)
336 {
337 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
338 struct amdgpu_winsys_bo *real;
339
340 assert(!bo->sparse);
341
342 if (bo->user_ptr)
343 return;
344
345 real = bo->bo ? bo : bo->u.slab.real;
346
347 if (p_atomic_dec_zero(&real->u.real.map_count)) {
348 if (real->initial_domain & RADEON_DOMAIN_VRAM)
349 real->ws->mapped_vram -= real->base.size;
350 else if (real->initial_domain & RADEON_DOMAIN_GTT)
351 real->ws->mapped_gtt -= real->base.size;
352 real->ws->num_mapped_buffers--;
353 }
354
355 amdgpu_bo_cpu_unmap(real->bo);
356 }
357
358 static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
359 amdgpu_bo_destroy_or_cache
360 /* other functions are never called */
361 };
362
363 static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
364 {
365 struct amdgpu_winsys *ws = bo->ws;
366
367 assert(bo->bo);
368
369 if (ws->debug_all_bos) {
370 mtx_lock(&ws->global_bo_list_lock);
371 LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
372 ws->num_buffers++;
373 mtx_unlock(&ws->global_bo_list_lock);
374 }
375 }
376
377 static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
378 uint64_t size,
379 unsigned alignment,
380 unsigned usage,
381 enum radeon_bo_domain initial_domain,
382 unsigned flags,
383 unsigned pb_cache_bucket)
384 {
385 struct amdgpu_bo_alloc_request request = {0};
386 amdgpu_bo_handle buf_handle;
387 uint64_t va = 0;
388 struct amdgpu_winsys_bo *bo;
389 amdgpu_va_handle va_handle;
390 unsigned va_gap_size;
391 int r;
392
393 assert(initial_domain & RADEON_DOMAIN_VRAM_GTT);
394 bo = CALLOC_STRUCT(amdgpu_winsys_bo);
395 if (!bo) {
396 return NULL;
397 }
398
399 pb_cache_init_entry(&ws->bo_cache, &bo->u.real.cache_entry, &bo->base,
400 pb_cache_bucket);
401 request.alloc_size = size;
402 request.phys_alignment = alignment;
403
404 if (initial_domain & RADEON_DOMAIN_VRAM)
405 request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
406 if (initial_domain & RADEON_DOMAIN_GTT)
407 request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
408
409 if (flags & RADEON_FLAG_NO_CPU_ACCESS)
410 request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
411 if (flags & RADEON_FLAG_GTT_WC)
412 request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
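/* Buffers that are never shared between processes can be made "local"
 * (VM_ALWAYS_VALID): their VM mapping stays permanently valid, so the kernel
 * does not have to validate them with every submission. This needs DRM 3.20+
 * and is deliberately not used on Raven (see the commit title), presumably to
 * work around problems seen with local BOs on that chip. */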
413 if (flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
414 ws->info.drm_minor >= 20 &&
415 ws->info.family != CHIP_RAVEN)
416 request.flags |= AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;
417
418 r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
419 if (r) {
420 fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
421 fprintf(stderr, "amdgpu: size : %"PRIu64" bytes\n", size);
422 fprintf(stderr, "amdgpu: alignment : %u bytes\n", alignment);
423 fprintf(stderr, "amdgpu: domains : %u\n", initial_domain);
424 goto error_bo_alloc;
425 }
426
427 va_gap_size = ws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
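/* Aligning large allocations to the PTE fragment size lets the kernel map
 * them with larger page-table fragments, reducing the number of page-table
 * entries it has to manage. */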
428 if (size > ws->info.pte_fragment_size)
429 alignment = MAX2(alignment, ws->info.pte_fragment_size);
430 r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
431 size + va_gap_size, alignment, 0, &va, &va_handle, 0);
432 if (r)
433 goto error_va_alloc;
434
435 r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
436 if (r)
437 goto error_va_map;
438
439 pipe_reference_init(&bo->base.reference, 1);
440 bo->base.alignment = alignment;
441 bo->base.usage = usage;
442 bo->base.size = size;
443 bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
444 bo->ws = ws;
445 bo->bo = buf_handle;
446 bo->va = va;
447 bo->u.real.va_handle = va_handle;
448 bo->initial_domain = initial_domain;
449 bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
450 bo->is_local = !!(request.flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
451
452 if (initial_domain & RADEON_DOMAIN_VRAM)
453 ws->allocated_vram += align64(size, ws->info.gart_page_size);
454 else if (initial_domain & RADEON_DOMAIN_GTT)
455 ws->allocated_gtt += align64(size, ws->info.gart_page_size);
456
457 amdgpu_add_buffer_to_global_list(bo);
458
459 return bo;
460
461 error_va_map:
462 amdgpu_va_range_free(va_handle);
463
464 error_va_alloc:
465 amdgpu_bo_free(buf_handle);
466
467 error_bo_alloc:
468 FREE(bo);
469 return NULL;
470 }
471
472 bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf)
473 {
474 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
475
476 if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
477 return false;
478 }
479
480 return amdgpu_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
481 }
482
483 bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
484 {
485 struct amdgpu_winsys_bo *bo = NULL; /* fix container_of */
486 bo = container_of(entry, bo, u.slab.entry);
487
488 return amdgpu_bo_can_reclaim(&bo->base);
489 }
490
491 static void amdgpu_bo_slab_destroy(struct pb_buffer *_buf)
492 {
493 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
494
495 assert(!bo->bo);
496
497 pb_slab_free(&bo->ws->bo_slabs, &bo->u.slab.entry);
498 }
499
500 static const struct pb_vtbl amdgpu_winsys_bo_slab_vtbl = {
501 amdgpu_bo_slab_destroy
502 /* other functions are never called */
503 };
504
505 struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
506 unsigned entry_size,
507 unsigned group_index)
508 {
509 struct amdgpu_winsys *ws = priv;
510 struct amdgpu_slab *slab = CALLOC_STRUCT(amdgpu_slab);
511 enum radeon_bo_domain domains = radeon_domain_from_heap(heap);
512 enum radeon_bo_flag flags = radeon_flags_from_heap(heap);
513 uint32_t base_id;
514
515 if (!slab)
516 return NULL;
517
518 unsigned slab_size = 1 << AMDGPU_SLAB_BO_SIZE_LOG2;
519 slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(&ws->base,
520 slab_size, slab_size,
521 domains, flags));
522 if (!slab->buffer)
523 goto fail;
524
525 assert(slab->buffer->bo);
526
527 slab->base.num_entries = slab->buffer->base.size / entry_size;
528 slab->base.num_free = slab->base.num_entries;
529 slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
530 if (!slab->entries)
531 goto fail_buffer;
532
533 LIST_INITHEAD(&slab->base.free);
534
535 base_id = __sync_fetch_and_add(&ws->next_bo_unique_id, slab->base.num_entries);
536
537 for (unsigned i = 0; i < slab->base.num_entries; ++i) {
538 struct amdgpu_winsys_bo *bo = &slab->entries[i];
539
540 bo->base.alignment = entry_size;
541 bo->base.usage = slab->buffer->base.usage;
542 bo->base.size = entry_size;
543 bo->base.vtbl = &amdgpu_winsys_bo_slab_vtbl;
544 bo->ws = ws;
545 bo->va = slab->buffer->va + i * entry_size;
546 bo->initial_domain = domains;
547 bo->unique_id = base_id + i;
548 bo->u.slab.entry.slab = &slab->base;
549 bo->u.slab.entry.group_index = group_index;
550 bo->u.slab.real = slab->buffer;
551
552 LIST_ADDTAIL(&bo->u.slab.entry.head, &slab->base.free);
553 }
554
555 return &slab->base;
556
557 fail_buffer:
558 amdgpu_winsys_bo_reference(&slab->buffer, NULL);
559 fail:
560 FREE(slab);
561 return NULL;
562 }
563
564 void amdgpu_bo_slab_free(void *priv, struct pb_slab *pslab)
565 {
566 struct amdgpu_slab *slab = amdgpu_slab(pslab);
567
568 for (unsigned i = 0; i < slab->base.num_entries; ++i)
569 amdgpu_bo_remove_fences(&slab->entries[i]);
570
571 FREE(slab->entries);
572 amdgpu_winsys_bo_reference(&slab->buffer, NULL);
573 FREE(slab);
574 }
575
576 #if DEBUG_SPARSE_COMMITS
577 static void
578 sparse_dump(struct amdgpu_winsys_bo *bo, const char *func)
579 {
580 fprintf(stderr, "%s: %p (size=%"PRIu64", num_va_pages=%u) @ %s\n"
581 "Commitments:\n",
582 __func__, bo, bo->base.size, bo->u.sparse.num_va_pages, func);
583
584 struct amdgpu_sparse_backing *span_backing = NULL;
585 uint32_t span_first_backing_page = 0;
586 uint32_t span_first_va_page = 0;
587 uint32_t va_page = 0;
588
589 for (;;) {
590 struct amdgpu_sparse_backing *backing = 0;
591 uint32_t backing_page = 0;
592
593 if (va_page < bo->u.sparse.num_va_pages) {
594 backing = bo->u.sparse.commitments[va_page].backing;
595 backing_page = bo->u.sparse.commitments[va_page].page;
596 }
597
598 if (span_backing &&
599 (backing != span_backing ||
600 backing_page != span_first_backing_page + (va_page - span_first_va_page))) {
601 fprintf(stderr, " %u..%u: backing=%p:%u..%u\n",
602 span_first_va_page, va_page - 1, span_backing,
603 span_first_backing_page,
604 span_first_backing_page + (va_page - span_first_va_page) - 1);
605
606 span_backing = NULL;
607 }
608
609 if (va_page >= bo->u.sparse.num_va_pages)
610 break;
611
612 if (backing && !span_backing) {
613 span_backing = backing;
614 span_first_backing_page = backing_page;
615 span_first_va_page = va_page;
616 }
617
618 va_page++;
619 }
620
621 fprintf(stderr, "Backing:\n");
622
623 list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
624 fprintf(stderr, " %p (size=%"PRIu64")\n", backing, backing->bo->base.size);
625 for (unsigned i = 0; i < backing->num_chunks; ++i)
626 fprintf(stderr, " %u..%u\n", backing->chunks[i].begin, backing->chunks[i].end);
627 }
628 }
629 #endif
630
631 /*
632 * Attempt to allocate the given number of backing pages. Fewer pages may be
633 * allocated (depending on the fragmentation of existing backing buffers),
634 * which will be reflected by a change to *pnum_pages.
635 */
636 static struct amdgpu_sparse_backing *
637 sparse_backing_alloc(struct amdgpu_winsys_bo *bo, uint32_t *pstart_page, uint32_t *pnum_pages)
638 {
639 struct amdgpu_sparse_backing *best_backing;
640 unsigned best_idx;
641 uint32_t best_num_pages;
642
643 best_backing = NULL;
644 best_idx = 0;
645 best_num_pages = 0;
646
647 /* This is a very simple and inefficient best-fit algorithm. */
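/* Move the current best toward the requested size: grow it while it is still
 * smaller than the request, shrink it while it is larger, and keep it once it
 * matches exactly. */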
648 list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
649 for (unsigned idx = 0; idx < backing->num_chunks; ++idx) {
650 uint32_t cur_num_pages = backing->chunks[idx].end - backing->chunks[idx].begin;
651 if ((best_num_pages < *pnum_pages && cur_num_pages > best_num_pages) ||
652 (best_num_pages > *pnum_pages && cur_num_pages < best_num_pages)) {
653 best_backing = backing;
654 best_idx = idx;
655 best_num_pages = cur_num_pages;
656 }
657 }
658 }
659
660 /* Allocate a new backing buffer if necessary. */
661 if (!best_backing) {
662 struct pb_buffer *buf;
663 uint64_t size;
664 uint32_t pages;
665
666 best_backing = CALLOC_STRUCT(amdgpu_sparse_backing);
667 if (!best_backing)
668 return NULL;
669
670 best_backing->max_chunks = 4;
671 best_backing->chunks = CALLOC(best_backing->max_chunks,
672 sizeof(*best_backing->chunks));
673 if (!best_backing->chunks) {
674 FREE(best_backing);
675 return NULL;
676 }
677
678 assert(bo->u.sparse.num_backing_pages < DIV_ROUND_UP(bo->base.size, RADEON_SPARSE_PAGE_SIZE));
679
680 size = MIN3(bo->base.size / 16,
681 8 * 1024 * 1024,
682 bo->base.size - (uint64_t)bo->u.sparse.num_backing_pages * RADEON_SPARSE_PAGE_SIZE);
683 size = MAX2(size, RADEON_SPARSE_PAGE_SIZE);
684
685 buf = amdgpu_bo_create(&bo->ws->base, size, RADEON_SPARSE_PAGE_SIZE,
686 bo->initial_domain,
687 bo->u.sparse.flags | RADEON_FLAG_NO_SUBALLOC);
688 if (!buf) {
689 FREE(best_backing->chunks);
690 FREE(best_backing);
691 return NULL;
692 }
693
694 /* We might have gotten a bigger buffer than requested via caching. */
695 pages = buf->size / RADEON_SPARSE_PAGE_SIZE;
696
697 best_backing->bo = amdgpu_winsys_bo(buf);
698 best_backing->num_chunks = 1;
699 best_backing->chunks[0].begin = 0;
700 best_backing->chunks[0].end = pages;
701
702 list_add(&best_backing->list, &bo->u.sparse.backing);
703 bo->u.sparse.num_backing_pages += pages;
704
705 best_idx = 0;
706 best_num_pages = pages;
707 }
708
709 *pnum_pages = MIN2(*pnum_pages, best_num_pages);
710 *pstart_page = best_backing->chunks[best_idx].begin;
711 best_backing->chunks[best_idx].begin += *pnum_pages;
712
713 if (best_backing->chunks[best_idx].begin >= best_backing->chunks[best_idx].end) {
714 memmove(&best_backing->chunks[best_idx], &best_backing->chunks[best_idx + 1],
715 sizeof(*best_backing->chunks) * (best_backing->num_chunks - best_idx - 1));
716 best_backing->num_chunks--;
717 }
718
719 return best_backing;
720 }
721
722 static void
723 sparse_free_backing_buffer(struct amdgpu_winsys_bo *bo,
724 struct amdgpu_sparse_backing *backing)
725 {
726 struct amdgpu_winsys *ws = backing->bo->ws;
727
728 bo->u.sparse.num_backing_pages -= backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE;
729
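/* Hand the sparse buffer's fences over to the backing buffer so that it is
 * not reused or destroyed while the GPU may still be accessing these pages. */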
730 mtx_lock(&ws->bo_fence_lock);
731 amdgpu_add_fences(backing->bo, bo->num_fences, bo->fences);
732 mtx_unlock(&ws->bo_fence_lock);
733
734 list_del(&backing->list);
735 amdgpu_winsys_bo_reference(&backing->bo, NULL);
736 FREE(backing->chunks);
737 FREE(backing);
738 }
739
740 /*
741 * Return a range of pages from the given backing buffer back into the
742 * free structure.
743 */
744 static bool
745 sparse_backing_free(struct amdgpu_winsys_bo *bo,
746 struct amdgpu_sparse_backing *backing,
747 uint32_t start_page, uint32_t num_pages)
748 {
749 uint32_t end_page = start_page + num_pages;
750 unsigned low = 0;
751 unsigned high = backing->num_chunks;
752
753 /* Find the first chunk with begin >= start_page. */
754 while (low < high) {
755 unsigned mid = low + (high - low) / 2;
756
757 if (backing->chunks[mid].begin >= start_page)
758 high = mid;
759 else
760 low = mid + 1;
761 }
762
763 assert(low >= backing->num_chunks || end_page <= backing->chunks[low].begin);
764 assert(low == 0 || backing->chunks[low - 1].end <= start_page);
765
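/* Re-insert [start_page, end_page) into the sorted chunk list: extend the
 * previous chunk, extend (and possibly merge with) the following chunk, or
 * insert a new chunk, growing the array if necessary. */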
766 if (low > 0 && backing->chunks[low - 1].end == start_page) {
767 backing->chunks[low - 1].end = end_page;
768
769 if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
770 backing->chunks[low - 1].end = backing->chunks[low].end;
771 memmove(&backing->chunks[low], &backing->chunks[low + 1],
772 sizeof(*backing->chunks) * (backing->num_chunks - low - 1));
773 backing->num_chunks--;
774 }
775 } else if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
776 backing->chunks[low].begin = start_page;
777 } else {
778 if (backing->num_chunks >= backing->max_chunks) {
779 unsigned new_max_chunks = 2 * backing->max_chunks;
780 struct amdgpu_sparse_backing_chunk *new_chunks =
781 REALLOC(backing->chunks,
782 sizeof(*backing->chunks) * backing->max_chunks,
783 sizeof(*backing->chunks) * new_max_chunks);
784 if (!new_chunks)
785 return false;
786
787 backing->max_chunks = new_max_chunks;
788 backing->chunks = new_chunks;
789 }
790
791 memmove(&backing->chunks[low + 1], &backing->chunks[low],
792 sizeof(*backing->chunks) * (backing->num_chunks - low));
793 backing->chunks[low].begin = start_page;
794 backing->chunks[low].end = end_page;
795 backing->num_chunks++;
796 }
797
798 if (backing->num_chunks == 1 && backing->chunks[0].begin == 0 &&
799 backing->chunks[0].end == backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE)
800 sparse_free_backing_buffer(bo, backing);
801
802 return true;
803 }
804
805 static void amdgpu_bo_sparse_destroy(struct pb_buffer *_buf)
806 {
807 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
808 int r;
809
810 assert(!bo->bo && bo->sparse);
811
812 r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0,
813 (uint64_t)bo->u.sparse.num_va_pages * RADEON_SPARSE_PAGE_SIZE,
814 bo->va, 0, AMDGPU_VA_OP_CLEAR);
815 if (r) {
816 fprintf(stderr, "amdgpu: clearing PRT VA region on destroy failed (%d)\n", r);
817 }
818
819 while (!list_empty(&bo->u.sparse.backing)) {
820 struct amdgpu_sparse_backing *dummy = NULL;
821 sparse_free_backing_buffer(bo,
822 container_of(bo->u.sparse.backing.next,
823 dummy, list));
824 }
825
826 amdgpu_va_range_free(bo->u.sparse.va_handle);
827 mtx_destroy(&bo->u.sparse.commit_lock);
828 FREE(bo->u.sparse.commitments);
829 FREE(bo);
830 }
831
832 static const struct pb_vtbl amdgpu_winsys_bo_sparse_vtbl = {
833 amdgpu_bo_sparse_destroy
834 /* other functions are never called */
835 };
836
837 static struct pb_buffer *
838 amdgpu_bo_sparse_create(struct amdgpu_winsys *ws, uint64_t size,
839 enum radeon_bo_domain domain,
840 enum radeon_bo_flag flags)
841 {
842 struct amdgpu_winsys_bo *bo;
843 uint64_t map_size;
844 uint64_t va_gap_size;
845 int r;
846
847 /* We use 32-bit page numbers; refuse to attempt allocating sparse buffers
848 * that exceed this limit. This is not really a restriction: we don't have
849 * that much virtual address space anyway.
850 */
851 if (size > (uint64_t)INT32_MAX * RADEON_SPARSE_PAGE_SIZE)
852 return NULL;
853
854 bo = CALLOC_STRUCT(amdgpu_winsys_bo);
855 if (!bo)
856 return NULL;
857
858 pipe_reference_init(&bo->base.reference, 1);
859 bo->base.alignment = RADEON_SPARSE_PAGE_SIZE;
860 bo->base.size = size;
861 bo->base.vtbl = &amdgpu_winsys_bo_sparse_vtbl;
862 bo->ws = ws;
863 bo->initial_domain = domain;
864 bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
865 bo->sparse = true;
866 bo->u.sparse.flags = flags & ~RADEON_FLAG_SPARSE;
867
868 bo->u.sparse.num_va_pages = DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
869 bo->u.sparse.commitments = CALLOC(bo->u.sparse.num_va_pages,
870 sizeof(*bo->u.sparse.commitments));
871 if (!bo->u.sparse.commitments)
872 goto error_alloc_commitments;
873
874 mtx_init(&bo->u.sparse.commit_lock, mtx_plain);
875 LIST_INITHEAD(&bo->u.sparse.backing);
876
877 /* For simplicity, we always map a multiple of the page size. */
878 map_size = align64(size, RADEON_SPARSE_PAGE_SIZE);
879 va_gap_size = ws->check_vm ? 4 * RADEON_SPARSE_PAGE_SIZE : 0;
880 r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
881 map_size + va_gap_size, RADEON_SPARSE_PAGE_SIZE,
882 0, &bo->va, &bo->u.sparse.va_handle, 0);
883 if (r)
884 goto error_va_alloc;
885
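/* Map the whole range as PRT with no backing memory, so that accesses to
 * uncommitted pages are absorbed by the PRT mechanism instead of faulting. */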
886 r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0, size, bo->va,
887 AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_MAP);
888 if (r)
889 goto error_va_map;
890
891 return &bo->base;
892
893 error_va_map:
894 amdgpu_va_range_free(bo->u.sparse.va_handle);
895 error_va_alloc:
896 mtx_destroy(&bo->u.sparse.commit_lock);
897 FREE(bo->u.sparse.commitments);
898 error_alloc_commitments:
899 FREE(bo);
900 return NULL;
901 }
902
903 static bool
904 amdgpu_bo_sparse_commit(struct pb_buffer *buf, uint64_t offset, uint64_t size,
905 bool commit)
906 {
907 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buf);
908 struct amdgpu_sparse_commitment *comm;
909 uint32_t va_page, end_va_page;
910 bool ok = true;
911 int r;
912
913 assert(bo->sparse);
914 assert(offset % RADEON_SPARSE_PAGE_SIZE == 0);
915 assert(offset <= bo->base.size);
916 assert(size <= bo->base.size - offset);
917 assert(size % RADEON_SPARSE_PAGE_SIZE == 0 || offset + size == bo->base.size);
918
919 comm = bo->u.sparse.commitments;
920 va_page = offset / RADEON_SPARSE_PAGE_SIZE;
921 end_va_page = va_page + DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
922
923 mtx_lock(&bo->u.sparse.commit_lock);
924
925 #if DEBUG_SPARSE_COMMITS
926 sparse_dump(bo, __func__);
927 #endif
928
929 if (commit) {
930 while (va_page < end_va_page) {
931 uint32_t span_va_page;
932
933 /* Skip pages that are already committed. */
934 if (comm[va_page].backing) {
935 va_page++;
936 continue;
937 }
938
939 /* Determine length of uncommitted span. */
940 span_va_page = va_page;
941 while (va_page < end_va_page && !comm[va_page].backing)
942 va_page++;
943
944 /* Fill the uncommitted span with chunks of backing memory. */
945 while (span_va_page < va_page) {
946 struct amdgpu_sparse_backing *backing;
947 uint32_t backing_start, backing_size;
948
949 backing_size = va_page - span_va_page;
950 backing = sparse_backing_alloc(bo, &backing_start, &backing_size);
951 if (!backing) {
952 ok = false;
953 goto out;
954 }
955
956 r = amdgpu_bo_va_op_raw(bo->ws->dev, backing->bo->bo,
957 (uint64_t)backing_start * RADEON_SPARSE_PAGE_SIZE,
958 (uint64_t)backing_size * RADEON_SPARSE_PAGE_SIZE,
959 bo->va + (uint64_t)span_va_page * RADEON_SPARSE_PAGE_SIZE,
960 AMDGPU_VM_PAGE_READABLE |
961 AMDGPU_VM_PAGE_WRITEABLE |
962 AMDGPU_VM_PAGE_EXECUTABLE,
963 AMDGPU_VA_OP_REPLACE);
964 if (r) {
965 ok = sparse_backing_free(bo, backing, backing_start, backing_size);
966 assert(ok && "sufficient memory should already be allocated");
967
968 ok = false;
969 goto out;
970 }
971
972 while (backing_size) {
973 comm[span_va_page].backing = backing;
974 comm[span_va_page].page = backing_start;
975 span_va_page++;
976 backing_start++;
977 backing_size--;
978 }
979 }
980 }
981 } else {
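/* Decommit: first replace the whole range with a PRT-only mapping (no
 * backing memory), then return the previously committed pages to their
 * backing buffers' free lists. */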
982 r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0,
983 (uint64_t)(end_va_page - va_page) * RADEON_SPARSE_PAGE_SIZE,
984 bo->va + (uint64_t)va_page * RADEON_SPARSE_PAGE_SIZE,
985 AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_REPLACE);
986 if (r) {
987 ok = false;
988 goto out;
989 }
990
991 while (va_page < end_va_page) {
992 struct amdgpu_sparse_backing *backing;
993 uint32_t backing_start;
994 uint32_t span_pages;
995
996 /* Skip pages that are already uncommitted. */
997 if (!comm[va_page].backing) {
998 va_page++;
999 continue;
1000 }
1001
1002 /* Group contiguous spans of pages. */
1003 backing = comm[va_page].backing;
1004 backing_start = comm[va_page].page;
1005 comm[va_page].backing = NULL;
1006
1007 span_pages = 1;
1008 va_page++;
1009
1010 while (va_page < end_va_page &&
1011 comm[va_page].backing == backing &&
1012 comm[va_page].page == backing_start + span_pages) {
1013 comm[va_page].backing = NULL;
1014 va_page++;
1015 span_pages++;
1016 }
1017
1018 if (!sparse_backing_free(bo, backing, backing_start, span_pages)) {
1019 /* Couldn't allocate tracking data structures, so we have to leak */
1020 fprintf(stderr, "amdgpu: leaking PRT backing memory\n");
1021 ok = false;
1022 }
1023 }
1024 }
1025 out:
1026
1027 mtx_unlock(&bo->u.sparse.commit_lock);
1028
1029 return ok;
1030 }
1031
1032 static unsigned eg_tile_split(unsigned tile_split)
1033 {
1034 switch (tile_split) {
1035 case 0: tile_split = 64; break;
1036 case 1: tile_split = 128; break;
1037 case 2: tile_split = 256; break;
1038 case 3: tile_split = 512; break;
1039 default:
1040 case 4: tile_split = 1024; break;
1041 case 5: tile_split = 2048; break;
1042 case 6: tile_split = 4096; break;
1043 }
1044 return tile_split;
1045 }
1046
1047 static unsigned eg_tile_split_rev(unsigned eg_tile_split)
1048 {
1049 switch (eg_tile_split) {
1050 case 64: return 0;
1051 case 128: return 1;
1052 case 256: return 2;
1053 case 512: return 3;
1054 default:
1055 case 1024: return 4;
1056 case 2048: return 5;
1057 case 4096: return 6;
1058 }
1059 }
1060
1061 static void amdgpu_buffer_get_metadata(struct pb_buffer *_buf,
1062 struct radeon_bo_metadata *md)
1063 {
1064 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
1065 struct amdgpu_bo_info info = {0};
1066 uint64_t tiling_flags;
1067 int r;
1068
1069 assert(bo->bo && "must not be called for slab entries");
1070
1071 r = amdgpu_bo_query_info(bo->bo, &info);
1072 if (r)
1073 return;
1074
1075 tiling_flags = info.metadata.tiling_info;
1076
1077 if (bo->ws->info.chip_class >= GFX9) {
1078 md->u.gfx9.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
1079 } else {
1080 md->u.legacy.microtile = RADEON_LAYOUT_LINEAR;
1081 md->u.legacy.macrotile = RADEON_LAYOUT_LINEAR;
1082
1083 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */
1084 md->u.legacy.macrotile = RADEON_LAYOUT_TILED;
1085 else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
1086 md->u.legacy.microtile = RADEON_LAYOUT_TILED;
1087
1088 md->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1089 md->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1090 md->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1091 md->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
1092 md->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1093 md->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1094 md->u.legacy.scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
1095 }
1096
1097 md->size_metadata = info.metadata.size_metadata;
1098 memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
1099 }
1100
1101 static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
1102 struct radeon_bo_metadata *md)
1103 {
1104 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
1105 struct amdgpu_bo_metadata metadata = {0};
1106 uint64_t tiling_flags = 0;
1107
1108 assert(bo->bo && "must not be called for slab entries");
1109
1110 if (bo->ws->info.chip_class >= GFX9) {
1111 tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, md->u.gfx9.swizzle_mode);
1112 } else {
1113 if (md->u.legacy.macrotile == RADEON_LAYOUT_TILED)
1114 tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
1115 else if (md->u.legacy.microtile == RADEON_LAYOUT_TILED)
1116 tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
1117 else
1118 tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */
1119
1120 tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, md->u.legacy.pipe_config);
1121 tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(md->u.legacy.bankw));
1122 tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(md->u.legacy.bankh));
1123 if (md->u.legacy.tile_split)
1124 tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(md->u.legacy.tile_split));
1125 tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(md->u.legacy.mtilea));
1126 tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(md->u.legacy.num_banks)-1);
1127
1128 if (md->u.legacy.scanout)
1129 tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
1130 else
1131 tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
1132 }
1133
1134 metadata.tiling_info = tiling_flags;
1135 metadata.size_metadata = md->size_metadata;
1136 memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));
1137
1138 amdgpu_bo_set_metadata(bo->bo, &metadata);
1139 }
1140
1141 static struct pb_buffer *
1142 amdgpu_bo_create(struct radeon_winsys *rws,
1143 uint64_t size,
1144 unsigned alignment,
1145 enum radeon_bo_domain domain,
1146 enum radeon_bo_flag flags)
1147 {
1148 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1149 struct amdgpu_winsys_bo *bo;
1150 unsigned usage = 0, pb_cache_bucket = 0;
1151
1152 /* VRAM implies WC. This is not optional. */
1153 assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
1154
1155 /* NO_CPU_ACCESS is valid with VRAM only. */
1156 assert(domain == RADEON_DOMAIN_VRAM || !(flags & RADEON_FLAG_NO_CPU_ACCESS));
1157
1158 /* Sub-allocate small buffers from slabs. */
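/* A request qualifies when suballocation isn't explicitly forbidden, it fits
 * in a slab entry, and its alignment doesn't exceed the entry size. */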
1159 if (!(flags & (RADEON_FLAG_NO_SUBALLOC | RADEON_FLAG_SPARSE)) &&
1160 size <= (1 << AMDGPU_SLAB_MAX_SIZE_LOG2) &&
1161 alignment <= MAX2(1 << AMDGPU_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
1162 struct pb_slab_entry *entry;
1163 int heap = radeon_get_heap_index(domain, flags);
1164
1165 if (heap < 0 || heap >= RADEON_MAX_SLAB_HEAPS)
1166 goto no_slab;
1167
1168 entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
1169 if (!entry) {
1170 /* Clear the cache and try again. */
1171 pb_cache_release_all_buffers(&ws->bo_cache);
1172
1173 entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
1174 }
1175 if (!entry)
1176 return NULL;
1177
1178 bo = NULL;
1179 bo = container_of(entry, bo, u.slab.entry);
1180
1181 pipe_reference_init(&bo->base.reference, 1);
1182
1183 return &bo->base;
1184 }
1185 no_slab:
1186
1187 if (flags & RADEON_FLAG_SPARSE) {
1188 assert(RADEON_SPARSE_PAGE_SIZE % alignment == 0);
1189
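/* Sparse buffers are never CPU-mapped themselves (amdgpu_bo_map asserts
 * !bo->sparse), so their backing memory doesn't need CPU access either. */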
1190 flags |= RADEON_FLAG_NO_CPU_ACCESS;
1191
1192 return amdgpu_bo_sparse_create(ws, size, domain, flags);
1193 }
1194
1195 /* This flag is irrelevant for the cache. */
1196 flags &= ~RADEON_FLAG_NO_SUBALLOC;
1197
1198 /* Align size to page size. This is the minimum alignment for normal
1199 * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
1200 * like constant/uniform buffers, can benefit from better and more reuse.
1201 */
1202 size = align64(size, ws->info.gart_page_size);
1203 alignment = align(alignment, ws->info.gart_page_size);
1204
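/* Only buffers that will never be shared with other processes go through the
 * reusable cache; everything else is allocated and freed directly. */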
1205 bool use_reusable_pool = flags & RADEON_FLAG_NO_INTERPROCESS_SHARING;
1206
1207 if (use_reusable_pool) {
1208 int heap = radeon_get_heap_index(domain, flags);
1209 assert(heap >= 0 && heap < RADEON_MAX_CACHED_HEAPS);
1210 usage = 1 << heap; /* Only set one usage bit for each heap. */
1211
1212 pb_cache_bucket = radeon_get_pb_cache_bucket_index(heap);
1213 assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));
1214
1215 /* Get a buffer from the cache. */
1216 bo = (struct amdgpu_winsys_bo*)
1217 pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage,
1218 pb_cache_bucket);
1219 if (bo)
1220 return &bo->base;
1221 }
1222
1223 /* Create a new one. */
1224 bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
1225 pb_cache_bucket);
1226 if (!bo) {
1227 /* Clear the cache and try again. */
1228 pb_slabs_reclaim(&ws->bo_slabs);
1229 pb_cache_release_all_buffers(&ws->bo_cache);
1230 bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
1231 pb_cache_bucket);
1232 if (!bo)
1233 return NULL;
1234 }
1235
1236 bo->u.real.use_reusable_pool = use_reusable_pool;
1237 return &bo->base;
1238 }
1239
1240 static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
1241 struct winsys_handle *whandle,
1242 unsigned *stride,
1243 unsigned *offset)
1244 {
1245 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1246 struct amdgpu_winsys_bo *bo;
1247 enum amdgpu_bo_handle_type type;
1248 struct amdgpu_bo_import_result result = {0};
1249 uint64_t va;
1250 amdgpu_va_handle va_handle;
1251 struct amdgpu_bo_info info = {0};
1252 enum radeon_bo_domain initial = 0;
1253 int r;
1254
1255 /* Initialize the structure. */
1256 bo = CALLOC_STRUCT(amdgpu_winsys_bo);
1257 if (!bo) {
1258 return NULL;
1259 }
1260
1261 switch (whandle->type) {
1262 case DRM_API_HANDLE_TYPE_SHARED:
1263 type = amdgpu_bo_handle_type_gem_flink_name;
1264 break;
1265 case DRM_API_HANDLE_TYPE_FD:
1266 type = amdgpu_bo_handle_type_dma_buf_fd;
1267 break;
1268 default:
1269 goto error; /* free bo on unknown handle types */
1270 }
1271
1272 r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
1273 if (r)
1274 goto error;
1275
1276 /* Get initial domains. */
1277 r = amdgpu_bo_query_info(result.buf_handle, &info);
1278 if (r)
1279 goto error_query;
1280
1281 r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
1282 result.alloc_size, 1 << 20, 0, &va, &va_handle, 0);
1283 if (r)
1284 goto error_query;
1285
1286 r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP);
1287 if (r)
1288 goto error_va_map;
1289
1290 if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
1291 initial |= RADEON_DOMAIN_VRAM;
1292 if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
1293 initial |= RADEON_DOMAIN_GTT;
1294
1295
1296 pipe_reference_init(&bo->base.reference, 1);
1297 bo->base.alignment = info.phys_alignment;
1298 bo->bo = result.buf_handle;
1299 bo->base.size = result.alloc_size;
1300 bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
1301 bo->ws = ws;
1302 bo->va = va;
1303 bo->u.real.va_handle = va_handle;
1304 bo->initial_domain = initial;
1305 bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
1306 bo->is_shared = true;
1307
1308 if (stride)
1309 *stride = whandle->stride;
1310 if (offset)
1311 *offset = whandle->offset;
1312
1313 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
1314 ws->allocated_vram += align64(bo->base.size, ws->info.gart_page_size);
1315 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
1316 ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);
1317
1318 amdgpu_add_buffer_to_global_list(bo);
1319
1320 return &bo->base;
1321
1322 error_va_map:
1323 amdgpu_va_range_free(va_handle);
1324
1325 error_query:
1326 amdgpu_bo_free(result.buf_handle);
1327
1328 error:
1329 FREE(bo);
1330 return NULL;
1331 }
1332
1333 static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
1334 unsigned stride, unsigned offset,
1335 unsigned slice_size,
1336 struct winsys_handle *whandle)
1337 {
1338 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
1339 enum amdgpu_bo_handle_type type;
1340 int r;
1341
1342 /* Don't allow exports of slab entries and sparse buffers. */
1343 if (!bo->bo)
1344 return false;
1345
1346 bo->u.real.use_reusable_pool = false;
1347
1348 switch (whandle->type) {
1349 case DRM_API_HANDLE_TYPE_SHARED:
1350 type = amdgpu_bo_handle_type_gem_flink_name;
1351 break;
1352 case DRM_API_HANDLE_TYPE_FD:
1353 type = amdgpu_bo_handle_type_dma_buf_fd;
1354 break;
1355 case DRM_API_HANDLE_TYPE_KMS:
1356 type = amdgpu_bo_handle_type_kms;
1357 break;
1358 default:
1359 return false;
1360 }
1361
1362 r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
1363 if (r)
1364 return false;
1365
1366 whandle->stride = stride;
1367 whandle->offset = offset;
1368 whandle->offset += slice_size * whandle->layer;
1369 bo->is_shared = true;
1370 return true;
1371 }
1372
1373 static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
1374 void *pointer, uint64_t size)
1375 {
1376 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1377 amdgpu_bo_handle buf_handle;
1378 struct amdgpu_winsys_bo *bo;
1379 uint64_t va;
1380 amdgpu_va_handle va_handle;
1381
1382 bo = CALLOC_STRUCT(amdgpu_winsys_bo);
1383 if (!bo)
1384 return NULL;
1385
1386 if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle))
1387 goto error;
1388
1389 if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
1390 size, 1 << 12, 0, &va, &va_handle, 0))
1391 goto error_va_alloc;
1392
1393 if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP))
1394 goto error_va_map;
1395
1396 /* Initialize it. */
1397 pipe_reference_init(&bo->base.reference, 1);
1398 bo->bo = buf_handle;
1399 bo->base.alignment = 0;
1400 bo->base.size = size;
1401 bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
1402 bo->ws = ws;
1403 bo->user_ptr = pointer;
1404 bo->va = va;
1405 bo->u.real.va_handle = va_handle;
1406 bo->initial_domain = RADEON_DOMAIN_GTT;
1407 bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
1408
1409 ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);
1410
1411 amdgpu_add_buffer_to_global_list(bo);
1412
1413 return (struct pb_buffer*)bo;
1414
1415 error_va_map:
1416 amdgpu_va_range_free(va_handle);
1417
1418 error_va_alloc:
1419 amdgpu_bo_free(buf_handle);
1420
1421 error:
1422 FREE(bo);
1423 return NULL;
1424 }
1425
1426 static bool amdgpu_bo_is_user_ptr(struct pb_buffer *buf)
1427 {
1428 return ((struct amdgpu_winsys_bo*)buf)->user_ptr != NULL;
1429 }
1430
1431 static bool amdgpu_bo_is_suballocated(struct pb_buffer *buf)
1432 {
1433 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
1434
1435 return !bo->bo && !bo->sparse;
1436 }
1437
1438 static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
1439 {
1440 return ((struct amdgpu_winsys_bo*)buf)->va;
1441 }
1442
1443 void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
1444 {
1445 ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
1446 ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
1447 ws->base.buffer_map = amdgpu_bo_map;
1448 ws->base.buffer_unmap = amdgpu_bo_unmap;
1449 ws->base.buffer_wait = amdgpu_bo_wait;
1450 ws->base.buffer_create = amdgpu_bo_create;
1451 ws->base.buffer_from_handle = amdgpu_bo_from_handle;
1452 ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
1453 ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
1454 ws->base.buffer_is_suballocated = amdgpu_bo_is_suballocated;
1455 ws->base.buffer_get_handle = amdgpu_bo_get_handle;
1456 ws->base.buffer_commit = amdgpu_bo_sparse_commit;
1457 ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
1458 ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
1459 }