gallium/radeon: rename RADEON_FLAG_HANDLE -> RADEON_FLAG_NO_SUBALLOC
[mesa.git] / src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "amdgpu_cs.h"

#include "os/os_time.h"
#include "state_tracker/drm_driver.h"
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdio.h>
#include <inttypes.h>

/* Set to 1 for verbose output showing committed sparse buffer ranges. */
#define DEBUG_SPARSE_COMMITS 0

struct amdgpu_sparse_backing_chunk {
   uint32_t begin, end;
};
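
/* A chunk is a half-open range [begin, end) of free RADEON_SPARSE_PAGE_SIZE
 * pages inside a sparse backing buffer: e.g. allocating four pages from a
 * {0, 16} chunk leaves {4, 16}, and freeing them again merges the range back
 * (see sparse_backing_alloc and sparse_backing_free below).
 */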

static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
                 uint64_t size,
                 unsigned alignment,
                 enum radeon_bo_domain domain,
                 enum radeon_bo_flag flags);

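/* Return true once the buffer is idle (all fences signaled), false if it is
 * still busy when the timeout expires. A timeout of 0 only polls. Shared
 * buffers can't rely on our per-process user fences, so they fall back to
 * the kernel's amdgpu_bo_wait_for_idle.
 */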
static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
                           enum radeon_bo_usage usage)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_winsys *ws = bo->ws;
   int64_t abs_timeout;

   if (timeout == 0) {
      if (p_atomic_read(&bo->num_active_ioctls))
         return false;

   } else {
      abs_timeout = os_time_get_absolute_timeout(timeout);

      /* Wait if any ioctl is being submitted with this buffer. */
      if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
         return false;
   }

   if (bo->is_shared) {
      /* We can't use user fences for shared buffers, because user fences
       * are local to this process only. If we want to wait for all buffer
       * uses in all processes, we have to use amdgpu_bo_wait_for_idle.
       */
      bool buffer_busy = true;
      int r;

      r = amdgpu_bo_wait_for_idle(bo->bo, timeout, &buffer_busy);
      if (r)
         fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__,
                 r);
      return !buffer_busy;
   }

   if (timeout == 0) {
      unsigned idle_fences;
      bool buffer_idle;

      mtx_lock(&ws->bo_fence_lock);

      for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
         if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
            break;
      }

      /* Release the idle fences to avoid checking them again later. */
      for (unsigned i = 0; i < idle_fences; ++i)
         amdgpu_fence_reference(&bo->fences[i], NULL);

      memmove(&bo->fences[0], &bo->fences[idle_fences],
              (bo->num_fences - idle_fences) * sizeof(*bo->fences));
      bo->num_fences -= idle_fences;

      buffer_idle = !bo->num_fences;
      mtx_unlock(&ws->bo_fence_lock);

      return buffer_idle;
   } else {
      bool buffer_idle = true;

      mtx_lock(&ws->bo_fence_lock);
      while (bo->num_fences && buffer_idle) {
         struct pipe_fence_handle *fence = NULL;
         bool fence_idle = false;

         amdgpu_fence_reference(&fence, bo->fences[0]);

         /* Wait for the fence. */
         mtx_unlock(&ws->bo_fence_lock);
         if (amdgpu_fence_wait(fence, abs_timeout, true))
            fence_idle = true;
         else
            buffer_idle = false;
         mtx_lock(&ws->bo_fence_lock);

         /* Release an idle fence to avoid checking it again later, keeping in
          * mind that the fence array may have been modified by other threads.
          */
         if (fence_idle && bo->num_fences && bo->fences[0] == fence) {
            amdgpu_fence_reference(&bo->fences[0], NULL);
            memmove(&bo->fences[0], &bo->fences[1],
                    (bo->num_fences - 1) * sizeof(*bo->fences));
            bo->num_fences--;
         }

         amdgpu_fence_reference(&fence, NULL);
      }
      mtx_unlock(&ws->bo_fence_lock);

      return buffer_idle;
   }
}

static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
      struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
}

static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
{
   for (unsigned i = 0; i < bo->num_fences; ++i)
      amdgpu_fence_reference(&bo->fences[i], NULL);

   FREE(bo->fences);
   bo->num_fences = 0;
   bo->max_fences = 0;
}

void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   assert(bo->bo && "must not be called for slab entries");

   mtx_lock(&bo->ws->global_bo_list_lock);
   LIST_DEL(&bo->u.real.global_list_item);
   bo->ws->num_buffers--;
   mtx_unlock(&bo->ws->global_bo_list_lock);

   amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
   amdgpu_va_range_free(bo->u.real.va_handle);
   amdgpu_bo_free(bo->bo);

   amdgpu_bo_remove_fences(bo);

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->info.gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->info.gart_page_size);

   if (bo->u.real.map_count >= 1) {
      if (bo->initial_domain & RADEON_DOMAIN_VRAM)
         bo->ws->mapped_vram -= bo->base.size;
      else if (bo->initial_domain & RADEON_DOMAIN_GTT)
         bo->ws->mapped_gtt -= bo->base.size;
      bo->ws->num_mapped_buffers--;
   }

   FREE(bo);
}

static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   assert(bo->bo); /* slab buffers have a separate vtbl */

   if (bo->u.real.use_reusable_pool)
      pb_cache_add_buffer(&bo->u.real.cache_entry);
   else
      amdgpu_bo_destroy(_buf);
}

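/* Map a buffer for CPU access. Unless PIPE_TRANSFER_UNSYNCHRONIZED is set,
 * this flushes and waits as needed so the CPU never races the GPU: read-only
 * maps only wait for GPU writes, write maps wait for all GPU use, and
 * PIPE_TRANSFER_DONTBLOCK turns the wait into a try that may return NULL.
 */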
static void *amdgpu_bo_map(struct pb_buffer *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_winsys_bo *real;
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   int r;
   void *cpu = NULL;
   uint64_t offset = 0;

   assert(!bo->sparse);

   /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
      if (usage & PIPE_TRANSFER_DONTBLOCK) {
         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_WRITE)) {
               return NULL;
            }
         } else {
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_READWRITE)) {
               return NULL;
            }
         }
      } else {
         uint64_t time = os_time_get_nano();

         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs) {
               if (amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                            RADEON_USAGE_WRITE)) {
                  cs->flush_cs(cs->flush_data, 0, NULL);
               } else {
                  /* Try to avoid busy-waiting in amdgpu_bo_wait. */
                  if (p_atomic_read(&bo->num_active_ioctls))
                     amdgpu_cs_sync_flush(rcs);
               }
            }

            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_WRITE);
         } else {
            /* Mapping for write. */
            if (cs) {
               if (amdgpu_bo_is_referenced_by_cs(cs, bo)) {
                  cs->flush_cs(cs->flush_data, 0, NULL);
               } else {
                  /* Try to avoid busy-waiting in amdgpu_bo_wait. */
                  if (p_atomic_read(&bo->num_active_ioctls))
                     amdgpu_cs_sync_flush(rcs);
               }
            }

            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_READWRITE);
         }

         bo->ws->buffer_wait_time += os_time_get_nano() - time;
      }
   }

   /* If the buffer is created from user memory, return the user pointer. */
   if (bo->user_ptr)
      return bo->user_ptr;

   if (bo->bo) {
      real = bo;
   } else {
      real = bo->u.slab.real;
      offset = bo->va - real->va;
   }

   r = amdgpu_bo_cpu_map(real->bo, &cpu);
   if (r) {
      /* Clear the cache and try again. */
      pb_cache_release_all_buffers(&real->ws->bo_cache);
      r = amdgpu_bo_cpu_map(real->bo, &cpu);
      if (r)
         return NULL;
   }

   if (p_atomic_inc_return(&real->u.real.map_count) == 1) {
      if (real->initial_domain & RADEON_DOMAIN_VRAM)
         real->ws->mapped_vram += real->base.size;
      else if (real->initial_domain & RADEON_DOMAIN_GTT)
         real->ws->mapped_gtt += real->base.size;
      real->ws->num_mapped_buffers++;
   }
   return (uint8_t*)cpu + offset;
}

static void amdgpu_bo_unmap(struct pb_buffer *buf)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_winsys_bo *real;

   assert(!bo->sparse);

   if (bo->user_ptr)
      return;

   real = bo->bo ? bo : bo->u.slab.real;

   if (p_atomic_dec_zero(&real->u.real.map_count)) {
      if (real->initial_domain & RADEON_DOMAIN_VRAM)
         real->ws->mapped_vram -= real->base.size;
      else if (real->initial_domain & RADEON_DOMAIN_GTT)
         real->ws->mapped_gtt -= real->base.size;
      real->ws->num_mapped_buffers--;
   }

   amdgpu_bo_cpu_unmap(real->bo);
}

static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
   amdgpu_bo_destroy_or_cache
   /* other functions are never called */
};

static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_winsys *ws = bo->ws;

   assert(bo->bo);

   mtx_lock(&ws->global_bo_list_lock);
   LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
   ws->num_buffers++;
   mtx_unlock(&ws->global_bo_list_lock);
}

static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
                                                 uint64_t size,
                                                 unsigned alignment,
                                                 unsigned usage,
                                                 enum radeon_bo_domain initial_domain,
                                                 unsigned flags,
                                                 unsigned pb_cache_bucket)
{
   struct amdgpu_bo_alloc_request request = {0};
   amdgpu_bo_handle buf_handle;
   uint64_t va = 0;
   struct amdgpu_winsys_bo *bo;
   amdgpu_va_handle va_handle;
   unsigned va_gap_size;
   int r;

   assert(initial_domain & RADEON_DOMAIN_VRAM_GTT);
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   pb_cache_init_entry(&ws->bo_cache, &bo->u.real.cache_entry, &bo->base,
                       pb_cache_bucket);
   request.alloc_size = size;
   request.phys_alignment = alignment;

   if (initial_domain & RADEON_DOMAIN_VRAM)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
   if (initial_domain & RADEON_DOMAIN_GTT)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;

   if (flags & RADEON_FLAG_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
   if (flags & RADEON_FLAG_NO_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
   if (flags & RADEON_FLAG_GTT_WC)
      request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;

   r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
      fprintf(stderr, "amdgpu:    size      : %"PRIu64" bytes\n", size);
      fprintf(stderr, "amdgpu:    alignment : %u bytes\n", alignment);
      fprintf(stderr, "amdgpu:    domains   : %u\n", initial_domain);
      goto error_bo_alloc;
   }

   va_gap_size = ws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
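   /* For large BOs, raise the VA alignment to the PTE fragment size, which
    * likely lets the kernel map them with larger page-table fragments.
    */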
   if (size > ws->info.pte_fragment_size)
      alignment = MAX2(alignment, ws->info.pte_fragment_size);
   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size + va_gap_size, alignment, 0, &va, &va_handle, 0);
   if (r)
      goto error_va_alloc;

   r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = alignment;
   bo->base.usage = usage;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->bo = buf_handle;
   bo->va = va;
   bo->u.real.va_handle = va_handle;
   bo->initial_domain = initial_domain;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   if (initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align64(size, ws->info.gart_page_size);
   else if (initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align64(size, ws->info.gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error_bo_alloc:
   FREE(bo);
   return NULL;
}

bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
      return false;
   }

   return amdgpu_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
}

bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
{
   struct amdgpu_winsys_bo *bo = NULL; /* fix container_of */
   bo = container_of(entry, bo, u.slab.entry);

   return amdgpu_bo_can_reclaim(&bo->base);
}

static void amdgpu_bo_slab_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   assert(!bo->bo);

   pb_slab_free(&bo->ws->bo_slabs, &bo->u.slab.entry);
}

static const struct pb_vtbl amdgpu_winsys_bo_slab_vtbl = {
   amdgpu_bo_slab_destroy
   /* other functions are never called */
};

struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
                                     unsigned entry_size,
                                     unsigned group_index)
{
   struct amdgpu_winsys *ws = priv;
   struct amdgpu_slab *slab = CALLOC_STRUCT(amdgpu_slab);
   enum radeon_bo_domain domains;
   enum radeon_bo_flag flags = 0;
   uint32_t base_id;

   if (!slab)
      return NULL;

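   /* The heap index encodes flags and domain, and it must match the
    * encoding used by amdgpu_bo_create: bit 0 = GTT_WC, bit 1 = CPU_ACCESS,
    * bits 2+ select the domain (0 = VRAM, 1 = VRAM_GTT, 2 = GTT).
    */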
   if (heap & 1)
      flags |= RADEON_FLAG_GTT_WC;
   if (heap & 2)
      flags |= RADEON_FLAG_CPU_ACCESS;

   switch (heap >> 2) {
   case 0:
      domains = RADEON_DOMAIN_VRAM;
      break;
   default:
   case 1:
      domains = RADEON_DOMAIN_VRAM_GTT;
      break;
   case 2:
      domains = RADEON_DOMAIN_GTT;
      break;
   }

   slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(&ws->base,
                                                    64 * 1024, 64 * 1024,
                                                    domains, flags));
   if (!slab->buffer)
      goto fail;

   assert(slab->buffer->bo);

   slab->base.num_entries = slab->buffer->base.size / entry_size;
   slab->base.num_free = slab->base.num_entries;
   slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
   if (!slab->entries)
      goto fail_buffer;

   LIST_INITHEAD(&slab->base.free);

   base_id = __sync_fetch_and_add(&ws->next_bo_unique_id, slab->base.num_entries);

   for (unsigned i = 0; i < slab->base.num_entries; ++i) {
      struct amdgpu_winsys_bo *bo = &slab->entries[i];

      bo->base.alignment = entry_size;
      bo->base.usage = slab->buffer->base.usage;
      bo->base.size = entry_size;
      bo->base.vtbl = &amdgpu_winsys_bo_slab_vtbl;
      bo->ws = ws;
      bo->va = slab->buffer->va + i * entry_size;
      bo->initial_domain = domains;
      bo->unique_id = base_id + i;
      bo->u.slab.entry.slab = &slab->base;
      bo->u.slab.entry.group_index = group_index;
      bo->u.slab.real = slab->buffer;

      LIST_ADDTAIL(&bo->u.slab.entry.head, &slab->base.free);
   }

   return &slab->base;

fail_buffer:
   amdgpu_winsys_bo_reference(&slab->buffer, NULL);
fail:
   FREE(slab);
   return NULL;
}

void amdgpu_bo_slab_free(void *priv, struct pb_slab *pslab)
{
   struct amdgpu_slab *slab = amdgpu_slab(pslab);

   for (unsigned i = 0; i < slab->base.num_entries; ++i)
      amdgpu_bo_remove_fences(&slab->entries[i]);

   FREE(slab->entries);
   amdgpu_winsys_bo_reference(&slab->buffer, NULL);
   FREE(slab);
}

#if DEBUG_SPARSE_COMMITS
static void
sparse_dump(struct amdgpu_winsys_bo *bo, const char *func)
{
   fprintf(stderr, "%s: %p (size=%"PRIu64", num_va_pages=%u) @ %s\n"
                   "Commitments:\n",
           __func__, bo, bo->base.size, bo->u.sparse.num_va_pages, func);

   struct amdgpu_sparse_backing *span_backing = NULL;
   uint32_t span_first_backing_page = 0;
   uint32_t span_first_va_page = 0;
   uint32_t va_page = 0;

   for (;;) {
      struct amdgpu_sparse_backing *backing = 0;
      uint32_t backing_page = 0;

      if (va_page < bo->u.sparse.num_va_pages) {
         backing = bo->u.sparse.commitments[va_page].backing;
         backing_page = bo->u.sparse.commitments[va_page].page;
      }

      if (span_backing &&
          (backing != span_backing ||
           backing_page != span_first_backing_page + (va_page - span_first_va_page))) {
         fprintf(stderr, " %u..%u: backing=%p:%u..%u\n",
                 span_first_va_page, va_page - 1, span_backing,
                 span_first_backing_page,
                 span_first_backing_page + (va_page - span_first_va_page) - 1);

         span_backing = NULL;
      }

      if (va_page >= bo->u.sparse.num_va_pages)
         break;

      if (backing && !span_backing) {
         span_backing = backing;
         span_first_backing_page = backing_page;
         span_first_va_page = va_page;
      }

      va_page++;
   }

   fprintf(stderr, "Backing:\n");

   list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
      fprintf(stderr, " %p (size=%"PRIu64")\n", backing, backing->bo->base.size);
      for (unsigned i = 0; i < backing->num_chunks; ++i)
         fprintf(stderr, "   %u..%u\n", backing->chunks[i].begin, backing->chunks[i].end);
   }
}
#endif

/*
 * Attempt to allocate the given number of backing pages. Fewer pages may be
 * allocated (depending on the fragmentation of existing backing buffers),
 * which will be reflected by a change to *pnum_pages.
 */
static struct amdgpu_sparse_backing *
sparse_backing_alloc(struct amdgpu_winsys_bo *bo, uint32_t *pstart_page, uint32_t *pnum_pages)
{
   struct amdgpu_sparse_backing *best_backing;
   unsigned best_idx;
   uint32_t best_num_pages;

   best_backing = NULL;
   best_idx = 0;
   best_num_pages = 0;

   /* This is a very simple and inefficient best-fit algorithm. */
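   /* The comparison below nudges the running best toward the requested size
    * from both sides: while the best chunk is still smaller than the
    * request, any larger chunk replaces it; once it is larger, any smaller
    * chunk does, so the final pick is roughly the closest fit.
    */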
   list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
      for (unsigned idx = 0; idx < backing->num_chunks; ++idx) {
         uint32_t cur_num_pages = backing->chunks[idx].end - backing->chunks[idx].begin;
         if ((best_num_pages < *pnum_pages && cur_num_pages > best_num_pages) ||
             (best_num_pages > *pnum_pages && cur_num_pages < best_num_pages)) {
            best_backing = backing;
            best_idx = idx;
            best_num_pages = cur_num_pages;
         }
      }
   }

   /* Allocate a new backing buffer if necessary. */
   if (!best_backing) {
      struct pb_buffer *buf;
      uint64_t size;
      uint32_t pages;

      best_backing = CALLOC_STRUCT(amdgpu_sparse_backing);
      if (!best_backing)
         return NULL;

      best_backing->max_chunks = 4;
      best_backing->chunks = CALLOC(best_backing->max_chunks,
                                    sizeof(*best_backing->chunks));
      if (!best_backing->chunks) {
         FREE(best_backing);
         return NULL;
      }

      assert(bo->u.sparse.num_backing_pages < DIV_ROUND_UP(bo->base.size, RADEON_SPARSE_PAGE_SIZE));

      size = MIN3(bo->base.size / 16,
                  8 * 1024 * 1024,
                  bo->base.size - (uint64_t)bo->u.sparse.num_backing_pages * RADEON_SPARSE_PAGE_SIZE);
      size = MAX2(size, RADEON_SPARSE_PAGE_SIZE);

      buf = amdgpu_bo_create(&bo->ws->base, size, RADEON_SPARSE_PAGE_SIZE,
                             bo->initial_domain,
                             bo->u.sparse.flags | RADEON_FLAG_NO_SUBALLOC);
      if (!buf) {
         FREE(best_backing->chunks);
         FREE(best_backing);
         return NULL;
      }

      /* We might have gotten a bigger buffer than requested via caching. */
      pages = buf->size / RADEON_SPARSE_PAGE_SIZE;

      best_backing->bo = amdgpu_winsys_bo(buf);
      best_backing->num_chunks = 1;
      best_backing->chunks[0].begin = 0;
      best_backing->chunks[0].end = pages;

      list_add(&best_backing->list, &bo->u.sparse.backing);
      bo->u.sparse.num_backing_pages += pages;

      best_idx = 0;
      best_num_pages = pages;
   }

   *pnum_pages = MIN2(*pnum_pages, best_num_pages);
   *pstart_page = best_backing->chunks[best_idx].begin;
   best_backing->chunks[best_idx].begin += *pnum_pages;

   if (best_backing->chunks[best_idx].begin >= best_backing->chunks[best_idx].end) {
      memmove(&best_backing->chunks[best_idx], &best_backing->chunks[best_idx + 1],
              sizeof(*best_backing->chunks) * (best_backing->num_chunks - best_idx - 1));
      best_backing->num_chunks--;
   }

   return best_backing;
}

static void
sparse_free_backing_buffer(struct amdgpu_winsys_bo *bo,
                           struct amdgpu_sparse_backing *backing)
{
   struct amdgpu_winsys *ws = backing->bo->ws;

   bo->u.sparse.num_backing_pages -= backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE;

   mtx_lock(&ws->bo_fence_lock);
   amdgpu_add_fences(backing->bo, bo->num_fences, bo->fences);
   mtx_unlock(&ws->bo_fence_lock);

   list_del(&backing->list);
   amdgpu_winsys_bo_reference(&backing->bo, NULL);
   FREE(backing->chunks);
   FREE(backing);
}

/*
 * Return a range of pages from the given backing buffer back into the
 * free structure.
 */
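/* Freed pages that touch an existing free chunk are merged into it (on one
 * or both sides); otherwise a new chunk is inserted, growing the chunk
 * array when needed. Once every page of a backing buffer is free again,
 * the whole buffer is released via sparse_free_backing_buffer.
 */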
static bool
sparse_backing_free(struct amdgpu_winsys_bo *bo,
                    struct amdgpu_sparse_backing *backing,
                    uint32_t start_page, uint32_t num_pages)
{
   uint32_t end_page = start_page + num_pages;
   unsigned low = 0;
   unsigned high = backing->num_chunks;

   /* Find the first chunk with begin >= start_page. */
   while (low < high) {
      unsigned mid = low + (high - low) / 2;

      if (backing->chunks[mid].begin >= start_page)
         high = mid;
      else
         low = mid + 1;
   }

   assert(low >= backing->num_chunks || end_page <= backing->chunks[low].begin);
   assert(low == 0 || backing->chunks[low - 1].end <= start_page);

   if (low > 0 && backing->chunks[low - 1].end == start_page) {
      backing->chunks[low - 1].end = end_page;

      if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
         backing->chunks[low - 1].end = backing->chunks[low].end;
         memmove(&backing->chunks[low], &backing->chunks[low + 1],
                 sizeof(*backing->chunks) * (backing->num_chunks - low - 1));
         backing->num_chunks--;
      }
   } else if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
      backing->chunks[low].begin = start_page;
   } else {
      if (backing->num_chunks >= backing->max_chunks) {
         unsigned new_max_chunks = 2 * backing->max_chunks;
         struct amdgpu_sparse_backing_chunk *new_chunks =
            REALLOC(backing->chunks,
                    sizeof(*backing->chunks) * backing->max_chunks,
                    sizeof(*backing->chunks) * new_max_chunks);
         if (!new_chunks)
            return false;

         backing->max_chunks = new_max_chunks;
         backing->chunks = new_chunks;
      }

      memmove(&backing->chunks[low + 1], &backing->chunks[low],
              sizeof(*backing->chunks) * (backing->num_chunks - low));
      backing->chunks[low].begin = start_page;
      backing->chunks[low].end = end_page;
      backing->num_chunks++;
   }

   if (backing->num_chunks == 1 && backing->chunks[0].begin == 0 &&
       backing->chunks[0].end == backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE)
      sparse_free_backing_buffer(bo, backing);

   return true;
}

static void amdgpu_bo_sparse_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   int r;

   assert(!bo->bo && bo->sparse);

   r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0,
                           (uint64_t)bo->u.sparse.num_va_pages * RADEON_SPARSE_PAGE_SIZE,
                           bo->va, 0, AMDGPU_VA_OP_CLEAR);
   if (r) {
      fprintf(stderr, "amdgpu: clearing PRT VA region on destroy failed (%d)\n", r);
   }

   while (!list_empty(&bo->u.sparse.backing)) {
      struct amdgpu_sparse_backing *dummy = NULL;
      sparse_free_backing_buffer(bo,
                                 container_of(bo->u.sparse.backing.next,
                                              dummy, list));
   }

   amdgpu_va_range_free(bo->u.sparse.va_handle);
   mtx_destroy(&bo->u.sparse.commit_lock);
   FREE(bo->u.sparse.commitments);
   FREE(bo);
}

static const struct pb_vtbl amdgpu_winsys_bo_sparse_vtbl = {
   amdgpu_bo_sparse_destroy
   /* other functions are never called */
};

static struct pb_buffer *
amdgpu_bo_sparse_create(struct amdgpu_winsys *ws, uint64_t size,
                        enum radeon_bo_domain domain,
                        enum radeon_bo_flag flags)
{
   struct amdgpu_winsys_bo *bo;
   uint64_t map_size;
   uint64_t va_gap_size;
   int r;

   /* We use 32-bit page numbers; refuse to attempt allocating sparse buffers
    * that exceed this limit. This is not really a restriction: we don't have
    * that much virtual address space anyway.
    */
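   /* With a 64 KiB RADEON_SPARSE_PAGE_SIZE this cap works out to roughly
    * 2^31 * 2^16 = 2^47 bytes (128 TiB).
    */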
   if (size > (uint64_t)INT32_MAX * RADEON_SPARSE_PAGE_SIZE)
      return NULL;

   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo)
      return NULL;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = RADEON_SPARSE_PAGE_SIZE;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_sparse_vtbl;
   bo->ws = ws;
   bo->initial_domain = domain;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
   bo->sparse = true;
   bo->u.sparse.flags = flags & ~RADEON_FLAG_SPARSE;

   bo->u.sparse.num_va_pages = DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
   bo->u.sparse.commitments = CALLOC(bo->u.sparse.num_va_pages,
                                     sizeof(*bo->u.sparse.commitments));
   if (!bo->u.sparse.commitments)
      goto error_alloc_commitments;

   mtx_init(&bo->u.sparse.commit_lock, mtx_plain);
   LIST_INITHEAD(&bo->u.sparse.backing);

   /* For simplicity, we always map a multiple of the page size. */
   map_size = align64(size, RADEON_SPARSE_PAGE_SIZE);
   va_gap_size = ws->check_vm ? 4 * RADEON_SPARSE_PAGE_SIZE : 0;
   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             map_size + va_gap_size, RADEON_SPARSE_PAGE_SIZE,
                             0, &bo->va, &bo->u.sparse.va_handle, 0);
   if (r)
      goto error_va_alloc;

   r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0, size, bo->va,
                           AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   return &bo->base;

error_va_map:
   amdgpu_va_range_free(bo->u.sparse.va_handle);
error_va_alloc:
   mtx_destroy(&bo->u.sparse.commit_lock);
   FREE(bo->u.sparse.commitments);
error_alloc_commitments:
   FREE(bo);
   return NULL;
}

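/* Commit or de-commit the page-aligned range [offset, offset + size) of a
 * sparse buffer. Committing backs the range with real memory obtained from
 * sparse_backing_alloc; de-committing remaps it as PRT-only and returns the
 * pages to their backing buffer's free list.
 */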
static bool
amdgpu_bo_sparse_commit(struct pb_buffer *buf, uint64_t offset, uint64_t size,
                        bool commit)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buf);
   struct amdgpu_sparse_commitment *comm;
   uint32_t va_page, end_va_page;
   bool ok = true;
   int r;

   assert(bo->sparse);
   assert(offset % RADEON_SPARSE_PAGE_SIZE == 0);
   assert(offset <= bo->base.size);
   assert(size <= bo->base.size - offset);
   assert(size % RADEON_SPARSE_PAGE_SIZE == 0 || offset + size == bo->base.size);

   comm = bo->u.sparse.commitments;
   va_page = offset / RADEON_SPARSE_PAGE_SIZE;
   end_va_page = va_page + DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);

   mtx_lock(&bo->u.sparse.commit_lock);

#if DEBUG_SPARSE_COMMITS
   sparse_dump(bo, __func__);
#endif

   if (commit) {
      while (va_page < end_va_page) {
         uint32_t span_va_page;

         /* Skip pages that are already committed. */
         if (comm[va_page].backing) {
            va_page++;
            continue;
         }

         /* Determine length of uncommitted span. */
         span_va_page = va_page;
         while (va_page < end_va_page && !comm[va_page].backing)
            va_page++;

         /* Fill the uncommitted span with chunks of backing memory. */
         while (span_va_page < va_page) {
            struct amdgpu_sparse_backing *backing;
            uint32_t backing_start, backing_size;

            backing_size = va_page - span_va_page;
            backing = sparse_backing_alloc(bo, &backing_start, &backing_size);
            if (!backing) {
               ok = false;
               goto out;
            }

            r = amdgpu_bo_va_op_raw(bo->ws->dev, backing->bo->bo,
                                    (uint64_t)backing_start * RADEON_SPARSE_PAGE_SIZE,
                                    (uint64_t)backing_size * RADEON_SPARSE_PAGE_SIZE,
                                    bo->va + (uint64_t)span_va_page * RADEON_SPARSE_PAGE_SIZE,
                                    AMDGPU_VM_PAGE_READABLE |
                                    AMDGPU_VM_PAGE_WRITEABLE |
                                    AMDGPU_VM_PAGE_EXECUTABLE,
                                    AMDGPU_VA_OP_REPLACE);
            if (r) {
               ok = sparse_backing_free(bo, backing, backing_start, backing_size);
               assert(ok && "sufficient memory should already be allocated");

               ok = false;
               goto out;
            }

            while (backing_size) {
               comm[span_va_page].backing = backing;
               comm[span_va_page].page = backing_start;
               span_va_page++;
               backing_start++;
               backing_size--;
            }
         }
      }
   } else {
      r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0,
                              (uint64_t)(end_va_page - va_page) * RADEON_SPARSE_PAGE_SIZE,
                              bo->va + (uint64_t)va_page * RADEON_SPARSE_PAGE_SIZE,
                              AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_REPLACE);
      if (r) {
         ok = false;
         goto out;
      }

      while (va_page < end_va_page) {
         struct amdgpu_sparse_backing *backing;
         uint32_t backing_start;
         uint32_t span_pages;

         /* Skip pages that are already uncommitted. */
         if (!comm[va_page].backing) {
            va_page++;
            continue;
         }

         /* Group contiguous spans of pages. */
         backing = comm[va_page].backing;
         backing_start = comm[va_page].page;
         comm[va_page].backing = NULL;

         span_pages = 1;
         va_page++;

         while (va_page < end_va_page &&
                comm[va_page].backing == backing &&
                comm[va_page].page == backing_start + span_pages) {
            comm[va_page].backing = NULL;
            va_page++;
            span_pages++;
         }

         if (!sparse_backing_free(bo, backing, backing_start, span_pages)) {
            /* Couldn't allocate tracking data structures, so we have to leak */
            fprintf(stderr, "amdgpu: leaking PRT backing memory\n");
            ok = false;
         }
      }
   }
out:

   mtx_unlock(&bo->u.sparse.commit_lock);

   return ok;
}

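/* eg_tile_split converts the TILE_SPLIT field encoding (0..6) stored in the
 * tiling flags into a tile split in bytes, e.g. encoding 2 <-> 256 bytes;
 * eg_tile_split_rev converts back. Out-of-range values fall back to
 * 1024 bytes / encoding 4.
 */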
static unsigned eg_tile_split(unsigned tile_split)
{
   switch (tile_split) {
   case 0:     tile_split = 64;    break;
   case 1:     tile_split = 128;   break;
   case 2:     tile_split = 256;   break;
   case 3:     tile_split = 512;   break;
   default:
   case 4:     tile_split = 1024;  break;
   case 5:     tile_split = 2048;  break;
   case 6:     tile_split = 4096;  break;
   }
   return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
   switch (eg_tile_split) {
   case 64:    return 0;
   case 128:   return 1;
   case 256:   return 2;
   case 512:   return 3;
   default:
   case 1024:  return 4;
   case 2048:  return 5;
   case 4096:  return 6;
   }
}

static void amdgpu_buffer_get_metadata(struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_info info = {0};
   uint64_t tiling_flags;
   int r;

   assert(bo->bo && "must not be called for slab entries");

   r = amdgpu_bo_query_info(bo->bo, &info);
   if (r)
      return;

   tiling_flags = info.metadata.tiling_info;

   if (bo->ws->info.chip_class >= GFX9) {
      md->u.gfx9.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
   } else {
      md->u.legacy.microtile = RADEON_LAYOUT_LINEAR;
      md->u.legacy.macrotile = RADEON_LAYOUT_LINEAR;

      if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4)  /* 2D_TILED_THIN1 */
         md->u.legacy.macrotile = RADEON_LAYOUT_TILED;
      else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
         md->u.legacy.microtile = RADEON_LAYOUT_TILED;

      md->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
      md->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
      md->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
      md->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
      md->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
      md->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
      md->u.legacy.scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
   }

   md->size_metadata = info.metadata.size_metadata;
   memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
}

static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_metadata metadata = {0};
   uint64_t tiling_flags = 0;

   assert(bo->bo && "must not be called for slab entries");

   if (bo->ws->info.chip_class >= GFX9) {
      tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, md->u.gfx9.swizzle_mode);
   } else {
      if (md->u.legacy.macrotile == RADEON_LAYOUT_TILED)
         tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
      else if (md->u.legacy.microtile == RADEON_LAYOUT_TILED)
         tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
      else
         tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */

      tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, md->u.legacy.pipe_config);
      tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(md->u.legacy.bankw));
      tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(md->u.legacy.bankh));
      if (md->u.legacy.tile_split)
         tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(md->u.legacy.tile_split));
      tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(md->u.legacy.mtilea));
      tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(md->u.legacy.num_banks)-1);

      if (md->u.legacy.scanout)
         tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
      else
         tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
   }

   metadata.tiling_info = tiling_flags;
   metadata.size_metadata = md->size_metadata;
   memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));

   amdgpu_bo_set_metadata(bo->bo, &metadata);
}

static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
                 uint64_t size,
                 unsigned alignment,
                 enum radeon_bo_domain domain,
                 enum radeon_bo_flag flags)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   unsigned usage = 0, pb_cache_bucket;

   /* Sub-allocate small buffers from slabs. */
   if (!(flags & (RADEON_FLAG_NO_SUBALLOC | RADEON_FLAG_SPARSE)) &&
       size <= (1 << AMDGPU_SLAB_MAX_SIZE_LOG2) &&
       alignment <= MAX2(1 << AMDGPU_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
      struct pb_slab_entry *entry;
      unsigned heap = 0;

      if (flags & RADEON_FLAG_GTT_WC)
         heap |= 1;
      if (flags & RADEON_FLAG_CPU_ACCESS)
         heap |= 2;
      if (flags & ~(RADEON_FLAG_GTT_WC | RADEON_FLAG_CPU_ACCESS))
         goto no_slab;

      switch (domain) {
      case RADEON_DOMAIN_VRAM:
         heap |= 0 * 4;
         break;
      case RADEON_DOMAIN_VRAM_GTT:
         heap |= 1 * 4;
         break;
      case RADEON_DOMAIN_GTT:
         heap |= 2 * 4;
         break;
      default:
         goto no_slab;
      }

      entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
      if (!entry) {
         /* Clear the cache and try again. */
         pb_cache_release_all_buffers(&ws->bo_cache);

         entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
      }
      if (!entry)
         return NULL;

      bo = NULL;
      bo = container_of(entry, bo, u.slab.entry);

      pipe_reference_init(&bo->base.reference, 1);

      return &bo->base;
   }
no_slab:

   if (flags & RADEON_FLAG_SPARSE) {
      assert(RADEON_SPARSE_PAGE_SIZE % alignment == 0);
      assert(!(flags & RADEON_FLAG_CPU_ACCESS));

      flags |= RADEON_FLAG_NO_CPU_ACCESS;

      return amdgpu_bo_sparse_create(ws, size, domain, flags);
   }

   /* This flag is irrelevant for the cache. */
   flags &= ~RADEON_FLAG_NO_SUBALLOC;

   /* Align size to page size. This is the minimum alignment for normal
    * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
    * like constant/uniform buffers, can benefit from better and more reuse.
    */
   size = align64(size, ws->info.gart_page_size);
   alignment = align(alignment, ws->info.gart_page_size);

   /* Only set one usage bit each for domains and flags, or the cache manager
    * might consider different sets of domains / flags compatible
    */
   if (domain == RADEON_DOMAIN_VRAM_GTT)
      usage = 1 << 2;
   else
      usage = domain >> 1;
   assert(flags < sizeof(usage) * 8 - 3);
   usage |= 1 << (flags + 3);
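   /* Note: flags is used here as a small integer, not a bitmask, so each
    * distinct flag combination maps to its own usage bit; the assert above
    * keeps the shift within the width of usage.
    */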

   /* Determine the pb_cache bucket for minimizing pb_cache misses. */
   pb_cache_bucket = 0;
   if (domain & RADEON_DOMAIN_VRAM) /* VRAM or VRAM+GTT */
      pb_cache_bucket += 1;
   if (flags == RADEON_FLAG_GTT_WC) /* WC */
      pb_cache_bucket += 2;
   assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));
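   /* This yields four buckets: 0 = GTT, 1 = VRAM (or VRAM+GTT),
    * 2 = write-combined GTT, 3 = write-combined VRAM.
    */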

   /* Get a buffer from the cache. */
   bo = (struct amdgpu_winsys_bo*)
        pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage,
                                pb_cache_bucket);
   if (bo)
      return &bo->base;

   /* Create a new one. */
   bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
                         pb_cache_bucket);
   if (!bo) {
      /* Clear the cache and try again. */
      pb_slabs_reclaim(&ws->bo_slabs);
      pb_cache_release_all_buffers(&ws->bo_cache);
      bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
                            pb_cache_bucket);
      if (!bo)
         return NULL;
   }

   bo->u.real.use_reusable_pool = true;
   return &bo->base;
}

static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
                                               struct winsys_handle *whandle,
                                               unsigned *stride,
                                               unsigned *offset)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   enum amdgpu_bo_handle_type type;
   struct amdgpu_bo_import_result result = {0};
   uint64_t va;
   amdgpu_va_handle va_handle;
   struct amdgpu_bo_info info = {0};
   enum radeon_bo_domain initial = 0;
   int r;

   /* Initialize the structure. */
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   default:
      /* Unknown handle type; don't leak the BO allocated above. */
      goto error;
   }

   r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
   if (r)
      goto error;

   /* Get initial domains. */
   r = amdgpu_bo_query_info(result.buf_handle, &info);
   if (r)
      goto error_query;

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             result.alloc_size, 1 << 20, 0, &va, &va_handle, 0);
   if (r)
      goto error_query;

   r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
      initial |= RADEON_DOMAIN_VRAM;
   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
      initial |= RADEON_DOMAIN_GTT;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = info.phys_alignment;
   bo->bo = result.buf_handle;
   bo->base.size = result.alloc_size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->va = va;
   bo->u.real.va_handle = va_handle;
   bo->initial_domain = initial;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
   bo->is_shared = true;

   if (stride)
      *stride = whandle->stride;
   if (offset)
      *offset = whandle->offset;

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align64(bo->base.size, ws->info.gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return &bo->base;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_query:
   amdgpu_bo_free(result.buf_handle);

error:
   FREE(bo);
   return NULL;
}

static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
                                 unsigned stride, unsigned offset,
                                 unsigned slice_size,
                                 struct winsys_handle *whandle)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
   enum amdgpu_bo_handle_type type;
   int r;

   if (!bo->bo) {
      offset += bo->va - bo->u.slab.real->va;
      bo = bo->u.slab.real;
   }

   bo->u.real.use_reusable_pool = false;

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   case DRM_API_HANDLE_TYPE_KMS:
      type = amdgpu_bo_handle_type_kms;
      break;
   default:
      return false;
   }

   r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
   if (r)
      return false;

   whandle->stride = stride;
   whandle->offset = offset;
   whandle->offset += slice_size * whandle->layer;
   bo->is_shared = true;
   return true;
}

static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
                                            void *pointer, uint64_t size)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   amdgpu_bo_handle buf_handle;
   struct amdgpu_winsys_bo *bo;
   uint64_t va;
   amdgpu_va_handle va_handle;

   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo)
      return NULL;

   if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle))
      goto error;

   if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size, 1 << 12, 0, &va, &va_handle, 0))
      goto error_va_alloc;

   if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP))
      goto error_va_map;

   /* Initialize it. */
   pipe_reference_init(&bo->base.reference, 1);
   bo->bo = buf_handle;
   bo->base.alignment = 0;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->user_ptr = pointer;
   bo->va = va;
   bo->u.real.va_handle = va_handle;
   bo->initial_domain = RADEON_DOMAIN_GTT;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return (struct pb_buffer*)bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error:
   FREE(bo);
   return NULL;
}

static bool amdgpu_bo_is_user_ptr(struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->user_ptr != NULL;
}

static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->va;
}

void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
   ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
   ws->base.buffer_map = amdgpu_bo_map;
   ws->base.buffer_unmap = amdgpu_bo_unmap;
   ws->base.buffer_wait = amdgpu_bo_wait;
   ws->base.buffer_create = amdgpu_bo_create;
   ws->base.buffer_from_handle = amdgpu_bo_from_handle;
   ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
   ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
   ws->base.buffer_get_handle = amdgpu_bo_get_handle;
   ws->base.buffer_commit = amdgpu_bo_sparse_commit;
   ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
   ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
}