winsys/amdgpu: take fences when freeing a backing buffer
[mesa.git] / src / gallium / winsys / amdgpu / drm / amdgpu_bo.c
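The change below makes sparse_free_backing_buffer() add the sparse buffer's fences to the backing buffer before the backing buffer is released, so that a backing buffer the GPU may still be using is not destroyed or returned to the cache prematurely.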
/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */
#include "amdgpu_cs.h"

#include "os/os_time.h"
#include "state_tracker/drm_driver.h"
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdio.h>
#include <inttypes.h>


struct amdgpu_sparse_backing_chunk {
   uint32_t begin, end;
};

static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
                 uint64_t size,
                 unsigned alignment,
                 enum radeon_bo_domain domain,
                 enum radeon_bo_flag flags);

static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
                           enum radeon_bo_usage usage)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_winsys *ws = bo->ws;
   int64_t abs_timeout;

   if (timeout == 0) {
      if (p_atomic_read(&bo->num_active_ioctls))
         return false;

   } else {
      abs_timeout = os_time_get_absolute_timeout(timeout);

      /* Wait if any ioctl is being submitted with this buffer. */
      if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
         return false;
   }

   if (bo->is_shared) {
      /* We can't use user fences for shared buffers, because user fences
       * are local to this process only. If we want to wait for all buffer
       * uses in all processes, we have to use amdgpu_bo_wait_for_idle.
       */
      bool buffer_busy = true;
      int r;

      r = amdgpu_bo_wait_for_idle(bo->bo, timeout, &buffer_busy);
      if (r)
         fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__,
                 r);
      return !buffer_busy;
   }

   if (timeout == 0) {
      unsigned idle_fences;
      bool buffer_idle;

      mtx_lock(&ws->bo_fence_lock);

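      /* Count the leading fences that have already signalled; with a zero
       * timeout, amdgpu_fence_wait only polls and never blocks.
       */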
      for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
         if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
            break;
      }

      /* Release the idle fences to avoid checking them again later. */
      for (unsigned i = 0; i < idle_fences; ++i)
         amdgpu_fence_reference(&bo->fences[i], NULL);

      memmove(&bo->fences[0], &bo->fences[idle_fences],
              (bo->num_fences - idle_fences) * sizeof(*bo->fences));
      bo->num_fences -= idle_fences;

      buffer_idle = !bo->num_fences;
      mtx_unlock(&ws->bo_fence_lock);

      return buffer_idle;
   } else {
      bool buffer_idle = true;

      mtx_lock(&ws->bo_fence_lock);
      while (bo->num_fences && buffer_idle) {
         struct pipe_fence_handle *fence = NULL;
         bool fence_idle = false;

         amdgpu_fence_reference(&fence, bo->fences[0]);

         /* Wait for the fence. */
         mtx_unlock(&ws->bo_fence_lock);
         if (amdgpu_fence_wait(fence, abs_timeout, true))
            fence_idle = true;
         else
            buffer_idle = false;
         mtx_lock(&ws->bo_fence_lock);

         /* Release an idle fence to avoid checking it again later, keeping in
          * mind that the fence array may have been modified by other threads.
          */
         if (fence_idle && bo->num_fences && bo->fences[0] == fence) {
            amdgpu_fence_reference(&bo->fences[0], NULL);
            memmove(&bo->fences[0], &bo->fences[1],
                    (bo->num_fences - 1) * sizeof(*bo->fences));
            bo->num_fences--;
         }

         amdgpu_fence_reference(&fence, NULL);
      }
      mtx_unlock(&ws->bo_fence_lock);

      return buffer_idle;
   }
}

static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
      struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
}

static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
{
   for (unsigned i = 0; i < bo->num_fences; ++i)
      amdgpu_fence_reference(&bo->fences[i], NULL);

   FREE(bo->fences);
   bo->num_fences = 0;
   bo->max_fences = 0;
}

void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   assert(bo->bo && "must not be called for slab entries");

   mtx_lock(&bo->ws->global_bo_list_lock);
   LIST_DEL(&bo->u.real.global_list_item);
   bo->ws->num_buffers--;
   mtx_unlock(&bo->ws->global_bo_list_lock);

   amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
   amdgpu_va_range_free(bo->u.real.va_handle);
   amdgpu_bo_free(bo->bo);

   amdgpu_bo_remove_fences(bo);

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->info.gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->info.gart_page_size);

   if (bo->u.real.map_count >= 1) {
      if (bo->initial_domain & RADEON_DOMAIN_VRAM)
         bo->ws->mapped_vram -= bo->base.size;
      else if (bo->initial_domain & RADEON_DOMAIN_GTT)
         bo->ws->mapped_gtt -= bo->base.size;
      bo->ws->num_mapped_buffers--;
   }

   FREE(bo);
}

static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   assert(bo->bo); /* slab buffers have a separate vtbl */

   if (bo->u.real.use_reusable_pool)
      pb_cache_add_buffer(&bo->u.real.cache_entry);
   else
      amdgpu_bo_destroy(_buf);
}

static void *amdgpu_bo_map(struct pb_buffer *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_winsys_bo *real;
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   int r;
   void *cpu = NULL;
   uint64_t offset = 0;

   assert(!bo->sparse);

   /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
      if (usage & PIPE_TRANSFER_DONTBLOCK) {
         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_WRITE)) {
               return NULL;
            }
         } else {
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_READWRITE)) {
               return NULL;
            }
         }
      } else {
         uint64_t time = os_time_get_nano();

         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs) {
               if (amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                            RADEON_USAGE_WRITE)) {
                  cs->flush_cs(cs->flush_data, 0, NULL);
               } else {
                  /* Try to avoid busy-waiting in amdgpu_bo_wait. */
                  if (p_atomic_read(&bo->num_active_ioctls))
                     amdgpu_cs_sync_flush(rcs);
               }
            }

            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_WRITE);
         } else {
            /* Mapping for write. */
            if (cs) {
               if (amdgpu_bo_is_referenced_by_cs(cs, bo)) {
                  cs->flush_cs(cs->flush_data, 0, NULL);
               } else {
                  /* Try to avoid busy-waiting in amdgpu_bo_wait. */
                  if (p_atomic_read(&bo->num_active_ioctls))
                     amdgpu_cs_sync_flush(rcs);
               }
            }

            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_READWRITE);
         }

         bo->ws->buffer_wait_time += os_time_get_nano() - time;
      }
   }

   /* If the buffer is created from user memory, return the user pointer. */
   if (bo->user_ptr)
      return bo->user_ptr;

   if (bo->bo) {
      real = bo;
   } else {
      real = bo->u.slab.real;
      offset = bo->va - real->va;
   }

   r = amdgpu_bo_cpu_map(real->bo, &cpu);
   if (r) {
      /* Clear the cache and try again. */
      pb_cache_release_all_buffers(&real->ws->bo_cache);
      r = amdgpu_bo_cpu_map(real->bo, &cpu);
      if (r)
         return NULL;
   }

   if (p_atomic_inc_return(&real->u.real.map_count) == 1) {
      if (real->initial_domain & RADEON_DOMAIN_VRAM)
         real->ws->mapped_vram += real->base.size;
      else if (real->initial_domain & RADEON_DOMAIN_GTT)
         real->ws->mapped_gtt += real->base.size;
      real->ws->num_mapped_buffers++;
   }
   return (uint8_t*)cpu + offset;
}

static void amdgpu_bo_unmap(struct pb_buffer *buf)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_winsys_bo *real;

   assert(!bo->sparse);

   if (bo->user_ptr)
      return;

   real = bo->bo ? bo : bo->u.slab.real;

   if (p_atomic_dec_zero(&real->u.real.map_count)) {
      if (real->initial_domain & RADEON_DOMAIN_VRAM)
         real->ws->mapped_vram -= real->base.size;
      else if (real->initial_domain & RADEON_DOMAIN_GTT)
         real->ws->mapped_gtt -= real->base.size;
      real->ws->num_mapped_buffers--;
   }

   amdgpu_bo_cpu_unmap(real->bo);
}

static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
   amdgpu_bo_destroy_or_cache
   /* other functions are never called */
};

static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_winsys *ws = bo->ws;

   assert(bo->bo);

   mtx_lock(&ws->global_bo_list_lock);
   LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
   ws->num_buffers++;
   mtx_unlock(&ws->global_bo_list_lock);
}

static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
                                                 uint64_t size,
                                                 unsigned alignment,
                                                 unsigned usage,
                                                 enum radeon_bo_domain initial_domain,
                                                 unsigned flags,
                                                 unsigned pb_cache_bucket)
{
   struct amdgpu_bo_alloc_request request = {0};
   amdgpu_bo_handle buf_handle;
   uint64_t va = 0;
   struct amdgpu_winsys_bo *bo;
   amdgpu_va_handle va_handle;
   unsigned va_gap_size;
   int r;

   assert(initial_domain & RADEON_DOMAIN_VRAM_GTT);
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   pb_cache_init_entry(&ws->bo_cache, &bo->u.real.cache_entry, &bo->base,
                       pb_cache_bucket);
   request.alloc_size = size;
   request.phys_alignment = alignment;

   if (initial_domain & RADEON_DOMAIN_VRAM)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
   if (initial_domain & RADEON_DOMAIN_GTT)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;

   if (flags & RADEON_FLAG_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
   if (flags & RADEON_FLAG_NO_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
   if (flags & RADEON_FLAG_GTT_WC)
      request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;

   r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
      fprintf(stderr, "amdgpu:    size      : %"PRIu64" bytes\n", size);
      fprintf(stderr, "amdgpu:    alignment : %u bytes\n", alignment);
      fprintf(stderr, "amdgpu:    domains   : %u\n", initial_domain);
      goto error_bo_alloc;
   }

   va_gap_size = ws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size + va_gap_size, alignment, 0, &va, &va_handle, 0);
   if (r)
      goto error_va_alloc;

   r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = alignment;
   bo->base.usage = usage;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->bo = buf_handle;
   bo->va = va;
   bo->u.real.va_handle = va_handle;
   bo->initial_domain = initial_domain;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   if (initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align64(size, ws->info.gart_page_size);
   else if (initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align64(size, ws->info.gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error_bo_alloc:
   FREE(bo);
   return NULL;
}

bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
      return false;
   }

   return amdgpu_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
}

bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
{
   struct amdgpu_winsys_bo *bo = NULL; /* fix container_of */
   bo = container_of(entry, bo, u.slab.entry);

   return amdgpu_bo_can_reclaim(&bo->base);
}

static void amdgpu_bo_slab_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   assert(!bo->bo);

   pb_slab_free(&bo->ws->bo_slabs, &bo->u.slab.entry);
}

static const struct pb_vtbl amdgpu_winsys_bo_slab_vtbl = {
   amdgpu_bo_slab_destroy
   /* other functions are never called */
};

struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
                                     unsigned entry_size,
                                     unsigned group_index)
{
   struct amdgpu_winsys *ws = priv;
   struct amdgpu_slab *slab = CALLOC_STRUCT(amdgpu_slab);
   enum radeon_bo_domain domains;
   enum radeon_bo_flag flags = 0;
   uint32_t base_id;

   if (!slab)
      return NULL;

   if (heap & 1)
      flags |= RADEON_FLAG_GTT_WC;
   if (heap & 2)
      flags |= RADEON_FLAG_CPU_ACCESS;

   switch (heap >> 2) {
   case 0:
      domains = RADEON_DOMAIN_VRAM;
      break;
   default:
   case 1:
      domains = RADEON_DOMAIN_VRAM_GTT;
      break;
   case 2:
      domains = RADEON_DOMAIN_GTT;
      break;
   }

   slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(&ws->base,
                                                    64 * 1024, 64 * 1024,
                                                    domains, flags));
   if (!slab->buffer)
      goto fail;

   assert(slab->buffer->bo);

   slab->base.num_entries = slab->buffer->base.size / entry_size;
   slab->base.num_free = slab->base.num_entries;
   slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
   if (!slab->entries)
      goto fail_buffer;

   LIST_INITHEAD(&slab->base.free);

   base_id = __sync_fetch_and_add(&ws->next_bo_unique_id, slab->base.num_entries);

   for (unsigned i = 0; i < slab->base.num_entries; ++i) {
      struct amdgpu_winsys_bo *bo = &slab->entries[i];

      bo->base.alignment = entry_size;
      bo->base.usage = slab->buffer->base.usage;
      bo->base.size = entry_size;
      bo->base.vtbl = &amdgpu_winsys_bo_slab_vtbl;
      bo->ws = ws;
      bo->va = slab->buffer->va + i * entry_size;
      bo->initial_domain = domains;
      bo->unique_id = base_id + i;
      bo->u.slab.entry.slab = &slab->base;
      bo->u.slab.entry.group_index = group_index;
      bo->u.slab.real = slab->buffer;

      LIST_ADDTAIL(&bo->u.slab.entry.head, &slab->base.free);
   }

   return &slab->base;

fail_buffer:
   amdgpu_winsys_bo_reference(&slab->buffer, NULL);
fail:
   FREE(slab);
   return NULL;
}

void amdgpu_bo_slab_free(void *priv, struct pb_slab *pslab)
{
   struct amdgpu_slab *slab = amdgpu_slab(pslab);

   for (unsigned i = 0; i < slab->base.num_entries; ++i)
      amdgpu_bo_remove_fences(&slab->entries[i]);

   FREE(slab->entries);
   amdgpu_winsys_bo_reference(&slab->buffer, NULL);
   FREE(slab);
}

/*
 * Attempt to allocate the given number of backing pages. Fewer pages may be
 * allocated (depending on the fragmentation of existing backing buffers),
 * which will be reflected by a change to *pnum_pages.
 */
static struct amdgpu_sparse_backing *
sparse_backing_alloc(struct amdgpu_winsys_bo *bo, uint32_t *pstart_page, uint32_t *pnum_pages)
{
   struct amdgpu_sparse_backing *best_backing;
   unsigned best_idx;
   uint32_t best_num_pages;

   best_backing = NULL;
   best_idx = 0;
   best_num_pages = 0;

   /* This is a very simple and inefficient best-fit algorithm. */
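   /* While the candidate is smaller than the request, prefer larger chunks;
    * once it exceeds the request, prefer smaller ones. The chosen chunk may
    * still be smaller than the request, in which case *pnum_pages is clamped
    * below and the caller allocates the remainder with further calls.
    */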
   list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
      for (unsigned idx = 0; idx < backing->num_chunks; ++idx) {
         uint32_t cur_num_pages = backing->chunks[idx].end - backing->chunks[idx].begin;
         if ((best_num_pages < *pnum_pages && cur_num_pages > best_num_pages) ||
             (best_num_pages > *pnum_pages && cur_num_pages < best_num_pages)) {
            best_backing = backing;
            best_idx = idx;
            best_num_pages = cur_num_pages;
         }
      }
   }

   /* Allocate a new backing buffer if necessary. */
   if (!best_backing) {
      struct pb_buffer *buf;
      uint64_t size;
      uint32_t pages;

      best_backing = CALLOC_STRUCT(amdgpu_sparse_backing);
      if (!best_backing)
         return NULL;

      best_backing->max_chunks = 4;
      best_backing->chunks = CALLOC(best_backing->max_chunks,
                                    sizeof(*best_backing->chunks));
      if (!best_backing->chunks) {
         FREE(best_backing);
         return NULL;
      }

      assert(bo->u.sparse.num_backing_pages < DIV_ROUND_UP(bo->base.size, RADEON_SPARSE_PAGE_SIZE));

      size = MIN3(bo->base.size / 16,
                  8 * 1024 * 1024,
                  bo->base.size - (uint64_t)bo->u.sparse.num_backing_pages * RADEON_SPARSE_PAGE_SIZE);
      size = MAX2(size, RADEON_SPARSE_PAGE_SIZE);

      buf = amdgpu_bo_create(&bo->ws->base, size, RADEON_SPARSE_PAGE_SIZE,
                             bo->initial_domain,
                             bo->u.sparse.flags | RADEON_FLAG_HANDLE);
      if (!buf) {
         FREE(best_backing->chunks);
         FREE(best_backing);
         return NULL;
      }

      /* We might have gotten a bigger buffer than requested via caching. */
      pages = buf->size / RADEON_SPARSE_PAGE_SIZE;

      best_backing->bo = amdgpu_winsys_bo(buf);
      best_backing->num_chunks = 1;
      best_backing->chunks[0].begin = 0;
      best_backing->chunks[0].end = pages;

      list_add(&best_backing->list, &bo->u.sparse.backing);
      bo->u.sparse.num_backing_pages += pages;

      best_idx = 0;
      best_num_pages = pages;
   }

   *pnum_pages = MIN2(*pnum_pages, best_num_pages);
   *pstart_page = best_backing->chunks[best_idx].begin;
   best_backing->chunks[best_idx].begin += *pnum_pages;

   if (best_backing->chunks[best_idx].begin >= best_backing->chunks[best_idx].end) {
      memmove(&best_backing->chunks[best_idx], &best_backing->chunks[best_idx + 1],
              sizeof(*best_backing->chunks) * (best_backing->num_chunks - best_idx - 1));
      best_backing->num_chunks--;
   }

   return best_backing;
}

static void
sparse_free_backing_buffer(struct amdgpu_winsys_bo *bo,
                           struct amdgpu_sparse_backing *backing)
{
   struct amdgpu_winsys *ws = backing->bo->ws;

   bo->u.sparse.num_backing_pages -= backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE;

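   /* The GPU may still be using the backing buffer through the sparse BO's
    * page mappings, so take the sparse BO's fences before releasing it;
    * otherwise the cache could reuse or free the backing buffer too early.
    */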
   mtx_lock(&ws->bo_fence_lock);
   amdgpu_add_fences(backing->bo, bo->num_fences, bo->fences);
   mtx_unlock(&ws->bo_fence_lock);

   list_del(&backing->list);
   amdgpu_winsys_bo_reference(&backing->bo, NULL);
   FREE(backing->chunks);
   FREE(backing);
}

/*
 * Return a range of pages from the given backing buffer back into the
 * free structure.
 */
static bool
sparse_backing_free(struct amdgpu_winsys_bo *bo,
                    struct amdgpu_sparse_backing *backing,
                    uint32_t start_page, uint32_t num_pages)
{
   uint32_t end_page = start_page + num_pages;
   unsigned low = 0;
   unsigned high = backing->num_chunks;

   /* Find the first chunk with begin >= start_page. */
   while (low < high) {
      unsigned mid = low + (high - low) / 2;

      if (backing->chunks[mid].begin >= start_page)
         high = mid;
      else
         low = mid + 1;
   }

   assert(low >= backing->num_chunks || end_page <= backing->chunks[low].begin);
   assert(low == 0 || backing->chunks[low - 1].end <= start_page);

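   /* Three cases: the freed range extends the previous chunk (possibly
    * merging it with the next one), extends the next chunk, or becomes a
    * new chunk of its own, growing the array when it is full.
    */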
   if (low > 0 && backing->chunks[low - 1].end == start_page) {
      backing->chunks[low - 1].end = end_page;

      if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
         backing->chunks[low - 1].end = backing->chunks[low].end;
         memmove(&backing->chunks[low], &backing->chunks[low + 1],
                 sizeof(*backing->chunks) * (backing->num_chunks - low - 1));
         backing->num_chunks--;
      }
   } else if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
      backing->chunks[low].begin = start_page;
   } else {
      if (backing->num_chunks >= backing->max_chunks) {
         unsigned new_max_chunks = 2 * backing->max_chunks;
         struct amdgpu_sparse_backing_chunk *new_chunks =
            REALLOC(backing->chunks,
                    sizeof(*backing->chunks) * backing->max_chunks,
                    sizeof(*backing->chunks) * new_max_chunks);
         if (!new_chunks)
            return false;

         backing->max_chunks = new_max_chunks;
         backing->chunks = new_chunks;
      }

      memmove(&backing->chunks[low + 1], &backing->chunks[low],
              sizeof(*backing->chunks) * (backing->num_chunks - low));
      backing->chunks[low].begin = start_page;
      backing->chunks[low].end = end_page;
      backing->num_chunks++;
   }

   if (backing->num_chunks == 1 && backing->chunks[0].begin == 0 &&
       backing->chunks[0].end == backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE)
      sparse_free_backing_buffer(bo, backing);

   return true;
}

static void amdgpu_bo_sparse_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   int r;

   assert(!bo->bo && bo->sparse);

   r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0,
                           (uint64_t)bo->u.sparse.num_va_pages * RADEON_SPARSE_PAGE_SIZE,
                           bo->va, 0, AMDGPU_VA_OP_CLEAR);
   if (r) {
      fprintf(stderr, "amdgpu: clearing PRT VA region on destroy failed (%d)\n", r);
   }

   while (!list_empty(&bo->u.sparse.backing)) {
      struct amdgpu_sparse_backing *dummy = NULL;
      sparse_free_backing_buffer(bo,
                                 container_of(bo->u.sparse.backing.next,
                                              dummy, list));
   }

   amdgpu_va_range_free(bo->u.sparse.va_handle);
   mtx_destroy(&bo->u.sparse.commit_lock);
   FREE(bo->u.sparse.commitments);
   FREE(bo);
}

static const struct pb_vtbl amdgpu_winsys_bo_sparse_vtbl = {
   amdgpu_bo_sparse_destroy
   /* other functions are never called */
};

static struct pb_buffer *
amdgpu_bo_sparse_create(struct amdgpu_winsys *ws, uint64_t size,
                        enum radeon_bo_domain domain,
                        enum radeon_bo_flag flags)
{
   struct amdgpu_winsys_bo *bo;
   uint64_t map_size;
   uint64_t va_gap_size;
   int r;

   /* We use 32-bit page numbers; refuse to attempt allocating sparse buffers
    * that exceed this limit. This is not really a restriction: we don't have
    * that much virtual address space anyway.
    */
   if (size > (uint64_t)INT32_MAX * RADEON_SPARSE_PAGE_SIZE)
      return NULL;

   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo)
      return NULL;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = RADEON_SPARSE_PAGE_SIZE;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_sparse_vtbl;
   bo->ws = ws;
   bo->initial_domain = domain;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
   bo->sparse = true;
   bo->u.sparse.flags = flags & ~RADEON_FLAG_SPARSE;

   bo->u.sparse.num_va_pages = DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
   bo->u.sparse.commitments = CALLOC(bo->u.sparse.num_va_pages,
                                     sizeof(*bo->u.sparse.commitments));
   if (!bo->u.sparse.commitments)
      goto error_alloc_commitments;

   mtx_init(&bo->u.sparse.commit_lock, mtx_plain);
   LIST_INITHEAD(&bo->u.sparse.backing);

   /* For simplicity, we always map a multiple of the page size. */
   map_size = align64(size, RADEON_SPARSE_PAGE_SIZE);
   va_gap_size = ws->check_vm ? 4 * RADEON_SPARSE_PAGE_SIZE : 0;
   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             map_size + va_gap_size, RADEON_SPARSE_PAGE_SIZE,
                             0, &bo->va, &bo->u.sparse.va_handle, 0);
   if (r)
      goto error_va_alloc;

   r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0, size, bo->va,
                           AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   return &bo->base;

error_va_map:
   amdgpu_va_range_free(bo->u.sparse.va_handle);
error_va_alloc:
   mtx_destroy(&bo->u.sparse.commit_lock);
   FREE(bo->u.sparse.commitments);
error_alloc_commitments:
   FREE(bo);
   return NULL;
}

static bool
amdgpu_bo_sparse_commit(struct pb_buffer *buf, uint64_t offset, uint64_t size,
                        bool commit)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buf);
   struct amdgpu_sparse_commitment *comm;
   uint32_t va_page, end_va_page;
   bool ok = true;
   int r;

   assert(bo->sparse);
   assert(offset % RADEON_SPARSE_PAGE_SIZE == 0);
   assert(offset <= bo->base.size);
   assert(size <= bo->base.size - offset);
   assert(size % RADEON_SPARSE_PAGE_SIZE == 0 || offset + size == bo->base.size);

   comm = bo->u.sparse.commitments;
   va_page = offset / RADEON_SPARSE_PAGE_SIZE;
   end_va_page = va_page + DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);

   mtx_lock(&bo->u.sparse.commit_lock);

   if (commit) {
      while (va_page < end_va_page) {
         uint32_t span_va_page;

         /* Skip pages that are already committed. */
         if (comm[va_page].backing) {
            va_page++;
            continue;
         }

         /* Determine length of uncommitted span. */
         span_va_page = va_page;
         while (va_page < end_va_page && !comm[va_page].backing)
            va_page++;

         /* Fill the uncommitted span with chunks of backing memory. */
         while (span_va_page < va_page) {
            struct amdgpu_sparse_backing *backing;
            uint32_t backing_start, backing_size;

            backing_size = va_page - span_va_page;
            backing = sparse_backing_alloc(bo, &backing_start, &backing_size);
            if (!backing) {
               ok = false;
               goto out;
            }

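            /* Map the backing pages into the span's VA range. Presumably
             * AMDGPU_VA_OP_REPLACE supersedes the PRT mapping that covered
             * this range, so no separate unmap is needed.
             */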
            r = amdgpu_bo_va_op_raw(bo->ws->dev, backing->bo->bo,
                                    (uint64_t)backing_start * RADEON_SPARSE_PAGE_SIZE,
                                    (uint64_t)backing_size * RADEON_SPARSE_PAGE_SIZE,
                                    bo->va + (uint64_t)span_va_page * RADEON_SPARSE_PAGE_SIZE,
                                    AMDGPU_VM_PAGE_READABLE |
                                    AMDGPU_VM_PAGE_WRITEABLE |
                                    AMDGPU_VM_PAGE_EXECUTABLE,
                                    AMDGPU_VA_OP_REPLACE);
            if (r) {
               ok = sparse_backing_free(bo, backing, backing_start, backing_size);
               assert(ok && "sufficient memory should already be allocated");

               ok = false;
               goto out;
            }

            while (backing_size) {
               comm[span_va_page].backing = backing;
               comm[span_va_page].page = backing_start;
               span_va_page++;
               backing_start++;
               backing_size--;
            }
         }
      }
   } else {
      r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0,
                              (uint64_t)(end_va_page - va_page) * RADEON_SPARSE_PAGE_SIZE,
                              bo->va + (uint64_t)va_page * RADEON_SPARSE_PAGE_SIZE,
                              AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_REPLACE);
      if (r) {
         ok = false;
         goto out;
      }

      while (va_page < end_va_page) {
         struct amdgpu_sparse_backing *backing;
         uint32_t backing_start;
         uint32_t span_pages;

         /* Skip pages that are already uncommitted. */
         if (!comm[va_page].backing) {
            va_page++;
            continue;
         }

         /* Group contiguous spans of pages. */
         backing = comm[va_page].backing;
         backing_start = comm[va_page].page;
         comm[va_page].backing = NULL;

         span_pages = 1;
         va_page++;

         while (va_page < end_va_page &&
                comm[va_page].backing == backing &&
                comm[va_page].page == backing_start + span_pages) {
            comm[va_page].backing = NULL;
            va_page++;
            span_pages++;
         }

         if (!sparse_backing_free(bo, backing, backing_start, span_pages)) {
            /* Couldn't allocate tracking data structures, so we have to leak */
            fprintf(stderr, "amdgpu: leaking PRT backing memory\n");
            ok = false;
         }
      }
   }
out:

   mtx_unlock(&bo->u.sparse.commit_lock);

   return ok;
}

static unsigned eg_tile_split(unsigned tile_split)
{
   switch (tile_split) {
   case 0:     tile_split = 64;    break;
   case 1:     tile_split = 128;   break;
   case 2:     tile_split = 256;   break;
   case 3:     tile_split = 512;   break;
   default:
   case 4:     tile_split = 1024;  break;
   case 5:     tile_split = 2048;  break;
   case 6:     tile_split = 4096;  break;
   }
   return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
   switch (eg_tile_split) {
   case 64:    return 0;
   case 128:   return 1;
   case 256:   return 2;
   case 512:   return 3;
   default:
   case 1024:  return 4;
   case 2048:  return 5;
   case 4096:  return 6;
   }
}

static void amdgpu_buffer_get_metadata(struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_info info = {0};
   uint64_t tiling_flags;
   int r;

   assert(bo->bo && "must not be called for slab entries");

   r = amdgpu_bo_query_info(bo->bo, &info);
   if (r)
      return;

   tiling_flags = info.metadata.tiling_info;

   if (bo->ws->info.chip_class >= GFX9) {
      md->u.gfx9.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
   } else {
      md->u.legacy.microtile = RADEON_LAYOUT_LINEAR;
      md->u.legacy.macrotile = RADEON_LAYOUT_LINEAR;

      if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4)  /* 2D_TILED_THIN1 */
         md->u.legacy.macrotile = RADEON_LAYOUT_TILED;
      else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
         md->u.legacy.microtile = RADEON_LAYOUT_TILED;

      md->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
      md->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
      md->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
      md->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
      md->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
      md->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
      md->u.legacy.scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
   }

   md->size_metadata = info.metadata.size_metadata;
   memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
}

static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_metadata metadata = {0};
   uint64_t tiling_flags = 0;

   assert(bo->bo && "must not be called for slab entries");

   if (bo->ws->info.chip_class >= GFX9) {
      tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, md->u.gfx9.swizzle_mode);
   } else {
      if (md->u.legacy.macrotile == RADEON_LAYOUT_TILED)
         tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
      else if (md->u.legacy.microtile == RADEON_LAYOUT_TILED)
         tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
      else
         tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */

      tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, md->u.legacy.pipe_config);
      tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(md->u.legacy.bankw));
      tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(md->u.legacy.bankh));
      if (md->u.legacy.tile_split)
         tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(md->u.legacy.tile_split));
      tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(md->u.legacy.mtilea));
      tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(md->u.legacy.num_banks)-1);

      if (md->u.legacy.scanout)
         tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
      else
         tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
   }

   metadata.tiling_info = tiling_flags;
   metadata.size_metadata = md->size_metadata;
   memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));

   amdgpu_bo_set_metadata(bo->bo, &metadata);
}

static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
                 uint64_t size,
                 unsigned alignment,
                 enum radeon_bo_domain domain,
                 enum radeon_bo_flag flags)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   unsigned usage = 0, pb_cache_bucket;

   /* Sub-allocate small buffers from slabs. */
   if (!(flags & (RADEON_FLAG_HANDLE | RADEON_FLAG_SPARSE)) &&
       size <= (1 << AMDGPU_SLAB_MAX_SIZE_LOG2) &&
       alignment <= MAX2(1 << AMDGPU_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
      struct pb_slab_entry *entry;
      unsigned heap = 0;

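      /* Encode the flags in bits 0-1 and the domain in bits 2+; this must
       * match the decoding in amdgpu_bo_slab_alloc above.
       */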
      if (flags & RADEON_FLAG_GTT_WC)
         heap |= 1;
      if (flags & RADEON_FLAG_CPU_ACCESS)
         heap |= 2;
      if (flags & ~(RADEON_FLAG_GTT_WC | RADEON_FLAG_CPU_ACCESS))
         goto no_slab;

      switch (domain) {
      case RADEON_DOMAIN_VRAM:
         heap |= 0 * 4;
         break;
      case RADEON_DOMAIN_VRAM_GTT:
         heap |= 1 * 4;
         break;
      case RADEON_DOMAIN_GTT:
         heap |= 2 * 4;
         break;
      default:
         goto no_slab;
      }

      entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
      if (!entry) {
         /* Clear the cache and try again. */
         pb_cache_release_all_buffers(&ws->bo_cache);

         entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
      }
      if (!entry)
         return NULL;

      bo = NULL;
      bo = container_of(entry, bo, u.slab.entry);

      pipe_reference_init(&bo->base.reference, 1);

      return &bo->base;
   }
no_slab:

   if (flags & RADEON_FLAG_SPARSE) {
      assert(RADEON_SPARSE_PAGE_SIZE % alignment == 0);
      assert(!(flags & RADEON_FLAG_CPU_ACCESS));

      flags |= RADEON_FLAG_NO_CPU_ACCESS;

      return amdgpu_bo_sparse_create(ws, size, domain, flags);
   }

   /* This flag is irrelevant for the cache. */
   flags &= ~RADEON_FLAG_HANDLE;

   /* Align size to page size. This is the minimum alignment for normal
    * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
    * like constant/uniform buffers, can benefit from better and more reuse.
    */
   size = align64(size, ws->info.gart_page_size);
   alignment = align(alignment, ws->info.gart_page_size);

   /* Only set one usage bit each for domains and flags, or the cache manager
    * might consider different sets of domains / flags compatible.
    */
   if (domain == RADEON_DOMAIN_VRAM_GTT)
      usage = 1 << 2;
   else
      usage = domain >> 1;
   assert(flags < sizeof(usage) * 8 - 3);
   usage |= 1 << (flags + 3);

   /* Determine the pb_cache bucket for minimizing pb_cache misses. */
   pb_cache_bucket = 0;
   if (domain & RADEON_DOMAIN_VRAM) /* VRAM or VRAM+GTT */
      pb_cache_bucket += 1;
   if (flags == RADEON_FLAG_GTT_WC) /* WC */
      pb_cache_bucket += 2;
   assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));

   /* Get a buffer from the cache. */
   bo = (struct amdgpu_winsys_bo*)
        pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage,
                                pb_cache_bucket);
   if (bo)
      return &bo->base;

   /* Create a new one. */
   bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
                         pb_cache_bucket);
   if (!bo) {
      /* Clear the cache and try again. */
      pb_slabs_reclaim(&ws->bo_slabs);
      pb_cache_release_all_buffers(&ws->bo_cache);
      bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
                            pb_cache_bucket);
      if (!bo)
         return NULL;
   }

   bo->u.real.use_reusable_pool = true;
   return &bo->base;
}

static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
                                               struct winsys_handle *whandle,
                                               unsigned *stride,
                                               unsigned *offset)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   enum amdgpu_bo_handle_type type;
   struct amdgpu_bo_import_result result = {0};
   uint64_t va;
   amdgpu_va_handle va_handle;
   struct amdgpu_bo_info info = {0};
   enum radeon_bo_domain initial = 0;
   int r;

   /* Initialize the structure. */
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   default:
      return NULL;
   }

   r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
   if (r)
      goto error;

   /* Get initial domains. */
   r = amdgpu_bo_query_info(result.buf_handle, &info);
   if (r)
      goto error_query;

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             result.alloc_size, 1 << 20, 0, &va, &va_handle, 0);
   if (r)
      goto error_query;

   r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
      initial |= RADEON_DOMAIN_VRAM;
   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
      initial |= RADEON_DOMAIN_GTT;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = info.phys_alignment;
   bo->bo = result.buf_handle;
   bo->base.size = result.alloc_size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->va = va;
   bo->u.real.va_handle = va_handle;
   bo->initial_domain = initial;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
   bo->is_shared = true;

   if (stride)
      *stride = whandle->stride;
   if (offset)
      *offset = whandle->offset;

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align64(bo->base.size, ws->info.gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return &bo->base;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_query:
   amdgpu_bo_free(result.buf_handle);

error:
   FREE(bo);
   return NULL;
}

static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
                                 unsigned stride, unsigned offset,
                                 unsigned slice_size,
                                 struct winsys_handle *whandle)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
   enum amdgpu_bo_handle_type type;
   int r;

   if (!bo->bo) {
      offset += bo->va - bo->u.slab.real->va;
      bo = bo->u.slab.real;
   }

   bo->u.real.use_reusable_pool = false;

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   case DRM_API_HANDLE_TYPE_KMS:
      type = amdgpu_bo_handle_type_kms;
      break;
   default:
      return false;
   }

   r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
   if (r)
      return false;

   whandle->stride = stride;
   whandle->offset = offset;
   whandle->offset += slice_size * whandle->layer;
   bo->is_shared = true;
   return true;
}

static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
                                            void *pointer, uint64_t size)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   amdgpu_bo_handle buf_handle;
   struct amdgpu_winsys_bo *bo;
   uint64_t va;
   amdgpu_va_handle va_handle;

   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo)
      return NULL;

   if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle))
      goto error;

   if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size, 1 << 12, 0, &va, &va_handle, 0))
      goto error_va_alloc;

   if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP))
      goto error_va_map;

   /* Initialize it. */
   pipe_reference_init(&bo->base.reference, 1);
   bo->bo = buf_handle;
   bo->base.alignment = 0;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->user_ptr = pointer;
   bo->va = va;
   bo->u.real.va_handle = va_handle;
   bo->initial_domain = RADEON_DOMAIN_GTT;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return (struct pb_buffer*)bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error:
   FREE(bo);
   return NULL;
}

static bool amdgpu_bo_is_user_ptr(struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->user_ptr != NULL;
}

static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->va;
}

void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
   ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
   ws->base.buffer_map = amdgpu_bo_map;
   ws->base.buffer_unmap = amdgpu_bo_unmap;
   ws->base.buffer_wait = amdgpu_bo_wait;
   ws->base.buffer_create = amdgpu_bo_create;
   ws->base.buffer_from_handle = amdgpu_bo_from_handle;
   ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
   ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
   ws->base.buffer_get_handle = amdgpu_bo_get_handle;
   ws->base.buffer_commit = amdgpu_bo_sparse_commit;
   ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
   ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
}