winsys/amdgpu: fix VDPAU interop by having one amdgpu_winsys_bo per BO (v2)
[mesa.git] src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
1 /*
2 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
3 * Copyright © 2015 Advanced Micro Devices, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
16 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
18 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
25 * of the Software.
26 */
27
28 #include "amdgpu_cs.h"
29
30 #include "util/os_time.h"
31 #include "util/u_hash_table.h"
32 #include "state_tracker/drm_driver.h"
33 #include <amdgpu_drm.h>
34 #include <xf86drm.h>
35 #include <stdio.h>
36 #include <inttypes.h>
37
38 #ifndef AMDGPU_GEM_CREATE_VM_ALWAYS_VALID
39 #define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
40 #endif
41
42 #ifndef AMDGPU_VA_RANGE_HIGH
43 #define AMDGPU_VA_RANGE_HIGH 0x2
44 #endif
45
46 /* Set to 1 for verbose output showing committed sparse buffer ranges. */
47 #define DEBUG_SPARSE_COMMITS 0
48
49 struct amdgpu_sparse_backing_chunk {
50 uint32_t begin, end;
51 };
52
53 static struct pb_buffer *
54 amdgpu_bo_create(struct radeon_winsys *rws,
55 uint64_t size,
56 unsigned alignment,
57 enum radeon_bo_domain domain,
58 enum radeon_bo_flag flags);
59
60 static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
61 enum radeon_bo_usage usage)
62 {
63 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
64 struct amdgpu_winsys *ws = bo->ws;
65 int64_t abs_timeout;
66
67 if (timeout == 0) {
68 if (p_atomic_read(&bo->num_active_ioctls))
69 return false;
70
71 } else {
72 abs_timeout = os_time_get_absolute_timeout(timeout);
73
74 /* Wait if any ioctl is being submitted with this buffer. */
75 if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
76 return false;
77 }
78
79 if (bo->is_shared) {
80 /* We can't use user fences for shared buffers, because user fences
81 * are local to this process only. If we want to wait for all buffer
82 * uses in all processes, we have to use amdgpu_bo_wait_for_idle.
83 */
84 bool buffer_busy = true;
85 int r;
86
87 r = amdgpu_bo_wait_for_idle(bo->bo, timeout, &buffer_busy);
88 if (r)
89 fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__,
90 r);
91 return !buffer_busy;
92 }
93
94 if (timeout == 0) {
95 unsigned idle_fences;
96 bool buffer_idle;
97
98 simple_mtx_lock(&ws->bo_fence_lock);
99
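      /* A zero timeout is just a poll: count the leading fences that have
       * already signaled so they can be dropped below. */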
100 for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
101 if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
102 break;
103 }
104
105 /* Release the idle fences to avoid checking them again later. */
106 for (unsigned i = 0; i < idle_fences; ++i)
107 amdgpu_fence_reference(&bo->fences[i], NULL);
108
109 memmove(&bo->fences[0], &bo->fences[idle_fences],
110 (bo->num_fences - idle_fences) * sizeof(*bo->fences));
111 bo->num_fences -= idle_fences;
112
113 buffer_idle = !bo->num_fences;
114 simple_mtx_unlock(&ws->bo_fence_lock);
115
116 return buffer_idle;
117 } else {
118 bool buffer_idle = true;
119
120 simple_mtx_lock(&ws->bo_fence_lock);
121 while (bo->num_fences && buffer_idle) {
122 struct pipe_fence_handle *fence = NULL;
123 bool fence_idle = false;
124
125 amdgpu_fence_reference(&fence, bo->fences[0]);
126
127 /* Wait for the fence. */
128 simple_mtx_unlock(&ws->bo_fence_lock);
129 if (amdgpu_fence_wait(fence, abs_timeout, true))
130 fence_idle = true;
131 else
132 buffer_idle = false;
133 simple_mtx_lock(&ws->bo_fence_lock);
134
135 /* Release an idle fence to avoid checking it again later, keeping in
136 * mind that the fence array may have been modified by other threads.
137 */
138 if (fence_idle && bo->num_fences && bo->fences[0] == fence) {
139 amdgpu_fence_reference(&bo->fences[0], NULL);
140 memmove(&bo->fences[0], &bo->fences[1],
141 (bo->num_fences - 1) * sizeof(*bo->fences));
142 bo->num_fences--;
143 }
144
145 amdgpu_fence_reference(&fence, NULL);
146 }
147 simple_mtx_unlock(&ws->bo_fence_lock);
148
149 return buffer_idle;
150 }
151 }
152
153 static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
154 struct pb_buffer *buf)
155 {
156 return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
157 }
158
159 static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
160 {
161 for (unsigned i = 0; i < bo->num_fences; ++i)
162 amdgpu_fence_reference(&bo->fences[i], NULL);
163
164 FREE(bo->fences);
165 bo->num_fences = 0;
166 bo->max_fences = 0;
167 }
168
169 void amdgpu_bo_destroy(struct pb_buffer *_buf)
170 {
171 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
172 struct amdgpu_winsys *ws = bo->ws;
173
174 assert(bo->bo && "must not be called for slab entries");
175
176 if (ws->debug_all_bos) {
177 simple_mtx_lock(&ws->global_bo_list_lock);
178 LIST_DEL(&bo->u.real.global_list_item);
179 ws->num_buffers--;
180 simple_mtx_unlock(&ws->global_bo_list_lock);
181 }
182
183 simple_mtx_lock(&ws->bo_export_table_lock);
184 util_hash_table_remove(ws->bo_export_table, bo->bo);
185 simple_mtx_unlock(&ws->bo_export_table_lock);
186
187 amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
188 amdgpu_va_range_free(bo->u.real.va_handle);
189 amdgpu_bo_free(bo->bo);
190
191 amdgpu_bo_remove_fences(bo);
192
193 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
194 ws->allocated_vram -= align64(bo->base.size, ws->info.gart_page_size);
195 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
196 ws->allocated_gtt -= align64(bo->base.size, ws->info.gart_page_size);
197
198 if (bo->u.real.map_count >= 1) {
199 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
200 ws->mapped_vram -= bo->base.size;
201 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
202 ws->mapped_gtt -= bo->base.size;
203 ws->num_mapped_buffers--;
204 }
205
206 FREE(bo);
207 }
208
209 static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
210 {
211 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
212
213 assert(bo->bo); /* slab buffers have a separate vtbl */
214
215 if (bo->u.real.use_reusable_pool)
216 pb_cache_add_buffer(&bo->u.real.cache_entry);
217 else
218 amdgpu_bo_destroy(_buf);
219 }
220
221 static void *amdgpu_bo_map(struct pb_buffer *buf,
222 struct radeon_cmdbuf *rcs,
223 enum pipe_transfer_usage usage)
224 {
225 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
226 struct amdgpu_winsys_bo *real;
227 struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
228 int r;
229 void *cpu = NULL;
230 uint64_t offset = 0;
231
232 assert(!bo->sparse);
233
234 /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
235 if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
236 /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
237 if (usage & PIPE_TRANSFER_DONTBLOCK) {
238 if (!(usage & PIPE_TRANSFER_WRITE)) {
239 /* Mapping for read.
240 *
241 * Since we are mapping for read, we don't need to wait
242 * if the GPU is using the buffer for read too
243 * (neither one is changing it).
244 *
245 * Only check whether the buffer is being used for write. */
246 if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
247 RADEON_USAGE_WRITE)) {
248 cs->flush_cs(cs->flush_data,
249 RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
250 return NULL;
251 }
252
253 if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
254 RADEON_USAGE_WRITE)) {
255 return NULL;
256 }
257 } else {
258 if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
259 cs->flush_cs(cs->flush_data,
260 RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
261 return NULL;
262 }
263
264 if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
265 RADEON_USAGE_READWRITE)) {
266 return NULL;
267 }
268 }
269 } else {
270 uint64_t time = os_time_get_nano();
271
272 if (!(usage & PIPE_TRANSFER_WRITE)) {
273 /* Mapping for read.
274 *
275 * Since we are mapping for read, we don't need to wait
276 * if the GPU is using the buffer for read too
277 * (neither one is changing it).
278 *
279 * Only check whether the buffer is being used for write. */
280 if (cs) {
281 if (amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
282 RADEON_USAGE_WRITE)) {
283 cs->flush_cs(cs->flush_data,
284 RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
285 } else {
286 /* Try to avoid busy-waiting in amdgpu_bo_wait. */
287 if (p_atomic_read(&bo->num_active_ioctls))
288 amdgpu_cs_sync_flush(rcs);
289 }
290 }
291
292 amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
293 RADEON_USAGE_WRITE);
294 } else {
295 /* Mapping for write. */
296 if (cs) {
297 if (amdgpu_bo_is_referenced_by_cs(cs, bo)) {
298 cs->flush_cs(cs->flush_data,
299 RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
300 } else {
301 /* Try to avoid busy-waiting in amdgpu_bo_wait. */
302 if (p_atomic_read(&bo->num_active_ioctls))
303 amdgpu_cs_sync_flush(rcs);
304 }
305 }
306
307 amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
308 RADEON_USAGE_READWRITE);
309 }
310
311 bo->ws->buffer_wait_time += os_time_get_nano() - time;
312 }
313 }
314
315 /* If the buffer is created from user memory, return the user pointer. */
316 if (bo->user_ptr)
317 return bo->user_ptr;
318
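   /* Slab entries have no kernel BO of their own: map the parent (real) buffer
    * and return a CPU pointer offset by the entry's position within it. */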
319 if (bo->bo) {
320 real = bo;
321 } else {
322 real = bo->u.slab.real;
323 offset = bo->va - real->va;
324 }
325
326 r = amdgpu_bo_cpu_map(real->bo, &cpu);
327 if (r) {
328 /* Clear the cache and try again. */
329 pb_cache_release_all_buffers(&real->ws->bo_cache);
330 r = amdgpu_bo_cpu_map(real->bo, &cpu);
331 if (r)
332 return NULL;
333 }
334
335 if (p_atomic_inc_return(&real->u.real.map_count) == 1) {
336 if (real->initial_domain & RADEON_DOMAIN_VRAM)
337 real->ws->mapped_vram += real->base.size;
338 else if (real->initial_domain & RADEON_DOMAIN_GTT)
339 real->ws->mapped_gtt += real->base.size;
340 real->ws->num_mapped_buffers++;
341 }
342 return (uint8_t*)cpu + offset;
343 }
344
345 static void amdgpu_bo_unmap(struct pb_buffer *buf)
346 {
347 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
348 struct amdgpu_winsys_bo *real;
349
350 assert(!bo->sparse);
351
352 if (bo->user_ptr)
353 return;
354
355 real = bo->bo ? bo : bo->u.slab.real;
356
357 if (p_atomic_dec_zero(&real->u.real.map_count)) {
358 if (real->initial_domain & RADEON_DOMAIN_VRAM)
359 real->ws->mapped_vram -= real->base.size;
360 else if (real->initial_domain & RADEON_DOMAIN_GTT)
361 real->ws->mapped_gtt -= real->base.size;
362 real->ws->num_mapped_buffers--;
363 }
364
365 amdgpu_bo_cpu_unmap(real->bo);
366 }
367
368 static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
369 amdgpu_bo_destroy_or_cache
370 /* other functions are never called */
371 };
372
373 static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
374 {
375 struct amdgpu_winsys *ws = bo->ws;
376
377 assert(bo->bo);
378
379 if (ws->debug_all_bos) {
380 simple_mtx_lock(&ws->global_bo_list_lock);
381 LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
382 ws->num_buffers++;
383 simple_mtx_unlock(&ws->global_bo_list_lock);
384 }
385 }
386
387 static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
388 uint64_t size,
389 unsigned alignment,
390 enum radeon_bo_domain initial_domain,
391 unsigned flags,
392 int heap)
393 {
394 struct amdgpu_bo_alloc_request request = {0};
395 amdgpu_bo_handle buf_handle;
396 uint64_t va = 0;
397 struct amdgpu_winsys_bo *bo;
398 amdgpu_va_handle va_handle;
399 unsigned va_gap_size;
400 int r;
401
402 /* VRAM or GTT must be specified, but not both at the same time. */
403 assert(util_bitcount(initial_domain & RADEON_DOMAIN_VRAM_GTT) == 1);
404
405 bo = CALLOC_STRUCT(amdgpu_winsys_bo);
406 if (!bo) {
407 return NULL;
408 }
409
410 if (heap >= 0) {
411 pb_cache_init_entry(&ws->bo_cache, &bo->u.real.cache_entry, &bo->base,
412 heap);
413 }
414 request.alloc_size = size;
415 request.phys_alignment = alignment;
416
417 if (initial_domain & RADEON_DOMAIN_VRAM)
418 request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
419 if (initial_domain & RADEON_DOMAIN_GTT)
420 request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
421
422 /* Since VRAM and GTT have almost the same performance on APUs, we could
423 * just set GTT. However, in order to decrease GTT(RAM) usage, which is
424     * shared with the OS, allow VRAM placements too. The goal is not to make
425     * particularly good use of VRAM, just to keep it from sitting idle and wasted.
426 */
427 if (!ws->info.has_dedicated_vram)
428 request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
429
430 if (flags & RADEON_FLAG_NO_CPU_ACCESS)
431 request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
432 if (flags & RADEON_FLAG_GTT_WC)
433 request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
434 if (flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
435 ws->info.has_local_buffers)
436 request.flags |= AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;
437 if (ws->zero_all_vram_allocs &&
438 (request.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM))
439 request.flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
440
441 r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
442 if (r) {
443 fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
444 fprintf(stderr, "amdgpu: size : %"PRIu64" bytes\n", size);
445 fprintf(stderr, "amdgpu: alignment : %u bytes\n", alignment);
446 fprintf(stderr, "amdgpu: domains : %u\n", initial_domain);
447 goto error_bo_alloc;
448 }
449
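   /* With check_vm, keep an unmapped VA gap behind the buffer so that
    * out-of-bounds accesses fault instead of silently landing in a
    * neighbouring allocation. */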
450 va_gap_size = ws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
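   /* Raising the VA alignment to the PTE fragment size lets the kernel map
    * large buffers with bigger page-table fragments. */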
451 if (size > ws->info.pte_fragment_size)
452 alignment = MAX2(alignment, ws->info.pte_fragment_size);
453 r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
454 size + va_gap_size, alignment, 0, &va, &va_handle,
455 (flags & RADEON_FLAG_32BIT ? AMDGPU_VA_RANGE_32_BIT : 0) |
456 AMDGPU_VA_RANGE_HIGH);
457 if (r)
458 goto error_va_alloc;
459
460 unsigned vm_flags = AMDGPU_VM_PAGE_READABLE |
461 AMDGPU_VM_PAGE_EXECUTABLE;
462
463 if (!(flags & RADEON_FLAG_READ_ONLY))
464 vm_flags |= AMDGPU_VM_PAGE_WRITEABLE;
465
466 r = amdgpu_bo_va_op_raw(ws->dev, buf_handle, 0, size, va, vm_flags,
467 AMDGPU_VA_OP_MAP);
468 if (r)
469 goto error_va_map;
470
471 pipe_reference_init(&bo->base.reference, 1);
472 bo->base.alignment = alignment;
473 bo->base.usage = 0;
474 bo->base.size = size;
475 bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
476 bo->ws = ws;
477 bo->bo = buf_handle;
478 bo->va = va;
479 bo->u.real.va_handle = va_handle;
480 bo->initial_domain = initial_domain;
481 bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
482 bo->is_local = !!(request.flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
483
484 if (initial_domain & RADEON_DOMAIN_VRAM)
485 ws->allocated_vram += align64(size, ws->info.gart_page_size);
486 else if (initial_domain & RADEON_DOMAIN_GTT)
487 ws->allocated_gtt += align64(size, ws->info.gart_page_size);
488
489 amdgpu_add_buffer_to_global_list(bo);
490
491 return bo;
492
493 error_va_map:
494 amdgpu_va_range_free(va_handle);
495
496 error_va_alloc:
497 amdgpu_bo_free(buf_handle);
498
499 error_bo_alloc:
500 FREE(bo);
501 return NULL;
502 }
503
504 bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf)
505 {
506 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
507
508 if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
509 return false;
510 }
511
512 return amdgpu_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
513 }
514
515 bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
516 {
517    struct amdgpu_winsys_bo *bo = NULL; /* NULL is fine: container_of only uses
                                            * 'bo' for its type and member offset */
518 bo = container_of(entry, bo, u.slab.entry);
519
520 return amdgpu_bo_can_reclaim(&bo->base);
521 }
522
523 static void amdgpu_bo_slab_destroy(struct pb_buffer *_buf)
524 {
525 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
526
527 assert(!bo->bo);
528
529 pb_slab_free(&bo->ws->bo_slabs, &bo->u.slab.entry);
530 }
531
532 static const struct pb_vtbl amdgpu_winsys_bo_slab_vtbl = {
533 amdgpu_bo_slab_destroy
534 /* other functions are never called */
535 };
536
537 struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
538 unsigned entry_size,
539 unsigned group_index)
540 {
541 struct amdgpu_winsys *ws = priv;
542 struct amdgpu_slab *slab = CALLOC_STRUCT(amdgpu_slab);
543 enum radeon_bo_domain domains = radeon_domain_from_heap(heap);
544 enum radeon_bo_flag flags = radeon_flags_from_heap(heap);
545 uint32_t base_id;
546
547 if (!slab)
548 return NULL;
549
550 unsigned slab_size = 1 << AMDGPU_SLAB_BO_SIZE_LOG2;
551 slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(&ws->base,
552 slab_size, slab_size,
553 domains, flags));
554 if (!slab->buffer)
555 goto fail;
556
557 assert(slab->buffer->bo);
558
559 slab->base.num_entries = slab->buffer->base.size / entry_size;
560 slab->base.num_free = slab->base.num_entries;
561 slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
562 if (!slab->entries)
563 goto fail_buffer;
564
565 LIST_INITHEAD(&slab->base.free);
566
567 base_id = __sync_fetch_and_add(&ws->next_bo_unique_id, slab->base.num_entries);
568
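   /* Carve the slab buffer into fixed-size entries; each entry is a standalone
    * amdgpu_winsys_bo that shares the parent's storage via u.slab.real and gets
    * its VA at a fixed offset into the slab. */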
569 for (unsigned i = 0; i < slab->base.num_entries; ++i) {
570 struct amdgpu_winsys_bo *bo = &slab->entries[i];
571
572 bo->base.alignment = entry_size;
573 bo->base.usage = slab->buffer->base.usage;
574 bo->base.size = entry_size;
575 bo->base.vtbl = &amdgpu_winsys_bo_slab_vtbl;
576 bo->ws = ws;
577 bo->va = slab->buffer->va + i * entry_size;
578 bo->initial_domain = domains;
579 bo->unique_id = base_id + i;
580 bo->u.slab.entry.slab = &slab->base;
581 bo->u.slab.entry.group_index = group_index;
582 bo->u.slab.real = slab->buffer;
583
584 LIST_ADDTAIL(&bo->u.slab.entry.head, &slab->base.free);
585 }
586
587 return &slab->base;
588
589 fail_buffer:
590 amdgpu_winsys_bo_reference(&slab->buffer, NULL);
591 fail:
592 FREE(slab);
593 return NULL;
594 }
595
596 void amdgpu_bo_slab_free(void *priv, struct pb_slab *pslab)
597 {
598 struct amdgpu_slab *slab = amdgpu_slab(pslab);
599
600 for (unsigned i = 0; i < slab->base.num_entries; ++i)
601 amdgpu_bo_remove_fences(&slab->entries[i]);
602
603 FREE(slab->entries);
604 amdgpu_winsys_bo_reference(&slab->buffer, NULL);
605 FREE(slab);
606 }
607
608 #if DEBUG_SPARSE_COMMITS
609 static void
610 sparse_dump(struct amdgpu_winsys_bo *bo, const char *func)
611 {
612 fprintf(stderr, "%s: %p (size=%"PRIu64", num_va_pages=%u) @ %s\n"
613 "Commitments:\n",
614 __func__, bo, bo->base.size, bo->u.sparse.num_va_pages, func);
615
616 struct amdgpu_sparse_backing *span_backing = NULL;
617 uint32_t span_first_backing_page = 0;
618 uint32_t span_first_va_page = 0;
619 uint32_t va_page = 0;
620
621 for (;;) {
622 struct amdgpu_sparse_backing *backing = 0;
623 uint32_t backing_page = 0;
624
625 if (va_page < bo->u.sparse.num_va_pages) {
626 backing = bo->u.sparse.commitments[va_page].backing;
627 backing_page = bo->u.sparse.commitments[va_page].page;
628 }
629
630 if (span_backing &&
631 (backing != span_backing ||
632 backing_page != span_first_backing_page + (va_page - span_first_va_page))) {
633 fprintf(stderr, " %u..%u: backing=%p:%u..%u\n",
634 span_first_va_page, va_page - 1, span_backing,
635 span_first_backing_page,
636 span_first_backing_page + (va_page - span_first_va_page) - 1);
637
638 span_backing = NULL;
639 }
640
641 if (va_page >= bo->u.sparse.num_va_pages)
642 break;
643
644 if (backing && !span_backing) {
645 span_backing = backing;
646 span_first_backing_page = backing_page;
647 span_first_va_page = va_page;
648 }
649
650 va_page++;
651 }
652
653 fprintf(stderr, "Backing:\n");
654
655 list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
656 fprintf(stderr, " %p (size=%"PRIu64")\n", backing, backing->bo->base.size);
657 for (unsigned i = 0; i < backing->num_chunks; ++i)
658 fprintf(stderr, " %u..%u\n", backing->chunks[i].begin, backing->chunks[i].end);
659 }
660 }
661 #endif
662
663 /*
664 * Attempt to allocate the given number of backing pages. Fewer pages may be
665 * allocated (depending on the fragmentation of existing backing buffers),
666 * which will be reflected by a change to *pnum_pages.
667 */
668 static struct amdgpu_sparse_backing *
669 sparse_backing_alloc(struct amdgpu_winsys_bo *bo, uint32_t *pstart_page, uint32_t *pnum_pages)
670 {
671 struct amdgpu_sparse_backing *best_backing;
672 unsigned best_idx;
673 uint32_t best_num_pages;
674
675 best_backing = NULL;
676 best_idx = 0;
677 best_num_pages = 0;
678
679 /* This is a very simple and inefficient best-fit algorithm. */
680 list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
681 for (unsigned idx = 0; idx < backing->num_chunks; ++idx) {
682 uint32_t cur_num_pages = backing->chunks[idx].end - backing->chunks[idx].begin;
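         /* Move toward a chunk as close to *pnum_pages as possible: take bigger
          * chunks while the best one is still too small, and smaller chunks once
          * the best one is larger than needed. */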
683 if ((best_num_pages < *pnum_pages && cur_num_pages > best_num_pages) ||
684 (best_num_pages > *pnum_pages && cur_num_pages < best_num_pages)) {
685 best_backing = backing;
686 best_idx = idx;
687 best_num_pages = cur_num_pages;
688 }
689 }
690 }
691
692 /* Allocate a new backing buffer if necessary. */
693 if (!best_backing) {
694 struct pb_buffer *buf;
695 uint64_t size;
696 uint32_t pages;
697
698 best_backing = CALLOC_STRUCT(amdgpu_sparse_backing);
699 if (!best_backing)
700 return NULL;
701
702 best_backing->max_chunks = 4;
703 best_backing->chunks = CALLOC(best_backing->max_chunks,
704 sizeof(*best_backing->chunks));
705 if (!best_backing->chunks) {
706 FREE(best_backing);
707 return NULL;
708 }
709
710 assert(bo->u.sparse.num_backing_pages < DIV_ROUND_UP(bo->base.size, RADEON_SPARSE_PAGE_SIZE));
711
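      /* Size new backing buffers to 1/16 of the sparse buffer, capped at 8 MB
       * and at the still-unbacked remainder, but always at least one page. */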
712 size = MIN3(bo->base.size / 16,
713 8 * 1024 * 1024,
714 bo->base.size - (uint64_t)bo->u.sparse.num_backing_pages * RADEON_SPARSE_PAGE_SIZE);
715 size = MAX2(size, RADEON_SPARSE_PAGE_SIZE);
716
717 buf = amdgpu_bo_create(&bo->ws->base, size, RADEON_SPARSE_PAGE_SIZE,
718 bo->initial_domain,
719 bo->u.sparse.flags | RADEON_FLAG_NO_SUBALLOC);
720 if (!buf) {
721 FREE(best_backing->chunks);
722 FREE(best_backing);
723 return NULL;
724 }
725
726 /* We might have gotten a bigger buffer than requested via caching. */
727 pages = buf->size / RADEON_SPARSE_PAGE_SIZE;
728
729 best_backing->bo = amdgpu_winsys_bo(buf);
730 best_backing->num_chunks = 1;
731 best_backing->chunks[0].begin = 0;
732 best_backing->chunks[0].end = pages;
733
734 list_add(&best_backing->list, &bo->u.sparse.backing);
735 bo->u.sparse.num_backing_pages += pages;
736
737 best_idx = 0;
738 best_num_pages = pages;
739 }
740
741 *pnum_pages = MIN2(*pnum_pages, best_num_pages);
742 *pstart_page = best_backing->chunks[best_idx].begin;
743 best_backing->chunks[best_idx].begin += *pnum_pages;
744
745 if (best_backing->chunks[best_idx].begin >= best_backing->chunks[best_idx].end) {
746 memmove(&best_backing->chunks[best_idx], &best_backing->chunks[best_idx + 1],
747 sizeof(*best_backing->chunks) * (best_backing->num_chunks - best_idx - 1));
748 best_backing->num_chunks--;
749 }
750
751 return best_backing;
752 }
753
754 static void
755 sparse_free_backing_buffer(struct amdgpu_winsys_bo *bo,
756 struct amdgpu_sparse_backing *backing)
757 {
758 struct amdgpu_winsys *ws = backing->bo->ws;
759
760 bo->u.sparse.num_backing_pages -= backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE;
761
762 simple_mtx_lock(&ws->bo_fence_lock);
763 amdgpu_add_fences(backing->bo, bo->num_fences, bo->fences);
764 simple_mtx_unlock(&ws->bo_fence_lock);
765
766 list_del(&backing->list);
767 amdgpu_winsys_bo_reference(&backing->bo, NULL);
768 FREE(backing->chunks);
769 FREE(backing);
770 }
771
772 /*
773 * Return a range of pages from the given backing buffer back into the
774 * free structure.
775 */
776 static bool
777 sparse_backing_free(struct amdgpu_winsys_bo *bo,
778 struct amdgpu_sparse_backing *backing,
779 uint32_t start_page, uint32_t num_pages)
780 {
781 uint32_t end_page = start_page + num_pages;
782 unsigned low = 0;
783 unsigned high = backing->num_chunks;
784
785 /* Find the first chunk with begin >= start_page. */
786 while (low < high) {
787 unsigned mid = low + (high - low) / 2;
788
789 if (backing->chunks[mid].begin >= start_page)
790 high = mid;
791 else
792 low = mid + 1;
793 }
794
795 assert(low >= backing->num_chunks || end_page <= backing->chunks[low].begin);
796 assert(low == 0 || backing->chunks[low - 1].end <= start_page);
797
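   /* Merge the freed range into the previous and/or following chunk when they
    * are adjacent; otherwise insert a new chunk, growing the array if full. */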
798 if (low > 0 && backing->chunks[low - 1].end == start_page) {
799 backing->chunks[low - 1].end = end_page;
800
801 if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
802 backing->chunks[low - 1].end = backing->chunks[low].end;
803 memmove(&backing->chunks[low], &backing->chunks[low + 1],
804 sizeof(*backing->chunks) * (backing->num_chunks - low - 1));
805 backing->num_chunks--;
806 }
807 } else if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
808 backing->chunks[low].begin = start_page;
809 } else {
810 if (backing->num_chunks >= backing->max_chunks) {
811 unsigned new_max_chunks = 2 * backing->max_chunks;
812 struct amdgpu_sparse_backing_chunk *new_chunks =
813 REALLOC(backing->chunks,
814 sizeof(*backing->chunks) * backing->max_chunks,
815 sizeof(*backing->chunks) * new_max_chunks);
816 if (!new_chunks)
817 return false;
818
819 backing->max_chunks = new_max_chunks;
820 backing->chunks = new_chunks;
821 }
822
823 memmove(&backing->chunks[low + 1], &backing->chunks[low],
824 sizeof(*backing->chunks) * (backing->num_chunks - low));
825 backing->chunks[low].begin = start_page;
826 backing->chunks[low].end = end_page;
827 backing->num_chunks++;
828 }
829
830 if (backing->num_chunks == 1 && backing->chunks[0].begin == 0 &&
831 backing->chunks[0].end == backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE)
832 sparse_free_backing_buffer(bo, backing);
833
834 return true;
835 }
836
837 static void amdgpu_bo_sparse_destroy(struct pb_buffer *_buf)
838 {
839 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
840 int r;
841
842 assert(!bo->bo && bo->sparse);
843
844 r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0,
845 (uint64_t)bo->u.sparse.num_va_pages * RADEON_SPARSE_PAGE_SIZE,
846 bo->va, 0, AMDGPU_VA_OP_CLEAR);
847 if (r) {
848 fprintf(stderr, "amdgpu: clearing PRT VA region on destroy failed (%d)\n", r);
849 }
850
851 while (!list_empty(&bo->u.sparse.backing)) {
852 struct amdgpu_sparse_backing *dummy = NULL;
853 sparse_free_backing_buffer(bo,
854 container_of(bo->u.sparse.backing.next,
855 dummy, list));
856 }
857
858 amdgpu_va_range_free(bo->u.sparse.va_handle);
859 simple_mtx_destroy(&bo->u.sparse.commit_lock);
860 FREE(bo->u.sparse.commitments);
861 FREE(bo);
862 }
863
864 static const struct pb_vtbl amdgpu_winsys_bo_sparse_vtbl = {
865 amdgpu_bo_sparse_destroy
866 /* other functions are never called */
867 };
868
869 static struct pb_buffer *
870 amdgpu_bo_sparse_create(struct amdgpu_winsys *ws, uint64_t size,
871 enum radeon_bo_domain domain,
872 enum radeon_bo_flag flags)
873 {
874 struct amdgpu_winsys_bo *bo;
875 uint64_t map_size;
876 uint64_t va_gap_size;
877 int r;
878
879 /* We use 32-bit page numbers; refuse to attempt allocating sparse buffers
880 * that exceed this limit. This is not really a restriction: we don't have
881 * that much virtual address space anyway.
882 */
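   /* With 64 KiB sparse pages this works out to roughly 128 TiB of VA. */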
883 if (size > (uint64_t)INT32_MAX * RADEON_SPARSE_PAGE_SIZE)
884 return NULL;
885
886 bo = CALLOC_STRUCT(amdgpu_winsys_bo);
887 if (!bo)
888 return NULL;
889
890 pipe_reference_init(&bo->base.reference, 1);
891 bo->base.alignment = RADEON_SPARSE_PAGE_SIZE;
892 bo->base.size = size;
893 bo->base.vtbl = &amdgpu_winsys_bo_sparse_vtbl;
894 bo->ws = ws;
895 bo->initial_domain = domain;
896 bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
897 bo->sparse = true;
898 bo->u.sparse.flags = flags & ~RADEON_FLAG_SPARSE;
899
900 bo->u.sparse.num_va_pages = DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
901 bo->u.sparse.commitments = CALLOC(bo->u.sparse.num_va_pages,
902 sizeof(*bo->u.sparse.commitments));
903 if (!bo->u.sparse.commitments)
904 goto error_alloc_commitments;
905
906 simple_mtx_init(&bo->u.sparse.commit_lock, mtx_plain);
907 LIST_INITHEAD(&bo->u.sparse.backing);
908
909 /* For simplicity, we always map a multiple of the page size. */
910 map_size = align64(size, RADEON_SPARSE_PAGE_SIZE);
911 va_gap_size = ws->check_vm ? 4 * RADEON_SPARSE_PAGE_SIZE : 0;
912 r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
913 map_size + va_gap_size, RADEON_SPARSE_PAGE_SIZE,
914 0, &bo->va, &bo->u.sparse.va_handle,
915 AMDGPU_VA_RANGE_HIGH);
916 if (r)
917 goto error_va_alloc;
918
919 r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0, size, bo->va,
920 AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_MAP);
921 if (r)
922 goto error_va_map;
923
924 return &bo->base;
925
926 error_va_map:
927 amdgpu_va_range_free(bo->u.sparse.va_handle);
928 error_va_alloc:
929 simple_mtx_destroy(&bo->u.sparse.commit_lock);
930 FREE(bo->u.sparse.commitments);
931 error_alloc_commitments:
932 FREE(bo);
933 return NULL;
934 }
935
936 static bool
937 amdgpu_bo_sparse_commit(struct pb_buffer *buf, uint64_t offset, uint64_t size,
938 bool commit)
939 {
940 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buf);
941 struct amdgpu_sparse_commitment *comm;
942 uint32_t va_page, end_va_page;
943 bool ok = true;
944 int r;
945
946 assert(bo->sparse);
947 assert(offset % RADEON_SPARSE_PAGE_SIZE == 0);
948 assert(offset <= bo->base.size);
949 assert(size <= bo->base.size - offset);
950 assert(size % RADEON_SPARSE_PAGE_SIZE == 0 || offset + size == bo->base.size);
951
952 comm = bo->u.sparse.commitments;
953 va_page = offset / RADEON_SPARSE_PAGE_SIZE;
954 end_va_page = va_page + DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
955
956 simple_mtx_lock(&bo->u.sparse.commit_lock);
957
958 #if DEBUG_SPARSE_COMMITS
959 sparse_dump(bo, __func__);
960 #endif
961
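   /* Commit: walk the range, find uncommitted spans and map backing pages over
    * them with AMDGPU_VA_OP_REPLACE. Decommit: replace the whole range with a
    * PRT mapping first, then hand each contiguous span of pages back to its
    * backing buffer. */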
962 if (commit) {
963 while (va_page < end_va_page) {
964 uint32_t span_va_page;
965
966 /* Skip pages that are already committed. */
967 if (comm[va_page].backing) {
968 va_page++;
969 continue;
970 }
971
972 /* Determine length of uncommitted span. */
973 span_va_page = va_page;
974 while (va_page < end_va_page && !comm[va_page].backing)
975 va_page++;
976
977 /* Fill the uncommitted span with chunks of backing memory. */
978 while (span_va_page < va_page) {
979 struct amdgpu_sparse_backing *backing;
980 uint32_t backing_start, backing_size;
981
982 backing_size = va_page - span_va_page;
983 backing = sparse_backing_alloc(bo, &backing_start, &backing_size);
984 if (!backing) {
985 ok = false;
986 goto out;
987 }
988
989 r = amdgpu_bo_va_op_raw(bo->ws->dev, backing->bo->bo,
990 (uint64_t)backing_start * RADEON_SPARSE_PAGE_SIZE,
991 (uint64_t)backing_size * RADEON_SPARSE_PAGE_SIZE,
992 bo->va + (uint64_t)span_va_page * RADEON_SPARSE_PAGE_SIZE,
993 AMDGPU_VM_PAGE_READABLE |
994 AMDGPU_VM_PAGE_WRITEABLE |
995 AMDGPU_VM_PAGE_EXECUTABLE,
996 AMDGPU_VA_OP_REPLACE);
997 if (r) {
998 ok = sparse_backing_free(bo, backing, backing_start, backing_size);
999 assert(ok && "sufficient memory should already be allocated");
1000
1001 ok = false;
1002 goto out;
1003 }
1004
1005 while (backing_size) {
1006 comm[span_va_page].backing = backing;
1007 comm[span_va_page].page = backing_start;
1008 span_va_page++;
1009 backing_start++;
1010 backing_size--;
1011 }
1012 }
1013 }
1014 } else {
1015 r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0,
1016 (uint64_t)(end_va_page - va_page) * RADEON_SPARSE_PAGE_SIZE,
1017 bo->va + (uint64_t)va_page * RADEON_SPARSE_PAGE_SIZE,
1018 AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_REPLACE);
1019 if (r) {
1020 ok = false;
1021 goto out;
1022 }
1023
1024 while (va_page < end_va_page) {
1025 struct amdgpu_sparse_backing *backing;
1026 uint32_t backing_start;
1027 uint32_t span_pages;
1028
1029 /* Skip pages that are already uncommitted. */
1030 if (!comm[va_page].backing) {
1031 va_page++;
1032 continue;
1033 }
1034
1035 /* Group contiguous spans of pages. */
1036 backing = comm[va_page].backing;
1037 backing_start = comm[va_page].page;
1038 comm[va_page].backing = NULL;
1039
1040 span_pages = 1;
1041 va_page++;
1042
1043 while (va_page < end_va_page &&
1044 comm[va_page].backing == backing &&
1045 comm[va_page].page == backing_start + span_pages) {
1046 comm[va_page].backing = NULL;
1047 va_page++;
1048 span_pages++;
1049 }
1050
1051 if (!sparse_backing_free(bo, backing, backing_start, span_pages)) {
1052 /* Couldn't allocate tracking data structures, so we have to leak */
1053 fprintf(stderr, "amdgpu: leaking PRT backing memory\n");
1054 ok = false;
1055 }
1056 }
1057 }
1058 out:
1059
1060 simple_mtx_unlock(&bo->u.sparse.commit_lock);
1061
1062 return ok;
1063 }
1064
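/* Convert the hardware TILE_SPLIT field encoding (0..6) to a tile-split size
 * in bytes, and back. */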
1065 static unsigned eg_tile_split(unsigned tile_split)
1066 {
1067 switch (tile_split) {
1068 case 0: tile_split = 64; break;
1069 case 1: tile_split = 128; break;
1070 case 2: tile_split = 256; break;
1071 case 3: tile_split = 512; break;
1072 default:
1073 case 4: tile_split = 1024; break;
1074 case 5: tile_split = 2048; break;
1075 case 6: tile_split = 4096; break;
1076 }
1077 return tile_split;
1078 }
1079
1080 static unsigned eg_tile_split_rev(unsigned eg_tile_split)
1081 {
1082 switch (eg_tile_split) {
1083 case 64: return 0;
1084 case 128: return 1;
1085 case 256: return 2;
1086 case 512: return 3;
1087 default:
1088 case 1024: return 4;
1089 case 2048: return 5;
1090 case 4096: return 6;
1091 }
1092 }
1093
1094 static void amdgpu_buffer_get_metadata(struct pb_buffer *_buf,
1095 struct radeon_bo_metadata *md)
1096 {
1097 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
1098 struct amdgpu_bo_info info = {0};
1099 uint64_t tiling_flags;
1100 int r;
1101
1102 assert(bo->bo && "must not be called for slab entries");
1103
1104 r = amdgpu_bo_query_info(bo->bo, &info);
1105 if (r)
1106 return;
1107
1108 tiling_flags = info.metadata.tiling_info;
1109
1110 if (bo->ws->info.chip_class >= GFX9) {
1111 md->u.gfx9.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
1112 } else {
1113 md->u.legacy.microtile = RADEON_LAYOUT_LINEAR;
1114 md->u.legacy.macrotile = RADEON_LAYOUT_LINEAR;
1115
1116 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */
1117 md->u.legacy.macrotile = RADEON_LAYOUT_TILED;
1118 else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
1119 md->u.legacy.microtile = RADEON_LAYOUT_TILED;
1120
1121 md->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1122 md->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1123 md->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1124 md->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
1125 md->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1126 md->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1127 md->u.legacy.scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
1128 }
1129
1130 md->size_metadata = info.metadata.size_metadata;
1131 memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
1132 }
1133
1134 static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
1135 struct radeon_bo_metadata *md)
1136 {
1137 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
1138 struct amdgpu_bo_metadata metadata = {0};
1139 uint64_t tiling_flags = 0;
1140
1141 assert(bo->bo && "must not be called for slab entries");
1142
1143 if (bo->ws->info.chip_class >= GFX9) {
1144 tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, md->u.gfx9.swizzle_mode);
1145 } else {
1146 if (md->u.legacy.macrotile == RADEON_LAYOUT_TILED)
1147 tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
1148 else if (md->u.legacy.microtile == RADEON_LAYOUT_TILED)
1149 tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
1150 else
1151 tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */
1152
1153 tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, md->u.legacy.pipe_config);
1154 tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(md->u.legacy.bankw));
1155 tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(md->u.legacy.bankh));
1156 if (md->u.legacy.tile_split)
1157 tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(md->u.legacy.tile_split));
1158 tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(md->u.legacy.mtilea));
1159 tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(md->u.legacy.num_banks)-1);
1160
1161 if (md->u.legacy.scanout)
1162 tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
1163 else
1164 tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
1165 }
1166
1167 metadata.tiling_info = tiling_flags;
1168 metadata.size_metadata = md->size_metadata;
1169 memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));
1170
1171 amdgpu_bo_set_metadata(bo->bo, &metadata);
1172 }
1173
1174 static struct pb_buffer *
1175 amdgpu_bo_create(struct radeon_winsys *rws,
1176 uint64_t size,
1177 unsigned alignment,
1178 enum radeon_bo_domain domain,
1179 enum radeon_bo_flag flags)
1180 {
1181 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1182 struct amdgpu_winsys_bo *bo;
1183 int heap = -1;
1184
1185 /* VRAM implies WC. This is not optional. */
1186 assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
1187
1188 /* NO_CPU_ACCESS is valid with VRAM only. */
1189 assert(domain == RADEON_DOMAIN_VRAM || !(flags & RADEON_FLAG_NO_CPU_ACCESS));
1190
1191 /* Sparse buffers must have NO_CPU_ACCESS set. */
1192 assert(!(flags & RADEON_FLAG_SPARSE) || flags & RADEON_FLAG_NO_CPU_ACCESS);
1193
1194 /* Sub-allocate small buffers from slabs. */
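   /* Slab entries are power-of-two sized and naturally aligned within the slab,
    * so any alignment up to the rounded-up entry size is honored for free. */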
1195 if (!(flags & (RADEON_FLAG_NO_SUBALLOC | RADEON_FLAG_SPARSE)) &&
1196 size <= (1 << AMDGPU_SLAB_MAX_SIZE_LOG2) &&
1197 alignment <= MAX2(1 << AMDGPU_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
1198 struct pb_slab_entry *entry;
1199 int heap = radeon_get_heap_index(domain, flags);
1200
1201 if (heap < 0 || heap >= RADEON_MAX_SLAB_HEAPS)
1202 goto no_slab;
1203
1204 entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
1205 if (!entry) {
1206 /* Clear the cache and try again. */
1207 pb_cache_release_all_buffers(&ws->bo_cache);
1208
1209 entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
1210 }
1211 if (!entry)
1212 return NULL;
1213
1214 bo = NULL;
1215 bo = container_of(entry, bo, u.slab.entry);
1216
1217 pipe_reference_init(&bo->base.reference, 1);
1218
1219 return &bo->base;
1220 }
1221 no_slab:
1222
1223 if (flags & RADEON_FLAG_SPARSE) {
1224 assert(RADEON_SPARSE_PAGE_SIZE % alignment == 0);
1225
1226 return amdgpu_bo_sparse_create(ws, size, domain, flags);
1227 }
1228
1229 /* This flag is irrelevant for the cache. */
1230 flags &= ~RADEON_FLAG_NO_SUBALLOC;
1231
1232 /* Align size to page size. This is the minimum alignment for normal
1233     * BOs. Aligning here helps the cached buffer manager: small BOs such as
1234     * constant/uniform buffers especially benefit from the increased reuse.
1235 */
1236 size = align64(size, ws->info.gart_page_size);
1237 alignment = align(alignment, ws->info.gart_page_size);
1238
1239 bool use_reusable_pool = flags & RADEON_FLAG_NO_INTERPROCESS_SHARING;
1240
1241 if (use_reusable_pool) {
1242 heap = radeon_get_heap_index(domain, flags);
1243 assert(heap >= 0 && heap < RADEON_MAX_CACHED_HEAPS);
1244
1245 /* Get a buffer from the cache. */
1246 bo = (struct amdgpu_winsys_bo*)
1247 pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, 0, heap);
1248 if (bo)
1249 return &bo->base;
1250 }
1251
1252 /* Create a new one. */
1253 bo = amdgpu_create_bo(ws, size, alignment, domain, flags, heap);
1254 if (!bo) {
1255 /* Clear the cache and try again. */
1256 pb_slabs_reclaim(&ws->bo_slabs);
1257 pb_cache_release_all_buffers(&ws->bo_cache);
1258 bo = amdgpu_create_bo(ws, size, alignment, domain, flags, heap);
1259 if (!bo)
1260 return NULL;
1261 }
1262
1263 bo->u.real.use_reusable_pool = use_reusable_pool;
1264 return &bo->base;
1265 }
1266
1267 static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
1268 struct winsys_handle *whandle,
1269 unsigned *stride,
1270 unsigned *offset)
1271 {
1272 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1273 struct amdgpu_winsys_bo *bo = NULL;
1274 enum amdgpu_bo_handle_type type;
1275 struct amdgpu_bo_import_result result = {0};
1276 uint64_t va;
1277 amdgpu_va_handle va_handle = NULL;
1278 struct amdgpu_bo_info info = {0};
1279 enum radeon_bo_domain initial = 0;
1280 int r;
1281
1282 switch (whandle->type) {
1283 case WINSYS_HANDLE_TYPE_SHARED:
1284 type = amdgpu_bo_handle_type_gem_flink_name;
1285 break;
1286 case WINSYS_HANDLE_TYPE_FD:
1287 type = amdgpu_bo_handle_type_dma_buf_fd;
1288 break;
1289 default:
1290 return NULL;
1291 }
1292
1293 if (stride)
1294 *stride = whandle->stride;
1295 if (offset)
1296 *offset = whandle->offset;
1297
1298 r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
1299 if (r)
1300 return NULL;
1301
1302 simple_mtx_lock(&ws->bo_export_table_lock);
1303 bo = util_hash_table_get(ws->bo_export_table, result.buf_handle);
1304
1305 /* If the amdgpu_winsys_bo instance already exists, bump the reference
1306 * counter and return it.
1307 */
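   /* This is what guarantees a single amdgpu_winsys_bo per kernel BO even when
    * the same dma-buf is imported more than once (e.g. for VDPAU interop). */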
1308 if (bo) {
1309 p_atomic_inc(&bo->base.reference.count);
1310 simple_mtx_unlock(&ws->bo_export_table_lock);
1311 return &bo->base;
1312 }
1313
1314 /* Get initial domains. */
1315 r = amdgpu_bo_query_info(result.buf_handle, &info);
1316 if (r)
1317 goto error;
1318
1319 r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
1320 result.alloc_size, 1 << 20, 0, &va, &va_handle,
1321 AMDGPU_VA_RANGE_HIGH);
1322 if (r)
1323 goto error;
1324
1325 bo = CALLOC_STRUCT(amdgpu_winsys_bo);
1326 if (!bo)
1327 goto error;
1328
1329 r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP);
1330 if (r)
1331 goto error;
1332
1333 if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
1334 initial |= RADEON_DOMAIN_VRAM;
1335 if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
1336 initial |= RADEON_DOMAIN_GTT;
1337
1338 /* Initialize the structure. */
1339 pipe_reference_init(&bo->base.reference, 1);
1340 bo->base.alignment = info.phys_alignment;
1341 bo->bo = result.buf_handle;
1342 bo->base.size = result.alloc_size;
1343 bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
1344 bo->ws = ws;
1345 bo->va = va;
1346 bo->u.real.va_handle = va_handle;
1347 bo->initial_domain = initial;
1348 bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
1349 bo->is_shared = true;
1350
1351 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
1352 ws->allocated_vram += align64(bo->base.size, ws->info.gart_page_size);
1353 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
1354 ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);
1355
1356 amdgpu_add_buffer_to_global_list(bo);
1357
1358 util_hash_table_set(ws->bo_export_table, bo->bo, bo);
1359 simple_mtx_unlock(&ws->bo_export_table_lock);
1360
1361 return &bo->base;
1362
1363 error:
1364 simple_mtx_unlock(&ws->bo_export_table_lock);
1365 if (bo)
1366 FREE(bo);
1367 if (va_handle)
1368 amdgpu_va_range_free(va_handle);
1369 amdgpu_bo_free(result.buf_handle);
1370 return NULL;
1371 }
1372
1373 static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
1374 unsigned stride, unsigned offset,
1375 unsigned slice_size,
1376 struct winsys_handle *whandle)
1377 {
1378 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
1379 struct amdgpu_winsys *ws = bo->ws;
1380 enum amdgpu_bo_handle_type type;
1381 int r;
1382
1383 /* Don't allow exports of slab entries and sparse buffers. */
1384 if (!bo->bo)
1385 return false;
1386
1387 bo->u.real.use_reusable_pool = false;
1388
1389 switch (whandle->type) {
1390 case WINSYS_HANDLE_TYPE_SHARED:
1391 type = amdgpu_bo_handle_type_gem_flink_name;
1392 break;
1393 case WINSYS_HANDLE_TYPE_FD:
1394 type = amdgpu_bo_handle_type_dma_buf_fd;
1395 break;
1396 case WINSYS_HANDLE_TYPE_KMS:
1397 type = amdgpu_bo_handle_type_kms;
1398 break;
1399 default:
1400 return false;
1401 }
1402
1403 r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
1404 if (r)
1405 return false;
1406
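   /* Record the export so a later import of the same handle finds this
    * amdgpu_winsys_bo instead of creating a second one. */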
1407 simple_mtx_lock(&ws->bo_export_table_lock);
1408 util_hash_table_set(ws->bo_export_table, bo->bo, bo);
1409 simple_mtx_unlock(&ws->bo_export_table_lock);
1410
1411 whandle->stride = stride;
1412 whandle->offset = offset;
1413 whandle->offset += slice_size * whandle->layer;
1414 bo->is_shared = true;
1415 return true;
1416 }
1417
1418 static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
1419 void *pointer, uint64_t size)
1420 {
1421 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1422 amdgpu_bo_handle buf_handle;
1423 struct amdgpu_winsys_bo *bo;
1424 uint64_t va;
1425 amdgpu_va_handle va_handle;
1426 /* Avoid failure when the size is not page aligned */
1427 uint64_t aligned_size = align64(size, ws->info.gart_page_size);
1428
1429 bo = CALLOC_STRUCT(amdgpu_winsys_bo);
1430 if (!bo)
1431 return NULL;
1432
1433 if (amdgpu_create_bo_from_user_mem(ws->dev, pointer,
1434 aligned_size, &buf_handle))
1435 goto error;
1436
1437 if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
1438 aligned_size, 1 << 12, 0, &va, &va_handle,
1439 AMDGPU_VA_RANGE_HIGH))
1440 goto error_va_alloc;
1441
1442 if (amdgpu_bo_va_op(buf_handle, 0, aligned_size, va, 0, AMDGPU_VA_OP_MAP))
1443 goto error_va_map;
1444
1445 /* Initialize it. */
1446 pipe_reference_init(&bo->base.reference, 1);
1447 bo->bo = buf_handle;
1448 bo->base.alignment = 0;
1449 bo->base.size = size;
1450 bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
1451 bo->ws = ws;
1452 bo->user_ptr = pointer;
1453 bo->va = va;
1454 bo->u.real.va_handle = va_handle;
1455 bo->initial_domain = RADEON_DOMAIN_GTT;
1456 bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
1457
1458 ws->allocated_gtt += aligned_size;
1459
1460 amdgpu_add_buffer_to_global_list(bo);
1461
1462 return (struct pb_buffer*)bo;
1463
1464 error_va_map:
1465 amdgpu_va_range_free(va_handle);
1466
1467 error_va_alloc:
1468 amdgpu_bo_free(buf_handle);
1469
1470 error:
1471 FREE(bo);
1472 return NULL;
1473 }
1474
1475 static bool amdgpu_bo_is_user_ptr(struct pb_buffer *buf)
1476 {
1477 return ((struct amdgpu_winsys_bo*)buf)->user_ptr != NULL;
1478 }
1479
1480 static bool amdgpu_bo_is_suballocated(struct pb_buffer *buf)
1481 {
1482 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
1483
1484 return !bo->bo && !bo->sparse;
1485 }
1486
1487 static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
1488 {
1489 return ((struct amdgpu_winsys_bo*)buf)->va;
1490 }
1491
1492 void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
1493 {
1494 ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
1495 ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
1496 ws->base.buffer_map = amdgpu_bo_map;
1497 ws->base.buffer_unmap = amdgpu_bo_unmap;
1498 ws->base.buffer_wait = amdgpu_bo_wait;
1499 ws->base.buffer_create = amdgpu_bo_create;
1500 ws->base.buffer_from_handle = amdgpu_bo_from_handle;
1501 ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
1502 ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
1503 ws->base.buffer_is_suballocated = amdgpu_bo_is_suballocated;
1504 ws->base.buffer_get_handle = amdgpu_bo_get_handle;
1505 ws->base.buffer_commit = amdgpu_bo_sparse_commit;
1506 ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
1507 ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
1508 }