winsys/amdgpu: clean up error handling in amdgpu_bo_from_handle
[mesa.git] / src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
1 /*
2 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
3 * Copyright © 2015 Advanced Micro Devices, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
16 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
18 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
25 * of the Software.
26 */
27
28 #include "amdgpu_cs.h"
29
30 #include "util/os_time.h"
31 #include "state_tracker/drm_driver.h"
32 #include <amdgpu_drm.h>
33 #include <xf86drm.h>
34 #include <stdio.h>
35 #include <inttypes.h>
36
37 #ifndef AMDGPU_GEM_CREATE_VM_ALWAYS_VALID
38 #define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
39 #endif
40
41 #ifndef AMDGPU_VA_RANGE_HIGH
42 #define AMDGPU_VA_RANGE_HIGH 0x2
43 #endif
44
45 /* Set to 1 for verbose output showing committed sparse buffer ranges. */
46 #define DEBUG_SPARSE_COMMITS 0
47
48 struct amdgpu_sparse_backing_chunk {
49 uint32_t begin, end;
50 };
51
52 static struct pb_buffer *
53 amdgpu_bo_create(struct radeon_winsys *rws,
54 uint64_t size,
55 unsigned alignment,
56 enum radeon_bo_domain domain,
57 enum radeon_bo_flag flags);
58
59 static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
60 enum radeon_bo_usage usage)
61 {
62 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
63 struct amdgpu_winsys *ws = bo->ws;
64 int64_t abs_timeout;
65
66 if (timeout == 0) {
67 if (p_atomic_read(&bo->num_active_ioctls))
68 return false;
69
70 } else {
71 abs_timeout = os_time_get_absolute_timeout(timeout);
72
73 /* Wait if any ioctl is being submitted with this buffer. */
74 if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
75 return false;
76 }
77
78 if (bo->is_shared) {
79 /* We can't use user fences for shared buffers, because user fences
80 * are local to this process only. If we want to wait for all buffer
81 * uses in all processes, we have to use amdgpu_bo_wait_for_idle.
82 */
83 bool buffer_busy = true;
84 int r;
85
86 r = amdgpu_bo_wait_for_idle(bo->bo, timeout, &buffer_busy);
87 if (r)
88 fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__,
89 r);
90 return !buffer_busy;
91 }
92
93 if (timeout == 0) {
94 unsigned idle_fences;
95 bool buffer_idle;
96
97 simple_mtx_lock(&ws->bo_fence_lock);
98
99 for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
100 if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
101 break;
102 }
103
104 /* Release the idle fences to avoid checking them again later. */
105 for (unsigned i = 0; i < idle_fences; ++i)
106 amdgpu_fence_reference(&bo->fences[i], NULL);
107
108 memmove(&bo->fences[0], &bo->fences[idle_fences],
109 (bo->num_fences - idle_fences) * sizeof(*bo->fences));
110 bo->num_fences -= idle_fences;
111
112 buffer_idle = !bo->num_fences;
113 simple_mtx_unlock(&ws->bo_fence_lock);
114
115 return buffer_idle;
116 } else {
117 bool buffer_idle = true;
118
119 simple_mtx_lock(&ws->bo_fence_lock);
120 while (bo->num_fences && buffer_idle) {
121 struct pipe_fence_handle *fence = NULL;
122 bool fence_idle = false;
123
124 amdgpu_fence_reference(&fence, bo->fences[0]);
125
126 /* Wait for the fence. */
127 simple_mtx_unlock(&ws->bo_fence_lock);
128 if (amdgpu_fence_wait(fence, abs_timeout, true))
129 fence_idle = true;
130 else
131 buffer_idle = false;
132 simple_mtx_lock(&ws->bo_fence_lock);
133
134 /* Release an idle fence to avoid checking it again later, keeping in
135 * mind that the fence array may have been modified by other threads.
136 */
137 if (fence_idle && bo->num_fences && bo->fences[0] == fence) {
138 amdgpu_fence_reference(&bo->fences[0], NULL);
139 memmove(&bo->fences[0], &bo->fences[1],
140 (bo->num_fences - 1) * sizeof(*bo->fences));
141 bo->num_fences--;
142 }
143
144 amdgpu_fence_reference(&fence, NULL);
145 }
146 simple_mtx_unlock(&ws->bo_fence_lock);
147
148 return buffer_idle;
149 }
150 }
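
/* Usage sketch for amdgpu_bo_wait() above (reached through the
 * radeon_winsys::buffer_wait hook installed at the bottom of this file).
 * "rws" and "buf" are assumed to come from the caller; timeout == 0 is a
 * non-blocking busyness query, PIPE_TIMEOUT_INFINITE blocks until idle:
 *
 *    if (!rws->buffer_wait(buf, 0, RADEON_USAGE_READWRITE)) {
 *       // still busy somewhere; block until every fence has signalled
 *       rws->buffer_wait(buf, PIPE_TIMEOUT_INFINITE, RADEON_USAGE_READWRITE);
 *    }
 */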
151
152 static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
153 struct pb_buffer *buf)
154 {
155 return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
156 }
157
158 static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
159 {
160 for (unsigned i = 0; i < bo->num_fences; ++i)
161 amdgpu_fence_reference(&bo->fences[i], NULL);
162
163 FREE(bo->fences);
164 bo->num_fences = 0;
165 bo->max_fences = 0;
166 }
167
168 void amdgpu_bo_destroy(struct pb_buffer *_buf)
169 {
170 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
171 struct amdgpu_winsys *ws = bo->ws;
172
173 assert(bo->bo && "must not be called for slab entries");
174
175 if (ws->debug_all_bos) {
176 simple_mtx_lock(&ws->global_bo_list_lock);
177 LIST_DEL(&bo->u.real.global_list_item);
178 ws->num_buffers--;
179 simple_mtx_unlock(&ws->global_bo_list_lock);
180 }
181
182 amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
183 amdgpu_va_range_free(bo->u.real.va_handle);
184 amdgpu_bo_free(bo->bo);
185
186 amdgpu_bo_remove_fences(bo);
187
188 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
189 ws->allocated_vram -= align64(bo->base.size, ws->info.gart_page_size);
190 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
191 ws->allocated_gtt -= align64(bo->base.size, ws->info.gart_page_size);
192
193 if (bo->u.real.map_count >= 1) {
194 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
195 ws->mapped_vram -= bo->base.size;
196 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
197 ws->mapped_gtt -= bo->base.size;
198 ws->num_mapped_buffers--;
199 }
200
201 FREE(bo);
202 }
203
204 static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
205 {
206 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
207
208 assert(bo->bo); /* slab buffers have a separate vtbl */
209
210 if (bo->u.real.use_reusable_pool)
211 pb_cache_add_buffer(&bo->u.real.cache_entry);
212 else
213 amdgpu_bo_destroy(_buf);
214 }
215
216 static void *amdgpu_bo_map(struct pb_buffer *buf,
217 struct radeon_cmdbuf *rcs,
218 enum pipe_transfer_usage usage)
219 {
220 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
221 struct amdgpu_winsys_bo *real;
222 struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
223 int r;
224 void *cpu = NULL;
225 uint64_t offset = 0;
226
227 assert(!bo->sparse);
228
229 /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
230 if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
231 /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
232 if (usage & PIPE_TRANSFER_DONTBLOCK) {
233 if (!(usage & PIPE_TRANSFER_WRITE)) {
234 /* Mapping for read.
235 *
236 * Since we are mapping for read, we don't need to wait
237 * if the GPU is using the buffer for read too
238 * (neither one is changing it).
239 *
240 * Only check whether the buffer is being used for write. */
241 if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
242 RADEON_USAGE_WRITE)) {
243 cs->flush_cs(cs->flush_data,
244 RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
245 return NULL;
246 }
247
248 if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
249 RADEON_USAGE_WRITE)) {
250 return NULL;
251 }
252 } else {
253 if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
254 cs->flush_cs(cs->flush_data,
255 RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
256 return NULL;
257 }
258
259 if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
260 RADEON_USAGE_READWRITE)) {
261 return NULL;
262 }
263 }
264 } else {
265 uint64_t time = os_time_get_nano();
266
267 if (!(usage & PIPE_TRANSFER_WRITE)) {
268 /* Mapping for read.
269 *
270 * Since we are mapping for read, we don't need to wait
271 * if the GPU is using the buffer for read too
272 * (neither one is changing it).
273 *
274 * Only check whether the buffer is being used for write. */
275 if (cs) {
276 if (amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
277 RADEON_USAGE_WRITE)) {
278 cs->flush_cs(cs->flush_data,
279 RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
280 } else {
281 /* Try to avoid busy-waiting in amdgpu_bo_wait. */
282 if (p_atomic_read(&bo->num_active_ioctls))
283 amdgpu_cs_sync_flush(rcs);
284 }
285 }
286
287 amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
288 RADEON_USAGE_WRITE);
289 } else {
290 /* Mapping for write. */
291 if (cs) {
292 if (amdgpu_bo_is_referenced_by_cs(cs, bo)) {
293 cs->flush_cs(cs->flush_data,
294 RADEON_FLUSH_START_NEXT_GFX_IB_NOW, NULL);
295 } else {
296 /* Try to avoid busy-waiting in amdgpu_bo_wait. */
297 if (p_atomic_read(&bo->num_active_ioctls))
298 amdgpu_cs_sync_flush(rcs);
299 }
300 }
301
302 amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
303 RADEON_USAGE_READWRITE);
304 }
305
306 bo->ws->buffer_wait_time += os_time_get_nano() - time;
307 }
308 }
309
310 /* If the buffer is created from user memory, return the user pointer. */
311 if (bo->user_ptr)
312 return bo->user_ptr;
313
314 if (bo->bo) {
315 real = bo;
316 } else {
317 real = bo->u.slab.real;
318 offset = bo->va - real->va;
319 }
320
321 r = amdgpu_bo_cpu_map(real->bo, &cpu);
322 if (r) {
323 /* Clear the cache and try again. */
324 pb_cache_release_all_buffers(&real->ws->bo_cache);
325 r = amdgpu_bo_cpu_map(real->bo, &cpu);
326 if (r)
327 return NULL;
328 }
329
330 if (p_atomic_inc_return(&real->u.real.map_count) == 1) {
331 if (real->initial_domain & RADEON_DOMAIN_VRAM)
332 real->ws->mapped_vram += real->base.size;
333 else if (real->initial_domain & RADEON_DOMAIN_GTT)
334 real->ws->mapped_gtt += real->base.size;
335 real->ws->num_mapped_buffers++;
336 }
337 return (uint8_t*)cpu + offset;
338 }
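
/* Usage sketch for amdgpu_bo_map()/amdgpu_bo_unmap() above, via the
 * buffer_map/buffer_unmap hooks. "rws", "rcs" and "buf" are assumed to be
 * provided by the caller; passing the command stream lets the winsys flush
 * it when the buffer is still referenced by queued work:
 *
 *    uint32_t *ptr = rws->buffer_map(buf, rcs, PIPE_TRANSFER_WRITE);
 *    if (ptr) {
 *       ptr[0] = 0xdeadbeef;              // CPU write through the mapping
 *       rws->buffer_unmap(buf);
 *    }
 *
 * Adding PIPE_TRANSFER_UNSYNCHRONIZED skips the flush-and-wait path entirely;
 * PIPE_TRANSFER_DONTBLOCK returns NULL instead of waiting.
 */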
339
340 static void amdgpu_bo_unmap(struct pb_buffer *buf)
341 {
342 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
343 struct amdgpu_winsys_bo *real;
344
345 assert(!bo->sparse);
346
347 if (bo->user_ptr)
348 return;
349
350 real = bo->bo ? bo : bo->u.slab.real;
351
352 if (p_atomic_dec_zero(&real->u.real.map_count)) {
353 if (real->initial_domain & RADEON_DOMAIN_VRAM)
354 real->ws->mapped_vram -= real->base.size;
355 else if (real->initial_domain & RADEON_DOMAIN_GTT)
356 real->ws->mapped_gtt -= real->base.size;
357 real->ws->num_mapped_buffers--;
358 }
359
360 amdgpu_bo_cpu_unmap(real->bo);
361 }
362
363 static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
364 amdgpu_bo_destroy_or_cache
365 /* other functions are never called */
366 };
367
368 static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
369 {
370 struct amdgpu_winsys *ws = bo->ws;
371
372 assert(bo->bo);
373
374 if (ws->debug_all_bos) {
375 simple_mtx_lock(&ws->global_bo_list_lock);
376 LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
377 ws->num_buffers++;
378 simple_mtx_unlock(&ws->global_bo_list_lock);
379 }
380 }
381
382 static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
383 uint64_t size,
384 unsigned alignment,
385 enum radeon_bo_domain initial_domain,
386 unsigned flags,
387 int heap)
388 {
389 struct amdgpu_bo_alloc_request request = {0};
390 amdgpu_bo_handle buf_handle;
391 uint64_t va = 0;
392 struct amdgpu_winsys_bo *bo;
393 amdgpu_va_handle va_handle;
394 unsigned va_gap_size;
395 int r;
396
397 /* VRAM or GTT must be specified, but not both at the same time. */
398 assert(util_bitcount(initial_domain & RADEON_DOMAIN_VRAM_GTT) == 1);
399
400 bo = CALLOC_STRUCT(amdgpu_winsys_bo);
401 if (!bo) {
402 return NULL;
403 }
404
405 if (heap >= 0) {
406 pb_cache_init_entry(&ws->bo_cache, &bo->u.real.cache_entry, &bo->base,
407 heap);
408 }
409 request.alloc_size = size;
410 request.phys_alignment = alignment;
411
412 if (initial_domain & RADEON_DOMAIN_VRAM)
413 request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
414 if (initial_domain & RADEON_DOMAIN_GTT)
415 request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
416
417 /* Since VRAM and GTT have almost the same performance on APUs, we could
418 * just set GTT. However, in order to decrease GTT(RAM) usage, which is
419      * shared with the OS, allow VRAM placements too. The point is not that
420      * VRAM helps here, but that it would otherwise sit unused and wasted.
421 */
422 if (!ws->info.has_dedicated_vram)
423 request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
424
425 if (flags & RADEON_FLAG_NO_CPU_ACCESS)
426 request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
427 if (flags & RADEON_FLAG_GTT_WC)
428 request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
429 if (flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
430 ws->info.has_local_buffers)
431 request.flags |= AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;
432 if (ws->zero_all_vram_allocs &&
433 (request.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM))
434 request.flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
435
436 r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
437 if (r) {
438 fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
439 fprintf(stderr, "amdgpu: size : %"PRIu64" bytes\n", size);
440 fprintf(stderr, "amdgpu: alignment : %u bytes\n", alignment);
441 fprintf(stderr, "amdgpu: domains : %u\n", initial_domain);
442 goto error_bo_alloc;
443 }
444
445 va_gap_size = ws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
446 if (size > ws->info.pte_fragment_size)
447 alignment = MAX2(alignment, ws->info.pte_fragment_size);
448 r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
449 size + va_gap_size, alignment, 0, &va, &va_handle,
450 (flags & RADEON_FLAG_32BIT ? AMDGPU_VA_RANGE_32_BIT : 0) |
451 AMDGPU_VA_RANGE_HIGH);
452 if (r)
453 goto error_va_alloc;
454
455 unsigned vm_flags = AMDGPU_VM_PAGE_READABLE |
456 AMDGPU_VM_PAGE_EXECUTABLE;
457
458 if (!(flags & RADEON_FLAG_READ_ONLY))
459 vm_flags |= AMDGPU_VM_PAGE_WRITEABLE;
460
461 r = amdgpu_bo_va_op_raw(ws->dev, buf_handle, 0, size, va, vm_flags,
462 AMDGPU_VA_OP_MAP);
463 if (r)
464 goto error_va_map;
465
466 pipe_reference_init(&bo->base.reference, 1);
467 bo->base.alignment = alignment;
468 bo->base.usage = 0;
469 bo->base.size = size;
470 bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
471 bo->ws = ws;
472 bo->bo = buf_handle;
473 bo->va = va;
474 bo->u.real.va_handle = va_handle;
475 bo->initial_domain = initial_domain;
476 bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
477 bo->is_local = !!(request.flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
478
479 if (initial_domain & RADEON_DOMAIN_VRAM)
480 ws->allocated_vram += align64(size, ws->info.gart_page_size);
481 else if (initial_domain & RADEON_DOMAIN_GTT)
482 ws->allocated_gtt += align64(size, ws->info.gart_page_size);
483
484 amdgpu_add_buffer_to_global_list(bo);
485
486 return bo;
487
488 error_va_map:
489 amdgpu_va_range_free(va_handle);
490
491 error_va_alloc:
492 amdgpu_bo_free(buf_handle);
493
494 error_bo_alloc:
495 FREE(bo);
496 return NULL;
497 }
498
499 bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf)
500 {
501 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
502
503 if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
504 return false;
505 }
506
507 return amdgpu_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
508 }
509
510 bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
511 {
512    struct amdgpu_winsys_bo *bo = NULL; /* NULL init only gives container_of() a typed pointer */
513 bo = container_of(entry, bo, u.slab.entry);
514
515 return amdgpu_bo_can_reclaim(&bo->base);
516 }
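
/* Note on the container_of() idiom above: the pb_slab_entry is embedded in
 * amdgpu_winsys_bo as u.slab.entry, so the macro recovers the enclosing bo
 * from the entry pointer; only the type of "bo" is used to compute the member
 * offset. Roughly equivalent, as a sketch:
 *
 *    struct amdgpu_winsys_bo *bo2 = (struct amdgpu_winsys_bo *)
 *       ((char *)entry - offsetof(struct amdgpu_winsys_bo, u.slab.entry));
 */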
517
518 static void amdgpu_bo_slab_destroy(struct pb_buffer *_buf)
519 {
520 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
521
522 assert(!bo->bo);
523
524 pb_slab_free(&bo->ws->bo_slabs, &bo->u.slab.entry);
525 }
526
527 static const struct pb_vtbl amdgpu_winsys_bo_slab_vtbl = {
528 amdgpu_bo_slab_destroy
529 /* other functions are never called */
530 };
531
532 struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
533 unsigned entry_size,
534 unsigned group_index)
535 {
536 struct amdgpu_winsys *ws = priv;
537 struct amdgpu_slab *slab = CALLOC_STRUCT(amdgpu_slab);
538 enum radeon_bo_domain domains = radeon_domain_from_heap(heap);
539 enum radeon_bo_flag flags = radeon_flags_from_heap(heap);
540 uint32_t base_id;
541
542 if (!slab)
543 return NULL;
544
545 unsigned slab_size = 1 << AMDGPU_SLAB_BO_SIZE_LOG2;
546 slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(&ws->base,
547 slab_size, slab_size,
548 domains, flags));
549 if (!slab->buffer)
550 goto fail;
551
552 assert(slab->buffer->bo);
553
554 slab->base.num_entries = slab->buffer->base.size / entry_size;
555 slab->base.num_free = slab->base.num_entries;
556 slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
557 if (!slab->entries)
558 goto fail_buffer;
559
560 LIST_INITHEAD(&slab->base.free);
561
562 base_id = __sync_fetch_and_add(&ws->next_bo_unique_id, slab->base.num_entries);
563
564 for (unsigned i = 0; i < slab->base.num_entries; ++i) {
565 struct amdgpu_winsys_bo *bo = &slab->entries[i];
566
567 bo->base.alignment = entry_size;
568 bo->base.usage = slab->buffer->base.usage;
569 bo->base.size = entry_size;
570 bo->base.vtbl = &amdgpu_winsys_bo_slab_vtbl;
571 bo->ws = ws;
572 bo->va = slab->buffer->va + i * entry_size;
573 bo->initial_domain = domains;
574 bo->unique_id = base_id + i;
575 bo->u.slab.entry.slab = &slab->base;
576 bo->u.slab.entry.group_index = group_index;
577 bo->u.slab.real = slab->buffer;
578
579 LIST_ADDTAIL(&bo->u.slab.entry.head, &slab->base.free);
580 }
581
582 return &slab->base;
583
584 fail_buffer:
585 amdgpu_winsys_bo_reference(&slab->buffer, NULL);
586 fail:
587 FREE(slab);
588 return NULL;
589 }
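
/* Worked example for amdgpu_bo_slab_alloc() above: one backing buffer of
 * 1 << AMDGPU_SLAB_BO_SIZE_LOG2 bytes is carved into equally sized entries.
 * Assuming, for illustration only, a 128 KiB slab and entry_size == 4096,
 * num_entries == 128 * 1024 / 4096 == 32, and entry i simply reuses a slice
 * of the parent's VA range:
 *
 *    bo->va = slab->buffer->va + i * entry_size;    // no new GPU mapping
 *
 * Slab entries share the parent's GEM handle (their bo->bo stays NULL), which
 * is why amdgpu_bo_get_handle() refuses to export them.
 */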
590
591 void amdgpu_bo_slab_free(void *priv, struct pb_slab *pslab)
592 {
593 struct amdgpu_slab *slab = amdgpu_slab(pslab);
594
595 for (unsigned i = 0; i < slab->base.num_entries; ++i)
596 amdgpu_bo_remove_fences(&slab->entries[i]);
597
598 FREE(slab->entries);
599 amdgpu_winsys_bo_reference(&slab->buffer, NULL);
600 FREE(slab);
601 }
602
603 #if DEBUG_SPARSE_COMMITS
604 static void
605 sparse_dump(struct amdgpu_winsys_bo *bo, const char *func)
606 {
607 fprintf(stderr, "%s: %p (size=%"PRIu64", num_va_pages=%u) @ %s\n"
608 "Commitments:\n",
609 __func__, bo, bo->base.size, bo->u.sparse.num_va_pages, func);
610
611 struct amdgpu_sparse_backing *span_backing = NULL;
612 uint32_t span_first_backing_page = 0;
613 uint32_t span_first_va_page = 0;
614 uint32_t va_page = 0;
615
616 for (;;) {
617 struct amdgpu_sparse_backing *backing = 0;
618 uint32_t backing_page = 0;
619
620 if (va_page < bo->u.sparse.num_va_pages) {
621 backing = bo->u.sparse.commitments[va_page].backing;
622 backing_page = bo->u.sparse.commitments[va_page].page;
623 }
624
625 if (span_backing &&
626 (backing != span_backing ||
627 backing_page != span_first_backing_page + (va_page - span_first_va_page))) {
628 fprintf(stderr, " %u..%u: backing=%p:%u..%u\n",
629 span_first_va_page, va_page - 1, span_backing,
630 span_first_backing_page,
631 span_first_backing_page + (va_page - span_first_va_page) - 1);
632
633 span_backing = NULL;
634 }
635
636 if (va_page >= bo->u.sparse.num_va_pages)
637 break;
638
639 if (backing && !span_backing) {
640 span_backing = backing;
641 span_first_backing_page = backing_page;
642 span_first_va_page = va_page;
643 }
644
645 va_page++;
646 }
647
648 fprintf(stderr, "Backing:\n");
649
650 list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
651 fprintf(stderr, " %p (size=%"PRIu64")\n", backing, backing->bo->base.size);
652 for (unsigned i = 0; i < backing->num_chunks; ++i)
653 fprintf(stderr, " %u..%u\n", backing->chunks[i].begin, backing->chunks[i].end);
654 }
655 }
656 #endif
657
658 /*
659 * Attempt to allocate the given number of backing pages. Fewer pages may be
660 * allocated (depending on the fragmentation of existing backing buffers),
661 * which will be reflected by a change to *pnum_pages.
662 */
663 static struct amdgpu_sparse_backing *
664 sparse_backing_alloc(struct amdgpu_winsys_bo *bo, uint32_t *pstart_page, uint32_t *pnum_pages)
665 {
666 struct amdgpu_sparse_backing *best_backing;
667 unsigned best_idx;
668 uint32_t best_num_pages;
669
670 best_backing = NULL;
671 best_idx = 0;
672 best_num_pages = 0;
673
674 /* This is a very simple and inefficient best-fit algorithm. */
675 list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
676 for (unsigned idx = 0; idx < backing->num_chunks; ++idx) {
677 uint32_t cur_num_pages = backing->chunks[idx].end - backing->chunks[idx].begin;
678 if ((best_num_pages < *pnum_pages && cur_num_pages > best_num_pages) ||
679 (best_num_pages > *pnum_pages && cur_num_pages < best_num_pages)) {
680 best_backing = backing;
681 best_idx = idx;
682 best_num_pages = cur_num_pages;
683 }
684 }
685 }
686
687 /* Allocate a new backing buffer if necessary. */
688 if (!best_backing) {
689 struct pb_buffer *buf;
690 uint64_t size;
691 uint32_t pages;
692
693 best_backing = CALLOC_STRUCT(amdgpu_sparse_backing);
694 if (!best_backing)
695 return NULL;
696
697 best_backing->max_chunks = 4;
698 best_backing->chunks = CALLOC(best_backing->max_chunks,
699 sizeof(*best_backing->chunks));
700 if (!best_backing->chunks) {
701 FREE(best_backing);
702 return NULL;
703 }
704
705 assert(bo->u.sparse.num_backing_pages < DIV_ROUND_UP(bo->base.size, RADEON_SPARSE_PAGE_SIZE));
706
707 size = MIN3(bo->base.size / 16,
708 8 * 1024 * 1024,
709 bo->base.size - (uint64_t)bo->u.sparse.num_backing_pages * RADEON_SPARSE_PAGE_SIZE);
710 size = MAX2(size, RADEON_SPARSE_PAGE_SIZE);
711
712 buf = amdgpu_bo_create(&bo->ws->base, size, RADEON_SPARSE_PAGE_SIZE,
713 bo->initial_domain,
714 bo->u.sparse.flags | RADEON_FLAG_NO_SUBALLOC);
715 if (!buf) {
716 FREE(best_backing->chunks);
717 FREE(best_backing);
718 return NULL;
719 }
720
721 /* We might have gotten a bigger buffer than requested via caching. */
722 pages = buf->size / RADEON_SPARSE_PAGE_SIZE;
723
724 best_backing->bo = amdgpu_winsys_bo(buf);
725 best_backing->num_chunks = 1;
726 best_backing->chunks[0].begin = 0;
727 best_backing->chunks[0].end = pages;
728
729 list_add(&best_backing->list, &bo->u.sparse.backing);
730 bo->u.sparse.num_backing_pages += pages;
731
732 best_idx = 0;
733 best_num_pages = pages;
734 }
735
736 *pnum_pages = MIN2(*pnum_pages, best_num_pages);
737 *pstart_page = best_backing->chunks[best_idx].begin;
738 best_backing->chunks[best_idx].begin += *pnum_pages;
739
740 if (best_backing->chunks[best_idx].begin >= best_backing->chunks[best_idx].end) {
741 memmove(&best_backing->chunks[best_idx], &best_backing->chunks[best_idx + 1],
742 sizeof(*best_backing->chunks) * (best_backing->num_chunks - best_idx - 1));
743 best_backing->num_chunks--;
744 }
745
746 return best_backing;
747 }
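
/* Worked example for sparse_backing_alloc() above. Suppose a backing buffer
 * has free chunks [0..4) and [10..16) (in pages). A request for 4 pages picks
 * the [0..4) chunk (it already satisfies the request), returns
 * *pstart_page == 0, and the chunk is fully consumed, so it is dropped from
 * the chunk array. A request for 8 pages instead settles on [10..16), returns
 * only 6 pages via *pnum_pages, and that chunk is likewise consumed; the
 * caller is expected to retry for the remaining pages.
 */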
748
749 static void
750 sparse_free_backing_buffer(struct amdgpu_winsys_bo *bo,
751 struct amdgpu_sparse_backing *backing)
752 {
753 struct amdgpu_winsys *ws = backing->bo->ws;
754
755 bo->u.sparse.num_backing_pages -= backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE;
756
757 simple_mtx_lock(&ws->bo_fence_lock);
758 amdgpu_add_fences(backing->bo, bo->num_fences, bo->fences);
759 simple_mtx_unlock(&ws->bo_fence_lock);
760
761 list_del(&backing->list);
762 amdgpu_winsys_bo_reference(&backing->bo, NULL);
763 FREE(backing->chunks);
764 FREE(backing);
765 }
766
767 /*
768 * Return a range of pages from the given backing buffer back into the
769 * free structure.
770 */
771 static bool
772 sparse_backing_free(struct amdgpu_winsys_bo *bo,
773 struct amdgpu_sparse_backing *backing,
774 uint32_t start_page, uint32_t num_pages)
775 {
776 uint32_t end_page = start_page + num_pages;
777 unsigned low = 0;
778 unsigned high = backing->num_chunks;
779
780 /* Find the first chunk with begin >= start_page. */
781 while (low < high) {
782 unsigned mid = low + (high - low) / 2;
783
784 if (backing->chunks[mid].begin >= start_page)
785 high = mid;
786 else
787 low = mid + 1;
788 }
789
790 assert(low >= backing->num_chunks || end_page <= backing->chunks[low].begin);
791 assert(low == 0 || backing->chunks[low - 1].end <= start_page);
792
793 if (low > 0 && backing->chunks[low - 1].end == start_page) {
794 backing->chunks[low - 1].end = end_page;
795
796 if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
797 backing->chunks[low - 1].end = backing->chunks[low].end;
798 memmove(&backing->chunks[low], &backing->chunks[low + 1],
799 sizeof(*backing->chunks) * (backing->num_chunks - low - 1));
800 backing->num_chunks--;
801 }
802 } else if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
803 backing->chunks[low].begin = start_page;
804 } else {
805 if (backing->num_chunks >= backing->max_chunks) {
806 unsigned new_max_chunks = 2 * backing->max_chunks;
807 struct amdgpu_sparse_backing_chunk *new_chunks =
808 REALLOC(backing->chunks,
809 sizeof(*backing->chunks) * backing->max_chunks,
810 sizeof(*backing->chunks) * new_max_chunks);
811 if (!new_chunks)
812 return false;
813
814 backing->max_chunks = new_max_chunks;
815 backing->chunks = new_chunks;
816 }
817
818 memmove(&backing->chunks[low + 1], &backing->chunks[low],
819 sizeof(*backing->chunks) * (backing->num_chunks - low));
820 backing->chunks[low].begin = start_page;
821 backing->chunks[low].end = end_page;
822 backing->num_chunks++;
823 }
824
825 if (backing->num_chunks == 1 && backing->chunks[0].begin == 0 &&
826 backing->chunks[0].end == backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE)
827 sparse_free_backing_buffer(bo, backing);
828
829 return true;
830 }
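
/* Worked example for sparse_backing_free() above. With free chunks [0..4) and
 * [10..16), returning pages [4..6) extends the first chunk to [0..6);
 * returning [8..10) grows the second chunk downward to [8..16); returning
 * [6..8) after both of those bridges the gap and the two chunks merge into a
 * single [0..16). If no neighbour touches the freed range, a new chunk is
 * inserted at the position found by the binary search, growing the array via
 * REALLOC when needed. Once a backing buffer is entirely free (one chunk
 * covering all of its pages), it is released by sparse_free_backing_buffer().
 */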
831
832 static void amdgpu_bo_sparse_destroy(struct pb_buffer *_buf)
833 {
834 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
835 int r;
836
837 assert(!bo->bo && bo->sparse);
838
839 r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0,
840 (uint64_t)bo->u.sparse.num_va_pages * RADEON_SPARSE_PAGE_SIZE,
841 bo->va, 0, AMDGPU_VA_OP_CLEAR);
842 if (r) {
843 fprintf(stderr, "amdgpu: clearing PRT VA region on destroy failed (%d)\n", r);
844 }
845
846 while (!list_empty(&bo->u.sparse.backing)) {
847 struct amdgpu_sparse_backing *dummy = NULL;
848 sparse_free_backing_buffer(bo,
849 container_of(bo->u.sparse.backing.next,
850 dummy, list));
851 }
852
853 amdgpu_va_range_free(bo->u.sparse.va_handle);
854 simple_mtx_destroy(&bo->u.sparse.commit_lock);
855 FREE(bo->u.sparse.commitments);
856 FREE(bo);
857 }
858
859 static const struct pb_vtbl amdgpu_winsys_bo_sparse_vtbl = {
860 amdgpu_bo_sparse_destroy
861 /* other functions are never called */
862 };
863
864 static struct pb_buffer *
865 amdgpu_bo_sparse_create(struct amdgpu_winsys *ws, uint64_t size,
866 enum radeon_bo_domain domain,
867 enum radeon_bo_flag flags)
868 {
869 struct amdgpu_winsys_bo *bo;
870 uint64_t map_size;
871 uint64_t va_gap_size;
872 int r;
873
874 /* We use 32-bit page numbers; refuse to attempt allocating sparse buffers
875 * that exceed this limit. This is not really a restriction: we don't have
876 * that much virtual address space anyway.
877 */
878 if (size > (uint64_t)INT32_MAX * RADEON_SPARSE_PAGE_SIZE)
879 return NULL;
880
881 bo = CALLOC_STRUCT(amdgpu_winsys_bo);
882 if (!bo)
883 return NULL;
884
885 pipe_reference_init(&bo->base.reference, 1);
886 bo->base.alignment = RADEON_SPARSE_PAGE_SIZE;
887 bo->base.size = size;
888 bo->base.vtbl = &amdgpu_winsys_bo_sparse_vtbl;
889 bo->ws = ws;
890 bo->initial_domain = domain;
891 bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
892 bo->sparse = true;
893 bo->u.sparse.flags = flags & ~RADEON_FLAG_SPARSE;
894
895 bo->u.sparse.num_va_pages = DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
896 bo->u.sparse.commitments = CALLOC(bo->u.sparse.num_va_pages,
897 sizeof(*bo->u.sparse.commitments));
898 if (!bo->u.sparse.commitments)
899 goto error_alloc_commitments;
900
901 simple_mtx_init(&bo->u.sparse.commit_lock, mtx_plain);
902 LIST_INITHEAD(&bo->u.sparse.backing);
903
904 /* For simplicity, we always map a multiple of the page size. */
905 map_size = align64(size, RADEON_SPARSE_PAGE_SIZE);
906 va_gap_size = ws->check_vm ? 4 * RADEON_SPARSE_PAGE_SIZE : 0;
907 r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
908 map_size + va_gap_size, RADEON_SPARSE_PAGE_SIZE,
909 0, &bo->va, &bo->u.sparse.va_handle,
910 AMDGPU_VA_RANGE_HIGH);
911 if (r)
912 goto error_va_alloc;
913
914 r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0, size, bo->va,
915 AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_MAP);
916 if (r)
917 goto error_va_map;
918
919 return &bo->base;
920
921 error_va_map:
922 amdgpu_va_range_free(bo->u.sparse.va_handle);
923 error_va_alloc:
924 simple_mtx_destroy(&bo->u.sparse.commit_lock);
925 FREE(bo->u.sparse.commitments);
926 error_alloc_commitments:
927 FREE(bo);
928 return NULL;
929 }
930
931 static bool
932 amdgpu_bo_sparse_commit(struct pb_buffer *buf, uint64_t offset, uint64_t size,
933 bool commit)
934 {
935 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buf);
936 struct amdgpu_sparse_commitment *comm;
937 uint32_t va_page, end_va_page;
938 bool ok = true;
939 int r;
940
941 assert(bo->sparse);
942 assert(offset % RADEON_SPARSE_PAGE_SIZE == 0);
943 assert(offset <= bo->base.size);
944 assert(size <= bo->base.size - offset);
945 assert(size % RADEON_SPARSE_PAGE_SIZE == 0 || offset + size == bo->base.size);
946
947 comm = bo->u.sparse.commitments;
948 va_page = offset / RADEON_SPARSE_PAGE_SIZE;
949 end_va_page = va_page + DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
950
951 simple_mtx_lock(&bo->u.sparse.commit_lock);
952
953 #if DEBUG_SPARSE_COMMITS
954 sparse_dump(bo, __func__);
955 #endif
956
957 if (commit) {
958 while (va_page < end_va_page) {
959 uint32_t span_va_page;
960
961 /* Skip pages that are already committed. */
962 if (comm[va_page].backing) {
963 va_page++;
964 continue;
965 }
966
967 /* Determine length of uncommitted span. */
968 span_va_page = va_page;
969 while (va_page < end_va_page && !comm[va_page].backing)
970 va_page++;
971
972 /* Fill the uncommitted span with chunks of backing memory. */
973 while (span_va_page < va_page) {
974 struct amdgpu_sparse_backing *backing;
975 uint32_t backing_start, backing_size;
976
977 backing_size = va_page - span_va_page;
978 backing = sparse_backing_alloc(bo, &backing_start, &backing_size);
979 if (!backing) {
980 ok = false;
981 goto out;
982 }
983
984 r = amdgpu_bo_va_op_raw(bo->ws->dev, backing->bo->bo,
985 (uint64_t)backing_start * RADEON_SPARSE_PAGE_SIZE,
986 (uint64_t)backing_size * RADEON_SPARSE_PAGE_SIZE,
987 bo->va + (uint64_t)span_va_page * RADEON_SPARSE_PAGE_SIZE,
988 AMDGPU_VM_PAGE_READABLE |
989 AMDGPU_VM_PAGE_WRITEABLE |
990 AMDGPU_VM_PAGE_EXECUTABLE,
991 AMDGPU_VA_OP_REPLACE);
992 if (r) {
993 ok = sparse_backing_free(bo, backing, backing_start, backing_size);
994 assert(ok && "sufficient memory should already be allocated");
995
996 ok = false;
997 goto out;
998 }
999
1000 while (backing_size) {
1001 comm[span_va_page].backing = backing;
1002 comm[span_va_page].page = backing_start;
1003 span_va_page++;
1004 backing_start++;
1005 backing_size--;
1006 }
1007 }
1008 }
1009 } else {
1010 r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0,
1011 (uint64_t)(end_va_page - va_page) * RADEON_SPARSE_PAGE_SIZE,
1012 bo->va + (uint64_t)va_page * RADEON_SPARSE_PAGE_SIZE,
1013 AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_REPLACE);
1014 if (r) {
1015 ok = false;
1016 goto out;
1017 }
1018
1019 while (va_page < end_va_page) {
1020 struct amdgpu_sparse_backing *backing;
1021 uint32_t backing_start;
1022 uint32_t span_pages;
1023
1024 /* Skip pages that are already uncommitted. */
1025 if (!comm[va_page].backing) {
1026 va_page++;
1027 continue;
1028 }
1029
1030 /* Group contiguous spans of pages. */
1031 backing = comm[va_page].backing;
1032 backing_start = comm[va_page].page;
1033 comm[va_page].backing = NULL;
1034
1035 span_pages = 1;
1036 va_page++;
1037
1038 while (va_page < end_va_page &&
1039 comm[va_page].backing == backing &&
1040 comm[va_page].page == backing_start + span_pages) {
1041 comm[va_page].backing = NULL;
1042 va_page++;
1043 span_pages++;
1044 }
1045
1046 if (!sparse_backing_free(bo, backing, backing_start, span_pages)) {
1047 /* Couldn't allocate tracking data structures, so we have to leak */
1048 fprintf(stderr, "amdgpu: leaking PRT backing memory\n");
1049 ok = false;
1050 }
1051 }
1052 }
1053 out:
1054
1055 simple_mtx_unlock(&bo->u.sparse.commit_lock);
1056
1057 return ok;
1058 }
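
/* Usage sketch for the sparse path: a buffer created with RADEON_FLAG_SPARSE
 * only reserves a PRT VA range; physical pages are attached later through the
 * buffer_commit hook (amdgpu_bo_sparse_commit above). "rws" is assumed to be
 * the caller's winsys:
 *
 *    struct pb_buffer *buf =
 *       rws->buffer_create(rws, 256 * 1024 * 1024, RADEON_SPARSE_PAGE_SIZE,
 *                          RADEON_DOMAIN_VRAM,
 *                          RADEON_FLAG_SPARSE | RADEON_FLAG_NO_CPU_ACCESS |
 *                          RADEON_FLAG_GTT_WC);
 *    // back the first 16 pages with real memory, then release them again
 *    rws->buffer_commit(buf, 0, 16 * RADEON_SPARSE_PAGE_SIZE, true);
 *    rws->buffer_commit(buf, 0, 16 * RADEON_SPARSE_PAGE_SIZE, false);
 */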
1059
1060 static unsigned eg_tile_split(unsigned tile_split)
1061 {
1062 switch (tile_split) {
1063 case 0: tile_split = 64; break;
1064 case 1: tile_split = 128; break;
1065 case 2: tile_split = 256; break;
1066 case 3: tile_split = 512; break;
1067 default:
1068 case 4: tile_split = 1024; break;
1069 case 5: tile_split = 2048; break;
1070 case 6: tile_split = 4096; break;
1071 }
1072 return tile_split;
1073 }
1074
1075 static unsigned eg_tile_split_rev(unsigned eg_tile_split)
1076 {
1077 switch (eg_tile_split) {
1078 case 64: return 0;
1079 case 128: return 1;
1080 case 256: return 2;
1081 case 512: return 3;
1082 default:
1083 case 1024: return 4;
1084 case 2048: return 5;
1085 case 4096: return 6;
1086 }
1087 }
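
/* eg_tile_split()/eg_tile_split_rev() translate between the hardware
 * TILE_SPLIT field (0..6) and the split size in bytes (64..4096), e.g.
 * eg_tile_split(3) == 512 and eg_tile_split_rev(512) == 3. Out-of-range
 * inputs fall back to 1024 and 4 respectively via the default cases.
 */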
1088
1089 static void amdgpu_buffer_get_metadata(struct pb_buffer *_buf,
1090 struct radeon_bo_metadata *md)
1091 {
1092 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
1093 struct amdgpu_bo_info info = {0};
1094 uint64_t tiling_flags;
1095 int r;
1096
1097 assert(bo->bo && "must not be called for slab entries");
1098
1099 r = amdgpu_bo_query_info(bo->bo, &info);
1100 if (r)
1101 return;
1102
1103 tiling_flags = info.metadata.tiling_info;
1104
1105 if (bo->ws->info.chip_class >= GFX9) {
1106 md->u.gfx9.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
1107 } else {
1108 md->u.legacy.microtile = RADEON_LAYOUT_LINEAR;
1109 md->u.legacy.macrotile = RADEON_LAYOUT_LINEAR;
1110
1111 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */
1112 md->u.legacy.macrotile = RADEON_LAYOUT_TILED;
1113 else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
1114 md->u.legacy.microtile = RADEON_LAYOUT_TILED;
1115
1116 md->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1117 md->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1118 md->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1119 md->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
1120 md->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1121 md->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1122 md->u.legacy.scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
1123 }
1124
1125 md->size_metadata = info.metadata.size_metadata;
1126 memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
1127 }
1128
1129 static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
1130 struct radeon_bo_metadata *md)
1131 {
1132 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
1133 struct amdgpu_bo_metadata metadata = {0};
1134 uint64_t tiling_flags = 0;
1135
1136 assert(bo->bo && "must not be called for slab entries");
1137
1138 if (bo->ws->info.chip_class >= GFX9) {
1139 tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, md->u.gfx9.swizzle_mode);
1140 } else {
1141 if (md->u.legacy.macrotile == RADEON_LAYOUT_TILED)
1142 tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
1143 else if (md->u.legacy.microtile == RADEON_LAYOUT_TILED)
1144 tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
1145 else
1146 tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */
1147
1148 tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, md->u.legacy.pipe_config);
1149 tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(md->u.legacy.bankw));
1150 tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(md->u.legacy.bankh));
1151 if (md->u.legacy.tile_split)
1152 tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(md->u.legacy.tile_split));
1153 tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(md->u.legacy.mtilea));
1154 tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(md->u.legacy.num_banks)-1);
1155
1156 if (md->u.legacy.scanout)
1157 tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
1158 else
1159 tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
1160 }
1161
1162 metadata.tiling_info = tiling_flags;
1163 metadata.size_metadata = md->size_metadata;
1164 memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));
1165
1166 amdgpu_bo_set_metadata(bo->bo, &metadata);
1167 }
1168
1169 static struct pb_buffer *
1170 amdgpu_bo_create(struct radeon_winsys *rws,
1171 uint64_t size,
1172 unsigned alignment,
1173 enum radeon_bo_domain domain,
1174 enum radeon_bo_flag flags)
1175 {
1176 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1177 struct amdgpu_winsys_bo *bo;
1178 int heap = -1;
1179
1180 /* VRAM implies WC. This is not optional. */
1181 assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
1182
1183 /* NO_CPU_ACCESS is valid with VRAM only. */
1184 assert(domain == RADEON_DOMAIN_VRAM || !(flags & RADEON_FLAG_NO_CPU_ACCESS));
1185
1186 /* Sparse buffers must have NO_CPU_ACCESS set. */
1187 assert(!(flags & RADEON_FLAG_SPARSE) || flags & RADEON_FLAG_NO_CPU_ACCESS);
1188
1189 /* Sub-allocate small buffers from slabs. */
1190 if (!(flags & (RADEON_FLAG_NO_SUBALLOC | RADEON_FLAG_SPARSE)) &&
1191 size <= (1 << AMDGPU_SLAB_MAX_SIZE_LOG2) &&
1192 alignment <= MAX2(1 << AMDGPU_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
1193 struct pb_slab_entry *entry;
1194 int heap = radeon_get_heap_index(domain, flags);
1195
1196 if (heap < 0 || heap >= RADEON_MAX_SLAB_HEAPS)
1197 goto no_slab;
1198
1199 entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
1200 if (!entry) {
1201 /* Clear the cache and try again. */
1202 pb_cache_release_all_buffers(&ws->bo_cache);
1203
1204 entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
1205 }
1206 if (!entry)
1207 return NULL;
1208
1209 bo = NULL;
1210 bo = container_of(entry, bo, u.slab.entry);
1211
1212 pipe_reference_init(&bo->base.reference, 1);
1213
1214 return &bo->base;
1215 }
1216 no_slab:
1217
1218 if (flags & RADEON_FLAG_SPARSE) {
1219 assert(RADEON_SPARSE_PAGE_SIZE % alignment == 0);
1220
1221 return amdgpu_bo_sparse_create(ws, size, domain, flags);
1222 }
1223
1224 /* This flag is irrelevant for the cache. */
1225 flags &= ~RADEON_FLAG_NO_SUBALLOC;
1226
1227 /* Align size to page size. This is the minimum alignment for normal
1228 * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
1229 * like constant/uniform buffers, can benefit from better and more reuse.
1230 */
1231 size = align64(size, ws->info.gart_page_size);
1232 alignment = align(alignment, ws->info.gart_page_size);
1233
1234 bool use_reusable_pool = flags & RADEON_FLAG_NO_INTERPROCESS_SHARING;
1235
1236 if (use_reusable_pool) {
1237 heap = radeon_get_heap_index(domain, flags);
1238 assert(heap >= 0 && heap < RADEON_MAX_CACHED_HEAPS);
1239
1240 /* Get a buffer from the cache. */
1241 bo = (struct amdgpu_winsys_bo*)
1242 pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, 0, heap);
1243 if (bo)
1244 return &bo->base;
1245 }
1246
1247 /* Create a new one. */
1248 bo = amdgpu_create_bo(ws, size, alignment, domain, flags, heap);
1249 if (!bo) {
1250 /* Clear the cache and try again. */
1251 pb_slabs_reclaim(&ws->bo_slabs);
1252 pb_cache_release_all_buffers(&ws->bo_cache);
1253 bo = amdgpu_create_bo(ws, size, alignment, domain, flags, heap);
1254 if (!bo)
1255 return NULL;
1256 }
1257
1258 bo->u.real.use_reusable_pool = use_reusable_pool;
1259 return &bo->base;
1260 }
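
/* Usage sketch for amdgpu_bo_create() above (exposed as buffer_create).
 * Small non-shared requests are sub-allocated from slabs, sparse requests go
 * through amdgpu_bo_sparse_create(), and everything else is served from the
 * reusable pb_cache or a fresh amdgpu_create_bo(). "rws" is assumed to come
 * from the caller:
 *
 *    struct pb_buffer *vbo =
 *       rws->buffer_create(rws, 64 * 1024, 4096, RADEON_DOMAIN_VRAM,
 *                          RADEON_FLAG_GTT_WC |
 *                          RADEON_FLAG_NO_INTERPROCESS_SHARING);
 *    uint64_t gpu_va = rws->buffer_get_virtual_address(vbo);
 */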
1261
1262 static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
1263 struct winsys_handle *whandle,
1264 unsigned *stride,
1265 unsigned *offset)
1266 {
1267 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1268 struct amdgpu_winsys_bo *bo = NULL;
1269 enum amdgpu_bo_handle_type type;
1270 struct amdgpu_bo_import_result result = {0};
1271 uint64_t va;
1272 amdgpu_va_handle va_handle = NULL;
1273 struct amdgpu_bo_info info = {0};
1274 enum radeon_bo_domain initial = 0;
1275 int r;
1276
1277 switch (whandle->type) {
1278 case WINSYS_HANDLE_TYPE_SHARED:
1279 type = amdgpu_bo_handle_type_gem_flink_name;
1280 break;
1281 case WINSYS_HANDLE_TYPE_FD:
1282 type = amdgpu_bo_handle_type_dma_buf_fd;
1283 break;
1284 default:
1285 return NULL;
1286 }
1287
1288 r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
1289 if (r)
1290 return NULL;
1291
1292 /* Get initial domains. */
1293 r = amdgpu_bo_query_info(result.buf_handle, &info);
1294 if (r)
1295 goto error;
1296
1297 r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
1298 result.alloc_size, 1 << 20, 0, &va, &va_handle,
1299 AMDGPU_VA_RANGE_HIGH);
1300 if (r)
1301 goto error;
1302
1303 bo = CALLOC_STRUCT(amdgpu_winsys_bo);
1304 if (!bo)
1305 goto error;
1306
1307 r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP);
1308 if (r)
1309 goto error;
1310
1311 if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
1312 initial |= RADEON_DOMAIN_VRAM;
1313 if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
1314 initial |= RADEON_DOMAIN_GTT;
1315
1316 /* Initialize the structure. */
1317 pipe_reference_init(&bo->base.reference, 1);
1318 bo->base.alignment = info.phys_alignment;
1319 bo->bo = result.buf_handle;
1320 bo->base.size = result.alloc_size;
1321 bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
1322 bo->ws = ws;
1323 bo->va = va;
1324 bo->u.real.va_handle = va_handle;
1325 bo->initial_domain = initial;
1326 bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
1327 bo->is_shared = true;
1328
1329 if (stride)
1330 *stride = whandle->stride;
1331 if (offset)
1332 *offset = whandle->offset;
1333
1334 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
1335 ws->allocated_vram += align64(bo->base.size, ws->info.gart_page_size);
1336 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
1337 ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);
1338
1339 amdgpu_add_buffer_to_global_list(bo);
1340
1341 return &bo->base;
1342
1343 error:
1344 if (bo)
1345 FREE(bo);
1346 if (va_handle)
1347 amdgpu_va_range_free(va_handle);
1348 amdgpu_bo_free(result.buf_handle);
1349 return NULL;
1350 }
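
/* Usage sketch for amdgpu_bo_from_handle() above: importing a dma-buf fd that
 * another process exported. "rws" and "fd" are assumed to be provided by the
 * caller; stride and offset are returned from the handle metadata:
 *
 *    struct winsys_handle whandle = {0};
 *    unsigned stride, offset;
 *    whandle.type = WINSYS_HANDLE_TYPE_FD;
 *    whandle.handle = fd;
 *    struct pb_buffer *buf = rws->buffer_from_handle(rws, &whandle,
 *                                                    &stride, &offset);
 */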
1351
1352 static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
1353 unsigned stride, unsigned offset,
1354 unsigned slice_size,
1355 struct winsys_handle *whandle)
1356 {
1357 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
1358 enum amdgpu_bo_handle_type type;
1359 int r;
1360
1361 /* Don't allow exports of slab entries and sparse buffers. */
1362 if (!bo->bo)
1363 return false;
1364
1365 bo->u.real.use_reusable_pool = false;
1366
1367 switch (whandle->type) {
1368 case WINSYS_HANDLE_TYPE_SHARED:
1369 type = amdgpu_bo_handle_type_gem_flink_name;
1370 break;
1371 case WINSYS_HANDLE_TYPE_FD:
1372 type = amdgpu_bo_handle_type_dma_buf_fd;
1373 break;
1374 case WINSYS_HANDLE_TYPE_KMS:
1375 type = amdgpu_bo_handle_type_kms;
1376 break;
1377 default:
1378 return false;
1379 }
1380
1381 r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
1382 if (r)
1383 return false;
1384
1385 whandle->stride = stride;
1386 whandle->offset = offset;
1387 whandle->offset += slice_size * whandle->layer;
1388 bo->is_shared = true;
1389 return true;
1390 }
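
/* Usage sketch for amdgpu_bo_get_handle() above: exporting a buffer as a
 * dma-buf fd. Exporting disables the reusable pool for the buffer and marks
 * it shared, so later waits go through the kernel. "rws", "buf" and
 * "stride_in_bytes" are assumed to be the caller's; send_fd_to_consumer() is
 * a hypothetical consumer-side helper:
 *
 *    struct winsys_handle whandle = {0};
 *    whandle.type = WINSYS_HANDLE_TYPE_FD;
 *    if (rws->buffer_get_handle(buf, stride_in_bytes, 0, 0, &whandle))
 *       send_fd_to_consumer(whandle.handle);
 */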
1391
1392 static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
1393 void *pointer, uint64_t size)
1394 {
1395 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1396 amdgpu_bo_handle buf_handle;
1397 struct amdgpu_winsys_bo *bo;
1398 uint64_t va;
1399 amdgpu_va_handle va_handle;
1400 /* Avoid failure when the size is not page aligned */
1401 uint64_t aligned_size = align64(size, ws->info.gart_page_size);
1402
1403 bo = CALLOC_STRUCT(amdgpu_winsys_bo);
1404 if (!bo)
1405 return NULL;
1406
1407 if (amdgpu_create_bo_from_user_mem(ws->dev, pointer,
1408 aligned_size, &buf_handle))
1409 goto error;
1410
1411 if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
1412 aligned_size, 1 << 12, 0, &va, &va_handle,
1413 AMDGPU_VA_RANGE_HIGH))
1414 goto error_va_alloc;
1415
1416 if (amdgpu_bo_va_op(buf_handle, 0, aligned_size, va, 0, AMDGPU_VA_OP_MAP))
1417 goto error_va_map;
1418
1419 /* Initialize it. */
1420 pipe_reference_init(&bo->base.reference, 1);
1421 bo->bo = buf_handle;
1422 bo->base.alignment = 0;
1423 bo->base.size = size;
1424 bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
1425 bo->ws = ws;
1426 bo->user_ptr = pointer;
1427 bo->va = va;
1428 bo->u.real.va_handle = va_handle;
1429 bo->initial_domain = RADEON_DOMAIN_GTT;
1430 bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
1431
1432 ws->allocated_gtt += aligned_size;
1433
1434 amdgpu_add_buffer_to_global_list(bo);
1435
1436 return (struct pb_buffer*)bo;
1437
1438 error_va_map:
1439 amdgpu_va_range_free(va_handle);
1440
1441 error_va_alloc:
1442 amdgpu_bo_free(buf_handle);
1443
1444 error:
1445 FREE(bo);
1446 return NULL;
1447 }
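
/* Usage sketch for amdgpu_bo_from_ptr() above: wrapping page-aligned user
 * memory as a GTT buffer. amdgpu_bo_map() then returns the user pointer
 * directly, so no extra CPU mapping is created. "rws" is assumed to come from
 * the caller and "ws_page_size" stands in for the system page size:
 *
 *    size_t size = 1024 * 1024;
 *    void *mem = aligned_alloc(ws_page_size, size);
 *    struct pb_buffer *buf = rws->buffer_from_ptr(rws, mem, size);
 */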
1448
1449 static bool amdgpu_bo_is_user_ptr(struct pb_buffer *buf)
1450 {
1451 return ((struct amdgpu_winsys_bo*)buf)->user_ptr != NULL;
1452 }
1453
1454 static bool amdgpu_bo_is_suballocated(struct pb_buffer *buf)
1455 {
1456 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
1457
1458 return !bo->bo && !bo->sparse;
1459 }
1460
1461 static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
1462 {
1463 return ((struct amdgpu_winsys_bo*)buf)->va;
1464 }
1465
1466 void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
1467 {
1468 ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
1469 ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
1470 ws->base.buffer_map = amdgpu_bo_map;
1471 ws->base.buffer_unmap = amdgpu_bo_unmap;
1472 ws->base.buffer_wait = amdgpu_bo_wait;
1473 ws->base.buffer_create = amdgpu_bo_create;
1474 ws->base.buffer_from_handle = amdgpu_bo_from_handle;
1475 ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
1476 ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
1477 ws->base.buffer_is_suballocated = amdgpu_bo_is_suballocated;
1478 ws->base.buffer_get_handle = amdgpu_bo_get_handle;
1479 ws->base.buffer_commit = amdgpu_bo_sparse_commit;
1480 ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
1481 ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
1482 }