winsys/amdgpu: enable 32-bit VM allocations
mesa.git: src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
1 /*
2 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
3 * Copyright © 2015 Advanced Micro Devices, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
16 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
18 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
25 * of the Software.
26 */
27
28 #include "amdgpu_cs.h"
29
30 #include "util/os_time.h"
31 #include "state_tracker/drm_driver.h"
32 #include <amdgpu_drm.h>
33 #include <xf86drm.h>
34 #include <stdio.h>
35 #include <inttypes.h>
36
37 #ifndef AMDGPU_GEM_CREATE_VM_ALWAYS_VALID
38 #define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
39 #endif
40
41 /* Set to 1 for verbose output showing committed sparse buffer ranges. */
42 #define DEBUG_SPARSE_COMMITS 0
43
44 struct amdgpu_sparse_backing_chunk {
45 uint32_t begin, end;
46 };
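/* Illustrative note (not part of the upstream file): each chunk records a
 * half-open range [begin, end) of *free* pages inside a sparse backing
 * buffer, and the per-buffer chunk array is kept sorted and non-overlapping.
 * For example, a freshly allocated 32-page backing buffer starts as the
 * single chunk {0, 32}; handing out 8 pages from its front leaves {8, 32},
 * and freeing pages 0..7 later merges it back to {0, 32}.
 */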
47
48 static struct pb_buffer *
49 amdgpu_bo_create(struct radeon_winsys *rws,
50 uint64_t size,
51 unsigned alignment,
52 enum radeon_bo_domain domain,
53 enum radeon_bo_flag flags);
54
55 static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
56 enum radeon_bo_usage usage)
57 {
58 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
59 struct amdgpu_winsys *ws = bo->ws;
60 int64_t abs_timeout;
61
62 if (timeout == 0) {
63 if (p_atomic_read(&bo->num_active_ioctls))
64 return false;
65
66 } else {
67 abs_timeout = os_time_get_absolute_timeout(timeout);
68
69 /* Wait if any ioctl is being submitted with this buffer. */
70 if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
71 return false;
72 }
73
74 if (bo->is_shared) {
75 /* We can't use user fences for shared buffers, because user fences
76 * are local to this process only. If we want to wait for all buffer
77 * uses in all processes, we have to use amdgpu_bo_wait_for_idle.
78 */
79 bool buffer_busy = true;
80 int r;
81
82 r = amdgpu_bo_wait_for_idle(bo->bo, timeout, &buffer_busy);
83 if (r)
84 fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__,
85 r);
86 return !buffer_busy;
87 }
88
89 if (timeout == 0) {
90 unsigned idle_fences;
91 bool buffer_idle;
92
93 simple_mtx_lock(&ws->bo_fence_lock);
94
95 for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
96 if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
97 break;
98 }
99
100 /* Release the idle fences to avoid checking them again later. */
101 for (unsigned i = 0; i < idle_fences; ++i)
102 amdgpu_fence_reference(&bo->fences[i], NULL);
103
104 memmove(&bo->fences[0], &bo->fences[idle_fences],
105 (bo->num_fences - idle_fences) * sizeof(*bo->fences));
106 bo->num_fences -= idle_fences;
107
108 buffer_idle = !bo->num_fences;
109 simple_mtx_unlock(&ws->bo_fence_lock);
110
111 return buffer_idle;
112 } else {
113 bool buffer_idle = true;
114
115 simple_mtx_lock(&ws->bo_fence_lock);
116 while (bo->num_fences && buffer_idle) {
117 struct pipe_fence_handle *fence = NULL;
118 bool fence_idle = false;
119
120 amdgpu_fence_reference(&fence, bo->fences[0]);
121
122 /* Wait for the fence. */
123 simple_mtx_unlock(&ws->bo_fence_lock);
124 if (amdgpu_fence_wait(fence, abs_timeout, true))
125 fence_idle = true;
126 else
127 buffer_idle = false;
128 simple_mtx_lock(&ws->bo_fence_lock);
129
130 /* Release an idle fence to avoid checking it again later, keeping in
131 * mind that the fence array may have been modified by other threads.
132 */
133 if (fence_idle && bo->num_fences && bo->fences[0] == fence) {
134 amdgpu_fence_reference(&bo->fences[0], NULL);
135 memmove(&bo->fences[0], &bo->fences[1],
136 (bo->num_fences - 1) * sizeof(*bo->fences));
137 bo->num_fences--;
138 }
139
140 amdgpu_fence_reference(&fence, NULL);
141 }
142 simple_mtx_unlock(&ws->bo_fence_lock);
143
144 return buffer_idle;
145 }
146 }
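/* Illustrative caller-side sketch (not part of this file): buffer_wait with a
 * zero timeout is a non-blocking busy query, while PIPE_TIMEOUT_INFINITE
 * blocks until all fences have signaled.  A hypothetical caller might do:
 *
 *    struct radeon_winsys *ws = ...;   // caller's winsys, placeholder
 *    struct pb_buffer *buf = ...;      // placeholder buffer
 *
 *    if (!ws->buffer_wait(buf, 0, RADEON_USAGE_READWRITE)) {
 *       // Still busy: do other work, or block until idle:
 *       ws->buffer_wait(buf, PIPE_TIMEOUT_INFINITE, RADEON_USAGE_READWRITE);
 *    }
 */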
147
148 static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
149 struct pb_buffer *buf)
150 {
151 return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
152 }
153
154 static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
155 {
156 for (unsigned i = 0; i < bo->num_fences; ++i)
157 amdgpu_fence_reference(&bo->fences[i], NULL);
158
159 FREE(bo->fences);
160 bo->num_fences = 0;
161 bo->max_fences = 0;
162 }
163
164 void amdgpu_bo_destroy(struct pb_buffer *_buf)
165 {
166 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
167
168 assert(bo->bo && "must not be called for slab entries");
169
170 if (bo->ws->debug_all_bos) {
171 simple_mtx_lock(&bo->ws->global_bo_list_lock);
172 LIST_DEL(&bo->u.real.global_list_item);
173 bo->ws->num_buffers--;
174 simple_mtx_unlock(&bo->ws->global_bo_list_lock);
175 }
176
177 amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
178 amdgpu_va_range_free(bo->u.real.va_handle);
179 amdgpu_bo_free(bo->bo);
180
181 amdgpu_bo_remove_fences(bo);
182
183 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
184 bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->info.gart_page_size);
185 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
186 bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->info.gart_page_size);
187
188 if (bo->u.real.map_count >= 1) {
189 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
190 bo->ws->mapped_vram -= bo->base.size;
191 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
192 bo->ws->mapped_gtt -= bo->base.size;
193 bo->ws->num_mapped_buffers--;
194 }
195
196 FREE(bo);
197 }
198
199 static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
200 {
201 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
202
203 assert(bo->bo); /* slab buffers have a separate vtbl */
204
205 if (bo->u.real.use_reusable_pool)
206 pb_cache_add_buffer(&bo->u.real.cache_entry);
207 else
208 amdgpu_bo_destroy(_buf);
209 }
210
211 static void *amdgpu_bo_map(struct pb_buffer *buf,
212 struct radeon_winsys_cs *rcs,
213 enum pipe_transfer_usage usage)
214 {
215 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
216 struct amdgpu_winsys_bo *real;
217 struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
218 int r;
219 void *cpu = NULL;
220 uint64_t offset = 0;
221
222 assert(!bo->sparse);
223
 224    /* For a synchronized mapping, flush the CS if needed and then wait. */
225 if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
226 /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
227 if (usage & PIPE_TRANSFER_DONTBLOCK) {
228 if (!(usage & PIPE_TRANSFER_WRITE)) {
229 /* Mapping for read.
230 *
231 * Since we are mapping for read, we don't need to wait
232 * if the GPU is using the buffer for read too
233 * (neither one is changing it).
234 *
235 * Only check whether the buffer is being used for write. */
236 if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
237 RADEON_USAGE_WRITE)) {
238 cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
239 return NULL;
240 }
241
242 if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
243 RADEON_USAGE_WRITE)) {
244 return NULL;
245 }
246 } else {
247 if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
248 cs->flush_cs(cs->flush_data, PIPE_FLUSH_ASYNC, NULL);
249 return NULL;
250 }
251
252 if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
253 RADEON_USAGE_READWRITE)) {
254 return NULL;
255 }
256 }
257 } else {
258 uint64_t time = os_time_get_nano();
259
260 if (!(usage & PIPE_TRANSFER_WRITE)) {
261 /* Mapping for read.
262 *
263 * Since we are mapping for read, we don't need to wait
264 * if the GPU is using the buffer for read too
265 * (neither one is changing it).
266 *
267 * Only check whether the buffer is being used for write. */
268 if (cs) {
269 if (amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
270 RADEON_USAGE_WRITE)) {
271 cs->flush_cs(cs->flush_data, 0, NULL);
272 } else {
273 /* Try to avoid busy-waiting in amdgpu_bo_wait. */
274 if (p_atomic_read(&bo->num_active_ioctls))
275 amdgpu_cs_sync_flush(rcs);
276 }
277 }
278
279 amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
280 RADEON_USAGE_WRITE);
281 } else {
282 /* Mapping for write. */
283 if (cs) {
284 if (amdgpu_bo_is_referenced_by_cs(cs, bo)) {
285 cs->flush_cs(cs->flush_data, 0, NULL);
286 } else {
287 /* Try to avoid busy-waiting in amdgpu_bo_wait. */
288 if (p_atomic_read(&bo->num_active_ioctls))
289 amdgpu_cs_sync_flush(rcs);
290 }
291 }
292
293 amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
294 RADEON_USAGE_READWRITE);
295 }
296
297 bo->ws->buffer_wait_time += os_time_get_nano() - time;
298 }
299 }
300
301 /* If the buffer is created from user memory, return the user pointer. */
302 if (bo->user_ptr)
303 return bo->user_ptr;
304
305 if (bo->bo) {
306 real = bo;
307 } else {
308 real = bo->u.slab.real;
309 offset = bo->va - real->va;
310 }
311
312 r = amdgpu_bo_cpu_map(real->bo, &cpu);
313 if (r) {
314 /* Clear the cache and try again. */
315 pb_cache_release_all_buffers(&real->ws->bo_cache);
316 r = amdgpu_bo_cpu_map(real->bo, &cpu);
317 if (r)
318 return NULL;
319 }
320
321 if (p_atomic_inc_return(&real->u.real.map_count) == 1) {
322 if (real->initial_domain & RADEON_DOMAIN_VRAM)
323 real->ws->mapped_vram += real->base.size;
324 else if (real->initial_domain & RADEON_DOMAIN_GTT)
325 real->ws->mapped_gtt += real->base.size;
326 real->ws->num_mapped_buffers++;
327 }
328 return (uint8_t*)cpu + offset;
329 }
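/* Illustrative caller-side sketch (not part of this file): a write mapping
 * that must not stall can pass PIPE_TRANSFER_DONTBLOCK, in which case the
 * code above flushes the CS asynchronously and returns NULL instead of
 * waiting, so the caller needs a fallback.  "data" and "size" below are
 * placeholders.
 *
 *    void *ptr = ws->buffer_map(buf, rcs, PIPE_TRANSFER_WRITE |
 *                                         PIPE_TRANSFER_DONTBLOCK);
 *    if (!ptr) {
 *       // Busy: either stage through another buffer or map synchronously.
 *       ptr = ws->buffer_map(buf, rcs, PIPE_TRANSFER_WRITE);
 *    }
 *    memcpy(ptr, data, size);
 *    ws->buffer_unmap(buf);
 */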
330
331 static void amdgpu_bo_unmap(struct pb_buffer *buf)
332 {
333 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
334 struct amdgpu_winsys_bo *real;
335
336 assert(!bo->sparse);
337
338 if (bo->user_ptr)
339 return;
340
341 real = bo->bo ? bo : bo->u.slab.real;
342
343 if (p_atomic_dec_zero(&real->u.real.map_count)) {
344 if (real->initial_domain & RADEON_DOMAIN_VRAM)
345 real->ws->mapped_vram -= real->base.size;
346 else if (real->initial_domain & RADEON_DOMAIN_GTT)
347 real->ws->mapped_gtt -= real->base.size;
348 real->ws->num_mapped_buffers--;
349 }
350
351 amdgpu_bo_cpu_unmap(real->bo);
352 }
353
354 static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
355 amdgpu_bo_destroy_or_cache
356 /* other functions are never called */
357 };
358
359 static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
360 {
361 struct amdgpu_winsys *ws = bo->ws;
362
363 assert(bo->bo);
364
365 if (ws->debug_all_bos) {
366 simple_mtx_lock(&ws->global_bo_list_lock);
367 LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
368 ws->num_buffers++;
369 simple_mtx_unlock(&ws->global_bo_list_lock);
370 }
371 }
372
373 static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
374 uint64_t size,
375 unsigned alignment,
376 enum radeon_bo_domain initial_domain,
377 unsigned flags,
378 int heap)
379 {
380 struct amdgpu_bo_alloc_request request = {0};
381 amdgpu_bo_handle buf_handle;
382 uint64_t va = 0;
383 struct amdgpu_winsys_bo *bo;
384 amdgpu_va_handle va_handle;
385 unsigned va_gap_size;
386 int r;
387
388 /* VRAM or GTT must be specified, but not both at the same time. */
389 assert(util_bitcount(initial_domain & RADEON_DOMAIN_VRAM_GTT) == 1);
390
391 bo = CALLOC_STRUCT(amdgpu_winsys_bo);
392 if (!bo) {
393 return NULL;
394 }
395
396 if (heap >= 0) {
397 pb_cache_init_entry(&ws->bo_cache, &bo->u.real.cache_entry, &bo->base,
398 heap);
399 }
400 request.alloc_size = size;
401 request.phys_alignment = alignment;
402
403 if (initial_domain & RADEON_DOMAIN_VRAM)
404 request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
405 if (initial_domain & RADEON_DOMAIN_GTT)
406 request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
407
408 /* If VRAM is just stolen system memory, allow both VRAM and
409 * GTT, whichever has free space. If a buffer is evicted from
410 * VRAM to GTT, it will stay there.
411 *
412 * DRM 3.6.0 has good BO move throttling, so we can allow VRAM-only
413 * placements even with a low amount of stolen VRAM.
414 */
415 if (!ws->info.has_dedicated_vram && ws->info.drm_minor < 6)
416 request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
417
418 if (flags & RADEON_FLAG_NO_CPU_ACCESS)
419 request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
420 if (flags & RADEON_FLAG_GTT_WC)
421 request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
422 /* TODO: Enable this once the kernel handles it efficiently. */
423 /*if (flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
424 ws->info.drm_minor >= 20)
425 request.flags |= AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;*/
426
427 r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
428 if (r) {
429 fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
430 fprintf(stderr, "amdgpu: size : %"PRIu64" bytes\n", size);
431 fprintf(stderr, "amdgpu: alignment : %u bytes\n", alignment);
432 fprintf(stderr, "amdgpu: domains : %u\n", initial_domain);
433 goto error_bo_alloc;
434 }
435
436 va_gap_size = ws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
437 if (size > ws->info.pte_fragment_size)
438 alignment = MAX2(alignment, ws->info.pte_fragment_size);
439 r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
440 size + va_gap_size, alignment, 0, &va, &va_handle,
441 flags & RADEON_FLAG_32BIT ? AMDGPU_VA_RANGE_32_BIT : 0);
442 if (r)
443 goto error_va_alloc;
444
445 unsigned vm_flags = AMDGPU_VM_PAGE_READABLE |
446 AMDGPU_VM_PAGE_EXECUTABLE;
447
448 if (!(flags & RADEON_FLAG_READ_ONLY))
449 vm_flags |= AMDGPU_VM_PAGE_WRITEABLE;
450
451 r = amdgpu_bo_va_op_raw(ws->dev, buf_handle, 0, size, va, vm_flags,
452 AMDGPU_VA_OP_MAP);
453 if (r)
454 goto error_va_map;
455
456 pipe_reference_init(&bo->base.reference, 1);
457 bo->base.alignment = alignment;
458 bo->base.usage = 0;
459 bo->base.size = size;
460 bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
461 bo->ws = ws;
462 bo->bo = buf_handle;
463 bo->va = va;
464 bo->u.real.va_handle = va_handle;
465 bo->initial_domain = initial_domain;
466 bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
467 bo->is_local = !!(request.flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
468
469 if (initial_domain & RADEON_DOMAIN_VRAM)
470 ws->allocated_vram += align64(size, ws->info.gart_page_size);
471 else if (initial_domain & RADEON_DOMAIN_GTT)
472 ws->allocated_gtt += align64(size, ws->info.gart_page_size);
473
474 amdgpu_add_buffer_to_global_list(bo);
475
476 return bo;
477
478 error_va_map:
479 amdgpu_va_range_free(va_handle);
480
481 error_va_alloc:
482 amdgpu_bo_free(buf_handle);
483
484 error_bo_alloc:
485 FREE(bo);
486 return NULL;
487 }
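/* Illustrative caller-side sketch (not part of this file), matching the change
 * this commit enables: passing RADEON_FLAG_32BIT makes the GPU VA come from
 * the 32-bit range (AMDGPU_VA_RANGE_32_BIT above), which is useful when a
 * hardware block can only address 32 bits.
 *
 *    struct pb_buffer *buf =
 *       ws->buffer_create(ws, 4096, 4096, RADEON_DOMAIN_VRAM,
 *                         RADEON_FLAG_GTT_WC |
 *                         RADEON_FLAG_NO_INTERPROCESS_SHARING |
 *                         RADEON_FLAG_32BIT);
 *    uint64_t va = ws->buffer_get_virtual_address(buf);
 *    // With a 32-bit range the whole buffer is expected to fit below 4 GiB.
 *    assert(va + 4096 <= (1ull << 32));
 */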
488
489 bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf)
490 {
491 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
492
493 if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
494 return false;
495 }
496
497 return amdgpu_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
498 }
499
500 bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
501 {
502 struct amdgpu_winsys_bo *bo = NULL; /* fix container_of */
503 bo = container_of(entry, bo, u.slab.entry);
504
505 return amdgpu_bo_can_reclaim(&bo->base);
506 }
507
508 static void amdgpu_bo_slab_destroy(struct pb_buffer *_buf)
509 {
510 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
511
512 assert(!bo->bo);
513
514 pb_slab_free(&bo->ws->bo_slabs, &bo->u.slab.entry);
515 }
516
517 static const struct pb_vtbl amdgpu_winsys_bo_slab_vtbl = {
518 amdgpu_bo_slab_destroy
519 /* other functions are never called */
520 };
521
522 struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
523 unsigned entry_size,
524 unsigned group_index)
525 {
526 struct amdgpu_winsys *ws = priv;
527 struct amdgpu_slab *slab = CALLOC_STRUCT(amdgpu_slab);
528 enum radeon_bo_domain domains = radeon_domain_from_heap(heap);
529 enum radeon_bo_flag flags = radeon_flags_from_heap(heap);
530 uint32_t base_id;
531
532 if (!slab)
533 return NULL;
534
535 unsigned slab_size = 1 << AMDGPU_SLAB_BO_SIZE_LOG2;
536 slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(&ws->base,
537 slab_size, slab_size,
538 domains, flags));
539 if (!slab->buffer)
540 goto fail;
541
542 assert(slab->buffer->bo);
543
544 slab->base.num_entries = slab->buffer->base.size / entry_size;
545 slab->base.num_free = slab->base.num_entries;
546 slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
547 if (!slab->entries)
548 goto fail_buffer;
549
550 LIST_INITHEAD(&slab->base.free);
551
552 base_id = __sync_fetch_and_add(&ws->next_bo_unique_id, slab->base.num_entries);
553
554 for (unsigned i = 0; i < slab->base.num_entries; ++i) {
555 struct amdgpu_winsys_bo *bo = &slab->entries[i];
556
557 bo->base.alignment = entry_size;
558 bo->base.usage = slab->buffer->base.usage;
559 bo->base.size = entry_size;
560 bo->base.vtbl = &amdgpu_winsys_bo_slab_vtbl;
561 bo->ws = ws;
562 bo->va = slab->buffer->va + i * entry_size;
563 bo->initial_domain = domains;
564 bo->unique_id = base_id + i;
565 bo->u.slab.entry.slab = &slab->base;
566 bo->u.slab.entry.group_index = group_index;
567 bo->u.slab.real = slab->buffer;
568
569 LIST_ADDTAIL(&bo->u.slab.entry.head, &slab->base.free);
570 }
571
572 return &slab->base;
573
574 fail_buffer:
575 amdgpu_winsys_bo_reference(&slab->buffer, NULL);
576 fail:
577 FREE(slab);
578 return NULL;
579 }
580
581 void amdgpu_bo_slab_free(void *priv, struct pb_slab *pslab)
582 {
583 struct amdgpu_slab *slab = amdgpu_slab(pslab);
584
585 for (unsigned i = 0; i < slab->base.num_entries; ++i)
586 amdgpu_bo_remove_fences(&slab->entries[i]);
587
588 FREE(slab->entries);
589 amdgpu_winsys_bo_reference(&slab->buffer, NULL);
590 FREE(slab);
591 }
592
593 #if DEBUG_SPARSE_COMMITS
594 static void
595 sparse_dump(struct amdgpu_winsys_bo *bo, const char *func)
596 {
597 fprintf(stderr, "%s: %p (size=%"PRIu64", num_va_pages=%u) @ %s\n"
598 "Commitments:\n",
599 __func__, bo, bo->base.size, bo->u.sparse.num_va_pages, func);
600
601 struct amdgpu_sparse_backing *span_backing = NULL;
602 uint32_t span_first_backing_page = 0;
603 uint32_t span_first_va_page = 0;
604 uint32_t va_page = 0;
605
606 for (;;) {
607 struct amdgpu_sparse_backing *backing = 0;
608 uint32_t backing_page = 0;
609
610 if (va_page < bo->u.sparse.num_va_pages) {
611 backing = bo->u.sparse.commitments[va_page].backing;
612 backing_page = bo->u.sparse.commitments[va_page].page;
613 }
614
615 if (span_backing &&
616 (backing != span_backing ||
617 backing_page != span_first_backing_page + (va_page - span_first_va_page))) {
618 fprintf(stderr, " %u..%u: backing=%p:%u..%u\n",
619 span_first_va_page, va_page - 1, span_backing,
620 span_first_backing_page,
621 span_first_backing_page + (va_page - span_first_va_page) - 1);
622
623 span_backing = NULL;
624 }
625
626 if (va_page >= bo->u.sparse.num_va_pages)
627 break;
628
629 if (backing && !span_backing) {
630 span_backing = backing;
631 span_first_backing_page = backing_page;
632 span_first_va_page = va_page;
633 }
634
635 va_page++;
636 }
637
638 fprintf(stderr, "Backing:\n");
639
640 list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
641 fprintf(stderr, " %p (size=%"PRIu64")\n", backing, backing->bo->base.size);
642 for (unsigned i = 0; i < backing->num_chunks; ++i)
643 fprintf(stderr, " %u..%u\n", backing->chunks[i].begin, backing->chunks[i].end);
644 }
645 }
646 #endif
647
648 /*
649 * Attempt to allocate the given number of backing pages. Fewer pages may be
650 * allocated (depending on the fragmentation of existing backing buffers),
651 * which will be reflected by a change to *pnum_pages.
652 */
653 static struct amdgpu_sparse_backing *
654 sparse_backing_alloc(struct amdgpu_winsys_bo *bo, uint32_t *pstart_page, uint32_t *pnum_pages)
655 {
656 struct amdgpu_sparse_backing *best_backing;
657 unsigned best_idx;
658 uint32_t best_num_pages;
659
660 best_backing = NULL;
661 best_idx = 0;
662 best_num_pages = 0;
663
664 /* This is a very simple and inefficient best-fit algorithm. */
665 list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
666 for (unsigned idx = 0; idx < backing->num_chunks; ++idx) {
667 uint32_t cur_num_pages = backing->chunks[idx].end - backing->chunks[idx].begin;
668 if ((best_num_pages < *pnum_pages && cur_num_pages > best_num_pages) ||
669 (best_num_pages > *pnum_pages && cur_num_pages < best_num_pages)) {
670 best_backing = backing;
671 best_idx = idx;
672 best_num_pages = cur_num_pages;
673 }
674 }
675 }
676
677 /* Allocate a new backing buffer if necessary. */
678 if (!best_backing) {
679 struct pb_buffer *buf;
680 uint64_t size;
681 uint32_t pages;
682
683 best_backing = CALLOC_STRUCT(amdgpu_sparse_backing);
684 if (!best_backing)
685 return NULL;
686
687 best_backing->max_chunks = 4;
688 best_backing->chunks = CALLOC(best_backing->max_chunks,
689 sizeof(*best_backing->chunks));
690 if (!best_backing->chunks) {
691 FREE(best_backing);
692 return NULL;
693 }
694
695 assert(bo->u.sparse.num_backing_pages < DIV_ROUND_UP(bo->base.size, RADEON_SPARSE_PAGE_SIZE));
696
697 size = MIN3(bo->base.size / 16,
698 8 * 1024 * 1024,
699 bo->base.size - (uint64_t)bo->u.sparse.num_backing_pages * RADEON_SPARSE_PAGE_SIZE);
700 size = MAX2(size, RADEON_SPARSE_PAGE_SIZE);
701
702 buf = amdgpu_bo_create(&bo->ws->base, size, RADEON_SPARSE_PAGE_SIZE,
703 bo->initial_domain,
704 bo->u.sparse.flags | RADEON_FLAG_NO_SUBALLOC);
705 if (!buf) {
706 FREE(best_backing->chunks);
707 FREE(best_backing);
708 return NULL;
709 }
710
711 /* We might have gotten a bigger buffer than requested via caching. */
712 pages = buf->size / RADEON_SPARSE_PAGE_SIZE;
713
714 best_backing->bo = amdgpu_winsys_bo(buf);
715 best_backing->num_chunks = 1;
716 best_backing->chunks[0].begin = 0;
717 best_backing->chunks[0].end = pages;
718
719 list_add(&best_backing->list, &bo->u.sparse.backing);
720 bo->u.sparse.num_backing_pages += pages;
721
722 best_idx = 0;
723 best_num_pages = pages;
724 }
725
726 *pnum_pages = MIN2(*pnum_pages, best_num_pages);
727 *pstart_page = best_backing->chunks[best_idx].begin;
728 best_backing->chunks[best_idx].begin += *pnum_pages;
729
730 if (best_backing->chunks[best_idx].begin >= best_backing->chunks[best_idx].end) {
731 memmove(&best_backing->chunks[best_idx], &best_backing->chunks[best_idx + 1],
732 sizeof(*best_backing->chunks) * (best_backing->num_chunks - best_idx - 1));
733 best_backing->num_chunks--;
734 }
735
736 return best_backing;
737 }
738
739 static void
740 sparse_free_backing_buffer(struct amdgpu_winsys_bo *bo,
741 struct amdgpu_sparse_backing *backing)
742 {
743 struct amdgpu_winsys *ws = backing->bo->ws;
744
745 bo->u.sparse.num_backing_pages -= backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE;
746
747 simple_mtx_lock(&ws->bo_fence_lock);
748 amdgpu_add_fences(backing->bo, bo->num_fences, bo->fences);
749 simple_mtx_unlock(&ws->bo_fence_lock);
750
751 list_del(&backing->list);
752 amdgpu_winsys_bo_reference(&backing->bo, NULL);
753 FREE(backing->chunks);
754 FREE(backing);
755 }
756
757 /*
758 * Return a range of pages from the given backing buffer back into the
759 * free structure.
760 */
761 static bool
762 sparse_backing_free(struct amdgpu_winsys_bo *bo,
763 struct amdgpu_sparse_backing *backing,
764 uint32_t start_page, uint32_t num_pages)
765 {
766 uint32_t end_page = start_page + num_pages;
767 unsigned low = 0;
768 unsigned high = backing->num_chunks;
769
770 /* Find the first chunk with begin >= start_page. */
771 while (low < high) {
772 unsigned mid = low + (high - low) / 2;
773
774 if (backing->chunks[mid].begin >= start_page)
775 high = mid;
776 else
777 low = mid + 1;
778 }
779
780 assert(low >= backing->num_chunks || end_page <= backing->chunks[low].begin);
781 assert(low == 0 || backing->chunks[low - 1].end <= start_page);
782
783 if (low > 0 && backing->chunks[low - 1].end == start_page) {
784 backing->chunks[low - 1].end = end_page;
785
786 if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
787 backing->chunks[low - 1].end = backing->chunks[low].end;
788 memmove(&backing->chunks[low], &backing->chunks[low + 1],
789 sizeof(*backing->chunks) * (backing->num_chunks - low - 1));
790 backing->num_chunks--;
791 }
792 } else if (low < backing->num_chunks && end_page == backing->chunks[low].begin) {
793 backing->chunks[low].begin = start_page;
794 } else {
795 if (backing->num_chunks >= backing->max_chunks) {
796 unsigned new_max_chunks = 2 * backing->max_chunks;
797 struct amdgpu_sparse_backing_chunk *new_chunks =
798 REALLOC(backing->chunks,
799 sizeof(*backing->chunks) * backing->max_chunks,
800 sizeof(*backing->chunks) * new_max_chunks);
801 if (!new_chunks)
802 return false;
803
804 backing->max_chunks = new_max_chunks;
805 backing->chunks = new_chunks;
806 }
807
808 memmove(&backing->chunks[low + 1], &backing->chunks[low],
809 sizeof(*backing->chunks) * (backing->num_chunks - low));
810 backing->chunks[low].begin = start_page;
811 backing->chunks[low].end = end_page;
812 backing->num_chunks++;
813 }
814
815 if (backing->num_chunks == 1 && backing->chunks[0].begin == 0 &&
816 backing->chunks[0].end == backing->bo->base.size / RADEON_SPARSE_PAGE_SIZE)
817 sparse_free_backing_buffer(bo, backing);
818
819 return true;
820 }
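/* Illustrative note (not part of the upstream file): the insertion above keeps
 * the free-chunk array sorted and merges with both neighbours.  For example,
 * with free chunks {0,4} and {8,12}, freeing pages 4..7 first extends the left
 * neighbour to {0,8} and, since it now touches {8,12}, collapses both into a
 * single chunk {0,12}.  If the resulting chunk covers the whole backing
 * buffer, sparse_free_backing_buffer() releases the backing buffer itself.
 */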
821
822 static void amdgpu_bo_sparse_destroy(struct pb_buffer *_buf)
823 {
824 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
825 int r;
826
827 assert(!bo->bo && bo->sparse);
828
829 r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0,
830 (uint64_t)bo->u.sparse.num_va_pages * RADEON_SPARSE_PAGE_SIZE,
831 bo->va, 0, AMDGPU_VA_OP_CLEAR);
832 if (r) {
833 fprintf(stderr, "amdgpu: clearing PRT VA region on destroy failed (%d)\n", r);
834 }
835
836 while (!list_empty(&bo->u.sparse.backing)) {
837 struct amdgpu_sparse_backing *dummy = NULL;
838 sparse_free_backing_buffer(bo,
839 container_of(bo->u.sparse.backing.next,
840 dummy, list));
841 }
842
843 amdgpu_va_range_free(bo->u.sparse.va_handle);
844 simple_mtx_destroy(&bo->u.sparse.commit_lock);
845 FREE(bo->u.sparse.commitments);
846 FREE(bo);
847 }
848
849 static const struct pb_vtbl amdgpu_winsys_bo_sparse_vtbl = {
850 amdgpu_bo_sparse_destroy
851 /* other functions are never called */
852 };
853
854 static struct pb_buffer *
855 amdgpu_bo_sparse_create(struct amdgpu_winsys *ws, uint64_t size,
856 enum radeon_bo_domain domain,
857 enum radeon_bo_flag flags)
858 {
859 struct amdgpu_winsys_bo *bo;
860 uint64_t map_size;
861 uint64_t va_gap_size;
862 int r;
863
864 /* We use 32-bit page numbers; refuse to attempt allocating sparse buffers
865 * that exceed this limit. This is not really a restriction: we don't have
866 * that much virtual address space anyway.
867 */
868 if (size > (uint64_t)INT32_MAX * RADEON_SPARSE_PAGE_SIZE)
869 return NULL;
870
871 bo = CALLOC_STRUCT(amdgpu_winsys_bo);
872 if (!bo)
873 return NULL;
874
875 pipe_reference_init(&bo->base.reference, 1);
876 bo->base.alignment = RADEON_SPARSE_PAGE_SIZE;
877 bo->base.size = size;
878 bo->base.vtbl = &amdgpu_winsys_bo_sparse_vtbl;
879 bo->ws = ws;
880 bo->initial_domain = domain;
881 bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
882 bo->sparse = true;
883 bo->u.sparse.flags = flags & ~RADEON_FLAG_SPARSE;
884
885 bo->u.sparse.num_va_pages = DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
886 bo->u.sparse.commitments = CALLOC(bo->u.sparse.num_va_pages,
887 sizeof(*bo->u.sparse.commitments));
888 if (!bo->u.sparse.commitments)
889 goto error_alloc_commitments;
890
891 simple_mtx_init(&bo->u.sparse.commit_lock, mtx_plain);
892 LIST_INITHEAD(&bo->u.sparse.backing);
893
894 /* For simplicity, we always map a multiple of the page size. */
895 map_size = align64(size, RADEON_SPARSE_PAGE_SIZE);
896 va_gap_size = ws->check_vm ? 4 * RADEON_SPARSE_PAGE_SIZE : 0;
897 r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
898 map_size + va_gap_size, RADEON_SPARSE_PAGE_SIZE,
899 0, &bo->va, &bo->u.sparse.va_handle, 0);
900 if (r)
901 goto error_va_alloc;
902
903 r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0, size, bo->va,
904 AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_MAP);
905 if (r)
906 goto error_va_map;
907
908 return &bo->base;
909
910 error_va_map:
911 amdgpu_va_range_free(bo->u.sparse.va_handle);
912 error_va_alloc:
913 simple_mtx_destroy(&bo->u.sparse.commit_lock);
914 FREE(bo->u.sparse.commitments);
915 error_alloc_commitments:
916 FREE(bo);
917 return NULL;
918 }
919
920 static bool
921 amdgpu_bo_sparse_commit(struct pb_buffer *buf, uint64_t offset, uint64_t size,
922 bool commit)
923 {
924 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buf);
925 struct amdgpu_sparse_commitment *comm;
926 uint32_t va_page, end_va_page;
927 bool ok = true;
928 int r;
929
930 assert(bo->sparse);
931 assert(offset % RADEON_SPARSE_PAGE_SIZE == 0);
932 assert(offset <= bo->base.size);
933 assert(size <= bo->base.size - offset);
934 assert(size % RADEON_SPARSE_PAGE_SIZE == 0 || offset + size == bo->base.size);
935
936 comm = bo->u.sparse.commitments;
937 va_page = offset / RADEON_SPARSE_PAGE_SIZE;
938 end_va_page = va_page + DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
939
940 simple_mtx_lock(&bo->u.sparse.commit_lock);
941
942 #if DEBUG_SPARSE_COMMITS
943 sparse_dump(bo, __func__);
944 #endif
945
946 if (commit) {
947 while (va_page < end_va_page) {
948 uint32_t span_va_page;
949
950 /* Skip pages that are already committed. */
951 if (comm[va_page].backing) {
952 va_page++;
953 continue;
954 }
955
956 /* Determine length of uncommitted span. */
957 span_va_page = va_page;
958 while (va_page < end_va_page && !comm[va_page].backing)
959 va_page++;
960
961 /* Fill the uncommitted span with chunks of backing memory. */
962 while (span_va_page < va_page) {
963 struct amdgpu_sparse_backing *backing;
964 uint32_t backing_start, backing_size;
965
966 backing_size = va_page - span_va_page;
967 backing = sparse_backing_alloc(bo, &backing_start, &backing_size);
968 if (!backing) {
969 ok = false;
970 goto out;
971 }
972
973 r = amdgpu_bo_va_op_raw(bo->ws->dev, backing->bo->bo,
974 (uint64_t)backing_start * RADEON_SPARSE_PAGE_SIZE,
975 (uint64_t)backing_size * RADEON_SPARSE_PAGE_SIZE,
976 bo->va + (uint64_t)span_va_page * RADEON_SPARSE_PAGE_SIZE,
977 AMDGPU_VM_PAGE_READABLE |
978 AMDGPU_VM_PAGE_WRITEABLE |
979 AMDGPU_VM_PAGE_EXECUTABLE,
980 AMDGPU_VA_OP_REPLACE);
981 if (r) {
982 ok = sparse_backing_free(bo, backing, backing_start, backing_size);
983 assert(ok && "sufficient memory should already be allocated");
984
985 ok = false;
986 goto out;
987 }
988
989 while (backing_size) {
990 comm[span_va_page].backing = backing;
991 comm[span_va_page].page = backing_start;
992 span_va_page++;
993 backing_start++;
994 backing_size--;
995 }
996 }
997 }
998 } else {
999 r = amdgpu_bo_va_op_raw(bo->ws->dev, NULL, 0,
1000 (uint64_t)(end_va_page - va_page) * RADEON_SPARSE_PAGE_SIZE,
1001 bo->va + (uint64_t)va_page * RADEON_SPARSE_PAGE_SIZE,
1002 AMDGPU_VM_PAGE_PRT, AMDGPU_VA_OP_REPLACE);
1003 if (r) {
1004 ok = false;
1005 goto out;
1006 }
1007
1008 while (va_page < end_va_page) {
1009 struct amdgpu_sparse_backing *backing;
1010 uint32_t backing_start;
1011 uint32_t span_pages;
1012
1013 /* Skip pages that are already uncommitted. */
1014 if (!comm[va_page].backing) {
1015 va_page++;
1016 continue;
1017 }
1018
1019 /* Group contiguous spans of pages. */
1020 backing = comm[va_page].backing;
1021 backing_start = comm[va_page].page;
1022 comm[va_page].backing = NULL;
1023
1024 span_pages = 1;
1025 va_page++;
1026
1027 while (va_page < end_va_page &&
1028 comm[va_page].backing == backing &&
1029 comm[va_page].page == backing_start + span_pages) {
1030 comm[va_page].backing = NULL;
1031 va_page++;
1032 span_pages++;
1033 }
1034
1035 if (!sparse_backing_free(bo, backing, backing_start, span_pages)) {
1036 /* Couldn't allocate tracking data structures, so we have to leak */
1037 fprintf(stderr, "amdgpu: leaking PRT backing memory\n");
1038 ok = false;
1039 }
1040 }
1041 }
1042 out:
1043
1044 simple_mtx_unlock(&bo->u.sparse.commit_lock);
1045
1046 return ok;
1047 }
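/* Illustrative caller-side sketch (not part of this file): a sparse buffer
 * only reserves a PRT VA range at creation time; physical memory is attached
 * and detached later, in RADEON_SPARSE_PAGE_SIZE units, via buffer_commit.
 *
 *    struct pb_buffer *buf =
 *       ws->buffer_create(ws, 256 * 1024 * 1024, RADEON_SPARSE_PAGE_SIZE,
 *                         RADEON_DOMAIN_VRAM,
 *                         RADEON_FLAG_GTT_WC | RADEON_FLAG_SPARSE |
 *                         RADEON_FLAG_NO_CPU_ACCESS);
 *
 *    // Back the first sparse page of the range...
 *    ws->buffer_commit(buf, 0, RADEON_SPARSE_PAGE_SIZE, true);
 *    // ...and release that backing again when it is no longer needed.
 *    ws->buffer_commit(buf, 0, RADEON_SPARSE_PAGE_SIZE, false);
 */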
1048
1049 static unsigned eg_tile_split(unsigned tile_split)
1050 {
1051 switch (tile_split) {
1052 case 0: tile_split = 64; break;
1053 case 1: tile_split = 128; break;
1054 case 2: tile_split = 256; break;
1055 case 3: tile_split = 512; break;
1056 default:
1057 case 4: tile_split = 1024; break;
1058 case 5: tile_split = 2048; break;
1059 case 6: tile_split = 4096; break;
1060 }
1061 return tile_split;
1062 }
1063
1064 static unsigned eg_tile_split_rev(unsigned eg_tile_split)
1065 {
1066 switch (eg_tile_split) {
1067 case 64: return 0;
1068 case 128: return 1;
1069 case 256: return 2;
1070 case 512: return 3;
1071 default:
1072 case 1024: return 4;
1073 case 2048: return 5;
1074 case 4096: return 6;
1075 }
1076 }
1077
1078 static void amdgpu_buffer_get_metadata(struct pb_buffer *_buf,
1079 struct radeon_bo_metadata *md)
1080 {
1081 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
1082 struct amdgpu_bo_info info = {0};
1083 uint64_t tiling_flags;
1084 int r;
1085
1086 assert(bo->bo && "must not be called for slab entries");
1087
1088 r = amdgpu_bo_query_info(bo->bo, &info);
1089 if (r)
1090 return;
1091
1092 tiling_flags = info.metadata.tiling_info;
1093
1094 if (bo->ws->info.chip_class >= GFX9) {
1095 md->u.gfx9.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
1096 } else {
1097 md->u.legacy.microtile = RADEON_LAYOUT_LINEAR;
1098 md->u.legacy.macrotile = RADEON_LAYOUT_LINEAR;
1099
1100 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */
1101 md->u.legacy.macrotile = RADEON_LAYOUT_TILED;
1102 else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
1103 md->u.legacy.microtile = RADEON_LAYOUT_TILED;
1104
1105 md->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1106 md->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1107 md->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1108 md->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
1109 md->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1110 md->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1111 md->u.legacy.scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
1112 }
1113
1114 md->size_metadata = info.metadata.size_metadata;
1115 memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
1116 }
1117
1118 static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
1119 struct radeon_bo_metadata *md)
1120 {
1121 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
1122 struct amdgpu_bo_metadata metadata = {0};
1123 uint64_t tiling_flags = 0;
1124
1125 assert(bo->bo && "must not be called for slab entries");
1126
1127 if (bo->ws->info.chip_class >= GFX9) {
1128 tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, md->u.gfx9.swizzle_mode);
1129 } else {
1130 if (md->u.legacy.macrotile == RADEON_LAYOUT_TILED)
1131 tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
1132 else if (md->u.legacy.microtile == RADEON_LAYOUT_TILED)
1133 tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
1134 else
1135 tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */
1136
1137 tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, md->u.legacy.pipe_config);
1138 tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(md->u.legacy.bankw));
1139 tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(md->u.legacy.bankh));
1140 if (md->u.legacy.tile_split)
1141 tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(md->u.legacy.tile_split));
1142 tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(md->u.legacy.mtilea));
1143 tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(md->u.legacy.num_banks)-1);
1144
1145 if (md->u.legacy.scanout)
1146 tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
1147 else
1148 tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
1149 }
1150
1151 metadata.tiling_info = tiling_flags;
1152 metadata.size_metadata = md->size_metadata;
1153 memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));
1154
1155 amdgpu_bo_set_metadata(bo->bo, &metadata);
1156 }
1157
1158 static struct pb_buffer *
1159 amdgpu_bo_create(struct radeon_winsys *rws,
1160 uint64_t size,
1161 unsigned alignment,
1162 enum radeon_bo_domain domain,
1163 enum radeon_bo_flag flags)
1164 {
1165 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1166 struct amdgpu_winsys_bo *bo;
1167 int heap = -1;
1168
1169 /* VRAM implies WC. This is not optional. */
1170 assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
1171
1172 /* NO_CPU_ACCESS is valid with VRAM only. */
1173 assert(domain == RADEON_DOMAIN_VRAM || !(flags & RADEON_FLAG_NO_CPU_ACCESS));
1174
1175 /* Sparse buffers must have NO_CPU_ACCESS set. */
1176 assert(!(flags & RADEON_FLAG_SPARSE) || flags & RADEON_FLAG_NO_CPU_ACCESS);
1177
1178 /* Sub-allocate small buffers from slabs. */
1179 if (!(flags & (RADEON_FLAG_NO_SUBALLOC | RADEON_FLAG_SPARSE)) &&
1180 size <= (1 << AMDGPU_SLAB_MAX_SIZE_LOG2) &&
1181 alignment <= MAX2(1 << AMDGPU_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
1182 struct pb_slab_entry *entry;
1183 int heap = radeon_get_heap_index(domain, flags);
1184
1185 if (heap < 0 || heap >= RADEON_MAX_SLAB_HEAPS)
1186 goto no_slab;
1187
1188 entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
1189 if (!entry) {
1190 /* Clear the cache and try again. */
1191 pb_cache_release_all_buffers(&ws->bo_cache);
1192
1193 entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
1194 }
1195 if (!entry)
1196 return NULL;
1197
1198 bo = NULL;
1199 bo = container_of(entry, bo, u.slab.entry);
1200
1201 pipe_reference_init(&bo->base.reference, 1);
1202
1203 return &bo->base;
1204 }
1205 no_slab:
1206
1207 if (flags & RADEON_FLAG_SPARSE) {
1208 assert(RADEON_SPARSE_PAGE_SIZE % alignment == 0);
1209
1210 return amdgpu_bo_sparse_create(ws, size, domain, flags);
1211 }
1212
1213 /* This flag is irrelevant for the cache. */
1214 flags &= ~RADEON_FLAG_NO_SUBALLOC;
1215
1216 /* Align size to page size. This is the minimum alignment for normal
1217 * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
1218 * like constant/uniform buffers, can benefit from better and more reuse.
1219 */
1220 size = align64(size, ws->info.gart_page_size);
1221 alignment = align(alignment, ws->info.gart_page_size);
1222
1223 bool use_reusable_pool = flags & RADEON_FLAG_NO_INTERPROCESS_SHARING;
1224
1225 if (use_reusable_pool) {
1226 heap = radeon_get_heap_index(domain, flags);
1227 assert(heap >= 0 && heap < RADEON_MAX_CACHED_HEAPS);
1228
1229 /* Get a buffer from the cache. */
1230 bo = (struct amdgpu_winsys_bo*)
1231 pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, 0, heap);
1232 if (bo)
1233 return &bo->base;
1234 }
1235
1236 /* Create a new one. */
1237 bo = amdgpu_create_bo(ws, size, alignment, domain, flags, heap);
1238 if (!bo) {
1239 /* Clear the cache and try again. */
1240 pb_slabs_reclaim(&ws->bo_slabs);
1241 pb_cache_release_all_buffers(&ws->bo_cache);
1242 bo = amdgpu_create_bo(ws, size, alignment, domain, flags, heap);
1243 if (!bo)
1244 return NULL;
1245 }
1246
1247 bo->u.real.use_reusable_pool = use_reusable_pool;
1248 return &bo->base;
1249 }
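/* Illustrative note (not part of the upstream file): the function above picks
 * one of three paths.  Small allocations without NO_SUBALLOC/SPARSE are
 * sub-allocated as slab entries from a larger shared BO; other allocations
 * marked NO_INTERPROCESS_SHARING are first looked up in the reusable cache;
 * everything else (and any cache miss) goes through amdgpu_create_bo(), with
 * the slabs and cache reclaimed once on failure before giving up.
 */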
1250
1251 static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
1252 struct winsys_handle *whandle,
1253 unsigned *stride,
1254 unsigned *offset)
1255 {
1256 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1257 struct amdgpu_winsys_bo *bo;
1258 enum amdgpu_bo_handle_type type;
1259 struct amdgpu_bo_import_result result = {0};
1260 uint64_t va;
1261 amdgpu_va_handle va_handle;
1262 struct amdgpu_bo_info info = {0};
1263 enum radeon_bo_domain initial = 0;
1264 int r;
1265
1266 /* Initialize the structure. */
1267 bo = CALLOC_STRUCT(amdgpu_winsys_bo);
1268 if (!bo) {
1269 return NULL;
1270 }
1271
1272 switch (whandle->type) {
1273 case DRM_API_HANDLE_TYPE_SHARED:
1274 type = amdgpu_bo_handle_type_gem_flink_name;
1275 break;
1276 case DRM_API_HANDLE_TYPE_FD:
1277 type = amdgpu_bo_handle_type_dma_buf_fd;
1278 break;
1279 default:
 1280       goto error; /* don't leak bo on an unknown handle type */
1281 }
1282
1283 r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
1284 if (r)
1285 goto error;
1286
1287 /* Get initial domains. */
1288 r = amdgpu_bo_query_info(result.buf_handle, &info);
1289 if (r)
1290 goto error_query;
1291
1292 r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
1293 result.alloc_size, 1 << 20, 0, &va, &va_handle, 0);
1294 if (r)
1295 goto error_query;
1296
1297 r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP);
1298 if (r)
1299 goto error_va_map;
1300
1301 if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
1302 initial |= RADEON_DOMAIN_VRAM;
1303 if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
1304 initial |= RADEON_DOMAIN_GTT;
1305
1306
1307 pipe_reference_init(&bo->base.reference, 1);
1308 bo->base.alignment = info.phys_alignment;
1309 bo->bo = result.buf_handle;
1310 bo->base.size = result.alloc_size;
1311 bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
1312 bo->ws = ws;
1313 bo->va = va;
1314 bo->u.real.va_handle = va_handle;
1315 bo->initial_domain = initial;
1316 bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
1317 bo->is_shared = true;
1318
1319 if (stride)
1320 *stride = whandle->stride;
1321 if (offset)
1322 *offset = whandle->offset;
1323
1324 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
1325 ws->allocated_vram += align64(bo->base.size, ws->info.gart_page_size);
1326 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
1327 ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);
1328
1329 amdgpu_add_buffer_to_global_list(bo);
1330
1331 return &bo->base;
1332
1333 error_va_map:
1334 amdgpu_va_range_free(va_handle);
1335
1336 error_query:
1337 amdgpu_bo_free(result.buf_handle);
1338
1339 error:
1340 FREE(bo);
1341 return NULL;
1342 }
1343
1344 static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
1345 unsigned stride, unsigned offset,
1346 unsigned slice_size,
1347 struct winsys_handle *whandle)
1348 {
1349 struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
1350 enum amdgpu_bo_handle_type type;
1351 int r;
1352
1353 /* Don't allow exports of slab entries and sparse buffers. */
1354 if (!bo->bo)
1355 return false;
1356
1357 bo->u.real.use_reusable_pool = false;
1358
1359 switch (whandle->type) {
1360 case DRM_API_HANDLE_TYPE_SHARED:
1361 type = amdgpu_bo_handle_type_gem_flink_name;
1362 break;
1363 case DRM_API_HANDLE_TYPE_FD:
1364 type = amdgpu_bo_handle_type_dma_buf_fd;
1365 break;
1366 case DRM_API_HANDLE_TYPE_KMS:
1367 type = amdgpu_bo_handle_type_kms;
1368 break;
1369 default:
1370 return false;
1371 }
1372
1373 r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
1374 if (r)
1375 return false;
1376
1377 whandle->stride = stride;
1378 whandle->offset = offset;
1379 whandle->offset += slice_size * whandle->layer;
1380 bo->is_shared = true;
1381 return true;
1382 }
1383
1384 static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
1385 void *pointer, uint64_t size)
1386 {
1387 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
1388 amdgpu_bo_handle buf_handle;
1389 struct amdgpu_winsys_bo *bo;
1390 uint64_t va;
1391 amdgpu_va_handle va_handle;
1392 /* Avoid failure when the size is not page aligned */
1393 uint64_t aligned_size = align64(size, ws->info.gart_page_size);
1394
1395 bo = CALLOC_STRUCT(amdgpu_winsys_bo);
1396 if (!bo)
1397 return NULL;
1398
1399 if (amdgpu_create_bo_from_user_mem(ws->dev, pointer,
1400 aligned_size, &buf_handle))
1401 goto error;
1402
1403 if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
1404 aligned_size, 1 << 12, 0, &va, &va_handle, 0))
1405 goto error_va_alloc;
1406
1407 if (amdgpu_bo_va_op(buf_handle, 0, aligned_size, va, 0, AMDGPU_VA_OP_MAP))
1408 goto error_va_map;
1409
1410 /* Initialize it. */
1411 pipe_reference_init(&bo->base.reference, 1);
1412 bo->bo = buf_handle;
1413 bo->base.alignment = 0;
1414 bo->base.size = size;
1415 bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
1416 bo->ws = ws;
1417 bo->user_ptr = pointer;
1418 bo->va = va;
1419 bo->u.real.va_handle = va_handle;
1420 bo->initial_domain = RADEON_DOMAIN_GTT;
1421 bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
1422
1423 ws->allocated_gtt += aligned_size;
1424
1425 amdgpu_add_buffer_to_global_list(bo);
1426
1427 return (struct pb_buffer*)bo;
1428
1429 error_va_map:
1430 amdgpu_va_range_free(va_handle);
1431
1432 error_va_alloc:
1433 amdgpu_bo_free(buf_handle);
1434
1435 error:
1436 FREE(bo);
1437 return NULL;
1438 }
1439
1440 static bool amdgpu_bo_is_user_ptr(struct pb_buffer *buf)
1441 {
1442 return ((struct amdgpu_winsys_bo*)buf)->user_ptr != NULL;
1443 }
1444
1445 static bool amdgpu_bo_is_suballocated(struct pb_buffer *buf)
1446 {
1447 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
1448
1449 return !bo->bo && !bo->sparse;
1450 }
1451
1452 static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
1453 {
1454 return ((struct amdgpu_winsys_bo*)buf)->va;
1455 }
1456
1457 void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
1458 {
1459 ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
1460 ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
1461 ws->base.buffer_map = amdgpu_bo_map;
1462 ws->base.buffer_unmap = amdgpu_bo_unmap;
1463 ws->base.buffer_wait = amdgpu_bo_wait;
1464 ws->base.buffer_create = amdgpu_bo_create;
1465 ws->base.buffer_from_handle = amdgpu_bo_from_handle;
1466 ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
1467 ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
1468 ws->base.buffer_is_suballocated = amdgpu_bo_is_suballocated;
1469 ws->base.buffer_get_handle = amdgpu_bo_get_handle;
1470 ws->base.buffer_commit = amdgpu_bo_sparse_commit;
1471 ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
1472 ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
1473 }