gallium/radeon: enable suballocations for VRAM with no CPU access
src/gallium/winsys/radeon/drm/radeon_drm_bo.c
/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "radeon_drm_cs.h"

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/simple_list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"
#include "os/os_time.h"

#include "state_tracker/drm_driver.h"

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <inttypes.h>

static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        uint64_t size,
                        unsigned alignment,
                        enum radeon_bo_domain domain,
                        enum radeon_bo_flag flags);

static inline struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
    return (struct radeon_bo *)bo;
}

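/* A node in the free list of virtual address ranges managed by
 * radeon_bomgr_find_va() and radeon_bomgr_free_va() below. The list is
 * kept sorted from the highest offset to the lowest. */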
struct radeon_bo_va_hole {
    struct list_head list;
    uint64_t offset;
    uint64_t size;
};

static bool radeon_real_bo_is_busy(struct radeon_bo *bo)
{
    struct drm_radeon_gem_busy args = {0};

    args.handle = bo->handle;
    return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                               &args, sizeof(args)) != 0;
}

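/* For a slab entry, which has no GEM handle of its own, the buffer is busy
 * if any fence (a real BO) attached to the entry hasn't signalled yet.
 * Signalled fences are dropped along the way so the list stays short. */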
static bool radeon_bo_is_busy(struct radeon_bo *bo)
{
    unsigned num_idle;
    bool busy = false;

    if (bo->handle)
        return radeon_real_bo_is_busy(bo);

    mtx_lock(&bo->rws->bo_fence_lock);
    for (num_idle = 0; num_idle < bo->u.slab.num_fences; ++num_idle) {
        if (radeon_real_bo_is_busy(bo->u.slab.fences[num_idle])) {
            busy = true;
            break;
        }
        radeon_bo_reference(&bo->u.slab.fences[num_idle], NULL);
    }
    memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[num_idle],
            (bo->u.slab.num_fences - num_idle) * sizeof(bo->u.slab.fences[0]));
    bo->u.slab.num_fences -= num_idle;
    mtx_unlock(&bo->rws->bo_fence_lock);

    return busy;
}

static void radeon_real_bo_wait_idle(struct radeon_bo *bo)
{
    struct drm_radeon_gem_wait_idle args = {0};

    args.handle = bo->handle;
    while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                           &args, sizeof(args)) == -EBUSY);
}

static void radeon_bo_wait_idle(struct radeon_bo *bo)
{
    if (bo->handle) {
        radeon_real_bo_wait_idle(bo);
    } else {
        mtx_lock(&bo->rws->bo_fence_lock);
        while (bo->u.slab.num_fences) {
            struct radeon_bo *fence = NULL;
            radeon_bo_reference(&fence, bo->u.slab.fences[0]);
            mtx_unlock(&bo->rws->bo_fence_lock);

            /* Wait without holding the fence lock. */
            radeon_real_bo_wait_idle(fence);

            mtx_lock(&bo->rws->bo_fence_lock);
            if (bo->u.slab.num_fences && fence == bo->u.slab.fences[0]) {
                radeon_bo_reference(&bo->u.slab.fences[0], NULL);
                memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[1],
                        (bo->u.slab.num_fences - 1) * sizeof(bo->u.slab.fences[0]));
                bo->u.slab.num_fences--;
            }
            radeon_bo_reference(&fence, NULL);
        }
        mtx_unlock(&bo->rws->bo_fence_lock);
    }
}

static bool radeon_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
                           enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    int64_t abs_timeout;

    /* No timeout. Just query. */
    if (timeout == 0)
        return !bo->num_active_ioctls && !radeon_bo_is_busy(bo);

    abs_timeout = os_time_get_absolute_timeout(timeout);

    /* Wait if any ioctl is being submitted with this buffer. */
    if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
        return false;

    /* Infinite timeout. */
    if (abs_timeout == PIPE_TIMEOUT_INFINITE) {
        radeon_bo_wait_idle(bo);
        return true;
    }

    /* Other timeouts need to be emulated with a loop. */
    while (radeon_bo_is_busy(bo)) {
        if (os_time_get_nano() >= abs_timeout)
            return false;
        os_time_sleep(10);
    }

    return true;
}

static enum radeon_bo_domain get_valid_domain(enum radeon_bo_domain domain)
{
    /* Mask out any domain bits the driver doesn't understand. */
    domain &= RADEON_DOMAIN_VRAM_GTT;

    /* If no domain is set, we must set something... */
    if (!domain)
        domain = RADEON_DOMAIN_VRAM_GTT;

    return domain;
}

static enum radeon_bo_domain radeon_bo_get_initial_domain(
        struct pb_buffer *buf)
{
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    struct drm_radeon_gem_op args;

    if (bo->rws->info.drm_minor < 38)
        return RADEON_DOMAIN_VRAM_GTT;

    memset(&args, 0, sizeof(args));
    args.handle = bo->handle;
    args.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN;

    if (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_OP,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: failed to get initial domain: %p 0x%08X\n",
                bo, bo->handle);
        /* Default domain as returned by get_valid_domain. */
        return RADEON_DOMAIN_VRAM_GTT;
    }

    /* GEM domains and winsys domains are defined the same. */
    return get_valid_domain(args.value);
}

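/* First-fit allocator for the winsys-managed virtual address space: scan
 * the hole list for a range that satisfies both size and alignment,
 * splitting holes where the alignment leaves a usable gap, and fall back
 * to growing the top of the address space (rws->va_offset). */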
static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
                                     uint64_t size, uint64_t alignment)
{
    struct radeon_bo_va_hole *hole, *n;
    uint64_t offset = 0, waste = 0;

    /* All VM address space holes will implicitly start aligned to the
     * size alignment, so we don't need to sanitize the alignment here.
     */
    size = align(size, rws->info.gart_page_size);

    mtx_lock(&rws->bo_va_mutex);
    /* first look for a hole */
    LIST_FOR_EACH_ENTRY_SAFE(hole, n, &rws->va_holes, list) {
        offset = hole->offset;
        waste = offset % alignment;
        waste = waste ? alignment - waste : 0;
        offset += waste;
        if (offset >= (hole->offset + hole->size)) {
            continue;
        }
        if (!waste && hole->size == size) {
            offset = hole->offset;
            list_del(&hole->list);
            FREE(hole);
            mtx_unlock(&rws->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) > size) {
            if (waste) {
                n = CALLOC_STRUCT(radeon_bo_va_hole);
                n->size = waste;
                n->offset = hole->offset;
                list_add(&n->list, &hole->list);
            }
            hole->size -= (size + waste);
            hole->offset += size + waste;
            mtx_unlock(&rws->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) == size) {
            hole->size = waste;
            mtx_unlock(&rws->bo_va_mutex);
            return offset;
        }
    }

    offset = rws->va_offset;
    waste = offset % alignment;
    waste = waste ? alignment - waste : 0;
    if (waste) {
        n = CALLOC_STRUCT(radeon_bo_va_hole);
        n->size = waste;
        n->offset = offset;
        list_add(&n->list, &rws->va_holes);
    }
    offset += waste;
    rws->va_offset += size + waste;
    mtx_unlock(&rws->bo_va_mutex);
    return offset;
}

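/* Return a virtual address range to the allocator: either shrink the top
 * of the address space if the range ends there, or insert it into the
 * sorted hole list, coalescing with adjacent holes to limit
 * fragmentation. */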
static void radeon_bomgr_free_va(struct radeon_drm_winsys *rws,
                                 uint64_t va, uint64_t size)
{
    struct radeon_bo_va_hole *hole = NULL;

    size = align(size, rws->info.gart_page_size);

    mtx_lock(&rws->bo_va_mutex);
    if ((va + size) == rws->va_offset) {
        rws->va_offset = va;
        /* Delete uppermost hole if it reaches the new top */
        if (!LIST_IS_EMPTY(&rws->va_holes)) {
            hole = container_of(rws->va_holes.next, hole, list);
            if ((hole->offset + hole->size) == va) {
                rws->va_offset = hole->offset;
                list_del(&hole->list);
                FREE(hole);
            }
        }
    } else {
        struct radeon_bo_va_hole *next;

        hole = container_of(&rws->va_holes, hole, list);
        LIST_FOR_EACH_ENTRY(next, &rws->va_holes, list) {
            if (next->offset < va)
                break;
            hole = next;
        }

        if (&hole->list != &rws->va_holes) {
            /* Grow upper hole if it's adjacent */
            if (hole->offset == (va + size)) {
                hole->offset = va;
                hole->size += size;
                /* Merge lower hole if it's adjacent */
                if (next != hole && &next->list != &rws->va_holes &&
                    (next->offset + next->size) == va) {
                    next->size += hole->size;
                    list_del(&hole->list);
                    FREE(hole);
                }
                goto out;
            }
        }

        /* Grow lower hole if it's adjacent */
        if (next != hole && &next->list != &rws->va_holes &&
            (next->offset + next->size) == va) {
            next->size += size;
            goto out;
        }

        /* FIXME: on allocation failure we just leak the virtual address
         * range; maybe print a warning.
         */
        next = CALLOC_STRUCT(radeon_bo_va_hole);
        if (next) {
            next->size = size;
            next->offset = va;
            list_add(&next->list, &hole->list);
        }
    }
out:
    mtx_unlock(&rws->bo_va_mutex);
}

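/* Destroy a real BO: drop its hash table entries, unmap any persistent
 * CPU mapping, release its virtual address range, close the GEM handle
 * and update the memory accounting. Slab entries are freed through
 * radeon_bo_slab_destroy() instead. */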
void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_drm_winsys *rws = bo->rws;
    struct drm_gem_close args;

    assert(bo->handle && "must not be called for slab entries");

    memset(&args, 0, sizeof(args));

    mtx_lock(&rws->bo_handles_mutex);
    util_hash_table_remove(rws->bo_handles, (void*)(uintptr_t)bo->handle);
    if (bo->flink_name) {
        util_hash_table_remove(rws->bo_names,
                               (void*)(uintptr_t)bo->flink_name);
    }
    mtx_unlock(&rws->bo_handles_mutex);

    if (bo->u.real.ptr)
        os_munmap(bo->u.real.ptr, bo->base.size);

    if (rws->info.has_virtual_memory) {
        if (rws->va_unmap_working) {
            struct drm_radeon_gem_va va;

            va.handle = bo->handle;
            va.vm_id = 0;
            va.operation = RADEON_VA_UNMAP;
            va.flags = RADEON_VM_PAGE_READABLE |
                       RADEON_VM_PAGE_WRITEABLE |
                       RADEON_VM_PAGE_SNOOPED;
            va.offset = bo->va;

            if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va,
                                    sizeof(va)) != 0 &&
                va.operation == RADEON_VA_RESULT_ERROR) {
                fprintf(stderr, "radeon: Failed to deallocate virtual address for buffer:\n");
                fprintf(stderr, "radeon: size      : %"PRIu64" bytes\n", bo->base.size);
                fprintf(stderr, "radeon: va        : 0x%"PRIx64"\n", bo->va);
            }
        }

        radeon_bomgr_free_va(rws, bo->va, bo->base.size);
    }

    /* Close object. */
    args.handle = bo->handle;
    drmIoctl(rws->fd, DRM_IOCTL_GEM_CLOSE, &args);

    mtx_destroy(&bo->u.real.map_mutex);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        rws->allocated_vram -= align(bo->base.size, rws->info.gart_page_size);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        rws->allocated_gtt -= align(bo->base.size, rws->info.gart_page_size);

    if (bo->u.real.map_count >= 1) {
        if (bo->initial_domain & RADEON_DOMAIN_VRAM)
            bo->rws->mapped_vram -= bo->base.size;
        else
            bo->rws->mapped_gtt -= bo->base.size;
        bo->rws->num_mapped_buffers--;
    }

    FREE(bo);
}

static void radeon_bo_destroy_or_cache(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    assert(bo->handle && "must not be called for slab entries");

    if (bo->u.real.use_reusable_pool)
        pb_cache_add_buffer(&bo->u.real.cache_entry);
    else
        radeon_bo_destroy(_buf);
}

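/* Map a buffer for CPU access. A slab entry is translated to an offset
 * within its backing real BO. Real BOs keep a reference-counted,
 * persistent mapping, so repeated maps only bump map_count. */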
void *radeon_bo_do_map(struct radeon_bo *bo)
{
    struct drm_radeon_gem_mmap args = {0};
    void *ptr;
    unsigned offset;

    /* If the buffer is created from user memory, return the user pointer. */
    if (bo->user_ptr)
        return bo->user_ptr;

    if (bo->handle) {
        offset = 0;
    } else {
        offset = bo->va - bo->u.slab.real->va;
        bo = bo->u.slab.real;
    }

    /* Map the buffer. */
    mtx_lock(&bo->u.real.map_mutex);
    /* Return the pointer if it's already mapped. */
    if (bo->u.real.ptr) {
        bo->u.real.map_count++;
        mtx_unlock(&bo->u.real.map_mutex);
        return (uint8_t*)bo->u.real.ptr + offset;
    }
    args.handle = bo->handle;
    args.offset = 0;
    args.size = (uint64_t)bo->base.size;
    if (drmCommandWriteRead(bo->rws->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args))) {
        mtx_unlock(&bo->u.real.map_mutex);
        fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                bo, bo->handle);
        return NULL;
    }

    ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                  bo->rws->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        /* Clear the cache and try again. */
        pb_cache_release_all_buffers(&bo->rws->bo_cache);

        ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                      bo->rws->fd, args.addr_ptr);
        if (ptr == MAP_FAILED) {
            mtx_unlock(&bo->u.real.map_mutex);
            fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
            return NULL;
        }
    }
    bo->u.real.ptr = ptr;
    bo->u.real.map_count = 1;

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        bo->rws->mapped_vram += bo->base.size;
    else
        bo->rws->mapped_gtt += bo->base.size;
    bo->rws->num_mapped_buffers++;

    mtx_unlock(&bo->u.real.map_mutex);
    return (uint8_t*)bo->u.real.ptr + offset;
}

static void *radeon_bo_map(struct pb_buffer *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    struct radeon_drm_cs *cs = (struct radeon_drm_cs*)rcs;

    /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
    if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
        /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
        if (usage & PIPE_TRANSFER_DONTBLOCK) {
            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
                    return NULL;
                }

                if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
                                    RADEON_USAGE_WRITE)) {
                    return NULL;
                }
            } else {
                if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
                    return NULL;
                }

                if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
                                    RADEON_USAGE_READWRITE)) {
                    return NULL;
                }
            }
        } else {
            uint64_t time = os_time_get_nano();

            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0, NULL);
                }
                radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                               RADEON_USAGE_WRITE);
            } else {
                /* Mapping for write. */
                if (cs) {
                    if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                        cs->flush_cs(cs->flush_data, 0, NULL);
                    } else {
                        /* Try to avoid busy-waiting in radeon_bo_wait. */
                        if (p_atomic_read(&bo->num_active_ioctls))
                            radeon_drm_cs_sync_flush(rcs);
                    }
                }

                radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                               RADEON_USAGE_READWRITE);
            }

            bo->rws->buffer_wait_time += os_time_get_nano() - time;
        }
    }

    return radeon_bo_do_map(bo);
}

static void radeon_bo_unmap(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = (struct radeon_bo*)_buf;

    if (bo->user_ptr)
        return;

    if (!bo->handle)
        bo = bo->u.slab.real;

    mtx_lock(&bo->u.real.map_mutex);
    if (!bo->u.real.ptr) {
        mtx_unlock(&bo->u.real.map_mutex);
        return; /* it hasn't been mapped */
    }

    assert(bo->u.real.map_count);
    if (--bo->u.real.map_count) {
        mtx_unlock(&bo->u.real.map_mutex);
        return; /* it's been mapped multiple times */
    }

    os_munmap(bo->u.real.ptr, bo->base.size);
    bo->u.real.ptr = NULL;

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        bo->rws->mapped_vram -= bo->base.size;
    else
        bo->rws->mapped_gtt -= bo->base.size;
    bo->rws->num_mapped_buffers--;

    mtx_unlock(&bo->u.real.map_mutex);
}

static const struct pb_vtbl radeon_bo_vtbl = {
    radeon_bo_destroy_or_cache
    /* other functions are never called */
};

static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
                                          unsigned size, unsigned alignment,
                                          unsigned usage,
                                          unsigned initial_domains,
                                          unsigned flags,
                                          unsigned pb_cache_bucket)
{
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args;
    int r;

    memset(&args, 0, sizeof(args));

    assert(initial_domains);
    assert((initial_domains &
            ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);

    args.size = size;
    args.alignment = alignment;
    args.initial_domain = initial_domains;
    args.flags = 0;

    if (flags & RADEON_FLAG_GTT_WC)
        args.flags |= RADEON_GEM_GTT_WC;
    if (flags & RADEON_FLAG_NO_CPU_ACCESS)
        args.flags |= RADEON_GEM_NO_CPU_ACCESS;

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
        fprintf(stderr, "radeon: size      : %u bytes\n", size);
        fprintf(stderr, "radeon: alignment : %u bytes\n", alignment);
        fprintf(stderr, "radeon: domains   : %u\n", args.initial_domain);
        fprintf(stderr, "radeon: flags     : %u\n", args.flags);
        return NULL;
    }

    assert(args.handle != 0);

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = alignment;
    bo->base.usage = usage;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->rws = rws;
    bo->handle = args.handle;
    bo->va = 0;
    bo->initial_domain = initial_domains;
    bo->hash = __sync_fetch_and_add(&rws->next_bo_hash, 1);
    (void) mtx_init(&bo->u.real.map_mutex, mtx_plain);
    pb_cache_init_entry(&rws->bo_cache, &bo->u.real.cache_entry, &bo->base,
                        pb_cache_bucket);

    if (rws->info.has_virtual_memory) {
        struct drm_radeon_gem_va va;
        unsigned va_gap_size;

        va_gap_size = rws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
        bo->va = radeon_bomgr_find_va(rws, size + va_gap_size, alignment);

        va.handle = bo->handle;
        va.vm_id = 0;
        va.operation = RADEON_VA_MAP;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;
        r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to allocate virtual address for buffer:\n");
            fprintf(stderr, "radeon: size      : %d bytes\n", size);
            fprintf(stderr, "radeon: alignment : %d bytes\n", alignment);
            fprintf(stderr, "radeon: domains   : %d\n", args.initial_domain);
            fprintf(stderr, "radeon: va        : 0x%016llx\n", (unsigned long long)bo->va);
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        mtx_lock(&rws->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(rws->bo_vas, (void*)(uintptr_t)va.offset);

            mtx_unlock(&rws->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return radeon_bo(b);
        }

        util_hash_table_set(rws->bo_vas, (void*)(uintptr_t)bo->va, bo);
        mtx_unlock(&rws->bo_handles_mutex);
    }

    if (initial_domains & RADEON_DOMAIN_VRAM)
        rws->allocated_vram += align(size, rws->info.gart_page_size);
    else if (initial_domains & RADEON_DOMAIN_GTT)
        rws->allocated_gtt += align(size, rws->info.gart_page_size);

    return bo;
}

bool radeon_bo_can_reclaim(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo))
        return false;

    return radeon_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
}

bool radeon_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
{
    struct radeon_bo *bo = NULL; /* fix container_of */
    bo = container_of(entry, bo, u.slab.entry);

    return radeon_bo_can_reclaim(&bo->base);
}

static void radeon_bo_slab_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    assert(!bo->handle);

    pb_slab_free(&bo->rws->bo_slabs, &bo->u.slab.entry);
}

static const struct pb_vtbl radeon_winsys_bo_slab_vtbl = {
    radeon_bo_slab_destroy
    /* other functions are never called */
};

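/* pb_slabs callback: back a new slab with a 64 KB real BO and carve it
 * into equally sized entries that share the parent buffer's storage and
 * virtual address range. */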
struct pb_slab *radeon_bo_slab_alloc(void *priv, unsigned heap,
                                     unsigned entry_size,
                                     unsigned group_index)
{
    struct radeon_drm_winsys *ws = priv;
    struct radeon_slab *slab = CALLOC_STRUCT(radeon_slab);
    enum radeon_bo_domain domains = radeon_domain_from_heap(heap);
    enum radeon_bo_flag flags = radeon_flags_from_heap(heap);
    unsigned base_hash;

    if (!slab)
        return NULL;

    slab->buffer = radeon_bo(radeon_winsys_bo_create(&ws->base,
                                                     64 * 1024, 64 * 1024,
                                                     domains, flags));
    if (!slab->buffer)
        goto fail;

    assert(slab->buffer->handle);

    slab->base.num_entries = slab->buffer->base.size / entry_size;
    slab->base.num_free = slab->base.num_entries;
    slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
    if (!slab->entries)
        goto fail_buffer;

    LIST_INITHEAD(&slab->base.free);

    base_hash = __sync_fetch_and_add(&ws->next_bo_hash, slab->base.num_entries);

    for (unsigned i = 0; i < slab->base.num_entries; ++i) {
        struct radeon_bo *bo = &slab->entries[i];

        bo->base.alignment = entry_size;
        bo->base.usage = slab->buffer->base.usage;
        bo->base.size = entry_size;
        bo->base.vtbl = &radeon_winsys_bo_slab_vtbl;
        bo->rws = ws;
        bo->va = slab->buffer->va + i * entry_size;
        bo->initial_domain = domains;
        bo->hash = base_hash + i;
        bo->u.slab.entry.slab = &slab->base;
        bo->u.slab.entry.group_index = group_index;
        bo->u.slab.real = slab->buffer;

        LIST_ADDTAIL(&bo->u.slab.entry.head, &slab->base.free);
    }

    return &slab->base;

fail_buffer:
    radeon_bo_reference(&slab->buffer, NULL);
fail:
    FREE(slab);
    return NULL;
}

void radeon_bo_slab_free(void *priv, struct pb_slab *pslab)
{
    struct radeon_slab *slab = (struct radeon_slab *)pslab;

    for (unsigned i = 0; i < slab->base.num_entries; ++i) {
        struct radeon_bo *bo = &slab->entries[i];
        for (unsigned j = 0; j < bo->u.slab.num_fences; ++j)
            radeon_bo_reference(&bo->u.slab.fences[j], NULL);
        FREE(bo->u.slab.fences);
    }

    FREE(slab->entries);
    radeon_bo_reference(&slab->buffer, NULL);
    FREE(slab);
}

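/* Translate the 3-bit EG_TILE_SPLIT field from the kernel tiling flags
 * into a size in bytes, and back. Unknown values map to 1024 bytes. */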
static unsigned eg_tile_split(unsigned tile_split)
{
    switch (tile_split) {
    case 0:     tile_split = 64;    break;
    case 1:     tile_split = 128;   break;
    case 2:     tile_split = 256;   break;
    case 3:     tile_split = 512;   break;
    default:
    case 4:     tile_split = 1024;  break;
    case 5:     tile_split = 2048;  break;
    case 6:     tile_split = 4096;  break;
    }
    return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
    switch (eg_tile_split) {
    case 64:    return 0;
    case 128:   return 1;
    case 256:   return 2;
    case 512:   return 3;
    default:
    case 1024:  return 4;
    case 2048:  return 5;
    case 4096:  return 6;
    }
}

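/* Read the kernel tiling state back into winsys metadata. The GET_TILING
 * ioctl reuses struct drm_radeon_gem_set_tiling here since both requests
 * share the same argument layout. */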
static void radeon_bo_get_metadata(struct pb_buffer *_buf,
                                   struct radeon_bo_metadata *md)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args;

    assert(bo->handle && "must not be called for slab entries");

    memset(&args, 0, sizeof(args));

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_GET_TILING,
                        &args,
                        sizeof(args));

    md->u.legacy.microtile = RADEON_LAYOUT_LINEAR;
    md->u.legacy.macrotile = RADEON_LAYOUT_LINEAR;
    if (args.tiling_flags & RADEON_TILING_MICRO)
        md->u.legacy.microtile = RADEON_LAYOUT_TILED;
    else if (args.tiling_flags & RADEON_TILING_MICRO_SQUARE)
        md->u.legacy.microtile = RADEON_LAYOUT_SQUARETILED;

    if (args.tiling_flags & RADEON_TILING_MACRO)
        md->u.legacy.macrotile = RADEON_LAYOUT_TILED;

    md->u.legacy.bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
    md->u.legacy.bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
    md->u.legacy.tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
    md->u.legacy.mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
    md->u.legacy.tile_split = eg_tile_split(md->u.legacy.tile_split);
    md->u.legacy.scanout = bo->rws->gen >= DRV_SI && !(args.tiling_flags & RADEON_TILING_R600_NO_SCANOUT);
}

static void radeon_bo_set_metadata(struct pb_buffer *_buf,
                                   struct radeon_bo_metadata *md)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args;

    assert(bo->handle && "must not be called for slab entries");

    memset(&args, 0, sizeof(args));

    os_wait_until_zero(&bo->num_active_ioctls, PIPE_TIMEOUT_INFINITE);

    if (md->u.legacy.microtile == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_TILING_MICRO;
    else if (md->u.legacy.microtile == RADEON_LAYOUT_SQUARETILED)
        args.tiling_flags |= RADEON_TILING_MICRO_SQUARE;

    if (md->u.legacy.macrotile == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_TILING_MACRO;

    args.tiling_flags |= (md->u.legacy.bankw & RADEON_TILING_EG_BANKW_MASK) <<
        RADEON_TILING_EG_BANKW_SHIFT;
    args.tiling_flags |= (md->u.legacy.bankh & RADEON_TILING_EG_BANKH_MASK) <<
        RADEON_TILING_EG_BANKH_SHIFT;
    if (md->u.legacy.tile_split) {
        args.tiling_flags |= (eg_tile_split_rev(md->u.legacy.tile_split) &
                              RADEON_TILING_EG_TILE_SPLIT_MASK) <<
            RADEON_TILING_EG_TILE_SPLIT_SHIFT;
    }
    args.tiling_flags |= (md->u.legacy.mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
        RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;

    if (bo->rws->gen >= DRV_SI && !md->u.legacy.scanout)
        args.tiling_flags |= RADEON_TILING_R600_NO_SCANOUT;

    args.handle = bo->handle;
    args.pitch = md->u.legacy.stride;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_SET_TILING,
                        &args,
                        sizeof(args));
}

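/* Central allocation path for winsys buffers. Small buffers are
 * sub-allocated from 64 KB slabs when virtual memory is available, which
 * with this change also covers VRAM buffers without CPU access; larger
 * buffers go through the pb_cache of reusable real BOs. */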
static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        uint64_t size,
                        unsigned alignment,
                        enum radeon_bo_domain domain,
                        enum radeon_bo_flag flags)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    unsigned usage = 0, pb_cache_bucket;

    assert(!(flags & RADEON_FLAG_SPARSE)); /* not supported */

    /* Only 32-bit sizes are supported. */
    if (size > UINT_MAX)
        return NULL;

    /* VRAM implies WC. This is not optional. */
    if (domain & RADEON_DOMAIN_VRAM)
        flags |= RADEON_FLAG_GTT_WC;
    /* NO_CPU_ACCESS is valid with VRAM only. */
    if (domain != RADEON_DOMAIN_VRAM)
        flags &= ~RADEON_FLAG_NO_CPU_ACCESS;

    /* Sub-allocate small buffers from slabs. */
    if (!(flags & RADEON_FLAG_NO_SUBALLOC) &&
        size <= (1 << RADEON_SLAB_MAX_SIZE_LOG2) &&
        ws->info.has_virtual_memory &&
        alignment <= MAX2(1 << RADEON_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
        struct pb_slab_entry *entry;
        int heap = radeon_get_heap_index(domain, flags);

        if (heap < 0 || heap >= RADEON_MAX_SLAB_HEAPS)
            goto no_slab;

        entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
        if (!entry) {
            /* Clear the cache and try again. */
            pb_cache_release_all_buffers(&ws->bo_cache);

            entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
        }
        if (!entry)
            return NULL;

        bo = NULL;
        bo = container_of(entry, bo, u.slab.entry);

        pipe_reference_init(&bo->base.reference, 1);

        return &bo->base;
    }
no_slab:

    /* This flag is irrelevant for the cache. */
    flags &= ~RADEON_FLAG_NO_SUBALLOC;

    /* Align size to page size. This is the minimum alignment for normal
     * BOs. Aligning this here helps the cached bufmgr. Especially small
     * BOs, like constant/uniform buffers, benefit from better and more
     * reuse.
     */
    size = align(size, ws->info.gart_page_size);
    alignment = align(alignment, ws->info.gart_page_size);

    /* Only set one usage bit each for domains and flags, or the cache manager
     * might consider different sets of domains / flags compatible.
     */
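    /* For example, assuming RADEON_FLAG_GTT_WC is bit 0: domain = VRAM
     * (0x2) maps to usage = 0x2 >> 1 = 0x1, and flags = RADEON_FLAG_GTT_WC
     * (= 1) adds 1 << (1 + 3), giving usage = 0x11. */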
    if (domain == RADEON_DOMAIN_VRAM_GTT)
        usage = 1 << 2;
    else
        usage = (unsigned)domain >> 1;
    assert(flags < sizeof(usage) * 8 - 3);
    usage |= 1 << (flags + 3);

    /* Determine the pb_cache bucket for minimizing pb_cache misses. */
    pb_cache_bucket = 0;
    if (domain & RADEON_DOMAIN_VRAM) /* VRAM or VRAM+GTT */
        pb_cache_bucket += 1;
    if (flags == RADEON_FLAG_GTT_WC) /* WC */
        pb_cache_bucket += 2;
    assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));

    bo = radeon_bo(pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment,
                                           usage, pb_cache_bucket));
    if (bo)
        return &bo->base;

    bo = radeon_create_bo(ws, size, alignment, usage, domain, flags,
                          pb_cache_bucket);
    if (!bo) {
        /* Clear the cache and try again. */
        if (ws->info.has_virtual_memory)
            pb_slabs_reclaim(&ws->bo_slabs);
        pb_cache_release_all_buffers(&ws->bo_cache);
        bo = radeon_create_bo(ws, size, alignment, usage, domain, flags,
                              pb_cache_bucket);
        if (!bo)
            return NULL;
    }

    bo->u.real.use_reusable_pool = true;

    mtx_lock(&ws->bo_handles_mutex);
    util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
    mtx_unlock(&ws->bo_handles_mutex);

    return &bo->base;
}

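/* Wrap anonymous user memory in a GEM userptr BO. The flags ask the
 * kernel to accept anonymous memory only, to validate the range and to
 * register it with the MM notifier; the resulting BO always lives in
 * GTT. */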
static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
                                                   void *pointer, uint64_t size)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct drm_radeon_gem_userptr args;
    struct radeon_bo *bo;
    int r;

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    memset(&args, 0, sizeof(args));
    args.addr = (uintptr_t)pointer;
    args.size = align(size, ws->info.gart_page_size);
    args.flags = RADEON_GEM_USERPTR_ANONONLY |
                 RADEON_GEM_USERPTR_VALIDATE |
                 RADEON_GEM_USERPTR_REGISTER;
    if (drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_USERPTR,
                            &args, sizeof(args))) {
        FREE(bo);
        return NULL;
    }

    assert(args.handle != 0);

    mtx_lock(&ws->bo_handles_mutex);

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->handle = args.handle;
    bo->base.alignment = 0;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->rws = ws;
    bo->user_ptr = pointer;
    bo->va = 0;
    bo->initial_domain = RADEON_DOMAIN_GTT;
    bo->hash = __sync_fetch_and_add(&ws->next_bo_hash, 1);
    (void) mtx_init(&bo->u.real.map_mutex, mtx_plain);

    util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);

    mtx_unlock(&ws->bo_handles_mutex);

    if (ws->info.has_virtual_memory) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;
        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        mtx_lock(&ws->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);

            mtx_unlock(&ws->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        util_hash_table_set(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
        mtx_unlock(&ws->bo_handles_mutex);
    }

    ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);

    return (struct pb_buffer*)bo;
}

static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                      struct winsys_handle *whandle,
                                                      unsigned *stride,
                                                      unsigned *offset)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    int r;
    unsigned handle;
    uint64_t size = 0;

    if (!offset && whandle->offset != 0) {
        fprintf(stderr, "attempt to import unsupported winsys offset %u\n",
                whandle->offset);
        return NULL;
    }

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    mtx_lock(&ws->bo_handles_mutex);

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        /* First check if there already is an existing bo for the handle. */
        bo = util_hash_table_get(ws->bo_names, (void*)(uintptr_t)whandle->handle);
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        /* We must first get the GEM handle, as fds are unreliable keys */
        r = drmPrimeFDToHandle(ws->fd, whandle->handle, &handle);
        if (r)
            goto fail;
        bo = util_hash_table_get(ws->bo_handles, (void*)(uintptr_t)handle);
    } else {
        /* Unknown handle type */
        goto fail;
    }

    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        struct drm_gem_open open_arg;
        memset(&open_arg, 0, sizeof(open_arg));
        /* Open the BO. */
        open_arg.name = whandle->handle;
        if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
            FREE(bo);
            goto fail;
        }
        handle = open_arg.handle;
        size = open_arg.size;
        bo->flink_name = whandle->handle;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        size = lseek(whandle->handle, 0, SEEK_END);
        /*
         * Could check errno to determine whether the kernel is new enough, but
         * it doesn't really matter why this failed, just that it failed.
         */
        if (size == (off_t)-1) {
            FREE(bo);
            goto fail;
        }
        lseek(whandle->handle, 0, SEEK_SET);
    }

    assert(handle != 0);

    bo->handle = handle;

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = 0;
    bo->base.size = (unsigned) size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->rws = ws;
    bo->va = 0;
    bo->hash = __sync_fetch_and_add(&ws->next_bo_hash, 1);
    (void) mtx_init(&bo->u.real.map_mutex, mtx_plain);

    if (bo->flink_name)
        util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);

    util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);

done:
    mtx_unlock(&ws->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;
    if (offset)
        *offset = whandle->offset;

    if (ws->info.has_virtual_memory && !bo->va) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;
        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        mtx_lock(&ws->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);

            mtx_unlock(&ws->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        util_hash_table_set(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
        mtx_unlock(&ws->bo_handles_mutex);
    }

    bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        ws->allocated_vram += align(bo->base.size, ws->info.gart_page_size);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);

    return (struct pb_buffer*)bo;

fail:
    mtx_unlock(&ws->bo_handles_mutex);
    return NULL;
}

static bool radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
                                        unsigned stride, unsigned offset,
                                        unsigned slice_size,
                                        struct winsys_handle *whandle)
{
    struct drm_gem_flink flink;
    struct radeon_bo *bo = radeon_bo(buffer);
    struct radeon_drm_winsys *ws = bo->rws;

    /* Don't allow exports of slab entries. */
    if (!bo->handle)
        return false;

    memset(&flink, 0, sizeof(flink));

    bo->u.real.use_reusable_pool = false;

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!bo->flink_name) {
            flink.handle = bo->handle;

            if (ioctl(ws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return false;
            }

            bo->flink_name = flink.name;

            mtx_lock(&ws->bo_handles_mutex);
            util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
            mtx_unlock(&ws->bo_handles_mutex);
        }
        whandle->handle = bo->flink_name;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = bo->handle;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        if (drmPrimeHandleToFD(ws->fd, bo->handle, DRM_CLOEXEC, (int*)&whandle->handle))
            return false;
    }

    whandle->stride = stride;
    whandle->offset = offset;
    whandle->offset += slice_size * whandle->layer;

    return true;
}

static bool radeon_winsys_bo_is_user_ptr(struct pb_buffer *buf)
{
    return ((struct radeon_bo*)buf)->user_ptr != NULL;
}

static uint64_t radeon_winsys_bo_va(struct pb_buffer *buf)
{
    return ((struct radeon_bo*)buf)->va;
}

static unsigned radeon_winsys_bo_get_reloc_offset(struct pb_buffer *buf)
{
    struct radeon_bo *bo = radeon_bo(buf);

    if (bo->handle)
        return 0;

    return bo->va - bo->u.slab.real->va;
}

void radeon_drm_bo_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.buffer_set_metadata = radeon_bo_set_metadata;
    ws->base.buffer_get_metadata = radeon_bo_get_metadata;
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = radeon_bo_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_from_ptr = radeon_winsys_bo_from_ptr;
    ws->base.buffer_is_user_ptr = radeon_winsys_bo_is_user_ptr;
    ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
    ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
    ws->base.buffer_get_reloc_offset = radeon_winsys_bo_get_reloc_offset;
    ws->base.buffer_get_initial_domain = radeon_bo_get_initial_domain;
}