radeon_drm_bo: explicitly check return value of drmCommandWriteRead
[mesa.git] src/gallium/winsys/radeon/drm/radeon_drm_bo.c
1 /*
2 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
14 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
15 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
16 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
17 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
24 * of the Software.
25 */
26
27 #include "radeon_drm_cs.h"
28
29 #include "util/u_hash_table.h"
30 #include "util/u_memory.h"
31 #include "util/simple_list.h"
32 #include "os/os_thread.h"
33 #include "os/os_mman.h"
34 #include "os/os_time.h"
35
36 #include "state_tracker/drm_driver.h"
37
38 #include <sys/ioctl.h>
39 #include <xf86drm.h>
40 #include <errno.h>
41 #include <fcntl.h>
42 #include <stdio.h>
43 #include <inttypes.h>
44
45 static struct pb_buffer *
46 radeon_winsys_bo_create(struct radeon_winsys *rws,
47 uint64_t size,
48 unsigned alignment,
49 enum radeon_bo_domain domain,
50 enum radeon_bo_flag flags);
51
52 static inline struct radeon_bo *radeon_bo(struct pb_buffer *bo)
53 {
54 return (struct radeon_bo *)bo;
55 }
56
57 struct radeon_bo_va_hole {
58 struct list_head list;
59 uint64_t offset;
60 uint64_t size;
61 };
62
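/* A real (non-slab) BO is considered busy whenever the GEM_BUSY ioctl
 * returns a non-zero value. */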
63 static bool radeon_real_bo_is_busy(struct radeon_bo *bo)
64 {
65 struct drm_radeon_gem_busy args = {0};
66
67 args.handle = bo->handle;
68 return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
69 &args, sizeof(args)) != 0;
70 }
71
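/* Slab entries have no GEM handle of their own: they are busy as long as
 * any of the fences (real BOs from past submissions) attached to them is
 * busy. Fences found idle are dropped from the list as a side effect. */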
72 static bool radeon_bo_is_busy(struct radeon_bo *bo)
73 {
74 unsigned num_idle;
75 bool busy = false;
76
77 if (bo->handle)
78 return radeon_real_bo_is_busy(bo);
79
80 mtx_lock(&bo->rws->bo_fence_lock);
81 for (num_idle = 0; num_idle < bo->u.slab.num_fences; ++num_idle) {
82 if (radeon_real_bo_is_busy(bo->u.slab.fences[num_idle])) {
83 busy = true;
84 break;
85 }
86 radeon_bo_reference(&bo->u.slab.fences[num_idle], NULL);
87 }
88 memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[num_idle],
89 (bo->u.slab.num_fences - num_idle) * sizeof(bo->u.slab.fences[0]));
90 bo->u.slab.num_fences -= num_idle;
91 mtx_unlock(&bo->rws->bo_fence_lock);
92
93 return busy;
94 }
95
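/* Block until the kernel reports the BO idle; the WAIT_IDLE ioctl is
 * simply retried for as long as it returns -EBUSY. */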
96 static void radeon_real_bo_wait_idle(struct radeon_bo *bo)
97 {
98 struct drm_radeon_gem_wait_idle args = {0};
99
100 args.handle = bo->handle;
101 while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
102 &args, sizeof(args)) == -EBUSY);
103 }
104
105 static void radeon_bo_wait_idle(struct radeon_bo *bo)
106 {
107 if (bo->handle) {
108 radeon_real_bo_wait_idle(bo);
109 } else {
110 mtx_lock(&bo->rws->bo_fence_lock);
111 while (bo->u.slab.num_fences) {
112 struct radeon_bo *fence = NULL;
113 radeon_bo_reference(&fence, bo->u.slab.fences[0]);
114 mtx_unlock(&bo->rws->bo_fence_lock);
115
116 /* Wait without holding the fence lock. */
117 radeon_real_bo_wait_idle(fence);
118
119 mtx_lock(&bo->rws->bo_fence_lock);
120 if (bo->u.slab.num_fences && fence == bo->u.slab.fences[0]) {
121 radeon_bo_reference(&bo->u.slab.fences[0], NULL);
122 memmove(&bo->u.slab.fences[0], &bo->u.slab.fences[1],
123 (bo->u.slab.num_fences - 1) * sizeof(bo->u.slab.fences[0]));
124 bo->u.slab.num_fences--;
125 }
126 radeon_bo_reference(&fence, NULL);
127 }
128 mtx_unlock(&bo->rws->bo_fence_lock);
129 }
130 }
131
132 static bool radeon_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
133 enum radeon_bo_usage usage)
134 {
135 struct radeon_bo *bo = radeon_bo(_buf);
136 int64_t abs_timeout;
137
138 /* No timeout. Just query. */
139 if (timeout == 0)
140 return !bo->num_active_ioctls && !radeon_bo_is_busy(bo);
141
142 abs_timeout = os_time_get_absolute_timeout(timeout);
143
144 /* Wait if any ioctl is being submitted with this buffer. */
145 if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
146 return false;
147
148 /* Infinite timeout. */
149 if (abs_timeout == PIPE_TIMEOUT_INFINITE) {
150 radeon_bo_wait_idle(bo);
151 return true;
152 }
153
154 /* Other timeouts need to be emulated with a loop. */
155 while (radeon_bo_is_busy(bo)) {
156 if (os_time_get_nano() >= abs_timeout)
157 return false;
158 os_time_sleep(10);
159 }
160
161 return true;
162 }
163
164 static enum radeon_bo_domain get_valid_domain(enum radeon_bo_domain domain)
165 {
166 /* Mask out any domain bits the driver doesn't understand. */
167 domain &= RADEON_DOMAIN_VRAM_GTT;
168
169 /* If no domain is set, we must set something... */
170 if (!domain)
171 domain = RADEON_DOMAIN_VRAM_GTT;
172
173 return domain;
174 }
175
176 static enum radeon_bo_domain radeon_bo_get_initial_domain(
177 struct pb_buffer *buf)
178 {
179 struct radeon_bo *bo = (struct radeon_bo*)buf;
180 struct drm_radeon_gem_op args;
181
182 if (bo->rws->info.drm_minor < 38)
183 return RADEON_DOMAIN_VRAM_GTT;
184
185 memset(&args, 0, sizeof(args));
186 args.handle = bo->handle;
187 args.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN;
188
189 if (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_OP,
190 &args, sizeof(args))) {
191 fprintf(stderr, "radeon: failed to get initial domain: %p 0x%08X\n",
192 bo, bo->handle);
193 /* Default domain as returned by get_valid_domain. */
194 return RADEON_DOMAIN_VRAM_GTT;
195 }
196
197 /* GEM domains and winsys domains are defined the same. */
198 return get_valid_domain(args.value);
199 }
200
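/* First-fit allocator for GPU virtual addresses: reuse a hole from the
 * va_holes list if one is big enough (splitting it when alignment leaves
 * waste at its start), otherwise carve new space off the top at va_offset. */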
201 static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
202 uint64_t size, uint64_t alignment)
203 {
204 struct radeon_bo_va_hole *hole, *n;
205 uint64_t offset = 0, waste = 0;
206
207 /* All VM address space holes will implicitly start aligned to the
208 * size alignment, so we don't need to sanitize the alignment here
209 */
210 size = align(size, rws->info.gart_page_size);
211
212 mtx_lock(&rws->bo_va_mutex);
213 /* first look for a hole */
214 LIST_FOR_EACH_ENTRY_SAFE(hole, n, &rws->va_holes, list) {
215 offset = hole->offset;
216 waste = offset % alignment;
217 waste = waste ? alignment - waste : 0;
218 offset += waste;
219 if (offset >= (hole->offset + hole->size)) {
220 continue;
221 }
222 if (!waste && hole->size == size) {
223 offset = hole->offset;
224 list_del(&hole->list);
225 FREE(hole);
226 mtx_unlock(&rws->bo_va_mutex);
227 return offset;
228 }
229 if ((hole->size - waste) > size) {
230 if (waste) {
231 n = CALLOC_STRUCT(radeon_bo_va_hole);
232 n->size = waste;
233 n->offset = hole->offset;
234 list_add(&n->list, &hole->list);
235 }
236 hole->size -= (size + waste);
237 hole->offset += size + waste;
238 mtx_unlock(&rws->bo_va_mutex);
239 return offset;
240 }
241 if ((hole->size - waste) == size) {
242 hole->size = waste;
243 mtx_unlock(&rws->bo_va_mutex);
244 return offset;
245 }
246 }
247
248 offset = rws->va_offset;
249 waste = offset % alignment;
250 waste = waste ? alignment - waste : 0;
251 if (waste) {
252 n = CALLOC_STRUCT(radeon_bo_va_hole);
253 n->size = waste;
254 n->offset = offset;
255 list_add(&n->list, &rws->va_holes);
256 }
257 offset += waste;
258 rws->va_offset += size + waste;
259 mtx_unlock(&rws->bo_va_mutex);
260 return offset;
261 }
262
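/* Return a VA range to the allocator: either lower the top of the
 * allocated range or insert a hole into the hole list, merging with
 * adjacent holes where possible. */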
263 static void radeon_bomgr_free_va(struct radeon_drm_winsys *rws,
264 uint64_t va, uint64_t size)
265 {
266 struct radeon_bo_va_hole *hole = NULL;
267
268 size = align(size, rws->info.gart_page_size);
269
270 mtx_lock(&rws->bo_va_mutex);
271 if ((va + size) == rws->va_offset) {
272 rws->va_offset = va;
273 /* Delete uppermost hole if it reaches the new top */
274 if (!LIST_IS_EMPTY(&rws->va_holes)) {
275 hole = container_of(rws->va_holes.next, hole, list);
276 if ((hole->offset + hole->size) == va) {
277 rws->va_offset = hole->offset;
278 list_del(&hole->list);
279 FREE(hole);
280 }
281 }
282 } else {
283 struct radeon_bo_va_hole *next;
284
285 hole = container_of(&rws->va_holes, hole, list);
286 LIST_FOR_EACH_ENTRY(next, &rws->va_holes, list) {
287 if (next->offset < va)
288 break;
289 hole = next;
290 }
291
292 if (&hole->list != &rws->va_holes) {
293 /* Grow upper hole if it's adjacent */
294 if (hole->offset == (va + size)) {
295 hole->offset = va;
296 hole->size += size;
297 /* Merge lower hole if it's adjacent */
298 if (next != hole && &next->list != &rws->va_holes &&
299 (next->offset + next->size) == va) {
300 next->size += hole->size;
301 list_del(&hole->list);
302 FREE(hole);
303 }
304 goto out;
305 }
306 }
307
308 /* Grow lower hole if it's adjacent */
309 if (next != hole && &next->list != &rws->va_holes &&
310 (next->offset + next->size) == va) {
311 next->size += size;
312 goto out;
313 }
314
315 /* FIXME: on allocation failure we just lose virtual address space;
316 * maybe print a warning here.
317 */
318 next = CALLOC_STRUCT(radeon_bo_va_hole);
319 if (next) {
320 next->size = size;
321 next->offset = va;
322 list_add(&next->list, &hole->list);
323 }
324 }
325 out:
326 mtx_unlock(&rws->bo_va_mutex);
327 }
328
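/* Final destruction of a real BO: remove it from the handle/name tables,
 * drop any CPU mapping, unmap and free its virtual address, close the GEM
 * handle and update the memory accounting. */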
329 void radeon_bo_destroy(struct pb_buffer *_buf)
330 {
331 struct radeon_bo *bo = radeon_bo(_buf);
332 struct radeon_drm_winsys *rws = bo->rws;
333 struct drm_gem_close args;
334
335 assert(bo->handle && "must not be called for slab entries");
336
337 memset(&args, 0, sizeof(args));
338
339 mtx_lock(&rws->bo_handles_mutex);
340 util_hash_table_remove(rws->bo_handles, (void*)(uintptr_t)bo->handle);
341 if (bo->flink_name) {
342 util_hash_table_remove(rws->bo_names,
343 (void*)(uintptr_t)bo->flink_name);
344 }
345 mtx_unlock(&rws->bo_handles_mutex);
346
347 if (bo->u.real.ptr)
348 os_munmap(bo->u.real.ptr, bo->base.size);
349
350 if (rws->info.has_virtual_memory) {
351 if (rws->va_unmap_working) {
352 struct drm_radeon_gem_va va;
353
354 va.handle = bo->handle;
355 va.vm_id = 0;
356 va.operation = RADEON_VA_UNMAP;
357 va.flags = RADEON_VM_PAGE_READABLE |
358 RADEON_VM_PAGE_WRITEABLE |
359 RADEON_VM_PAGE_SNOOPED;
360 va.offset = bo->va;
361
362 if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va,
363 sizeof(va)) != 0 &&
364 va.operation == RADEON_VA_RESULT_ERROR) {
365 fprintf(stderr, "radeon: Failed to deallocate virtual address for buffer:\n");
366 fprintf(stderr, "radeon: size : %"PRIu64" bytes\n", bo->base.size);
367 fprintf(stderr, "radeon: va : 0x%"PRIx64"\n", bo->va);
368 }
369 }
370
371 radeon_bomgr_free_va(rws, bo->va, bo->base.size);
372 }
373
374 /* Close object. */
375 args.handle = bo->handle;
376 drmIoctl(rws->fd, DRM_IOCTL_GEM_CLOSE, &args);
377
378 mtx_destroy(&bo->u.real.map_mutex);
379
380 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
381 rws->allocated_vram -= align(bo->base.size, rws->info.gart_page_size);
382 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
383 rws->allocated_gtt -= align(bo->base.size, rws->info.gart_page_size);
384
385 if (bo->u.real.map_count >= 1) {
386 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
387 bo->rws->mapped_vram -= bo->base.size;
388 else
389 bo->rws->mapped_gtt -= bo->base.size;
390 bo->rws->num_mapped_buffers--;
391 }
392
393 FREE(bo);
394 }
395
396 static void radeon_bo_destroy_or_cache(struct pb_buffer *_buf)
397 {
398 struct radeon_bo *bo = radeon_bo(_buf);
399
400 assert(bo->handle && "must not be called for slab entries");
401
402 if (bo->u.real.use_reusable_pool)
403 pb_cache_add_buffer(&bo->u.real.cache_entry);
404 else
405 radeon_bo_destroy(_buf);
406 }
407
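/* Create or reuse the CPU mapping of a buffer. Slab entries are mapped
 * through their backing real BO at the right offset; the mapping itself
 * is reference-counted via map_count. */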
408 void *radeon_bo_do_map(struct radeon_bo *bo)
409 {
410 struct drm_radeon_gem_mmap args = {0};
411 void *ptr;
412 unsigned offset;
413
414 /* If the buffer is created from user memory, return the user pointer. */
415 if (bo->user_ptr)
416 return bo->user_ptr;
417
418 if (bo->handle) {
419 offset = 0;
420 } else {
421 offset = bo->va - bo->u.slab.real->va;
422 bo = bo->u.slab.real;
423 }
424
425 /* Map the buffer. */
426 mtx_lock(&bo->u.real.map_mutex);
427 /* Return the pointer if it's already mapped. */
428 if (bo->u.real.ptr) {
429 bo->u.real.map_count++;
430 mtx_unlock(&bo->u.real.map_mutex);
431 return (uint8_t*)bo->u.real.ptr + offset;
432 }
433 args.handle = bo->handle;
434 args.offset = 0;
435 args.size = (uint64_t)bo->base.size;
436 if (drmCommandWriteRead(bo->rws->fd,
437 DRM_RADEON_GEM_MMAP,
438 &args,
439 sizeof(args))) {
440 mtx_unlock(&bo->u.real.map_mutex);
441 fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
442 bo, bo->handle);
443 return NULL;
444 }
445
446 ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
447 bo->rws->fd, args.addr_ptr);
448 if (ptr == MAP_FAILED) {
449 /* Clear the cache and try again. */
450 pb_cache_release_all_buffers(&bo->rws->bo_cache);
451
452 ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
453 bo->rws->fd, args.addr_ptr);
454 if (ptr == MAP_FAILED) {
455 mtx_unlock(&bo->u.real.map_mutex);
456 fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
457 return NULL;
458 }
459 }
460 bo->u.real.ptr = ptr;
461 bo->u.real.map_count = 1;
462
463 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
464 bo->rws->mapped_vram += bo->base.size;
465 else
466 bo->rws->mapped_gtt += bo->base.size;
467 bo->rws->num_mapped_buffers++;
468
469 mtx_unlock(&bo->u.real.map_mutex);
470 return (uint8_t*)bo->u.real.ptr + offset;
471 }
472
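/* pb_buffer map callback: unless PIPE_TRANSFER_UNSYNCHRONIZED is set,
 * flush the command stream that references the buffer and/or wait for the
 * GPU as dictated by DONTBLOCK and WRITE, then do the actual mapping. */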
473 static void *radeon_bo_map(struct pb_buffer *buf,
474 struct radeon_winsys_cs *rcs,
475 enum pipe_transfer_usage usage)
476 {
477 struct radeon_bo *bo = (struct radeon_bo*)buf;
478 struct radeon_drm_cs *cs = (struct radeon_drm_cs*)rcs;
479
480 /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
481 if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
482 /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
483 if (usage & PIPE_TRANSFER_DONTBLOCK) {
484 if (!(usage & PIPE_TRANSFER_WRITE)) {
485 /* Mapping for read.
486 *
487 * Since we are mapping for read, we don't need to wait
488 * if the GPU is using the buffer for read too
489 * (neither one is changing it).
490 *
491 * Only check whether the buffer is being used for write. */
492 if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
493 cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
494 return NULL;
495 }
496
497 if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
498 RADEON_USAGE_WRITE)) {
499 return NULL;
500 }
501 } else {
502 if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
503 cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
504 return NULL;
505 }
506
507 if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
508 RADEON_USAGE_READWRITE)) {
509 return NULL;
510 }
511 }
512 } else {
513 uint64_t time = os_time_get_nano();
514
515 if (!(usage & PIPE_TRANSFER_WRITE)) {
516 /* Mapping for read.
517 *
518 * Since we are mapping for read, we don't need to wait
519 * if the GPU is using the buffer for read too
520 * (neither one is changing it).
521 *
522 * Only check whether the buffer is being used for write. */
523 if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
524 cs->flush_cs(cs->flush_data, 0, NULL);
525 }
526 radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
527 RADEON_USAGE_WRITE);
528 } else {
529 /* Mapping for write. */
530 if (cs) {
531 if (radeon_bo_is_referenced_by_cs(cs, bo)) {
532 cs->flush_cs(cs->flush_data, 0, NULL);
533 } else {
534 /* Try to avoid busy-waiting in radeon_bo_wait. */
535 if (p_atomic_read(&bo->num_active_ioctls))
536 radeon_drm_cs_sync_flush(rcs);
537 }
538 }
539
540 radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
541 RADEON_USAGE_READWRITE);
542 }
543
544 bo->rws->buffer_wait_time += os_time_get_nano() - time;
545 }
546 }
547
548 return radeon_bo_do_map(bo);
549 }
550
551 static void radeon_bo_unmap(struct pb_buffer *_buf)
552 {
553 struct radeon_bo *bo = (struct radeon_bo*)_buf;
554
555 if (bo->user_ptr)
556 return;
557
558 if (!bo->handle)
559 bo = bo->u.slab.real;
560
561 mtx_lock(&bo->u.real.map_mutex);
562 if (!bo->u.real.ptr) {
563 mtx_unlock(&bo->u.real.map_mutex);
564 return; /* it hasn't been mapped */
565 }
566
567 assert(bo->u.real.map_count);
568 if (--bo->u.real.map_count) {
569 mtx_unlock(&bo->u.real.map_mutex);
570 return; /* it's been mapped multiple times */
571 }
572
573 os_munmap(bo->u.real.ptr, bo->base.size);
574 bo->u.real.ptr = NULL;
575
576 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
577 bo->rws->mapped_vram -= bo->base.size;
578 else
579 bo->rws->mapped_gtt -= bo->base.size;
580 bo->rws->num_mapped_buffers--;
581
582 mtx_unlock(&bo->u.real.map_mutex);
583 }
584
585 static const struct pb_vtbl radeon_bo_vtbl = {
586 radeon_bo_destroy_or_cache
587 /* other functions are never called */
588 };
589
590 static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
591 unsigned size, unsigned alignment,
592 unsigned usage,
593 unsigned initial_domains,
594 unsigned flags,
595 unsigned pb_cache_bucket)
596 {
597 struct radeon_bo *bo;
598 struct drm_radeon_gem_create args;
599 int r;
600
601 memset(&args, 0, sizeof(args));
602
603 assert(initial_domains);
604 assert((initial_domains &
605 ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);
606
607 args.size = size;
608 args.alignment = alignment;
609 args.initial_domain = initial_domains;
610 args.flags = 0;
611
612 if (flags & RADEON_FLAG_GTT_WC)
613 args.flags |= RADEON_GEM_GTT_WC;
614 if (flags & RADEON_FLAG_CPU_ACCESS)
615 args.flags |= RADEON_GEM_CPU_ACCESS;
616 if (flags & RADEON_FLAG_NO_CPU_ACCESS)
617 args.flags |= RADEON_GEM_NO_CPU_ACCESS;
618
619 if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
620 &args, sizeof(args))) {
621 fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
622 fprintf(stderr, "radeon: size : %u bytes\n", size);
623 fprintf(stderr, "radeon: alignment : %u bytes\n", alignment);
624 fprintf(stderr, "radeon: domains : %u\n", args.initial_domain);
625 fprintf(stderr, "radeon: flags : %u\n", args.flags);
626 return NULL;
627 }
628
629 assert(args.handle != 0);
630
631 bo = CALLOC_STRUCT(radeon_bo);
632 if (!bo)
633 return NULL;
634
635 pipe_reference_init(&bo->base.reference, 1);
636 bo->base.alignment = alignment;
637 bo->base.usage = usage;
638 bo->base.size = size;
639 bo->base.vtbl = &radeon_bo_vtbl;
640 bo->rws = rws;
641 bo->handle = args.handle;
642 bo->va = 0;
643 bo->initial_domain = initial_domains;
644 bo->hash = __sync_fetch_and_add(&rws->next_bo_hash, 1);
645 (void) mtx_init(&bo->u.real.map_mutex, mtx_plain);
646 pb_cache_init_entry(&rws->bo_cache, &bo->u.real.cache_entry, &bo->base,
647 pb_cache_bucket);
648
649 if (rws->info.has_virtual_memory) {
650 struct drm_radeon_gem_va va;
651 unsigned va_gap_size;
652
653 va_gap_size = rws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
654 bo->va = radeon_bomgr_find_va(rws, size + va_gap_size, alignment);
655
656 va.handle = bo->handle;
657 va.vm_id = 0;
658 va.operation = RADEON_VA_MAP;
659 va.flags = RADEON_VM_PAGE_READABLE |
660 RADEON_VM_PAGE_WRITEABLE |
661 RADEON_VM_PAGE_SNOOPED;
662 va.offset = bo->va;
663 r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
664 if (r && va.operation == RADEON_VA_RESULT_ERROR) {
665 fprintf(stderr, "radeon: Failed to allocate virtual address for buffer:\n");
666 fprintf(stderr, "radeon: size : %d bytes\n", size);
667 fprintf(stderr, "radeon: alignment : %d bytes\n", alignment);
668 fprintf(stderr, "radeon: domains : %d\n", args.initial_domain);
669 fprintf(stderr, "radeon: va : 0x%016llx\n", (unsigned long long)bo->va);
670 radeon_bo_destroy(&bo->base);
671 return NULL;
672 }
673 mtx_lock(&rws->bo_handles_mutex);
674 if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
675 struct pb_buffer *b = &bo->base;
676 struct radeon_bo *old_bo =
677 util_hash_table_get(rws->bo_vas, (void*)(uintptr_t)va.offset);
678
679 mtx_unlock(&rws->bo_handles_mutex);
680 pb_reference(&b, &old_bo->base);
681 return radeon_bo(b);
682 }
683
684 util_hash_table_set(rws->bo_vas, (void*)(uintptr_t)bo->va, bo);
685 mtx_unlock(&rws->bo_handles_mutex);
686 }
687
688 if (initial_domains & RADEON_DOMAIN_VRAM)
689 rws->allocated_vram += align(size, rws->info.gart_page_size);
690 else if (initial_domains & RADEON_DOMAIN_GTT)
691 rws->allocated_gtt += align(size, rws->info.gart_page_size);
692
693 return bo;
694 }
695
696 bool radeon_bo_can_reclaim(struct pb_buffer *_buf)
697 {
698 struct radeon_bo *bo = radeon_bo(_buf);
699
700 if (radeon_bo_is_referenced_by_any_cs(bo))
701 return false;
702
703 return radeon_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
704 }
705
706 bool radeon_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
707 {
708 struct radeon_bo *bo = NULL; /* fix container_of */
709 bo = container_of(entry, bo, u.slab.entry);
710
711 return radeon_bo_can_reclaim(&bo->base);
712 }
713
714 static void radeon_bo_slab_destroy(struct pb_buffer *_buf)
715 {
716 struct radeon_bo *bo = radeon_bo(_buf);
717
718 assert(!bo->handle);
719
720 pb_slab_free(&bo->rws->bo_slabs, &bo->u.slab.entry);
721 }
722
723 static const struct pb_vtbl radeon_winsys_bo_slab_vtbl = {
724 radeon_bo_slab_destroy
725 /* other functions are never called */
726 };
727
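/* pb_slabs backend: allocate a 64 KB real BO and split it into equally
 * sized entries. The heap index encodes the flags in bits 0-1 and the
 * domain in the remaining bits, matching radeon_winsys_bo_create(). */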
728 struct pb_slab *radeon_bo_slab_alloc(void *priv, unsigned heap,
729 unsigned entry_size,
730 unsigned group_index)
731 {
732 struct radeon_drm_winsys *ws = priv;
733 struct radeon_slab *slab = CALLOC_STRUCT(radeon_slab);
734 enum radeon_bo_domain domains;
735 enum radeon_bo_flag flags = 0;
736 unsigned base_hash;
737
738 if (!slab)
739 return NULL;
740
741 if (heap & 1)
742 flags |= RADEON_FLAG_GTT_WC;
743 if (heap & 2)
744 flags |= RADEON_FLAG_CPU_ACCESS;
745
746 switch (heap >> 2) {
747 case 0:
748 domains = RADEON_DOMAIN_VRAM;
749 break;
750 default:
751 case 1:
752 domains = RADEON_DOMAIN_VRAM_GTT;
753 break;
754 case 2:
755 domains = RADEON_DOMAIN_GTT;
756 break;
757 }
758
759 slab->buffer = radeon_bo(radeon_winsys_bo_create(&ws->base,
760 64 * 1024, 64 * 1024,
761 domains, flags));
762 if (!slab->buffer)
763 goto fail;
764
765 assert(slab->buffer->handle);
766
767 slab->base.num_entries = slab->buffer->base.size / entry_size;
768 slab->base.num_free = slab->base.num_entries;
769 slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
770 if (!slab->entries)
771 goto fail_buffer;
772
773 LIST_INITHEAD(&slab->base.free);
774
775 base_hash = __sync_fetch_and_add(&ws->next_bo_hash, slab->base.num_entries);
776
777 for (unsigned i = 0; i < slab->base.num_entries; ++i) {
778 struct radeon_bo *bo = &slab->entries[i];
779
780 bo->base.alignment = entry_size;
781 bo->base.usage = slab->buffer->base.usage;
782 bo->base.size = entry_size;
783 bo->base.vtbl = &radeon_winsys_bo_slab_vtbl;
784 bo->rws = ws;
785 bo->va = slab->buffer->va + i * entry_size;
786 bo->initial_domain = domains;
787 bo->hash = base_hash + i;
788 bo->u.slab.entry.slab = &slab->base;
789 bo->u.slab.entry.group_index = group_index;
790 bo->u.slab.real = slab->buffer;
791
792 LIST_ADDTAIL(&bo->u.slab.entry.head, &slab->base.free);
793 }
794
795 return &slab->base;
796
797 fail_buffer:
798 radeon_bo_reference(&slab->buffer, NULL);
799 fail:
800 FREE(slab);
801 return NULL;
802 }
803
804 void radeon_bo_slab_free(void *priv, struct pb_slab *pslab)
805 {
806 struct radeon_slab *slab = (struct radeon_slab *)pslab;
807
808 for (unsigned i = 0; i < slab->base.num_entries; ++i) {
809 struct radeon_bo *bo = &slab->entries[i];
810 for (unsigned j = 0; j < bo->u.slab.num_fences; ++j)
811 radeon_bo_reference(&bo->u.slab.fences[j], NULL);
812 FREE(bo->u.slab.fences);
813 }
814
815 FREE(slab->entries);
816 radeon_bo_reference(&slab->buffer, NULL);
817 FREE(slab);
818 }
819
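/* The two helpers below translate the Evergreen tile-split field between
 * its hardware encoding (0..6) and a size in bytes (64..4096). */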
820 static unsigned eg_tile_split(unsigned tile_split)
821 {
822 switch (tile_split) {
823 case 0: tile_split = 64; break;
824 case 1: tile_split = 128; break;
825 case 2: tile_split = 256; break;
826 case 3: tile_split = 512; break;
827 default:
828 case 4: tile_split = 1024; break;
829 case 5: tile_split = 2048; break;
830 case 6: tile_split = 4096; break;
831 }
832 return tile_split;
833 }
834
835 static unsigned eg_tile_split_rev(unsigned eg_tile_split)
836 {
837 switch (eg_tile_split) {
838 case 64: return 0;
839 case 128: return 1;
840 case 256: return 2;
841 case 512: return 3;
842 default:
843 case 1024: return 4;
844 case 2048: return 5;
845 case 4096: return 6;
846 }
847 }
848
849 static void radeon_bo_get_metadata(struct pb_buffer *_buf,
850 struct radeon_bo_metadata *md)
851 {
852 struct radeon_bo *bo = radeon_bo(_buf);
853 struct drm_radeon_gem_set_tiling args;
854
855 assert(bo->handle && "must not be called for slab entries");
856
857 memset(&args, 0, sizeof(args));
858
859 args.handle = bo->handle;
860
861 drmCommandWriteRead(bo->rws->fd,
862 DRM_RADEON_GEM_GET_TILING,
863 &args,
864 sizeof(args));
865
866 md->microtile = RADEON_LAYOUT_LINEAR;
867 md->macrotile = RADEON_LAYOUT_LINEAR;
868 if (args.tiling_flags & RADEON_TILING_MICRO)
869 md->microtile = RADEON_LAYOUT_TILED;
870 else if (args.tiling_flags & RADEON_TILING_MICRO_SQUARE)
871 md->microtile = RADEON_LAYOUT_SQUARETILED;
872
873 if (args.tiling_flags & RADEON_TILING_MACRO)
874 md->macrotile = RADEON_LAYOUT_TILED;
875
876 md->bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
877 md->bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
878 md->tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
879 md->mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
880 md->tile_split = eg_tile_split(md->tile_split);
881 md->scanout = bo->rws->gen >= DRV_SI && !(args.tiling_flags & RADEON_TILING_R600_NO_SCANOUT);
882 }
883
884 static void radeon_bo_set_metadata(struct pb_buffer *_buf,
885 struct radeon_bo_metadata *md)
886 {
887 struct radeon_bo *bo = radeon_bo(_buf);
888 struct drm_radeon_gem_set_tiling args;
889
890 assert(bo->handle && "must not be called for slab entries");
891
892 memset(&args, 0, sizeof(args));
893
894 os_wait_until_zero(&bo->num_active_ioctls, PIPE_TIMEOUT_INFINITE);
895
896 if (md->microtile == RADEON_LAYOUT_TILED)
897 args.tiling_flags |= RADEON_TILING_MICRO;
898 else if (md->microtile == RADEON_LAYOUT_SQUARETILED)
899 args.tiling_flags |= RADEON_TILING_MICRO_SQUARE;
900
901 if (md->macrotile == RADEON_LAYOUT_TILED)
902 args.tiling_flags |= RADEON_TILING_MACRO;
903
904 args.tiling_flags |= (md->bankw & RADEON_TILING_EG_BANKW_MASK) <<
905 RADEON_TILING_EG_BANKW_SHIFT;
906 args.tiling_flags |= (md->bankh & RADEON_TILING_EG_BANKH_MASK) <<
907 RADEON_TILING_EG_BANKH_SHIFT;
908 if (md->tile_split) {
909 args.tiling_flags |= (eg_tile_split_rev(md->tile_split) &
910 RADEON_TILING_EG_TILE_SPLIT_MASK) <<
911 RADEON_TILING_EG_TILE_SPLIT_SHIFT;
912 }
913 args.tiling_flags |= (md->mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
914 RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;
915
916 if (bo->rws->gen >= DRV_SI && !md->scanout)
917 args.tiling_flags |= RADEON_TILING_R600_NO_SCANOUT;
918
919 args.handle = bo->handle;
920 args.pitch = md->stride;
921
922 drmCommandWriteRead(bo->rws->fd,
923 DRM_RADEON_GEM_SET_TILING,
924 &args,
925 sizeof(args));
926 }
927
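/* Main buffer allocation path: small allocations are sub-allocated from
 * slabs when virtual memory is available; everything else first tries the
 * pb_cache and only allocates a fresh GEM BO on a cache miss. */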
928 static struct pb_buffer *
929 radeon_winsys_bo_create(struct radeon_winsys *rws,
930 uint64_t size,
931 unsigned alignment,
932 enum radeon_bo_domain domain,
933 enum radeon_bo_flag flags)
934 {
935 struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
936 struct radeon_bo *bo;
937 unsigned usage = 0, pb_cache_bucket;
938
939 /* Only 32-bit sizes are supported. */
940 if (size > UINT_MAX)
941 return NULL;
942
943 /* Sub-allocate small buffers from slabs. */
944 if (!(flags & RADEON_FLAG_HANDLE) &&
945 size <= (1 << RADEON_SLAB_MAX_SIZE_LOG2) &&
946 ws->info.has_virtual_memory &&
947 alignment <= MAX2(1 << RADEON_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
948 struct pb_slab_entry *entry;
949 unsigned heap = 0;
950
951 if (flags & RADEON_FLAG_GTT_WC)
952 heap |= 1;
953 if (flags & RADEON_FLAG_CPU_ACCESS)
954 heap |= 2;
955 if (flags & ~(RADEON_FLAG_GTT_WC | RADEON_FLAG_CPU_ACCESS))
956 goto no_slab;
957
958 switch (domain) {
959 case RADEON_DOMAIN_VRAM:
960 heap |= 0 * 4;
961 break;
962 case RADEON_DOMAIN_VRAM_GTT:
963 heap |= 1 * 4;
964 break;
965 case RADEON_DOMAIN_GTT:
966 heap |= 2 * 4;
967 break;
968 default:
969 goto no_slab;
970 }
971
972 entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
973 if (!entry) {
974 /* Clear the cache and try again. */
975 pb_cache_release_all_buffers(&ws->bo_cache);
976
977 entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
978 }
979 if (!entry)
980 return NULL;
981
982 bo = NULL;
983 bo = container_of(entry, bo, u.slab.entry);
984
985 pipe_reference_init(&bo->base.reference, 1);
986
987 return &bo->base;
988 }
989 no_slab:
990
991 /* This flag is irrelevant for the cache. */
992 flags &= ~RADEON_FLAG_HANDLE;
993
994 /* Align size to page size. This is the minimum alignment for normal
995 * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
996 * like constant/uniform buffers, can benefit from better and more reuse.
997 */
998 size = align(size, ws->info.gart_page_size);
999 alignment = align(alignment, ws->info.gart_page_size);
1000
1001 /* Only set one usage bit each for domains and flags, or the cache manager
1002 * might consider different sets of domains / flags compatible
1003 */
1004 if (domain == RADEON_DOMAIN_VRAM_GTT)
1005 usage = 1 << 2;
1006 else
1007 usage = (unsigned)domain >> 1;
1008 assert(flags < sizeof(usage) * 8 - 3);
1009 usage |= 1 << (flags + 3);
1010
1011 /* Determine the pb_cache bucket for minimizing pb_cache misses. */
1012 pb_cache_bucket = 0;
1013 if (domain & RADEON_DOMAIN_VRAM) /* VRAM or VRAM+GTT */
1014 pb_cache_bucket += 1;
1015 if (flags == RADEON_FLAG_GTT_WC) /* WC */
1016 pb_cache_bucket += 2;
1017 assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));
1018
1019 bo = radeon_bo(pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment,
1020 usage, pb_cache_bucket));
1021 if (bo)
1022 return &bo->base;
1023
1024 bo = radeon_create_bo(ws, size, alignment, usage, domain, flags,
1025 pb_cache_bucket);
1026 if (!bo) {
1027 /* Clear the cache and try again. */
1028 pb_slabs_reclaim(&ws->bo_slabs);
1029 pb_cache_release_all_buffers(&ws->bo_cache);
1030 bo = radeon_create_bo(ws, size, alignment, usage, domain, flags,
1031 pb_cache_bucket);
1032 if (!bo)
1033 return NULL;
1034 }
1035
1036 bo->u.real.use_reusable_pool = true;
1037
1038 mtx_lock(&ws->bo_handles_mutex);
1039 util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
1040 mtx_unlock(&ws->bo_handles_mutex);
1041
1042 return &bo->base;
1043 }
1044
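/* Wrap anonymous user memory in a GEM userptr BO (GTT domain only) and,
 * when virtual memory is available, map it into the GPU address space. */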
1045 static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
1046 void *pointer, uint64_t size)
1047 {
1048 struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
1049 struct drm_radeon_gem_userptr args;
1050 struct radeon_bo *bo;
1051 int r;
1052
1053 bo = CALLOC_STRUCT(radeon_bo);
1054 if (!bo)
1055 return NULL;
1056
1057 memset(&args, 0, sizeof(args));
1058 args.addr = (uintptr_t)pointer;
1059 args.size = align(size, ws->info.gart_page_size);
1060 args.flags = RADEON_GEM_USERPTR_ANONONLY |
1061 RADEON_GEM_USERPTR_VALIDATE |
1062 RADEON_GEM_USERPTR_REGISTER;
1063 if (drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_USERPTR,
1064 &args, sizeof(args))) {
1065 FREE(bo);
1066 return NULL;
1067 }
1068
1069 assert(args.handle != 0);
1070
1071 mtx_lock(&ws->bo_handles_mutex);
1072
1073 /* Initialize it. */
1074 pipe_reference_init(&bo->base.reference, 1);
1075 bo->handle = args.handle;
1076 bo->base.alignment = 0;
1077 bo->base.size = size;
1078 bo->base.vtbl = &radeon_bo_vtbl;
1079 bo->rws = ws;
1080 bo->user_ptr = pointer;
1081 bo->va = 0;
1082 bo->initial_domain = RADEON_DOMAIN_GTT;
1083 bo->hash = __sync_fetch_and_add(&ws->next_bo_hash, 1);
1084 (void) mtx_init(&bo->u.real.map_mutex, mtx_plain);
1085
1086 util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
1087
1088 mtx_unlock(&ws->bo_handles_mutex);
1089
1090 if (ws->info.has_virtual_memory) {
1091 struct drm_radeon_gem_va va;
1092
1093 bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);
1094
1095 va.handle = bo->handle;
1096 va.operation = RADEON_VA_MAP;
1097 va.vm_id = 0;
1098 va.offset = bo->va;
1099 va.flags = RADEON_VM_PAGE_READABLE |
1100 RADEON_VM_PAGE_WRITEABLE |
1101 RADEON_VM_PAGE_SNOOPED;
1102 va.offset = bo->va;
1103 r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
1104 if (r && va.operation == RADEON_VA_RESULT_ERROR) {
1105 fprintf(stderr, "radeon: Failed to assign virtual address space\n");
1106 radeon_bo_destroy(&bo->base);
1107 return NULL;
1108 }
1109 mtx_lock(&ws->bo_handles_mutex);
1110 if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
1111 struct pb_buffer *b = &bo->base;
1112 struct radeon_bo *old_bo =
1113 util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);
1114
1115 mtx_unlock(&ws->bo_handles_mutex);
1116 pb_reference(&b, &old_bo->base);
1117 return b;
1118 }
1119
1120 util_hash_table_set(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
1121 mtx_unlock(&ws->bo_handles_mutex);
1122 }
1123
1124 ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);
1125
1126 return (struct pb_buffer*)bo;
1127 }
1128
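/* Import a buffer from a flink name (SHARED) or a dma-buf fd (FD). */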
1129 static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
1130 struct winsys_handle *whandle,
1131 unsigned *stride,
1132 unsigned *offset)
1133 {
1134 struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
1135 struct radeon_bo *bo;
1136 int r;
1137 unsigned handle;
1138 uint64_t size = 0;
1139
1140 if (!offset && whandle->offset != 0) {
1141 fprintf(stderr, "attempt to import unsupported winsys offset %u\n",
1142 whandle->offset);
1143 return NULL;
1144 }
1145
1146 /* We must maintain a list of pairs <handle, bo>, so that we always return
1147 * the same BO for one particular handle. If we didn't do that and created
1148 * more than one BO for the same handle and then relocated them in a CS,
1149 * we would hit a deadlock in the kernel.
1150 *
1151 * The list of pairs is guarded by a mutex, of course. */
1152 mtx_lock(&ws->bo_handles_mutex);
1153
1154 if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
1155 /* First check if there already is an existing bo for the handle. */
1156 bo = util_hash_table_get(ws->bo_names, (void*)(uintptr_t)whandle->handle);
1157 } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
1158 /* We must first get the GEM handle, as fds are unreliable keys */
1159 r = drmPrimeFDToHandle(ws->fd, whandle->handle, &handle);
1160 if (r)
1161 goto fail;
1162 bo = util_hash_table_get(ws->bo_handles, (void*)(uintptr_t)handle);
1163 } else {
1164 /* Unknown handle type */
1165 goto fail;
1166 }
1167
1168 if (bo) {
1169 /* Increase the refcount. */
1170 struct pb_buffer *b = NULL;
1171 pb_reference(&b, &bo->base);
1172 goto done;
1173 }
1174
1175 /* There isn't, create a new one. */
1176 bo = CALLOC_STRUCT(radeon_bo);
1177 if (!bo) {
1178 goto fail;
1179 }
1180
1181 if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
1182 struct drm_gem_open open_arg = {};
1183 memset(&open_arg, 0, sizeof(open_arg));
1184 /* Open the BO. */
1185 open_arg.name = whandle->handle;
1186 if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
1187 FREE(bo);
1188 goto fail;
1189 }
1190 handle = open_arg.handle;
1191 size = open_arg.size;
1192 bo->flink_name = whandle->handle;
1193 } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
1194 size = lseek(whandle->handle, 0, SEEK_END);
1195 /*
1196 * Could check errno to determine whether the kernel is new enough, but
1197 * it doesn't really matter why this failed, just that it failed.
1198 */
1199 if (size == (off_t)-1) {
1200 FREE(bo);
1201 goto fail;
1202 }
1203 lseek(whandle->handle, 0, SEEK_SET);
1204 }
1205
1206 assert(handle != 0);
1207
1208 bo->handle = handle;
1209
1210 /* Initialize it. */
1211 pipe_reference_init(&bo->base.reference, 1);
1212 bo->base.alignment = 0;
1213 bo->base.size = (unsigned) size;
1214 bo->base.vtbl = &radeon_bo_vtbl;
1215 bo->rws = ws;
1216 bo->va = 0;
1217 bo->hash = __sync_fetch_and_add(&ws->next_bo_hash, 1);
1218 (void) mtx_init(&bo->u.real.map_mutex, mtx_plain);
1219
1220 if (bo->flink_name)
1221 util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
1222
1223 util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
1224
1225 done:
1226 mtx_unlock(&ws->bo_handles_mutex);
1227
1228 if (stride)
1229 *stride = whandle->stride;
1230 if (offset)
1231 *offset = whandle->offset;
1232
1233 if (ws->info.has_virtual_memory && !bo->va) {
1234 struct drm_radeon_gem_va va;
1235
1236 bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);
1237
1238 va.handle = bo->handle;
1239 va.operation = RADEON_VA_MAP;
1240 va.vm_id = 0;
1241 va.offset = bo->va;
1242 va.flags = RADEON_VM_PAGE_READABLE |
1243 RADEON_VM_PAGE_WRITEABLE |
1244 RADEON_VM_PAGE_SNOOPED;
1245 va.offset = bo->va;
1246 r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
1247 if (r && va.operation == RADEON_VA_RESULT_ERROR) {
1248 fprintf(stderr, "radeon: Failed to assign virtual address space\n");
1249 radeon_bo_destroy(&bo->base);
1250 return NULL;
1251 }
1252 mtx_lock(&ws->bo_handles_mutex);
1253 if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
1254 struct pb_buffer *b = &bo->base;
1255 struct radeon_bo *old_bo =
1256 util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);
1257
1258 mtx_unlock(&ws->bo_handles_mutex);
1259 pb_reference(&b, &old_bo->base);
1260 return b;
1261 }
1262
1263 util_hash_table_set(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
1264 mtx_unlock(&ws->bo_handles_mutex);
1265 }
1266
1267 bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);
1268
1269 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
1270 ws->allocated_vram += align(bo->base.size, ws->info.gart_page_size);
1271 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
1272 ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);
1273
1274 return (struct pb_buffer*)bo;
1275
1276 fail:
1277 mtx_unlock(&ws->bo_handles_mutex);
1278 return NULL;
1279 }
1280
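/* Export a buffer as a flink name, KMS handle or dma-buf fd. Exported
 * buffers are excluded from the reusable pool so the cache never hands a
 * shared buffer out again. */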
1281 static bool radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
1282 unsigned stride, unsigned offset,
1283 unsigned slice_size,
1284 struct winsys_handle *whandle)
1285 {
1286 struct drm_gem_flink flink;
1287 struct radeon_bo *bo = radeon_bo(buffer);
1288 struct radeon_drm_winsys *ws = bo->rws;
1289
1290 if (!bo->handle) {
1291 offset += bo->va - bo->u.slab.real->va;
1292 bo = bo->u.slab.real;
1293 }
1294
1295 memset(&flink, 0, sizeof(flink));
1296
1297 bo->u.real.use_reusable_pool = false;
1298
1299 if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
1300 if (!bo->flink_name) {
1301 flink.handle = bo->handle;
1302
1303 if (ioctl(ws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
1304 return false;
1305 }
1306
1307 bo->flink_name = flink.name;
1308
1309 mtx_lock(&ws->bo_handles_mutex);
1310 util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
1311 mtx_unlock(&ws->bo_handles_mutex);
1312 }
1313 whandle->handle = bo->flink_name;
1314 } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
1315 whandle->handle = bo->handle;
1316 } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
1317 if (drmPrimeHandleToFD(ws->fd, bo->handle, DRM_CLOEXEC, (int*)&whandle->handle))
1318 return false;
1319 }
1320
1321 whandle->stride = stride;
1322 whandle->offset = offset;
1323 whandle->offset += slice_size * whandle->layer;
1324
1325 return true;
1326 }
1327
1328 static bool radeon_winsys_bo_is_user_ptr(struct pb_buffer *buf)
1329 {
1330 return ((struct radeon_bo*)buf)->user_ptr != NULL;
1331 }
1332
1333 static uint64_t radeon_winsys_bo_va(struct pb_buffer *buf)
1334 {
1335 return ((struct radeon_bo*)buf)->va;
1336 }
1337
1338 static unsigned radeon_winsys_bo_get_reloc_offset(struct pb_buffer *buf)
1339 {
1340 struct radeon_bo *bo = radeon_bo(buf);
1341
1342 if (bo->handle)
1343 return 0;
1344
1345 return bo->va - bo->u.slab.real->va;
1346 }
1347
1348 void radeon_drm_bo_init_functions(struct radeon_drm_winsys *ws)
1349 {
1350 ws->base.buffer_set_metadata = radeon_bo_set_metadata;
1351 ws->base.buffer_get_metadata = radeon_bo_get_metadata;
1352 ws->base.buffer_map = radeon_bo_map;
1353 ws->base.buffer_unmap = radeon_bo_unmap;
1354 ws->base.buffer_wait = radeon_bo_wait;
1355 ws->base.buffer_create = radeon_winsys_bo_create;
1356 ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
1357 ws->base.buffer_from_ptr = radeon_winsys_bo_from_ptr;
1358 ws->base.buffer_is_user_ptr = radeon_winsys_bo_is_user_ptr;
1359 ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
1360 ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
1361 ws->base.buffer_get_reloc_offset = radeon_winsys_bo_get_reloc_offset;
1362 ws->base.buffer_get_initial_domain = radeon_bo_get_initial_domain;
1363 }