gallium/pb_cache: divide the cache into buckets for reducing cache misses
[mesa.git] src/gallium/winsys/radeon/drm/radeon_drm_bo.c
/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "radeon_drm_cs.h"

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/simple_list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"
#include "os/os_time.h"

#include "state_tracker/drm_driver.h"

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <inttypes.h>

static inline struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
    return (struct radeon_bo *)bo;
}

struct radeon_bo_va_hole {
    struct list_head list;
    uint64_t offset;
    uint64_t size;
};

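/* Ask the kernel whether the buffer is still in use by the GPU.
 * A non-zero return from the GEM_BUSY ioctl (typically -EBUSY) means
 * the buffer is busy. */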
static bool radeon_bo_is_busy(struct radeon_bo *bo)
{
    struct drm_radeon_gem_busy args = {0};

    args.handle = bo->handle;
    return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                               &args, sizeof(args)) != 0;
}

static void radeon_bo_wait_idle(struct radeon_bo *bo)
{
    struct drm_radeon_gem_wait_idle args = {0};

    args.handle = bo->handle;
    while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                           &args, sizeof(args)) == -EBUSY);
}

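/* Wait for the buffer to become idle, honoring three timeout modes:
 *   0                     - just query the busy status,
 *   PIPE_TIMEOUT_INFINITE - block in the kernel until idle,
 *   anything else         - poll GEM_BUSY until the deadline passes.
 * Returns true if the buffer is idle. */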
static bool radeon_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
                           enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    int64_t abs_timeout;

    /* No timeout. Just query. */
    if (timeout == 0)
        return !bo->num_active_ioctls && !radeon_bo_is_busy(bo);

    abs_timeout = os_time_get_absolute_timeout(timeout);

    /* Wait if any ioctl is being submitted with this buffer. */
    if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
        return false;

    /* Infinite timeout. */
    if (abs_timeout == PIPE_TIMEOUT_INFINITE) {
        radeon_bo_wait_idle(bo);
        return true;
    }

    /* Other timeouts need to be emulated with a loop. */
    while (radeon_bo_is_busy(bo)) {
        if (os_time_get_nano() >= abs_timeout)
            return false;
        os_time_sleep(10);
    }

    return true;
}

static enum radeon_bo_domain get_valid_domain(enum radeon_bo_domain domain)
{
    /* Zero out the domains the driver doesn't understand. */
    domain &= RADEON_DOMAIN_VRAM_GTT;

    /* If no domain is set, we must set something... */
    if (!domain)
        domain = RADEON_DOMAIN_VRAM_GTT;

    return domain;
}

static enum radeon_bo_domain radeon_bo_get_initial_domain(
        struct pb_buffer *buf)
{
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    struct drm_radeon_gem_op args;

    if (bo->rws->info.drm_minor < 38)
        return RADEON_DOMAIN_VRAM_GTT;

    memset(&args, 0, sizeof(args));
    args.handle = bo->handle;
    args.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN;

    drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_OP,
                        &args, sizeof(args));

    /* GEM domains and winsys domains are defined the same way. */
    return get_valid_domain(args.value);
}

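/* Allocate a GPU virtual address range of the given size and alignment.
 *
 * This is a simple first-fit allocator over a list of free holes. For
 * each hole we compute the alignment waste at its start; if the hole
 * fits exactly it is consumed, if it is larger it is shrunk (splitting
 * off the wasted head as a new hole), and otherwise we fall through to
 * carving fresh space from the top of the address space (rws->va_offset).
 *
 * Worked example: a hole at offset 0x11000 of size 0x3000 and a request
 * for 0x1000 bytes aligned to 0x2000 wastes 0x1000 bytes at the head, so
 * the request is placed at 0x12000, a 0x1000-byte hole is split off at
 * 0x11000, and the remaining hole shrinks to 0x1000 bytes at 0x13000. */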
static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
                                     uint64_t size, uint64_t alignment)
{
    struct radeon_bo_va_hole *hole, *n;
    uint64_t offset = 0, waste = 0;

    /* All VM address space holes will implicitly start aligned to the
     * size alignment, so we don't need to sanitize the alignment here.
     */
    size = align(size, rws->info.gart_page_size);

    pipe_mutex_lock(rws->bo_va_mutex);
    /* First look for a hole. */
    LIST_FOR_EACH_ENTRY_SAFE(hole, n, &rws->va_holes, list) {
        offset = hole->offset;
        waste = offset % alignment;
        waste = waste ? alignment - waste : 0;
        offset += waste;
        if (offset >= (hole->offset + hole->size)) {
            continue;
        }
        if (!waste && hole->size == size) {
            offset = hole->offset;
            list_del(&hole->list);
            FREE(hole);
            pipe_mutex_unlock(rws->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) > size) {
            if (waste) {
                n = CALLOC_STRUCT(radeon_bo_va_hole);
                n->size = waste;
                n->offset = hole->offset;
                list_add(&n->list, &hole->list);
            }
            hole->size -= (size + waste);
            hole->offset += size + waste;
            pipe_mutex_unlock(rws->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) == size) {
            hole->size = waste;
            pipe_mutex_unlock(rws->bo_va_mutex);
            return offset;
        }
    }

    /* No hole fits; carve fresh space from the top of the address space. */
    offset = rws->va_offset;
    waste = offset % alignment;
    waste = waste ? alignment - waste : 0;
    if (waste) {
        n = CALLOC_STRUCT(radeon_bo_va_hole);
        n->size = waste;
        n->offset = offset;
        list_add(&n->list, &rws->va_holes);
    }
    offset += waste;
    rws->va_offset += size + waste;
    pipe_mutex_unlock(rws->bo_va_mutex);
    return offset;
}

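/* Return a virtual address range to the allocator.
 *
 * If the range ends at the current top of the address space, the top is
 * simply lowered (and a hole touching the new top is absorbed). Otherwise
 * the range is merged into an adjacent hole where possible, or recorded
 * as a new hole in the list. */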
static void radeon_bomgr_free_va(struct radeon_drm_winsys *rws,
                                 uint64_t va, uint64_t size)
{
    struct radeon_bo_va_hole *hole;

    size = align(size, rws->info.gart_page_size);

    pipe_mutex_lock(rws->bo_va_mutex);
    if ((va + size) == rws->va_offset) {
        rws->va_offset = va;
        /* Delete the uppermost hole if it reaches the new top. */
        if (!LIST_IS_EMPTY(&rws->va_holes)) {
            hole = container_of(rws->va_holes.next, hole, list);
            if ((hole->offset + hole->size) == va) {
                rws->va_offset = hole->offset;
                list_del(&hole->list);
                FREE(hole);
            }
        }
    } else {
        struct radeon_bo_va_hole *next;

        hole = container_of(&rws->va_holes, hole, list);
        LIST_FOR_EACH_ENTRY(next, &rws->va_holes, list) {
            if (next->offset < va)
                break;
            hole = next;
        }

        if (&hole->list != &rws->va_holes) {
            /* Grow the upper hole if it's adjacent. */
            if (hole->offset == (va + size)) {
                hole->offset = va;
                hole->size += size;
                /* Merge the lower hole too if it's adjacent. */
                if (next != hole && &next->list != &rws->va_holes &&
                    (next->offset + next->size) == va) {
                    next->size += hole->size;
                    list_del(&hole->list);
                    FREE(hole);
                }
                goto out;
            }
        }

        /* Grow the lower hole if it's adjacent. */
        if (next != hole && &next->list != &rws->va_holes &&
            (next->offset + next->size) == va) {
            next->size += size;
            goto out;
        }

        /* FIXME: on allocation failure we just lose virtual address space;
         * maybe print a warning.
         */
        next = CALLOC_STRUCT(radeon_bo_va_hole);
        if (next) {
            next->size = size;
            next->offset = va;
            list_add(&next->list, &hole->list);
        }
    }
out:
    pipe_mutex_unlock(rws->bo_va_mutex);
}

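/* Destroy the buffer: drop it from the handle tables, unmap any CPU
 * mapping, release its virtual address range, close the GEM handle and
 * update the memory accounting. */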
void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_drm_winsys *rws = bo->rws;
    struct drm_gem_close args;

    memset(&args, 0, sizeof(args));

    pipe_mutex_lock(rws->bo_handles_mutex);
    util_hash_table_remove(rws->bo_handles, (void*)(uintptr_t)bo->handle);
    if (bo->flink_name) {
        util_hash_table_remove(rws->bo_names,
                               (void*)(uintptr_t)bo->flink_name);
    }
    pipe_mutex_unlock(rws->bo_handles_mutex);

    if (bo->ptr)
        os_munmap(bo->ptr, bo->base.size);

    if (rws->info.has_virtual_memory) {
        if (rws->va_unmap_working) {
            struct drm_radeon_gem_va va;

            va.handle = bo->handle;
            va.vm_id = 0;
            va.operation = RADEON_VA_UNMAP;
            va.flags = RADEON_VM_PAGE_READABLE |
                       RADEON_VM_PAGE_WRITEABLE |
                       RADEON_VM_PAGE_SNOOPED;
            va.offset = bo->va;

            if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va,
                                    sizeof(va)) != 0 &&
                va.operation == RADEON_VA_RESULT_ERROR) {
                fprintf(stderr, "radeon: Failed to deallocate virtual address for buffer:\n");
                fprintf(stderr, "radeon: size      : %"PRIu64" bytes\n", bo->base.size);
                fprintf(stderr, "radeon: va        : 0x%"PRIx64"\n", bo->va);
            }
        }

        radeon_bomgr_free_va(rws, bo->va, bo->base.size);
    }

    /* Close the GEM object. */
    args.handle = bo->handle;
    drmIoctl(rws->fd, DRM_IOCTL_GEM_CLOSE, &args);

    pipe_mutex_destroy(bo->map_mutex);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        rws->allocated_vram -= align(bo->base.size, rws->info.gart_page_size);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        rws->allocated_gtt -= align(bo->base.size, rws->info.gart_page_size);
    FREE(bo);
}

/* Return the buffer to the reusable pool if it came from there; destroy
 * it otherwise. */
static void radeon_bo_destroy_or_cache(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (bo->use_reusable_pool)
        pb_cache_add_buffer(&bo->cache_entry);
    else
        radeon_bo_destroy(_buf);
}

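/* Map the buffer for CPU access, creating the mapping on first use.
 * The mapping is reference-counted (map_count) and shared between
 * callers; user-pointer buffers simply return the user pointer. */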
void *radeon_bo_do_map(struct radeon_bo *bo)
{
    struct drm_radeon_gem_mmap args = {0};
    void *ptr;

    /* If the buffer is created from user memory, return the user pointer. */
    if (bo->user_ptr)
        return bo->user_ptr;

    /* Map the buffer. */
    pipe_mutex_lock(bo->map_mutex);
    /* Return the pointer if it's already mapped. */
    if (bo->ptr) {
        bo->map_count++;
        pipe_mutex_unlock(bo->map_mutex);
        return bo->ptr;
    }
    args.handle = bo->handle;
    args.offset = 0;
    args.size = (uint64_t)bo->base.size;
    if (drmCommandWriteRead(bo->rws->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args))) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                bo, bo->handle);
        return NULL;
    }

    ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                  bo->rws->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        /* Clear the cache and try again. */
        pb_cache_release_all_buffers(&bo->rws->bo_cache);

        ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                      bo->rws->fd, args.addr_ptr);
        if (ptr == MAP_FAILED) {
            pipe_mutex_unlock(bo->map_mutex);
            fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
            return NULL;
        }
    }
    bo->ptr = ptr;
    bo->map_count = 1;
    pipe_mutex_unlock(bo->map_mutex);

    return bo->ptr;
}

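/* Synchronized buffer map. Unless PIPE_TRANSFER_UNSYNCHRONIZED is set,
 * this flushes any command stream referencing the buffer and waits for
 * the GPU, with two refinements:
 *  - read-only maps only wait for GPU writes, since concurrent reads
 *    are harmless,
 *  - PIPE_TRANSFER_DONTBLOCK turns the wait into a busy query and makes
 *    the flush asynchronous, returning NULL if the buffer isn't idle. */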
static void *radeon_bo_map(struct pb_buffer *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    struct radeon_drm_cs *cs = (struct radeon_drm_cs*)rcs;

    /* If it's not an unsynchronized bo_map, flush the CS if needed and then wait. */
    if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
        /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
        if (usage & PIPE_TRANSFER_DONTBLOCK) {
            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
                    return NULL;
                }

                if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
                                    RADEON_USAGE_WRITE)) {
                    return NULL;
                }
            } else {
                if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
                    return NULL;
                }

                if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
                                    RADEON_USAGE_READWRITE)) {
                    return NULL;
                }
            }
        } else {
            uint64_t time = os_time_get_nano();

            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0, NULL);
                }
                radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                               RADEON_USAGE_WRITE);
            } else {
                /* Mapping for write. */
                if (cs) {
                    if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                        cs->flush_cs(cs->flush_data, 0, NULL);
                    } else {
                        /* Try to avoid busy-waiting in radeon_bo_wait. */
                        if (p_atomic_read(&bo->num_active_ioctls))
                            radeon_drm_cs_sync_flush(rcs);
                    }
                }

                radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                               RADEON_USAGE_READWRITE);
            }

            bo->rws->buffer_wait_time += os_time_get_nano() - time;
        }
    }

    return radeon_bo_do_map(bo);
}

static void radeon_bo_unmap(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = (struct radeon_bo*)_buf;

    if (bo->user_ptr)
        return;

    pipe_mutex_lock(bo->map_mutex);
    if (!bo->ptr) {
        pipe_mutex_unlock(bo->map_mutex);
        return; /* it hasn't been mapped */
    }

    assert(bo->map_count);
    if (--bo->map_count) {
        pipe_mutex_unlock(bo->map_mutex);
        return; /* it's been mapped multiple times */
    }

    os_munmap(bo->ptr, bo->base.size);
    bo->ptr = NULL;
    pipe_mutex_unlock(bo->map_mutex);
}

static const struct pb_vtbl radeon_bo_vtbl = {
    radeon_bo_destroy_or_cache
    /* other functions are never called */
};

#ifndef RADEON_GEM_GTT_WC
#define RADEON_GEM_GTT_WC           (1 << 2)
#endif
#ifndef RADEON_GEM_CPU_ACCESS
/* The BO is expected to be accessed by the CPU. */
#define RADEON_GEM_CPU_ACCESS       (1 << 3)
#endif
#ifndef RADEON_GEM_NO_CPU_ACCESS
/* CPU access is not expected to work for this BO. */
#define RADEON_GEM_NO_CPU_ACCESS    (1 << 4)
#endif

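/* Allocate a new buffer through the GEM_CREATE ioctl and, if the kernel
 * supports virtual memory, bind it a GPU virtual address via GEM_VA.
 * If the kernel reports that the address is already bound to another
 * buffer (RADEON_VA_RESULT_VA_EXIST), that existing buffer is returned
 * instead. */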
static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
                                          unsigned size, unsigned alignment,
                                          unsigned usage,
                                          unsigned initial_domains,
                                          unsigned flags)
{
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args;
    int r;

    memset(&args, 0, sizeof(args));

    assert(initial_domains);
    assert((initial_domains &
            ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);

    args.size = size;
    args.alignment = alignment;
    args.initial_domain = initial_domains;
    args.flags = 0;

    if (flags & RADEON_FLAG_GTT_WC)
        args.flags |= RADEON_GEM_GTT_WC;
    if (flags & RADEON_FLAG_CPU_ACCESS)
        args.flags |= RADEON_GEM_CPU_ACCESS;
    if (flags & RADEON_FLAG_NO_CPU_ACCESS)
        args.flags |= RADEON_GEM_NO_CPU_ACCESS;

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
        fprintf(stderr, "radeon: size      : %u bytes\n", size);
        fprintf(stderr, "radeon: alignment : %u bytes\n", alignment);
        fprintf(stderr, "radeon: domains   : %u\n", args.initial_domain);
        fprintf(stderr, "radeon: flags     : %u\n", args.flags);
        return NULL;
    }

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = alignment;
    bo->base.usage = usage;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->rws = rws;
    bo->handle = args.handle;
    bo->va = 0;
    bo->initial_domain = initial_domains;
    pipe_mutex_init(bo->map_mutex);
    pb_cache_init_entry(&rws->bo_cache, &bo->cache_entry, &bo->base, 0);

    if (rws->info.has_virtual_memory) {
        struct drm_radeon_gem_va va;
        unsigned va_gap_size;

        va_gap_size = rws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
        bo->va = radeon_bomgr_find_va(rws, size + va_gap_size, alignment);

        va.handle = bo->handle;
        va.vm_id = 0;
        va.operation = RADEON_VA_MAP;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;
        r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to allocate virtual address for buffer:\n");
            fprintf(stderr, "radeon: size      : %d bytes\n", size);
            fprintf(stderr, "radeon: alignment : %d bytes\n", alignment);
            fprintf(stderr, "radeon: domains   : %d\n", args.initial_domain);
            fprintf(stderr, "radeon: va        : 0x%016llx\n", (unsigned long long)bo->va);
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(rws->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(rws->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(rws->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return radeon_bo(b);
        }

        util_hash_table_set(rws->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(rws->bo_handles_mutex);
    }

    if (initial_domains & RADEON_DOMAIN_VRAM)
        rws->allocated_vram += align(size, rws->info.gart_page_size);
    else if (initial_domains & RADEON_DOMAIN_GTT)
        rws->allocated_gtt += align(size, rws->info.gart_page_size);

    return bo;
}

/* A buffer can be reclaimed from the cache only if no CS references it
 * and the GPU is done with it. */
bool radeon_bo_can_reclaim(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo))
        return false;

    return radeon_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
}

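/* Convert the tile-split field used by the kernel tiling flags (0..6)
 * to a size in bytes (64..4096), and back. Values outside the defined
 * range fall back to 1024 bytes. */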
static unsigned eg_tile_split(unsigned tile_split)
{
    switch (tile_split) {
    case 0:  tile_split = 64;   break;
    case 1:  tile_split = 128;  break;
    case 2:  tile_split = 256;  break;
    case 3:  tile_split = 512;  break;
    default:
    case 4:  tile_split = 1024; break;
    case 5:  tile_split = 2048; break;
    case 6:  tile_split = 4096; break;
    }
    return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
    switch (eg_tile_split) {
    case 64:   return 0;
    case 128:  return 1;
    case 256:  return 2;
    case 512:  return 3;
    default:
    case 1024: return 4;
    case 2048: return 5;
    case 4096: return 6;
    }
}

/* Read back the tiling state from the kernel and decode it into
 * radeon_bo_metadata. */
static void radeon_bo_get_metadata(struct pb_buffer *_buf,
                                   struct radeon_bo_metadata *md)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_GET_TILING,
                        &args,
                        sizeof(args));

    md->microtile = RADEON_LAYOUT_LINEAR;
    md->macrotile = RADEON_LAYOUT_LINEAR;
    if (args.tiling_flags & RADEON_TILING_MICRO)
        md->microtile = RADEON_LAYOUT_TILED;
    else if (args.tiling_flags & RADEON_TILING_MICRO_SQUARE)
        md->microtile = RADEON_LAYOUT_SQUARETILED;

    if (args.tiling_flags & RADEON_TILING_MACRO)
        md->macrotile = RADEON_LAYOUT_TILED;

    md->bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
    md->bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
    md->tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
    md->mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
    md->tile_split = eg_tile_split(md->tile_split);
    md->scanout = bo->rws->gen >= DRV_SI && !(args.tiling_flags & RADEON_TILING_R600_NO_SCANOUT);
}

/* Encode radeon_bo_metadata into kernel tiling flags and set them,
 * first waiting for any in-flight ioctls that reference the buffer. */
static void radeon_bo_set_metadata(struct pb_buffer *_buf,
                                   struct radeon_bo_metadata *md)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    os_wait_until_zero(&bo->num_active_ioctls, PIPE_TIMEOUT_INFINITE);

    if (md->microtile == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_TILING_MICRO;
    else if (md->microtile == RADEON_LAYOUT_SQUARETILED)
        args.tiling_flags |= RADEON_TILING_MICRO_SQUARE;

    if (md->macrotile == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_TILING_MACRO;

    args.tiling_flags |= (md->bankw & RADEON_TILING_EG_BANKW_MASK) <<
                         RADEON_TILING_EG_BANKW_SHIFT;
    args.tiling_flags |= (md->bankh & RADEON_TILING_EG_BANKH_MASK) <<
                         RADEON_TILING_EG_BANKH_SHIFT;
    if (md->tile_split) {
        args.tiling_flags |= (eg_tile_split_rev(md->tile_split) &
                              RADEON_TILING_EG_TILE_SPLIT_MASK) <<
                             RADEON_TILING_EG_TILE_SPLIT_SHIFT;
    }
    args.tiling_flags |= (md->mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
                         RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;

    if (bo->rws->gen >= DRV_SI && !md->scanout)
        args.tiling_flags |= RADEON_TILING_R600_NO_SCANOUT;

    args.handle = bo->handle;
    args.pitch = md->stride;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_SET_TILING,
                        &args,
                        sizeof(args));
}

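/* Create a buffer, preferring to reclaim a compatible one from the
 * pb_cache. Compatibility is decided by the "usage" bitmask computed
 * below: the domain is encoded in the low three bits (with the combined
 * VRAM_GTT case getting its own bit) and the exact flag combination in
 * a single bit at position flags + 3, so only identical domain/flag
 * requests can reuse each other's buffers. For example, a single-bit
 * domain with flags == 1 yields usage = (domain >> 1) | (1 << 4). */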
static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        uint64_t size,
                        unsigned alignment,
                        enum radeon_bo_domain domain,
                        enum radeon_bo_flag flags)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    unsigned usage = 0;

    /* Only 32-bit sizes are supported. */
    if (size > UINT_MAX)
        return NULL;

    /* Align the size to the page size. This is the minimum alignment for
     * normal BOs. Aligning this here helps the cached bufmgr. Especially
     * small BOs, like constant/uniform buffers, can benefit from better
     * and more reuse.
     */
    size = align(size, ws->info.gart_page_size);
    alignment = align(alignment, ws->info.gart_page_size);

    /* Only set one usage bit each for domains and flags, or the cache
     * manager might consider different sets of domains / flags compatible.
     */
    if (domain == RADEON_DOMAIN_VRAM_GTT)
        usage = 1 << 2;
    else
        usage = domain >> 1;
    assert(flags < sizeof(usage) * 8 - 3);
    usage |= 1 << (flags + 3);

    bo = radeon_bo(pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage, 0));
    if (bo)
        return &bo->base;

    bo = radeon_create_bo(ws, size, alignment, usage, domain, flags);
    if (!bo) {
        /* Clear the cache and try again. */
        pb_cache_release_all_buffers(&ws->bo_cache);
        bo = radeon_create_bo(ws, size, alignment, usage, domain, flags);
        if (!bo)
            return NULL;
    }

    bo->use_reusable_pool = true;

    pipe_mutex_lock(ws->bo_handles_mutex);
    util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
    pipe_mutex_unlock(ws->bo_handles_mutex);

    return &bo->base;
}

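/* Wrap anonymous user memory in a buffer via the GEM_USERPTR ioctl.
 * The memory is validated and registered with the kernel, and the
 * resulting buffer always lives in the GTT domain. */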
static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
                                                   void *pointer, uint64_t size)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct drm_radeon_gem_userptr args;
    struct radeon_bo *bo;
    int r;

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    memset(&args, 0, sizeof(args));
    args.addr = (uintptr_t)pointer;
    args.size = align(size, ws->info.gart_page_size);
    args.flags = RADEON_GEM_USERPTR_ANONONLY |
                 RADEON_GEM_USERPTR_VALIDATE |
                 RADEON_GEM_USERPTR_REGISTER;
    if (drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_USERPTR,
                            &args, sizeof(args))) {
        FREE(bo);
        return NULL;
    }

    pipe_mutex_lock(ws->bo_handles_mutex);

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->handle = args.handle;
    bo->base.alignment = 0;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->rws = ws;
    bo->user_ptr = pointer;
    bo->va = 0;
    bo->initial_domain = RADEON_DOMAIN_GTT;
    pipe_mutex_init(bo->map_mutex);

    util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);

    pipe_mutex_unlock(ws->bo_handles_mutex);

    if (ws->info.has_virtual_memory) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.offset = bo->va;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(ws->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(ws->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        util_hash_table_set(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(ws->bo_handles_mutex);
    }

    ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);

    return (struct pb_buffer*)bo;
}

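/* Import a buffer from a flink name (SHARED) or a dma-buf fd (FD).
 * The handle tables guarantee that one GEM handle maps to exactly one
 * radeon_bo; importing the same buffer twice returns the same object,
 * which avoids kernel deadlocks when both copies would be relocated in
 * the same CS. */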
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                      struct winsys_handle *whandle,
                                                      unsigned *stride,
                                                      unsigned *offset)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    int r;
    unsigned handle;
    uint64_t size = 0;

    if (!offset && whandle->offset != 0) {
        fprintf(stderr, "attempt to import unsupported winsys offset %u\n",
                whandle->offset);
        return NULL;
    }

    /* We must maintain a list of pairs <handle, bo>, so that we always
     * return the same BO for one particular handle. If we didn't do that
     * and created more than one BO for the same handle and then relocated
     * them in a CS, we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(ws->bo_handles_mutex);

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        /* First check if there already is an existing bo for the handle. */
        bo = util_hash_table_get(ws->bo_names, (void*)(uintptr_t)whandle->handle);
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        /* We must first get the GEM handle, as fds are unreliable keys. */
        r = drmPrimeFDToHandle(ws->fd, whandle->handle, &handle);
        if (r)
            goto fail;
        bo = util_hash_table_get(ws->bo_handles, (void*)(uintptr_t)handle);
    } else {
        /* Unknown handle type. */
        goto fail;
    }

    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        struct drm_gem_open open_arg = {};
        /* Open the BO. */
        open_arg.name = whandle->handle;
        if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
            FREE(bo);
            goto fail;
        }
        handle = open_arg.handle;
        size = open_arg.size;
        bo->flink_name = whandle->handle;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        size = lseek(whandle->handle, 0, SEEK_END);
        /*
         * Could check errno to determine whether the kernel is new enough,
         * but it doesn't really matter why this failed, just that it failed.
         */
        if (size == (off_t)-1) {
            FREE(bo);
            goto fail;
        }
        lseek(whandle->handle, 0, SEEK_SET);
    }

    bo->handle = handle;

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = 0;
    bo->base.size = (unsigned) size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->rws = ws;
    bo->va = 0;
    pipe_mutex_init(bo->map_mutex);

    if (bo->flink_name)
        util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);

    util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);

done:
    pipe_mutex_unlock(ws->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;
    if (offset)
        *offset = whandle->offset;

    if (ws->info.has_virtual_memory && !bo->va) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.offset = bo->va;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(ws->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(ws->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        util_hash_table_set(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(ws->bo_handles_mutex);
    }

    bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        ws->allocated_vram += align(bo->base.size, ws->info.gart_page_size);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);

    return (struct pb_buffer*)bo;

fail:
    pipe_mutex_unlock(ws->bo_handles_mutex);
    return NULL;
}

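/* Export a buffer as a flink name, a KMS handle, or a dma-buf fd.
 * Exported buffers are excluded from the reusable pool so that the
 * cache never hands out memory another process may still reference. */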
static bool radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
                                        unsigned stride, unsigned offset,
                                        unsigned slice_size,
                                        struct winsys_handle *whandle)
{
    struct drm_gem_flink flink;
    struct radeon_bo *bo = radeon_bo(buffer);
    struct radeon_drm_winsys *ws = bo->rws;

    memset(&flink, 0, sizeof(flink));

    bo->use_reusable_pool = false;

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!bo->flink_name) {
            flink.handle = bo->handle;

            if (ioctl(ws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return false;
            }

            bo->flink_name = flink.name;

            pipe_mutex_lock(ws->bo_handles_mutex);
            util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
            pipe_mutex_unlock(ws->bo_handles_mutex);
        }
        whandle->handle = bo->flink_name;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = bo->handle;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        if (drmPrimeHandleToFD(ws->fd, bo->handle, DRM_CLOEXEC, (int*)&whandle->handle))
            return false;
    }

    whandle->stride = stride;
    whandle->offset = offset;
    whandle->offset += slice_size * whandle->layer;

    return true;
}

static bool radeon_winsys_bo_is_user_ptr(struct pb_buffer *buf)
{
    return ((struct radeon_bo*)buf)->user_ptr != NULL;
}

static uint64_t radeon_winsys_bo_va(struct pb_buffer *buf)
{
    return ((struct radeon_bo*)buf)->va;
}

void radeon_drm_bo_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.buffer_set_metadata = radeon_bo_set_metadata;
    ws->base.buffer_get_metadata = radeon_bo_get_metadata;
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = radeon_bo_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_from_ptr = radeon_winsys_bo_from_ptr;
    ws->base.buffer_is_user_ptr = radeon_winsys_bo_is_user_ptr;
    ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
    ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
    ws->base.buffer_get_initial_domain = radeon_bo_get_initial_domain;
}