winsys/radeon: add slab entry structures to radeon_bo
[mesa.git] src/gallium/winsys/radeon/drm/radeon_drm_bo.c
1 /*
2 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
14 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
15 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
16 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
17 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
24 * of the Software.
25 */
26
27 #include "radeon_drm_cs.h"
28
29 #include "util/u_hash_table.h"
30 #include "util/u_memory.h"
31 #include "util/simple_list.h"
32 #include "os/os_thread.h"
33 #include "os/os_mman.h"
34 #include "os/os_time.h"
35
36 #include "state_tracker/drm_driver.h"
37
38 #include <sys/ioctl.h>
39 #include <xf86drm.h>
40 #include <errno.h>
41 #include <fcntl.h>
42 #include <stdio.h>
43 #include <inttypes.h>
44
45 static inline struct radeon_bo *radeon_bo(struct pb_buffer *bo)
46 {
47 return (struct radeon_bo *)bo;
48 }
49
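/* A free range in the winsys-managed GPU virtual address space; holes are
 * kept on rws->va_holes and reused by radeon_bomgr_find_va() below. */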
50 struct radeon_bo_va_hole {
51 struct list_head list;
52 uint64_t offset;
53 uint64_t size;
54 };
55
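/* Ask the kernel whether the buffer is still in use: the GEM_BUSY ioctl
 * returns a non-zero result (-EBUSY) while the GPU is still using it. */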
56 static bool radeon_bo_is_busy(struct radeon_bo *bo)
57 {
58 struct drm_radeon_gem_busy args = {0};
59
60 args.handle = bo->handle;
61 return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
62 &args, sizeof(args)) != 0;
63 }
64
65 static void radeon_bo_wait_idle(struct radeon_bo *bo)
66 {
67 struct drm_radeon_gem_wait_idle args = {0};
68
69 args.handle = bo->handle;
70 while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
71 &args, sizeof(args)) == -EBUSY);
72 }
73
74 static bool radeon_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
75 enum radeon_bo_usage usage)
76 {
77 struct radeon_bo *bo = radeon_bo(_buf);
78 int64_t abs_timeout;
79
80 /* No timeout. Just query. */
81 if (timeout == 0)
82 return !bo->num_active_ioctls && !radeon_bo_is_busy(bo);
83
84 abs_timeout = os_time_get_absolute_timeout(timeout);
85
86 /* Wait if any ioctl is being submitted with this buffer. */
87 if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
88 return false;
89
90 /* Infinite timeout. */
91 if (abs_timeout == PIPE_TIMEOUT_INFINITE) {
92 radeon_bo_wait_idle(bo);
93 return true;
94 }
95
96 /* Other timeouts need to be emulated with a loop. */
97 while (radeon_bo_is_busy(bo)) {
98 if (os_time_get_nano() >= abs_timeout)
99 return false;
100 os_time_sleep(10);
101 }
102
103 return true;
104 }
105
106 static enum radeon_bo_domain get_valid_domain(enum radeon_bo_domain domain)
107 {
 108 /* Zero out any domain bits the driver doesn't understand. */
109 domain &= RADEON_DOMAIN_VRAM_GTT;
110
111 /* If no domain is set, we must set something... */
112 if (!domain)
113 domain = RADEON_DOMAIN_VRAM_GTT;
114
115 return domain;
116 }
117
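/* The GEM_OP ioctl used below needs a recent enough kernel (DRM minor 38);
 * on older kernels just report the default VRAM|GTT. */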
118 static enum radeon_bo_domain radeon_bo_get_initial_domain(
119 struct pb_buffer *buf)
120 {
121 struct radeon_bo *bo = (struct radeon_bo*)buf;
122 struct drm_radeon_gem_op args;
123
124 if (bo->rws->info.drm_minor < 38)
125 return RADEON_DOMAIN_VRAM_GTT;
126
127 memset(&args, 0, sizeof(args));
128 args.handle = bo->handle;
129 args.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN;
130
131 drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_OP,
132 &args, sizeof(args));
133
134 /* GEM domains and winsys domains are defined the same. */
135 return get_valid_domain(args.value);
136 }
137
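/* First-fit allocator for the GPU virtual address space: scan the hole list
 * for a free range that fits (splitting it when alignment leaves waste at the
 * front), otherwise bump rws->va_offset at the top of the used range. */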
138 static uint64_t radeon_bomgr_find_va(struct radeon_drm_winsys *rws,
139 uint64_t size, uint64_t alignment)
140 {
141 struct radeon_bo_va_hole *hole, *n;
142 uint64_t offset = 0, waste = 0;
143
144 /* All VM address space holes will implicitly start aligned to the
145 * size alignment, so we don't need to sanitize the alignment here
146 */
147 size = align(size, rws->info.gart_page_size);
148
149 pipe_mutex_lock(rws->bo_va_mutex);
150 /* first look for a hole */
151 LIST_FOR_EACH_ENTRY_SAFE(hole, n, &rws->va_holes, list) {
152 offset = hole->offset;
153 waste = offset % alignment;
154 waste = waste ? alignment - waste : 0;
155 offset += waste;
156 if (offset >= (hole->offset + hole->size)) {
157 continue;
158 }
159 if (!waste && hole->size == size) {
160 offset = hole->offset;
161 list_del(&hole->list);
162 FREE(hole);
163 pipe_mutex_unlock(rws->bo_va_mutex);
164 return offset;
165 }
166 if ((hole->size - waste) > size) {
167 if (waste) {
168 n = CALLOC_STRUCT(radeon_bo_va_hole);
169 n->size = waste;
170 n->offset = hole->offset;
171 list_add(&n->list, &hole->list);
172 }
173 hole->size -= (size + waste);
174 hole->offset += size + waste;
175 pipe_mutex_unlock(rws->bo_va_mutex);
176 return offset;
177 }
178 if ((hole->size - waste) == size) {
179 hole->size = waste;
180 pipe_mutex_unlock(rws->bo_va_mutex);
181 return offset;
182 }
183 }
184
185 offset = rws->va_offset;
186 waste = offset % alignment;
187 waste = waste ? alignment - waste : 0;
188 if (waste) {
189 n = CALLOC_STRUCT(radeon_bo_va_hole);
190 n->size = waste;
191 n->offset = offset;
192 list_add(&n->list, &rws->va_holes);
193 }
194 offset += waste;
195 rws->va_offset += size + waste;
196 pipe_mutex_unlock(rws->bo_va_mutex);
197 return offset;
198 }
199
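/* Return a virtual address range: either shrink the top of the used range,
 * or insert a hole into the sorted list and merge it with adjacent holes. */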
200 static void radeon_bomgr_free_va(struct radeon_drm_winsys *rws,
201 uint64_t va, uint64_t size)
202 {
203 struct radeon_bo_va_hole *hole;
204
205 size = align(size, rws->info.gart_page_size);
206
207 pipe_mutex_lock(rws->bo_va_mutex);
208 if ((va + size) == rws->va_offset) {
209 rws->va_offset = va;
210 /* Delete uppermost hole if it reaches the new top */
211 if (!LIST_IS_EMPTY(&rws->va_holes)) {
212 hole = container_of(rws->va_holes.next, hole, list);
213 if ((hole->offset + hole->size) == va) {
214 rws->va_offset = hole->offset;
215 list_del(&hole->list);
216 FREE(hole);
217 }
218 }
219 } else {
220 struct radeon_bo_va_hole *next;
221
222 hole = container_of(&rws->va_holes, hole, list);
223 LIST_FOR_EACH_ENTRY(next, &rws->va_holes, list) {
224 if (next->offset < va)
225 break;
226 hole = next;
227 }
228
229 if (&hole->list != &rws->va_holes) {
230 /* Grow upper hole if it's adjacent */
231 if (hole->offset == (va + size)) {
232 hole->offset = va;
233 hole->size += size;
234 /* Merge lower hole if it's adjacent */
235 if (next != hole && &next->list != &rws->va_holes &&
236 (next->offset + next->size) == va) {
237 next->size += hole->size;
238 list_del(&hole->list);
239 FREE(hole);
240 }
241 goto out;
242 }
243 }
244
245 /* Grow lower hole if it's adjacent */
246 if (next != hole && &next->list != &rws->va_holes &&
247 (next->offset + next->size) == va) {
248 next->size += size;
249 goto out;
250 }
251
 252 /* FIXME: on allocation failure we just lose the virtual address range;
 253 * maybe print a warning.
 254 */
255 next = CALLOC_STRUCT(radeon_bo_va_hole);
256 if (next) {
257 next->size = size;
258 next->offset = va;
259 list_add(&next->list, &hole->list);
260 }
261 }
262 out:
263 pipe_mutex_unlock(rws->bo_va_mutex);
264 }
265
266 void radeon_bo_destroy(struct pb_buffer *_buf)
267 {
268 struct radeon_bo *bo = radeon_bo(_buf);
269 struct radeon_drm_winsys *rws = bo->rws;
270 struct drm_gem_close args;
271
272 assert(bo->handle && "must not be called for slab entries");
273
274 memset(&args, 0, sizeof(args));
275
276 pipe_mutex_lock(rws->bo_handles_mutex);
277 util_hash_table_remove(rws->bo_handles, (void*)(uintptr_t)bo->handle);
278 if (bo->flink_name) {
279 util_hash_table_remove(rws->bo_names,
280 (void*)(uintptr_t)bo->flink_name);
281 }
282 pipe_mutex_unlock(rws->bo_handles_mutex);
283
284 if (bo->u.real.ptr)
285 os_munmap(bo->u.real.ptr, bo->base.size);
286
287 if (rws->info.has_virtual_memory) {
288 if (rws->va_unmap_working) {
289 struct drm_radeon_gem_va va;
290
291 va.handle = bo->handle;
292 va.vm_id = 0;
293 va.operation = RADEON_VA_UNMAP;
294 va.flags = RADEON_VM_PAGE_READABLE |
295 RADEON_VM_PAGE_WRITEABLE |
296 RADEON_VM_PAGE_SNOOPED;
297 va.offset = bo->va;
298
299 if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va,
300 sizeof(va)) != 0 &&
301 va.operation == RADEON_VA_RESULT_ERROR) {
302 fprintf(stderr, "radeon: Failed to deallocate virtual address for buffer:\n");
303 fprintf(stderr, "radeon: size : %"PRIu64" bytes\n", bo->base.size);
304 fprintf(stderr, "radeon: va : 0x%"PRIx64"\n", bo->va);
305 }
306 }
307
308 radeon_bomgr_free_va(rws, bo->va, bo->base.size);
309 }
310
311 /* Close object. */
312 args.handle = bo->handle;
313 drmIoctl(rws->fd, DRM_IOCTL_GEM_CLOSE, &args);
314
315 pipe_mutex_destroy(bo->u.real.map_mutex);
316
317 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
318 rws->allocated_vram -= align(bo->base.size, rws->info.gart_page_size);
319 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
320 rws->allocated_gtt -= align(bo->base.size, rws->info.gart_page_size);
321
322 if (bo->u.real.map_count >= 1) {
323 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
324 bo->rws->mapped_vram -= bo->base.size;
325 else
326 bo->rws->mapped_gtt -= bo->base.size;
327 }
328
329 FREE(bo);
330 }
331
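/* Buffers allocated from the reusable pool are returned to the pb_cache
 * instead of being destroyed right away; everything else is freed now. */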
332 static void radeon_bo_destroy_or_cache(struct pb_buffer *_buf)
333 {
334 struct radeon_bo *bo = radeon_bo(_buf);
335
336 assert(bo->handle && "must not be called for slab entries");
337
338 if (bo->u.real.use_reusable_pool)
339 pb_cache_add_buffer(&bo->u.real.cache_entry);
340 else
341 radeon_bo_destroy(_buf);
342 }
343
344 void *radeon_bo_do_map(struct radeon_bo *bo)
345 {
346 struct drm_radeon_gem_mmap args = {0};
347 void *ptr;
348 unsigned offset;
349
350 /* If the buffer is created from user memory, return the user pointer. */
351 if (bo->user_ptr)
352 return bo->user_ptr;
353
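/* Slab entries have no GEM handle of their own: map the backing (real)
 * buffer and add the entry's offset within it. */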
354 if (bo->handle) {
355 offset = 0;
356 } else {
357 offset = bo->va - bo->u.slab.real->va;
358 bo = bo->u.slab.real;
359 }
360
361 /* Map the buffer. */
362 pipe_mutex_lock(bo->u.real.map_mutex);
363 /* Return the pointer if it's already mapped. */
364 if (bo->u.real.ptr) {
365 bo->u.real.map_count++;
366 pipe_mutex_unlock(bo->u.real.map_mutex);
367 return (uint8_t*)bo->u.real.ptr + offset;
368 }
369 args.handle = bo->handle;
370 args.offset = 0;
371 args.size = (uint64_t)bo->base.size;
372 if (drmCommandWriteRead(bo->rws->fd,
373 DRM_RADEON_GEM_MMAP,
374 &args,
375 sizeof(args))) {
376 pipe_mutex_unlock(bo->u.real.map_mutex);
377 fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
378 bo, bo->handle);
379 return NULL;
380 }
381
382 ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
383 bo->rws->fd, args.addr_ptr);
384 if (ptr == MAP_FAILED) {
385 /* Clear the cache and try again. */
386 pb_cache_release_all_buffers(&bo->rws->bo_cache);
387
388 ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
389 bo->rws->fd, args.addr_ptr);
390 if (ptr == MAP_FAILED) {
391 pipe_mutex_unlock(bo->u.real.map_mutex);
392 fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
393 return NULL;
394 }
395 }
396 bo->u.real.ptr = ptr;
397 bo->u.real.map_count = 1;
398
399 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
400 bo->rws->mapped_vram += bo->base.size;
401 else
402 bo->rws->mapped_gtt += bo->base.size;
403
404 pipe_mutex_unlock(bo->u.real.map_mutex);
405 return (uint8_t*)bo->u.real.ptr + offset;
406 }
407
408 static void *radeon_bo_map(struct pb_buffer *buf,
409 struct radeon_winsys_cs *rcs,
410 enum pipe_transfer_usage usage)
411 {
412 struct radeon_bo *bo = (struct radeon_bo*)buf;
413 struct radeon_drm_cs *cs = (struct radeon_drm_cs*)rcs;
414
415 /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
416 if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
417 /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
418 if (usage & PIPE_TRANSFER_DONTBLOCK) {
419 if (!(usage & PIPE_TRANSFER_WRITE)) {
420 /* Mapping for read.
421 *
422 * Since we are mapping for read, we don't need to wait
423 * if the GPU is using the buffer for read too
424 * (neither one is changing it).
425 *
426 * Only check whether the buffer is being used for write. */
427 if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
428 cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
429 return NULL;
430 }
431
432 if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
433 RADEON_USAGE_WRITE)) {
434 return NULL;
435 }
436 } else {
437 if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
438 cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
439 return NULL;
440 }
441
442 if (!radeon_bo_wait((struct pb_buffer*)bo, 0,
443 RADEON_USAGE_READWRITE)) {
444 return NULL;
445 }
446 }
447 } else {
448 uint64_t time = os_time_get_nano();
449
450 if (!(usage & PIPE_TRANSFER_WRITE)) {
451 /* Mapping for read.
452 *
453 * Since we are mapping for read, we don't need to wait
454 * if the GPU is using the buffer for read too
455 * (neither one is changing it).
456 *
457 * Only check whether the buffer is being used for write. */
458 if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
459 cs->flush_cs(cs->flush_data, 0, NULL);
460 }
461 radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
462 RADEON_USAGE_WRITE);
463 } else {
464 /* Mapping for write. */
465 if (cs) {
466 if (radeon_bo_is_referenced_by_cs(cs, bo)) {
467 cs->flush_cs(cs->flush_data, 0, NULL);
468 } else {
469 /* Try to avoid busy-waiting in radeon_bo_wait. */
470 if (p_atomic_read(&bo->num_active_ioctls))
471 radeon_drm_cs_sync_flush(rcs);
472 }
473 }
474
475 radeon_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
476 RADEON_USAGE_READWRITE);
477 }
478
479 bo->rws->buffer_wait_time += os_time_get_nano() - time;
480 }
481 }
482
483 return radeon_bo_do_map(bo);
484 }
485
486 static void radeon_bo_unmap(struct pb_buffer *_buf)
487 {
488 struct radeon_bo *bo = (struct radeon_bo*)_buf;
489
490 if (bo->user_ptr)
491 return;
492
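/* For slab entries, the mapping lives on the backing (real) buffer. */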
493 if (!bo->handle)
494 bo = bo->u.slab.real;
495
496 pipe_mutex_lock(bo->u.real.map_mutex);
497 if (!bo->u.real.ptr) {
498 pipe_mutex_unlock(bo->u.real.map_mutex);
499 return; /* it's not been mapped */
500 }
501
502 assert(bo->u.real.map_count);
503 if (--bo->u.real.map_count) {
504 pipe_mutex_unlock(bo->u.real.map_mutex);
505 return; /* it's been mapped multiple times */
506 }
507
508 os_munmap(bo->u.real.ptr, bo->base.size);
509 bo->u.real.ptr = NULL;
510
511 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
512 bo->rws->mapped_vram -= bo->base.size;
513 else
514 bo->rws->mapped_gtt -= bo->base.size;
515
516 pipe_mutex_unlock(bo->u.real.map_mutex);
517 }
518
519 static const struct pb_vtbl radeon_bo_vtbl = {
520 radeon_bo_destroy_or_cache
521 /* other functions are never called */
522 };
523
524 #ifndef RADEON_GEM_GTT_WC
525 #define RADEON_GEM_GTT_WC (1 << 2)
526 #endif
527 #ifndef RADEON_GEM_CPU_ACCESS
528 /* BO is expected to be accessed by the CPU */
529 #define RADEON_GEM_CPU_ACCESS (1 << 3)
530 #endif
531 #ifndef RADEON_GEM_NO_CPU_ACCESS
532 /* CPU access is not expected to work for this BO */
533 #define RADEON_GEM_NO_CPU_ACCESS (1 << 4)
534 #endif
535
536 static struct radeon_bo *radeon_create_bo(struct radeon_drm_winsys *rws,
537 unsigned size, unsigned alignment,
538 unsigned usage,
539 unsigned initial_domains,
540 unsigned flags,
541 unsigned pb_cache_bucket)
542 {
543 struct radeon_bo *bo;
544 struct drm_radeon_gem_create args;
545 int r;
546
547 memset(&args, 0, sizeof(args));
548
549 assert(initial_domains);
550 assert((initial_domains &
551 ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);
552
553 args.size = size;
554 args.alignment = alignment;
555 args.initial_domain = initial_domains;
556 args.flags = 0;
557
558 if (flags & RADEON_FLAG_GTT_WC)
559 args.flags |= RADEON_GEM_GTT_WC;
560 if (flags & RADEON_FLAG_CPU_ACCESS)
561 args.flags |= RADEON_GEM_CPU_ACCESS;
562 if (flags & RADEON_FLAG_NO_CPU_ACCESS)
563 args.flags |= RADEON_GEM_NO_CPU_ACCESS;
564
565 if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
566 &args, sizeof(args))) {
567 fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
568 fprintf(stderr, "radeon: size : %u bytes\n", size);
569 fprintf(stderr, "radeon: alignment : %u bytes\n", alignment);
570 fprintf(stderr, "radeon: domains : %u\n", args.initial_domain);
571 fprintf(stderr, "radeon: flags : %u\n", args.flags);
572 return NULL;
573 }
574
575 assert(args.handle != 0);
576
577 bo = CALLOC_STRUCT(radeon_bo);
578 if (!bo)
579 return NULL;
580
581 pipe_reference_init(&bo->base.reference, 1);
582 bo->base.alignment = alignment;
583 bo->base.usage = usage;
584 bo->base.size = size;
585 bo->base.vtbl = &radeon_bo_vtbl;
586 bo->rws = rws;
587 bo->handle = args.handle;
588 bo->va = 0;
589 bo->initial_domain = initial_domains;
590 pipe_mutex_init(bo->u.real.map_mutex);
591 pb_cache_init_entry(&rws->bo_cache, &bo->u.real.cache_entry, &bo->base,
592 pb_cache_bucket);
593
594 if (rws->info.has_virtual_memory) {
595 struct drm_radeon_gem_va va;
596 unsigned va_gap_size;
597
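/* When VM checking (check_vm) is enabled, leave a gap of address space after
 * the buffer; presumably this helps catch out-of-bounds accesses. */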
598 va_gap_size = rws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
599 bo->va = radeon_bomgr_find_va(rws, size + va_gap_size, alignment);
600
601 va.handle = bo->handle;
602 va.vm_id = 0;
603 va.operation = RADEON_VA_MAP;
604 va.flags = RADEON_VM_PAGE_READABLE |
605 RADEON_VM_PAGE_WRITEABLE |
606 RADEON_VM_PAGE_SNOOPED;
607 va.offset = bo->va;
608 r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
609 if (r && va.operation == RADEON_VA_RESULT_ERROR) {
610 fprintf(stderr, "radeon: Failed to allocate virtual address for buffer:\n");
611 fprintf(stderr, "radeon: size : %d bytes\n", size);
612 fprintf(stderr, "radeon: alignment : %d bytes\n", alignment);
613 fprintf(stderr, "radeon: domains : %d\n", args.initial_domain);
614 fprintf(stderr, "radeon: va : 0x%016llx\n", (unsigned long long)bo->va);
615 radeon_bo_destroy(&bo->base);
616 return NULL;
617 }
618 pipe_mutex_lock(rws->bo_handles_mutex);
619 if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
620 struct pb_buffer *b = &bo->base;
621 struct radeon_bo *old_bo =
622 util_hash_table_get(rws->bo_vas, (void*)(uintptr_t)va.offset);
623
624 pipe_mutex_unlock(rws->bo_handles_mutex);
625 pb_reference(&b, &old_bo->base);
626 return radeon_bo(b);
627 }
628
629 util_hash_table_set(rws->bo_vas, (void*)(uintptr_t)bo->va, bo);
630 pipe_mutex_unlock(rws->bo_handles_mutex);
631 }
632
633 if (initial_domains & RADEON_DOMAIN_VRAM)
634 rws->allocated_vram += align(size, rws->info.gart_page_size);
635 else if (initial_domains & RADEON_DOMAIN_GTT)
636 rws->allocated_gtt += align(size, rws->info.gart_page_size);
637
638 return bo;
639 }
640
641 bool radeon_bo_can_reclaim(struct pb_buffer *_buf)
642 {
643 struct radeon_bo *bo = radeon_bo(_buf);
644
645 if (radeon_bo_is_referenced_by_any_cs(bo))
646 return false;
647
648 return radeon_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
649 }
650
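/* Convert the 3-bit Evergreen tile-split field (0..6) to a size in bytes
 * (64 << field) and back. */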
651 static unsigned eg_tile_split(unsigned tile_split)
652 {
653 switch (tile_split) {
654 case 0: tile_split = 64; break;
655 case 1: tile_split = 128; break;
656 case 2: tile_split = 256; break;
657 case 3: tile_split = 512; break;
658 default:
659 case 4: tile_split = 1024; break;
660 case 5: tile_split = 2048; break;
661 case 6: tile_split = 4096; break;
662 }
663 return tile_split;
664 }
665
666 static unsigned eg_tile_split_rev(unsigned eg_tile_split)
667 {
668 switch (eg_tile_split) {
669 case 64: return 0;
670 case 128: return 1;
671 case 256: return 2;
672 case 512: return 3;
673 default:
674 case 1024: return 4;
675 case 2048: return 5;
676 case 4096: return 6;
677 }
678 }
679
680 static void radeon_bo_get_metadata(struct pb_buffer *_buf,
681 struct radeon_bo_metadata *md)
682 {
683 struct radeon_bo *bo = radeon_bo(_buf);
684 struct drm_radeon_gem_set_tiling args;
685
686 assert(bo->handle && "must not be called for slab entries");
687
688 memset(&args, 0, sizeof(args));
689
690 args.handle = bo->handle;
691
692 drmCommandWriteRead(bo->rws->fd,
693 DRM_RADEON_GEM_GET_TILING,
694 &args,
695 sizeof(args));
696
697 md->microtile = RADEON_LAYOUT_LINEAR;
698 md->macrotile = RADEON_LAYOUT_LINEAR;
699 if (args.tiling_flags & RADEON_TILING_MICRO)
700 md->microtile = RADEON_LAYOUT_TILED;
701 else if (args.tiling_flags & RADEON_TILING_MICRO_SQUARE)
702 md->microtile = RADEON_LAYOUT_SQUARETILED;
703
704 if (args.tiling_flags & RADEON_TILING_MACRO)
705 md->macrotile = RADEON_LAYOUT_TILED;
706
707 md->bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
708 md->bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
709 md->tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
710 md->mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
711 md->tile_split = eg_tile_split(md->tile_split);
712 md->scanout = bo->rws->gen >= DRV_SI && !(args.tiling_flags & RADEON_TILING_R600_NO_SCANOUT);
713 }
714
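/* Pack the winsys metadata back into RADEON_TILING_* flags and hand them to
 * the kernel via SET_TILING, after waiting for ioctls that still reference
 * this buffer. */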
715 static void radeon_bo_set_metadata(struct pb_buffer *_buf,
716 struct radeon_bo_metadata *md)
717 {
718 struct radeon_bo *bo = radeon_bo(_buf);
719 struct drm_radeon_gem_set_tiling args;
720
721 assert(bo->handle && "must not be called for slab entries");
722
723 memset(&args, 0, sizeof(args));
724
725 os_wait_until_zero(&bo->num_active_ioctls, PIPE_TIMEOUT_INFINITE);
726
727 if (md->microtile == RADEON_LAYOUT_TILED)
728 args.tiling_flags |= RADEON_TILING_MICRO;
729 else if (md->microtile == RADEON_LAYOUT_SQUARETILED)
730 args.tiling_flags |= RADEON_TILING_MICRO_SQUARE;
731
732 if (md->macrotile == RADEON_LAYOUT_TILED)
733 args.tiling_flags |= RADEON_TILING_MACRO;
734
735 args.tiling_flags |= (md->bankw & RADEON_TILING_EG_BANKW_MASK) <<
736 RADEON_TILING_EG_BANKW_SHIFT;
737 args.tiling_flags |= (md->bankh & RADEON_TILING_EG_BANKH_MASK) <<
738 RADEON_TILING_EG_BANKH_SHIFT;
739 if (md->tile_split) {
740 args.tiling_flags |= (eg_tile_split_rev(md->tile_split) &
741 RADEON_TILING_EG_TILE_SPLIT_MASK) <<
742 RADEON_TILING_EG_TILE_SPLIT_SHIFT;
743 }
744 args.tiling_flags |= (md->mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
745 RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;
746
747 if (bo->rws->gen >= DRV_SI && !md->scanout)
748 args.tiling_flags |= RADEON_TILING_R600_NO_SCANOUT;
749
750 args.handle = bo->handle;
751 args.pitch = md->stride;
752
753 drmCommandWriteRead(bo->rws->fd,
754 DRM_RADEON_GEM_SET_TILING,
755 &args,
756 sizeof(args));
757 }
758
759 static struct pb_buffer *
760 radeon_winsys_bo_create(struct radeon_winsys *rws,
761 uint64_t size,
762 unsigned alignment,
763 enum radeon_bo_domain domain,
764 enum radeon_bo_flag flags)
765 {
766 struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
767 struct radeon_bo *bo;
768 unsigned usage = 0, pb_cache_bucket;
769
770 /* Only 32-bit sizes are supported. */
771 if (size > UINT_MAX)
772 return NULL;
773
774 /* This flag is irrelevant for the cache. */
775 flags &= ~RADEON_FLAG_HANDLE;
776
777 /* Align size to page size. This is the minimum alignment for normal
778 * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
779 * like constant/uniform buffers, can benefit from better and more reuse.
780 */
781 size = align(size, ws->info.gart_page_size);
782 alignment = align(alignment, ws->info.gart_page_size);
783
784 /* Only set one usage bit each for domains and flags, or the cache manager
785 * might consider different sets of domains / flags compatible
786 */
787 if (domain == RADEON_DOMAIN_VRAM_GTT)
788 usage = 1 << 2;
789 else
790 usage = (unsigned)domain >> 1;
791 assert(flags < sizeof(usage) * 8 - 3);
792 usage |= 1 << (flags + 3);
793
794 /* Determine the pb_cache bucket for minimizing pb_cache misses. */
795 pb_cache_bucket = 0;
796 if (size <= 4096) /* small buffers */
797 pb_cache_bucket += 1;
798 if (domain & RADEON_DOMAIN_VRAM) /* VRAM or VRAM+GTT */
799 pb_cache_bucket += 2;
800 if (flags == RADEON_FLAG_GTT_WC) /* WC */
801 pb_cache_bucket += 4;
802 assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));
803
804 bo = radeon_bo(pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment,
805 usage, pb_cache_bucket));
806 if (bo)
807 return &bo->base;
808
809 bo = radeon_create_bo(ws, size, alignment, usage, domain, flags,
810 pb_cache_bucket);
811 if (!bo) {
812 /* Clear the cache and try again. */
813 pb_cache_release_all_buffers(&ws->bo_cache);
814 bo = radeon_create_bo(ws, size, alignment, usage, domain, flags,
815 pb_cache_bucket);
816 if (!bo)
817 return NULL;
818 }
819
820 bo->u.real.use_reusable_pool = true;
821
822 pipe_mutex_lock(ws->bo_handles_mutex);
823 util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
824 pipe_mutex_unlock(ws->bo_handles_mutex);
825
826 return &bo->base;
827 }
828
829 static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
830 void *pointer, uint64_t size)
831 {
832 struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
833 struct drm_radeon_gem_userptr args;
834 struct radeon_bo *bo;
835 int r;
836
837 bo = CALLOC_STRUCT(radeon_bo);
838 if (!bo)
839 return NULL;
840
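/* Register the user pointer as a GEM object. The flags restrict this to
 * anonymous memory and ask the kernel to validate the range and register an
 * MMU notifier (a rough reading of the flag names). */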
841 memset(&args, 0, sizeof(args));
842 args.addr = (uintptr_t)pointer;
843 args.size = align(size, ws->info.gart_page_size);
844 args.flags = RADEON_GEM_USERPTR_ANONONLY |
845 RADEON_GEM_USERPTR_VALIDATE |
846 RADEON_GEM_USERPTR_REGISTER;
847 if (drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_USERPTR,
848 &args, sizeof(args))) {
849 FREE(bo);
850 return NULL;
851 }
852
853 assert(args.handle != 0);
854
855 pipe_mutex_lock(ws->bo_handles_mutex);
856
857 /* Initialize it. */
858 pipe_reference_init(&bo->base.reference, 1);
859 bo->handle = args.handle;
860 bo->base.alignment = 0;
861 bo->base.size = size;
862 bo->base.vtbl = &radeon_bo_vtbl;
863 bo->rws = ws;
864 bo->user_ptr = pointer;
865 bo->va = 0;
866 bo->initial_domain = RADEON_DOMAIN_GTT;
867 pipe_mutex_init(bo->u.real.map_mutex);
868
869 util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
870
871 pipe_mutex_unlock(ws->bo_handles_mutex);
872
873 if (ws->info.has_virtual_memory) {
874 struct drm_radeon_gem_va va;
875
876 bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);
877
878 va.handle = bo->handle;
879 va.operation = RADEON_VA_MAP;
880 va.vm_id = 0;
881 va.offset = bo->va;
882 va.flags = RADEON_VM_PAGE_READABLE |
883 RADEON_VM_PAGE_WRITEABLE |
884 RADEON_VM_PAGE_SNOOPED;
885 va.offset = bo->va;
886 r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
887 if (r && va.operation == RADEON_VA_RESULT_ERROR) {
888 fprintf(stderr, "radeon: Failed to assign virtual address space\n");
889 radeon_bo_destroy(&bo->base);
890 return NULL;
891 }
892 pipe_mutex_lock(ws->bo_handles_mutex);
893 if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
894 struct pb_buffer *b = &bo->base;
895 struct radeon_bo *old_bo =
896 util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);
897
898 pipe_mutex_unlock(ws->bo_handles_mutex);
899 pb_reference(&b, &old_bo->base);
900 return b;
901 }
902
903 util_hash_table_set(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
904 pipe_mutex_unlock(ws->bo_handles_mutex);
905 }
906
907 ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);
908
909 return (struct pb_buffer*)bo;
910 }
911
912 static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
913 struct winsys_handle *whandle,
914 unsigned *stride,
915 unsigned *offset)
916 {
917 struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
918 struct radeon_bo *bo;
919 int r;
920 unsigned handle;
921 uint64_t size = 0;
922
923 if (!offset && whandle->offset != 0) {
924 fprintf(stderr, "attempt to import unsupported winsys offset %u\n",
925 whandle->offset);
926 return NULL;
927 }
928
929 /* We must maintain a list of pairs <handle, bo>, so that we always return
930 * the same BO for one particular handle. If we didn't do that and created
931 * more than one BO for the same handle and then relocated them in a CS,
932 * we would hit a deadlock in the kernel.
933 *
934 * The list of pairs is guarded by a mutex, of course. */
935 pipe_mutex_lock(ws->bo_handles_mutex);
936
937 if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
 938 /* First check whether a bo already exists for this handle. */
939 bo = util_hash_table_get(ws->bo_names, (void*)(uintptr_t)whandle->handle);
940 } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
941 /* We must first get the GEM handle, as fds are unreliable keys */
942 r = drmPrimeFDToHandle(ws->fd, whandle->handle, &handle);
943 if (r)
944 goto fail;
945 bo = util_hash_table_get(ws->bo_handles, (void*)(uintptr_t)handle);
946 } else {
947 /* Unknown handle type */
948 goto fail;
949 }
950
951 if (bo) {
952 /* Increase the refcount. */
953 struct pb_buffer *b = NULL;
954 pb_reference(&b, &bo->base);
955 goto done;
956 }
957
958 /* There isn't, create a new one. */
959 bo = CALLOC_STRUCT(radeon_bo);
960 if (!bo) {
961 goto fail;
962 }
963
964 if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
965 struct drm_gem_open open_arg = {};
966 memset(&open_arg, 0, sizeof(open_arg));
967 /* Open the BO. */
968 open_arg.name = whandle->handle;
969 if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
970 FREE(bo);
971 goto fail;
972 }
973 handle = open_arg.handle;
974 size = open_arg.size;
975 bo->flink_name = whandle->handle;
976 } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
977 size = lseek(whandle->handle, 0, SEEK_END);
978 /*
979 * Could check errno to determine whether the kernel is new enough, but
980 * it doesn't really matter why this failed, just that it failed.
981 */
982 if (size == (off_t)-1) {
983 FREE(bo);
984 goto fail;
985 }
986 lseek(whandle->handle, 0, SEEK_SET);
987 }
988
989 assert(handle != 0);
990
991 bo->handle = handle;
992
993 /* Initialize it. */
994 pipe_reference_init(&bo->base.reference, 1);
995 bo->base.alignment = 0;
996 bo->base.size = (unsigned) size;
997 bo->base.vtbl = &radeon_bo_vtbl;
998 bo->rws = ws;
999 bo->va = 0;
1000 pipe_mutex_init(bo->u.real.map_mutex);
1001
1002 if (bo->flink_name)
1003 util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
1004
1005 util_hash_table_set(ws->bo_handles, (void*)(uintptr_t)bo->handle, bo);
1006
1007 done:
1008 pipe_mutex_unlock(ws->bo_handles_mutex);
1009
1010 if (stride)
1011 *stride = whandle->stride;
1012 if (offset)
1013 *offset = whandle->offset;
1014
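/* Imported buffers also need a virtual address mapping; if the kernel says
 * the VA already exists, reuse the BO that owns that mapping instead. */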
1015 if (ws->info.has_virtual_memory && !bo->va) {
1016 struct drm_radeon_gem_va va;
1017
1018 bo->va = radeon_bomgr_find_va(ws, bo->base.size, 1 << 20);
1019
1020 va.handle = bo->handle;
1021 va.operation = RADEON_VA_MAP;
1022 va.vm_id = 0;
1023 va.offset = bo->va;
1024 va.flags = RADEON_VM_PAGE_READABLE |
1025 RADEON_VM_PAGE_WRITEABLE |
1026 RADEON_VM_PAGE_SNOOPED;
1027 va.offset = bo->va;
1028 r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
1029 if (r && va.operation == RADEON_VA_RESULT_ERROR) {
1030 fprintf(stderr, "radeon: Failed to assign virtual address space\n");
1031 radeon_bo_destroy(&bo->base);
1032 return NULL;
1033 }
1034 pipe_mutex_lock(ws->bo_handles_mutex);
1035 if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
1036 struct pb_buffer *b = &bo->base;
1037 struct radeon_bo *old_bo =
1038 util_hash_table_get(ws->bo_vas, (void*)(uintptr_t)va.offset);
1039
1040 pipe_mutex_unlock(ws->bo_handles_mutex);
1041 pb_reference(&b, &old_bo->base);
1042 return b;
1043 }
1044
1045 util_hash_table_set(ws->bo_vas, (void*)(uintptr_t)bo->va, bo);
1046 pipe_mutex_unlock(ws->bo_handles_mutex);
1047 }
1048
1049 bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);
1050
1051 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
1052 ws->allocated_vram += align(bo->base.size, ws->info.gart_page_size);
1053 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
1054 ws->allocated_gtt += align(bo->base.size, ws->info.gart_page_size);
1055
1056 return (struct pb_buffer*)bo;
1057
1058 fail:
1059 pipe_mutex_unlock(ws->bo_handles_mutex);
1060 return NULL;
1061 }
1062
1063 static bool radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
1064 unsigned stride, unsigned offset,
1065 unsigned slice_size,
1066 struct winsys_handle *whandle)
1067 {
1068 struct drm_gem_flink flink;
1069 struct radeon_bo *bo = radeon_bo(buffer);
1070 struct radeon_drm_winsys *ws = bo->rws;
1071
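/* Only the real backing buffer has a GEM handle to export; slab entries
 * export their parent and adjust the offset accordingly. */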
1072 if (!bo->handle) {
1073 offset += bo->va - bo->u.slab.real->va;
1074 bo = bo->u.slab.real;
1075 }
1076
1077 memset(&flink, 0, sizeof(flink));
1078
1079 bo->u.real.use_reusable_pool = false;
1080
1081 if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
1082 if (!bo->flink_name) {
1083 flink.handle = bo->handle;
1084
1085 if (ioctl(ws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
1086 return false;
1087 }
1088
1089 bo->flink_name = flink.name;
1090
1091 pipe_mutex_lock(ws->bo_handles_mutex);
1092 util_hash_table_set(ws->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
1093 pipe_mutex_unlock(ws->bo_handles_mutex);
1094 }
1095 whandle->handle = bo->flink_name;
1096 } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
1097 whandle->handle = bo->handle;
1098 } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
1099 if (drmPrimeHandleToFD(ws->fd, bo->handle, DRM_CLOEXEC, (int*)&whandle->handle))
1100 return false;
1101 }
1102
1103 whandle->stride = stride;
1104 whandle->offset = offset;
1105 whandle->offset += slice_size * whandle->layer;
1106
1107 return true;
1108 }
1109
1110 static bool radeon_winsys_bo_is_user_ptr(struct pb_buffer *buf)
1111 {
1112 return ((struct radeon_bo*)buf)->user_ptr != NULL;
1113 }
1114
1115 static uint64_t radeon_winsys_bo_va(struct pb_buffer *buf)
1116 {
1117 return ((struct radeon_bo*)buf)->va;
1118 }
1119
1120 void radeon_drm_bo_init_functions(struct radeon_drm_winsys *ws)
1121 {
1122 ws->base.buffer_set_metadata = radeon_bo_set_metadata;
1123 ws->base.buffer_get_metadata = radeon_bo_get_metadata;
1124 ws->base.buffer_map = radeon_bo_map;
1125 ws->base.buffer_unmap = radeon_bo_unmap;
1126 ws->base.buffer_wait = radeon_bo_wait;
1127 ws->base.buffer_create = radeon_winsys_bo_create;
1128 ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
1129 ws->base.buffer_from_ptr = radeon_winsys_bo_from_ptr;
1130 ws->base.buffer_is_user_ptr = radeon_winsys_bo_is_user_ptr;
1131 ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
1132 ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
1133 ws->base.buffer_get_initial_domain = radeon_bo_get_initial_domain;
1134 }