/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#define _FILE_OFFSET_BITS 64
#include "radeon_drm_cs.h"

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/u_simple_list.h"
#include "util/u_double_list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"
#include "os/os_time.h"

#include "state_tracker/drm_driver.h"

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>

/*
 * These definitions are copied from radeon_drm.h. Once an updated libdrm
 * is released, we should bump the configure.ac requirement for it and
 * remove the following defines.
 */
#define RADEON_BO_FLAGS_MACRO_TILE  1
#define RADEON_BO_FLAGS_MICRO_TILE  2
#define RADEON_BO_FLAGS_MICRO_TILE_SQUARE 0x20

#ifndef DRM_RADEON_GEM_WAIT
#define DRM_RADEON_GEM_WAIT     0x2b

#define RADEON_GEM_NO_WAIT      0x1
#define RADEON_GEM_USAGE_READ   0x2
#define RADEON_GEM_USAGE_WRITE  0x4

struct drm_radeon_gem_wait {
    uint32_t    handle;
    uint32_t    flags;  /* one of RADEON_GEM_* */
};

#endif

#ifndef RADEON_VA_MAP

#define RADEON_VA_MAP               1
#define RADEON_VA_UNMAP             2

#define RADEON_VA_RESULT_OK         0
#define RADEON_VA_RESULT_ERROR      1
#define RADEON_VA_RESULT_VA_EXIST   2

#define RADEON_VM_PAGE_VALID        (1 << 0)
#define RADEON_VM_PAGE_READABLE     (1 << 1)
#define RADEON_VM_PAGE_WRITEABLE    (1 << 2)
#define RADEON_VM_PAGE_SYSTEM       (1 << 3)
#define RADEON_VM_PAGE_SNOOPED      (1 << 4)

struct drm_radeon_gem_va {
    uint32_t    handle;
    uint32_t    operation;
    uint32_t    vm_id;
    uint32_t    flags;
    uint64_t    offset;
};

#define DRM_RADEON_GEM_VA   0x2b
#endif

#ifndef DRM_RADEON_GEM_OP
#define DRM_RADEON_GEM_OP   0x2c

/* Sets or returns a value associated with a buffer. */
struct drm_radeon_gem_op {
    uint32_t    handle; /* buffer */
    uint32_t    op;     /* RADEON_GEM_OP_* */
    uint64_t    value;  /* input or return value */
};

#define RADEON_GEM_OP_GET_INITIAL_DOMAIN    0
#define RADEON_GEM_OP_SET_INITIAL_DOMAIN    1
#endif


extern const struct pb_vtbl radeon_bo_vtbl;


static INLINE struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
    assert(bo->vtbl == &radeon_bo_vtbl);
    return (struct radeon_bo *)bo;
}

struct radeon_bo_va_hole {
    struct list_head list;
    uint64_t         offset;
    uint64_t         size;
};

struct radeon_bomgr {
    /* Base class. */
    struct pb_manager base;

    /* Winsys. */
    struct radeon_drm_winsys *rws;

    /* List of buffer GEM names. Protected by bo_handles_mutex. */
    struct util_hash_table *bo_names;
    /* List of buffer handles. Protected by bo_handles_mutex. */
    struct util_hash_table *bo_handles;
    /* List of buffer virtual memory ranges. Protected by bo_handles_mutex. */
    struct util_hash_table *bo_vas;
    pipe_mutex bo_handles_mutex;
    pipe_mutex bo_va_mutex;

    /* Is virtual address space supported? */
    bool va;
    uint64_t va_offset;
    struct list_head va_holes;
};

static INLINE struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
{
    return (struct radeon_bomgr *)mgr;
}

static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = NULL;

    if (_buf->vtbl == &radeon_bo_vtbl) {
        bo = radeon_bo(_buf);
    } else {
        struct pb_buffer *base_buf;
        pb_size offset;
        pb_get_base_buffer(_buf, &base_buf, &offset);

        if (base_buf->vtbl == &radeon_bo_vtbl)
            bo = radeon_bo(base_buf);
    }

    return bo;
}

static void radeon_bo_wait(struct pb_buffer *_buf, enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);

    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    /* XXX use this when it's ready */
    /*if (bo->rws->info.drm_minor >= 12) {
        struct drm_radeon_gem_wait args = {};
        args.handle = bo->handle;
        args.flags = usage;
        while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
                                   &args, sizeof(args)) == -EBUSY);
    } else*/ {
        struct drm_radeon_gem_wait_idle args;
        memset(&args, 0, sizeof(args));
        args.handle = bo->handle;
        while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                               &args, sizeof(args)) == -EBUSY);
    }
}

static boolean radeon_bo_is_busy(struct pb_buffer *_buf,
                                 enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);

    if (p_atomic_read(&bo->num_active_ioctls)) {
        return TRUE;
    }

    /* XXX use this when it's ready */
    /*if (bo->rws->info.drm_minor >= 12) {
        struct drm_radeon_gem_wait args = {};
        args.handle = bo->handle;
        args.flags = usage | RADEON_GEM_NO_WAIT;
        return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
                                   &args, sizeof(args)) != 0;
    } else*/ {
        struct drm_radeon_gem_busy args;
        memset(&args, 0, sizeof(args));
        args.handle = bo->handle;
        return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                                   &args, sizeof(args)) != 0;
    }
}

static enum radeon_bo_domain get_valid_domain(enum radeon_bo_domain domain)
{
    /* Zero domains the driver doesn't understand. */
    domain &= RADEON_DOMAIN_VRAM_GTT;

    /* If no domain is set, we must set something... */
    if (!domain)
        domain = RADEON_DOMAIN_VRAM_GTT;

    return domain;
}

static enum radeon_bo_domain radeon_bo_get_initial_domain(
        struct radeon_winsys_cs_handle *buf)
{
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    struct drm_radeon_gem_op args;

    if (bo->rws->info.drm_minor < 38)
        return RADEON_DOMAIN_VRAM_GTT;

    memset(&args, 0, sizeof(args));
    args.handle = bo->handle;
    args.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN;

    drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_OP,
                        &args, sizeof(args));

    /* GEM domains and winsys domains are defined the same. */
    return get_valid_domain(args.value);
}

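/* First-fit allocator for the per-process virtual address space.
 * Sizes and the base alignment are rounded up to the 4096-byte page size.
 * Free ranges are kept in the va_holes list: a hole is consumed whole when
 * it fits exactly, or split when only part of it is used, with the
 * remainder put back on the list. If no hole fits, the range is carved
 * from the top of the address space (va_offset).
 */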
static uint64_t radeon_bomgr_find_va(struct radeon_bomgr *mgr, uint64_t size, uint64_t alignment)
{
    struct radeon_bo_va_hole *hole, *n;
    uint64_t offset = 0, waste = 0;

    alignment = MAX2(alignment, 4096);
    size = align(size, 4096);

    pipe_mutex_lock(mgr->bo_va_mutex);
    /* first look for a hole */
    LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
        offset = hole->offset;
        waste = offset % alignment;
        waste = waste ? alignment - waste : 0;
        offset += waste;
        if (offset >= (hole->offset + hole->size)) {
            continue;
        }
        if (!waste && hole->size == size) {
            offset = hole->offset;
            list_del(&hole->list);
            FREE(hole);
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) > size) {
            if (waste) {
                n = CALLOC_STRUCT(radeon_bo_va_hole);
                n->size = waste;
                n->offset = hole->offset;
                list_add(&n->list, &hole->list);
            }
            hole->size -= (size + waste);
            hole->offset += size + waste;
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) == size) {
            hole->size = waste;
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
    }

    offset = mgr->va_offset;
    waste = offset % alignment;
    waste = waste ? alignment - waste : 0;
    if (waste) {
        n = CALLOC_STRUCT(radeon_bo_va_hole);
        n->size = waste;
        n->offset = offset;
        list_add(&n->list, &mgr->va_holes);
    }
    offset += waste;
    mgr->va_offset += size + waste;
    pipe_mutex_unlock(mgr->bo_va_mutex);
    return offset;
}

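/* Return a virtual address range to the allocator. If the range ends at
 * the current top of the address space, the top is simply lowered (and the
 * uppermost hole absorbed if it now touches the top); otherwise a hole is
 * recorded and merged with adjacent holes where possible. The va_holes
 * list is kept sorted by descending offset.
 */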
static void radeon_bomgr_free_va(struct radeon_bomgr *mgr, uint64_t va, uint64_t size)
{
    struct radeon_bo_va_hole *hole;

    size = align(size, 4096);

    pipe_mutex_lock(mgr->bo_va_mutex);
    if ((va + size) == mgr->va_offset) {
        mgr->va_offset = va;
        /* Delete the uppermost hole if it reaches the new top. */
        if (!LIST_IS_EMPTY(&mgr->va_holes)) {
            hole = container_of(mgr->va_holes.next, hole, list);
            if ((hole->offset + hole->size) == va) {
                mgr->va_offset = hole->offset;
                list_del(&hole->list);
                FREE(hole);
            }
        }
    } else {
        struct radeon_bo_va_hole *next;

        hole = container_of(&mgr->va_holes, hole, list);
        LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
            if (next->offset < va)
                break;
            hole = next;
        }

        if (&hole->list != &mgr->va_holes) {
            /* Grow the upper hole if it's adjacent. */
            if (hole->offset == (va + size)) {
                hole->offset = va;
                hole->size += size;
                /* Merge the lower hole if it's adjacent too. */
                if (next != hole && &next->list != &mgr->va_holes &&
                    (next->offset + next->size) == va) {
                    next->size += hole->size;
                    list_del(&hole->list);
                    FREE(hole);
                }
                goto out;
            }
        }

        /* Grow the lower hole if it's adjacent. */
        if (next != hole && &next->list != &mgr->va_holes &&
            (next->offset + next->size) == va) {
            next->size += size;
            goto out;
        }

        /* FIXME: on allocation failure we just leak the virtual address
         * range; maybe print a warning. */
        next = CALLOC_STRUCT(radeon_bo_va_hole);
        if (next) {
            next->size = size;
            next->offset = va;
            list_add(&next->list, &hole->list);
        }
    }
out:
    pipe_mutex_unlock(mgr->bo_va_mutex);
}

static void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_bomgr *mgr = bo->mgr;
    struct drm_gem_close args;

    memset(&args, 0, sizeof(args));

    pipe_mutex_lock(bo->mgr->bo_handles_mutex);
    util_hash_table_remove(bo->mgr->bo_handles, (void*)(uintptr_t)bo->handle);
    if (bo->name) {
        util_hash_table_remove(bo->mgr->bo_names,
                               (void*)(uintptr_t)bo->name);
    }
    pipe_mutex_unlock(bo->mgr->bo_handles_mutex);

    if (bo->ptr)
        os_munmap(bo->ptr, bo->base.size);

    /* Close object. */
    args.handle = bo->handle;
    drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args);

    if (mgr->va) {
        radeon_bomgr_free_va(mgr, bo->va, bo->base.size);
    }

    pipe_mutex_destroy(bo->map_mutex);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        bo->rws->allocated_vram -= align(bo->base.size, 4096);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        bo->rws->allocated_gtt -= align(bo->base.size, 4096);
    FREE(bo);
}

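/* Map the buffer into the CPU address space, creating the mapping lazily.
 * The fast path returns an existing mapping without taking the lock; the
 * slow path does the GEM mmap ioctl and os_mmap under map_mutex, with a
 * second check of bo->ptr to handle the race with a concurrent mapper.
 */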
void *radeon_bo_do_map(struct radeon_bo *bo)
{
    struct drm_radeon_gem_mmap args = {0};
    void *ptr;

    /* Return the pointer if it's already mapped. */
    if (bo->ptr)
        return bo->ptr;

    /* Map the buffer. */
    pipe_mutex_lock(bo->map_mutex);
    /* Return the pointer if it's already mapped (in case of a race). */
    if (bo->ptr) {
        pipe_mutex_unlock(bo->map_mutex);
        return bo->ptr;
    }
    args.handle = bo->handle;
    args.offset = 0;
    args.size = (uint64_t)bo->base.size;
    if (drmCommandWriteRead(bo->rws->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args))) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                bo, bo->handle);
        return NULL;
    }

    ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                  bo->rws->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
        return NULL;
    }
    bo->ptr = ptr;
    pipe_mutex_unlock(bo->map_mutex);

    return bo->ptr;
}

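/* Synchronized map: unless PIPE_TRANSFER_UNSYNCHRONIZED is set, make sure
 * the GPU is done with the buffer before returning a pointer. With
 * PIPE_TRANSFER_DONTBLOCK, kick off an asynchronous flush and fail with
 * NULL if the buffer is still busy; otherwise flush and block. Mapping
 * for read only has to wait for GPU writes, not for GPU reads.
 */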
static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    struct radeon_drm_cs *cs = (struct radeon_drm_cs*)rcs;

    /* If it's not an unsynchronized bo_map, flush the CS if needed and then wait. */
    if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
        /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
        if (usage & PIPE_TRANSFER_DONTBLOCK) {
            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_WRITE)) {
                    return NULL;
                }
            } else {
                if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_READWRITE)) {
                    return NULL;
                }
            }
        } else {
            uint64_t time = os_time_get_nano();

            if (!(usage & PIPE_TRANSFER_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0);
                }
                radeon_bo_wait((struct pb_buffer*)bo,
                               RADEON_USAGE_WRITE);
            } else {
                /* Mapping for write. */
                if (cs) {
                    if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                        cs->flush_cs(cs->flush_data, 0);
                    } else {
                        /* Try to avoid busy-waiting in radeon_bo_wait. */
                        if (p_atomic_read(&bo->num_active_ioctls))
                            radeon_drm_cs_sync_flush(rcs);
                    }
                }

                radeon_bo_wait((struct pb_buffer*)bo, RADEON_USAGE_READWRITE);
            }

            bo->mgr->rws->buffer_wait_time += os_time_get_nano() - time;
        }
    }

    return radeon_bo_do_map(bo);
}

static void radeon_bo_unmap(struct radeon_winsys_cs_handle *_buf)
{
    /* NOP */
}

static void radeon_bo_get_base_buffer(struct pb_buffer *buf,
                                      struct pb_buffer **base_buf,
                                      unsigned *offset)
{
    *base_buf = buf;
    *offset = 0;
}

static enum pipe_error radeon_bo_validate(struct pb_buffer *_buf,
                                          struct pb_validate *vl,
                                          unsigned flags)
{
    /* Always pinned */
    return PIPE_OK;
}

static void radeon_bo_fence(struct pb_buffer *buf,
                            struct pipe_fence_handle *fence)
{
}

const struct pb_vtbl radeon_bo_vtbl = {
    radeon_bo_destroy,
    NULL, /* never called */
    NULL, /* never called */
    radeon_bo_validate,
    radeon_bo_fence,
    radeon_bo_get_base_buffer,
};

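/* Allocate a new BO through the GEM_CREATE ioctl. When virtual address
 * space is supported, a VA range is reserved and mapped with GEM_VA; if
 * the kernel reports the range as already mapped
 * (RADEON_VA_RESULT_VA_EXIST), the existing BO for that range is returned
 * instead of the new one.
 */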
static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
                                                pb_size size,
                                                const struct pb_desc *desc)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    struct radeon_drm_winsys *rws = mgr->rws;
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args;
    struct radeon_bo_desc *rdesc = (struct radeon_bo_desc*)desc;
    int r;

    memset(&args, 0, sizeof(args));

    assert(rdesc->initial_domains);
    assert((rdesc->initial_domains &
            ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);

    args.size = size;
    args.alignment = desc->alignment;
    args.initial_domain = rdesc->initial_domains;

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
        fprintf(stderr, "radeon:    size      : %d bytes\n", size);
        fprintf(stderr, "radeon:    alignment : %d bytes\n", desc->alignment);
        fprintf(stderr, "radeon:    domains   : %d\n", args.initial_domain);
        return NULL;
    }

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = desc->alignment;
    bo->base.usage = desc->usage;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->handle = args.handle;
    bo->va = 0;
    bo->initial_domain = rdesc->initial_domains;
    pipe_mutex_init(bo->map_mutex);

    if (mgr->va) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(mgr, size, desc->alignment);

        va.handle = bo->handle;
        va.vm_id = 0;
        va.operation = RADEON_VA_MAP;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;
        r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to allocate virtual address for buffer:\n");
            fprintf(stderr, "radeon:    size      : %d bytes\n", size);
            fprintf(stderr, "radeon:    alignment : %d bytes\n", desc->alignment);
            fprintf(stderr, "radeon:    domains   : %d\n", args.initial_domain);
            fprintf(stderr, "radeon:    va        : 0x%016llx\n", (unsigned long long)bo->va);
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(mgr->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(mgr->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(mgr->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        util_hash_table_set(mgr->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(mgr->bo_handles_mutex);
    }

    if (rdesc->initial_domains & RADEON_DOMAIN_VRAM)
        rws->allocated_vram += align(size, 4096);
    else if (rdesc->initial_domains & RADEON_DOMAIN_GTT)
        rws->allocated_gtt += align(size, 4096);

    return &bo->base;
}

static void radeon_bomgr_flush(struct pb_manager *mgr)
{
    /* NOP */
}

/* This is for the cache bufmgr. */
static boolean radeon_bomgr_is_buffer_busy(struct pb_manager *_mgr,
                                           struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo)) {
        return TRUE;
    }

    if (radeon_bo_is_busy((struct pb_buffer*)bo, RADEON_USAGE_READWRITE)) {
        return TRUE;
    }

    return FALSE;
}

static void radeon_bomgr_destroy(struct pb_manager *_mgr)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    util_hash_table_destroy(mgr->bo_names);
    util_hash_table_destroy(mgr->bo_handles);
    util_hash_table_destroy(mgr->bo_vas);
    pipe_mutex_destroy(mgr->bo_handles_mutex);
    pipe_mutex_destroy(mgr->bo_va_mutex);
    FREE(mgr);
}

#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

static unsigned handle_hash(void *key)
{
    return PTR_TO_UINT(key);
}

static int handle_compare(void *key1, void *key2)
{
    return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}

struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws)
{
    struct radeon_bomgr *mgr;

    mgr = CALLOC_STRUCT(radeon_bomgr);
    if (!mgr)
        return NULL;

    mgr->base.destroy = radeon_bomgr_destroy;
    mgr->base.create_buffer = radeon_bomgr_create_bo;
    mgr->base.flush = radeon_bomgr_flush;
    mgr->base.is_buffer_busy = radeon_bomgr_is_buffer_busy;

    mgr->rws = rws;
    mgr->bo_names = util_hash_table_create(handle_hash, handle_compare);
    mgr->bo_handles = util_hash_table_create(handle_hash, handle_compare);
    mgr->bo_vas = util_hash_table_create(handle_hash, handle_compare);
    pipe_mutex_init(mgr->bo_handles_mutex);
    pipe_mutex_init(mgr->bo_va_mutex);

    mgr->va = rws->info.r600_virtual_address;
    mgr->va_offset = rws->info.r600_va_start;
    list_inithead(&mgr->va_holes);

    return &mgr->base;
}

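/* Convert the Evergreen tile-split field (0..6) to a size in bytes
 * (64..4096); eg_tile_split_rev is the inverse. Out-of-range inputs fall
 * back to 1024 bytes / field value 4 via the default cases.
 */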
static unsigned eg_tile_split(unsigned tile_split)
{
    switch (tile_split) {
    case 0:     tile_split = 64;    break;
    case 1:     tile_split = 128;   break;
    case 2:     tile_split = 256;   break;
    case 3:     tile_split = 512;   break;
    default:
    case 4:     tile_split = 1024;  break;
    case 5:     tile_split = 2048;  break;
    case 6:     tile_split = 4096;  break;
    }
    return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
    switch (eg_tile_split) {
    case 64:    return 0;
    case 128:   return 1;
    case 256:   return 2;
    case 512:   return 3;
    default:
    case 1024:  return 4;
    case 2048:  return 5;
    case 4096:  return 6;
    }
}

static void radeon_bo_get_tiling(struct pb_buffer *_buf,
                                 enum radeon_bo_layout *microtiled,
                                 enum radeon_bo_layout *macrotiled,
                                 unsigned *bankw, unsigned *bankh,
                                 unsigned *tile_split,
                                 unsigned *stencil_tile_split,
                                 unsigned *mtilea,
                                 bool *scanout)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_GET_TILING,
                        &args,
                        sizeof(args));

    *microtiled = RADEON_LAYOUT_LINEAR;
    *macrotiled = RADEON_LAYOUT_LINEAR;
    if (args.tiling_flags & RADEON_BO_FLAGS_MICRO_TILE)
        *microtiled = RADEON_LAYOUT_TILED;
    else if (args.tiling_flags & RADEON_TILING_MICRO_SQUARE)
        *microtiled = RADEON_LAYOUT_SQUARETILED;

    if (args.tiling_flags & RADEON_BO_FLAGS_MACRO_TILE)
        *macrotiled = RADEON_LAYOUT_TILED;
    if (bankw && bankh && tile_split && stencil_tile_split && mtilea) {
        *bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
        *bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
        *tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
        *stencil_tile_split = (args.tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
        *mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
        *tile_split = eg_tile_split(*tile_split);
    }
    if (scanout)
        *scanout = bo->rws->gen >= DRV_SI && !(args.tiling_flags & RADEON_TILING_R600_NO_SCANOUT);
}

static void radeon_bo_set_tiling(struct pb_buffer *_buf,
                                 struct radeon_winsys_cs *rcs,
                                 enum radeon_bo_layout microtiled,
                                 enum radeon_bo_layout macrotiled,
                                 unsigned bankw, unsigned bankh,
                                 unsigned tile_split,
                                 unsigned stencil_tile_split,
                                 unsigned mtilea,
                                 uint32_t pitch,
                                 bool scanout)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    /* Tiling determines how DRM treats the buffer data.
     * We must flush CS when changing it if the buffer is referenced. */
    if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
        cs->flush_cs(cs->flush_data, 0);
    }

    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    if (microtiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE;
    else if (microtiled == RADEON_LAYOUT_SQUARETILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE_SQUARE;

    if (macrotiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MACRO_TILE;

    args.tiling_flags |= (bankw & RADEON_TILING_EG_BANKW_MASK) <<
                         RADEON_TILING_EG_BANKW_SHIFT;
    args.tiling_flags |= (bankh & RADEON_TILING_EG_BANKH_MASK) <<
                         RADEON_TILING_EG_BANKH_SHIFT;
    if (tile_split) {
        args.tiling_flags |= (eg_tile_split_rev(tile_split) &
                              RADEON_TILING_EG_TILE_SPLIT_MASK) <<
                             RADEON_TILING_EG_TILE_SPLIT_SHIFT;
    }
    args.tiling_flags |= (stencil_tile_split &
                          RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK) <<
                         RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT;
    args.tiling_flags |= (mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
                         RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;

    if (bo->rws->gen >= DRV_SI && !scanout)
        args.tiling_flags |= RADEON_TILING_R600_NO_SCANOUT;

    args.handle = bo->handle;
    args.pitch = pitch;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_SET_TILING,
                        &args,
                        sizeof(args));
}

static struct radeon_winsys_cs_handle *radeon_drm_get_cs_handle(struct pb_buffer *_buf)
{
    /* return radeon_bo. */
    return (struct radeon_winsys_cs_handle*)get_radeon_bo(_buf);
}

static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        unsigned size,
                        unsigned alignment,
                        boolean use_reusable_pool,
                        enum radeon_bo_domain domain)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct radeon_bo_desc desc;
    struct pb_manager *provider;
    struct pb_buffer *buffer;

    memset(&desc, 0, sizeof(desc));
    desc.base.alignment = alignment;

    /* Additional criteria for the cache manager. */
    desc.base.usage = domain;
    desc.initial_domains = domain;

    /* Assign a buffer manager. */
    if (use_reusable_pool)
        provider = ws->cman;
    else
        provider = ws->kman;

    buffer = provider->create_buffer(provider, size, &desc.base);
    if (!buffer)
        return NULL;

    pipe_mutex_lock(mgr->bo_handles_mutex);
    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)get_radeon_bo(buffer)->handle, buffer);
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    return (struct pb_buffer*)buffer;
}

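/* Import a buffer shared by another process, either by GEM/flink name
 * (DRM_API_HANDLE_TYPE_SHARED) or by dma-buf file descriptor
 * (DRM_API_HANDLE_TYPE_FD). For fds, the buffer size is obtained by
 * seeking to the end of the "file".
 */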
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                      struct winsys_handle *whandle,
                                                      unsigned *stride)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    int r;
    unsigned handle;
    uint64_t size = 0;

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(mgr->bo_handles_mutex);

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        /* First check if there already is an existing bo for the handle. */
        bo = util_hash_table_get(mgr->bo_names, (void*)(uintptr_t)whandle->handle);
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        /* We must first get the GEM handle, as fds are unreliable keys */
        r = drmPrimeFDToHandle(ws->fd, whandle->handle, &handle);
        if (r)
            goto fail;
        bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)handle);
    } else {
        /* Unknown handle type */
        goto fail;
    }

    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        struct drm_gem_open open_arg;
        memset(&open_arg, 0, sizeof(open_arg));
        /* Open the BO. */
        open_arg.name = whandle->handle;
        if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
            FREE(bo);
            goto fail;
        }
        handle = open_arg.handle;
        size = open_arg.size;
        bo->name = whandle->handle;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        size = lseek(whandle->handle, 0, SEEK_END);
        /*
         * Could check errno to determine whether the kernel is new enough, but
         * it doesn't really matter why this failed, just that it failed.
         */
        if (size == (off_t)-1) {
            FREE(bo);
            goto fail;
        }
        lseek(whandle->handle, 0, SEEK_SET);
    }

    bo->handle = handle;

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = 0;
    bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.size = (unsigned) size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->va = 0;
    pipe_mutex_init(bo->map_mutex);

    if (bo->name)
        util_hash_table_set(mgr->bo_names, (void*)(uintptr_t)bo->name, bo);

    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)bo->handle, bo);

done:
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;

    if (mgr->va && !bo->va) {
        struct drm_radeon_gem_va va;

        bo->va = radeon_bomgr_find_va(mgr, bo->base.size, 1 << 20);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.offset = bo->va;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        pipe_mutex_lock(mgr->bo_handles_mutex);
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            struct pb_buffer *b = &bo->base;
            struct radeon_bo *old_bo =
                util_hash_table_get(mgr->bo_vas, (void*)(uintptr_t)va.offset);

            pipe_mutex_unlock(mgr->bo_handles_mutex);
            pb_reference(&b, &old_bo->base);
            return b;
        }

        util_hash_table_set(mgr->bo_vas, (void*)(uintptr_t)bo->va, bo);
        pipe_mutex_unlock(mgr->bo_handles_mutex);
    }

    bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
        ws->allocated_vram += align(bo->base.size, 4096);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
        ws->allocated_gtt += align(bo->base.size, 4096);

    return (struct pb_buffer*)bo;

fail:
    pipe_mutex_unlock(mgr->bo_handles_mutex);
    return NULL;
}

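/* Export a buffer: as a flink name (TYPE_SHARED, cached in bo_names so the
 * flink ioctl only runs once per BO), as the raw GEM handle (TYPE_KMS), or
 * as a dma-buf fd (TYPE_FD).
 */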
static boolean radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
                                           unsigned stride,
                                           struct winsys_handle *whandle)
{
    struct drm_gem_flink flink;
    struct radeon_bo *bo = get_radeon_bo(buffer);

    memset(&flink, 0, sizeof(flink));

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!bo->flinked) {
            flink.handle = bo->handle;

            if (ioctl(bo->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return FALSE;
            }

            bo->flinked = TRUE;
            bo->flink = flink.name;

            pipe_mutex_lock(bo->mgr->bo_handles_mutex);
            util_hash_table_set(bo->mgr->bo_names, (void*)(uintptr_t)bo->flink, bo);
            pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
        }
        whandle->handle = bo->flink;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = bo->handle;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
        if (drmPrimeHandleToFD(bo->rws->fd, bo->handle, DRM_CLOEXEC, (int*)&whandle->handle))
            return FALSE;
    }

    whandle->stride = stride;
    return TRUE;
}

static uint64_t radeon_winsys_bo_va(struct radeon_winsys_cs_handle *buf)
{
    return ((struct radeon_bo*)buf)->va;
}

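/* Hook the buffer functions into the winsys vtable. Drivers go through
 * ws->base rather than calling into this file directly; a rough sketch of
 * typical use (illustrative only, not code from this file):
 *
 *    struct pb_buffer *buf =
 *        ws->base.buffer_create(&ws->base, size, 4096, TRUE,
 *                               RADEON_DOMAIN_GTT);
 *    void *ptr = ws->base.buffer_map(ws->base.buffer_get_cs_handle(buf),
 *                                    cs, PIPE_TRANSFER_WRITE);
 *    ... write to ptr ...
 *    ws->base.buffer_unmap(ws->base.buffer_get_cs_handle(buf));
 */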
void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
    ws->base.buffer_set_tiling = radeon_bo_set_tiling;
    ws->base.buffer_get_tiling = radeon_bo_get_tiling;
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = radeon_bo_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
    ws->base.buffer_is_busy = radeon_bo_is_busy;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
    ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
    ws->base.buffer_get_initial_domain = radeon_bo_get_initial_domain;
}