r600g: optimize r600_resource_va
[mesa.git] / src / gallium / winsys / radeon / drm / radeon_drm_bo.c
/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#define _FILE_OFFSET_BITS 64
#include "radeon_drm_cs.h"

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/u_simple_list.h"
#include "util/u_double_list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"

#include "state_tracker/drm_driver.h"

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <errno.h>

/*
 * These are copied from radeon_drm.h. Once an updated libdrm is released,
 * we should bump the configure.ac requirement for it and remove the
 * following definitions.
 */
#define RADEON_BO_FLAGS_MACRO_TILE  1
#define RADEON_BO_FLAGS_MICRO_TILE  2
#define RADEON_BO_FLAGS_MICRO_TILE_SQUARE 0x20

#ifndef DRM_RADEON_GEM_WAIT
#define DRM_RADEON_GEM_WAIT     0x2b

#define RADEON_GEM_NO_WAIT      0x1
#define RADEON_GEM_USAGE_READ   0x2
#define RADEON_GEM_USAGE_WRITE  0x4

struct drm_radeon_gem_wait {
    uint32_t    handle;
    uint32_t    flags;  /* one of RADEON_GEM_* */
};

#endif

#ifndef RADEON_VA_MAP

#define RADEON_VA_MAP               1
#define RADEON_VA_UNMAP             2

#define RADEON_VA_RESULT_OK         0
#define RADEON_VA_RESULT_ERROR      1
#define RADEON_VA_RESULT_VA_EXIST   2

#define RADEON_VM_PAGE_VALID        (1 << 0)
#define RADEON_VM_PAGE_READABLE     (1 << 1)
#define RADEON_VM_PAGE_WRITEABLE    (1 << 2)
#define RADEON_VM_PAGE_SYSTEM       (1 << 3)
#define RADEON_VM_PAGE_SNOOPED      (1 << 4)

struct drm_radeon_gem_va {
    uint32_t    handle;
    uint32_t    operation;
    uint32_t    vm_id;
    uint32_t    flags;
    uint64_t    offset;
};

#define DRM_RADEON_GEM_VA   0x2b
#endif

extern const struct pb_vtbl radeon_bo_vtbl;

static INLINE struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
    assert(bo->vtbl == &radeon_bo_vtbl);
    return (struct radeon_bo *)bo;
}

struct radeon_bo_va_hole {
    struct list_head list;
    uint64_t         offset;
    uint64_t         size;
};

struct radeon_bomgr {
    /* Base class. */
    struct pb_manager base;

    /* Winsys. */
    struct radeon_drm_winsys *rws;

    /* List of buffer handles and its mutex. */
    struct util_hash_table *bo_handles;
    pipe_mutex bo_handles_mutex;
    pipe_mutex bo_va_mutex;
    /* Is virtual address space supported? */
    bool va;
    uint64_t va_offset;
    struct list_head va_holes;
};

static INLINE struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
{
    return (struct radeon_bomgr *)mgr;
}

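/* Return the underlying radeon_bo for a pb_buffer, unwrapping buffers that
 * come from the cache manager. Returns NULL if the buffer is not backed
 * by a radeon_bo. */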
static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = NULL;

    if (_buf->vtbl == &radeon_bo_vtbl) {
        bo = radeon_bo(_buf);
    } else {
        struct pb_buffer *base_buf;
        pb_size offset;
        pb_get_base_buffer(_buf, &base_buf, &offset);

        if (base_buf->vtbl == &radeon_bo_vtbl)
            bo = radeon_bo(base_buf);
    }

    return bo;
}

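/* Wait until the GPU is done with the buffer: first let all in-flight CS
 * ioctls that reference the BO complete, then ask the kernel to wait for
 * idle. The "usage" parameter is unused until the DRM_RADEON_GEM_WAIT
 * ioctl is available (see the XXX below). */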
static void radeon_bo_wait(struct pb_buffer *_buf, enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);

    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    /* XXX use this when it's ready */
    /*if (bo->rws->info.drm_minor >= 12) {
        struct drm_radeon_gem_wait args = {};
        args.handle = bo->handle;
        args.flags = usage;
        while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
                                   &args, sizeof(args)) == -EBUSY);
    } else*/ {
        struct drm_radeon_gem_wait_idle args;
        memset(&args, 0, sizeof(args));
        args.handle = bo->handle;
        while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                                   &args, sizeof(args)) == -EBUSY);
    }
}

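/* Non-blocking busy check: the buffer counts as busy if a CS ioctl that
 * references it is still in flight, or if the kernel reports it busy. */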
static boolean radeon_bo_is_busy(struct pb_buffer *_buf,
                                 enum radeon_bo_usage usage)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);

    if (p_atomic_read(&bo->num_active_ioctls)) {
        return TRUE;
    }

    /* XXX use this when it's ready */
    /*if (bo->rws->info.drm_minor >= 12) {
        struct drm_radeon_gem_wait args = {};
        args.handle = bo->handle;
        args.flags = usage | RADEON_GEM_NO_WAIT;
        return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
                                   &args, sizeof(args)) != 0;
    } else*/ {
        struct drm_radeon_gem_busy args;
        memset(&args, 0, sizeof(args));
        args.handle = bo->handle;
        return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                                   &args, sizeof(args)) != 0;
    }
}

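/* Allocate a range of the virtual address space.
 *
 * This is a first-fit allocator: walk the list of free holes and take the
 * first one the request fits in, splitting off any alignment waste at the
 * front as a new, smaller hole. If no hole fits, carve the range from the
 * end of the allocated space at mgr->va_offset. */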
static uint64_t radeon_bomgr_find_va(struct radeon_bomgr *mgr, uint64_t size, uint64_t alignment)
{
    struct radeon_bo_va_hole *hole, *n;
    uint64_t offset = 0, waste = 0;

    pipe_mutex_lock(mgr->bo_va_mutex);
    /* first look for a hole */
    LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
        offset = hole->offset;
        waste = 0;
        if (alignment) {
            waste = offset % alignment;
            waste = waste ? alignment - waste : 0;
        }
        offset += waste;
        if (offset >= (hole->offset + hole->size)) {
            continue;
        }
        if (!waste && hole->size == size) {
            offset = hole->offset;
            list_del(&hole->list);
            FREE(hole);
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
        if ((hole->size - waste) >= size) {
            if (waste) {
                n = CALLOC_STRUCT(radeon_bo_va_hole);
                n->size = waste;
                n->offset = hole->offset;
                list_add(&n->list, &mgr->va_holes);
            }
            hole->size -= (size + waste);
            hole->offset += size + waste;
            pipe_mutex_unlock(mgr->bo_va_mutex);
            return offset;
        }
    }

    offset = mgr->va_offset;
    waste = 0;
    if (alignment) {
        waste = offset % alignment;
        waste = waste ? alignment - waste : 0;
    }
    offset += waste;
    mgr->va_offset += size + waste;
    pipe_mutex_unlock(mgr->bo_va_mutex);
    return offset;
}

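/* Reserve a specific virtual address range, e.g. when the kernel reports
 * that a BO already has an address (RADEON_VA_RESULT_VA_EXIST). */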
static void radeon_bomgr_force_va(struct radeon_bomgr *mgr, uint64_t va, uint64_t size)
{
    pipe_mutex_lock(mgr->bo_va_mutex);
    if (va >= mgr->va_offset) {
        if (va > mgr->va_offset) {
            struct radeon_bo_va_hole *hole;
            hole = CALLOC_STRUCT(radeon_bo_va_hole);
            if (hole) {
                hole->size = va - mgr->va_offset;
                hole->offset = mgr->va_offset;
                list_add(&hole->list, &mgr->va_holes);
            }
        }
        mgr->va_offset = va + size;
    } else {
        struct radeon_bo_va_hole *hole, *n;
        uint64_t stmp, etmp;

        /* free all holes that fall into the range
         * NOTE that we might lose virtual address space
         */
        LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
            stmp = hole->offset;
            etmp = stmp + hole->size;
            if (va >= stmp && va < etmp) {
                list_del(&hole->list);
                FREE(hole);
            }
        }
    }
    pipe_mutex_unlock(mgr->bo_va_mutex);
}

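/* Return a virtual address range to the allocator, either by rolling back
 * mgr->va_offset (if the range is at the very end) or by adding a hole. */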
static void radeon_bomgr_free_va(struct radeon_bomgr *mgr, uint64_t va, uint64_t size)
{
    pipe_mutex_lock(mgr->bo_va_mutex);
    if ((va + size) == mgr->va_offset) {
        mgr->va_offset = va;
    } else {
        struct radeon_bo_va_hole *hole;

        /* FIXME on allocation failure we just lose virtual address space
         * maybe print a warning
         */
        hole = CALLOC_STRUCT(radeon_bo_va_hole);
        if (hole) {
            hole->size = size;
            hole->offset = va;
            list_add(&hole->list, &mgr->va_holes);
        }
    }
    pipe_mutex_unlock(mgr->bo_va_mutex);
}

static void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_bomgr *mgr = bo->mgr;
    struct drm_gem_close args;

    memset(&args, 0, sizeof(args));

    if (bo->name) {
        pipe_mutex_lock(bo->mgr->bo_handles_mutex);
        util_hash_table_remove(bo->mgr->bo_handles,
                               (void*)(uintptr_t)bo->name);
        pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
    }

    if (bo->ptr)
        os_munmap(bo->ptr, bo->base.size);

    if (mgr->va) {
        radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
    }

    /* Close object. */
    args.handle = bo->handle;
    drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args);
    pipe_mutex_destroy(bo->map_mutex);
    FREE(bo);
}

static unsigned get_pb_usage_from_transfer_flags(enum pipe_transfer_usage usage)
{
    unsigned res = 0;

    if (usage & PIPE_TRANSFER_WRITE)
        res |= PB_USAGE_CPU_WRITE;

    if (usage & PIPE_TRANSFER_DONTBLOCK)
        res |= PB_USAGE_DONTBLOCK;

    if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
        res |= PB_USAGE_UNSYNCHRONIZED;

    return res;
}

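/* Map a buffer for CPU access.
 *
 * Unless the mapping is unsynchronized, this synchronizes with the GPU
 * first: with DONTBLOCK set it kicks off an asynchronous flush and returns
 * NULL instead of waiting, otherwise it flushes and blocks. A read-only
 * mapping only has to wait for GPU writes, since concurrent reads are
 * harmless. The CPU pointer is created lazily on first use and cached. */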
static void *radeon_bo_map_internal(struct pb_buffer *_buf,
                                    unsigned flags, void *flush_ctx)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_drm_cs *cs = flush_ctx;
    struct drm_radeon_gem_mmap args;
    void *ptr;

    memset(&args, 0, sizeof(args));

    /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
    if (!(flags & PB_USAGE_UNSYNCHRONIZED)) {
        /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
        if (flags & PB_USAGE_DONTBLOCK) {
            if (!(flags & PB_USAGE_CPU_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_WRITE)) {
                    return NULL;
                }
            } else {
                if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
                    return NULL;
                }

                if (radeon_bo_is_busy((struct pb_buffer*)bo,
                                      RADEON_USAGE_READWRITE)) {
                    return NULL;
                }
            }
        } else {
            if (!(flags & PB_USAGE_CPU_WRITE)) {
                /* Mapping for read.
                 *
                 * Since we are mapping for read, we don't need to wait
                 * if the GPU is using the buffer for read too
                 * (neither one is changing it).
                 *
                 * Only check whether the buffer is being used for write. */
                if (radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0);
                }
                radeon_bo_wait((struct pb_buffer*)bo,
                               RADEON_USAGE_WRITE);
            } else {
                /* Mapping for write. */
                if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                    cs->flush_cs(cs->flush_data, 0);
                } else {
                    /* Try to avoid busy-waiting in radeon_bo_wait. */
                    if (p_atomic_read(&bo->num_active_ioctls))
                        radeon_drm_cs_sync_flush(cs);
                }

                radeon_bo_wait((struct pb_buffer*)bo, RADEON_USAGE_READWRITE);
            }
        }
    }

    /* Return the pointer if it's already mapped. */
    if (bo->ptr)
        return bo->ptr;

    /* Map the buffer. */
    pipe_mutex_lock(bo->map_mutex);
    /* Return the pointer if it's already mapped (in case of a race). */
    if (bo->ptr) {
        pipe_mutex_unlock(bo->map_mutex);
        return bo->ptr;
    }
    args.handle = bo->handle;
    args.offset = 0;
    args.size = (uint64_t)bo->base.size;
    if (drmCommandWriteRead(bo->rws->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args))) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                bo, bo->handle);
        return NULL;
    }

    ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
                  bo->rws->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
        return NULL;
    }
    bo->ptr = ptr;
    pipe_mutex_unlock(bo->map_mutex);

    return bo->ptr;
}

static void radeon_bo_unmap_internal(struct pb_buffer *_buf)
{
    /* NOP */
}

static void radeon_bo_get_base_buffer(struct pb_buffer *buf,
                                      struct pb_buffer **base_buf,
                                      unsigned *offset)
{
    *base_buf = buf;
    *offset = 0;
}

static enum pipe_error radeon_bo_validate(struct pb_buffer *_buf,
                                          struct pb_validate *vl,
                                          unsigned flags)
{
    /* Always pinned */
    return PIPE_OK;
}

static void radeon_bo_fence(struct pb_buffer *buf,
                            struct pipe_fence_handle *fence)
{
}

const struct pb_vtbl radeon_bo_vtbl = {
    radeon_bo_destroy,
    radeon_bo_map_internal,
    radeon_bo_unmap_internal,
    radeon_bo_validate,
    radeon_bo_fence,
    radeon_bo_get_base_buffer,
};

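/* Create a new buffer with DRM_RADEON_GEM_CREATE and, if virtual memory
 * is supported, pick a virtual address with radeon_bomgr_find_va and map
 * it with DRM_RADEON_GEM_VA. The kernel returns the result in
 * va.operation; RADEON_VA_RESULT_VA_EXIST means it already had an address
 * for this BO, which we adopt instead of the one we picked. */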
static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
                                                pb_size size,
                                                const struct pb_desc *desc)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    struct radeon_drm_winsys *rws = mgr->rws;
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args;
    struct radeon_bo_desc *rdesc = (struct radeon_bo_desc*)desc;
    int r;

    memset(&args, 0, sizeof(args));

    assert(rdesc->initial_domains);
    assert((rdesc->initial_domains &
            ~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);

    args.size = size;
    args.alignment = desc->alignment;
    args.initial_domain = rdesc->initial_domains;

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
        fprintf(stderr, "radeon:    size      : %d bytes\n", size);
        fprintf(stderr, "radeon:    alignment : %d bytes\n", desc->alignment);
        fprintf(stderr, "radeon:    domains   : %d\n", args.initial_domain);
        return NULL;
    }

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = desc->alignment;
    bo->base.usage = desc->usage;
    bo->base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->handle = args.handle;
    bo->va = 0;
    pipe_mutex_init(bo->map_mutex);

    if (mgr->va) {
        struct drm_radeon_gem_va va;

        bo->va_size = align(size, 4096);
        bo->va = radeon_bomgr_find_va(mgr, bo->va_size, desc->alignment);

        va.handle = bo->handle;
        va.vm_id = 0;
        va.operation = RADEON_VA_MAP;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        va.offset = bo->va;
        r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
            fprintf(stderr, "radeon:    size      : %d bytes\n", size);
            fprintf(stderr, "radeon:    alignment : %d bytes\n", desc->alignment);
            fprintf(stderr, "radeon:    domains   : %d\n", args.initial_domain);
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
            bo->va = va.offset;
            radeon_bomgr_force_va(mgr, bo->va, bo->va_size);
        }
    }

    return &bo->base;
}

static void radeon_bomgr_flush(struct pb_manager *mgr)
{
    /* NOP */
}

/* This is for the cache bufmgr. */
static boolean radeon_bomgr_is_buffer_busy(struct pb_manager *_mgr,
                                           struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo)) {
        return TRUE;
    }

    if (radeon_bo_is_busy((struct pb_buffer*)bo, RADEON_USAGE_READWRITE)) {
        return TRUE;
    }

    return FALSE;
}

static void radeon_bomgr_destroy(struct pb_manager *_mgr)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    util_hash_table_destroy(mgr->bo_handles);
    pipe_mutex_destroy(mgr->bo_handles_mutex);
    pipe_mutex_destroy(mgr->bo_va_mutex);
    FREE(mgr);
}

#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

static unsigned handle_hash(void *key)
{
    return PTR_TO_UINT(key);
}

static int handle_compare(void *key1, void *key2)
{
    return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}

struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws)
{
    struct radeon_bomgr *mgr;

    mgr = CALLOC_STRUCT(radeon_bomgr);
    if (!mgr)
        return NULL;

    mgr->base.destroy = radeon_bomgr_destroy;
    mgr->base.create_buffer = radeon_bomgr_create_bo;
    mgr->base.flush = radeon_bomgr_flush;
    mgr->base.is_buffer_busy = radeon_bomgr_is_buffer_busy;

    mgr->rws = rws;
    mgr->bo_handles = util_hash_table_create(handle_hash, handle_compare);
    pipe_mutex_init(mgr->bo_handles_mutex);
    pipe_mutex_init(mgr->bo_va_mutex);

    mgr->va = rws->info.r600_virtual_address;
    mgr->va_offset = rws->info.r600_va_start;
    list_inithead(&mgr->va_holes);

    return &mgr->base;
}

static void *radeon_bo_map(struct pb_buffer *buf,
                           struct radeon_winsys_cs *cs,
                           enum pipe_transfer_usage usage)
{
    return pb_map(buf, get_pb_usage_from_transfer_flags(usage), cs);
}

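/* Convert the tile-split field of the tiling flags to bytes;
 * values 0-6 select powers of two from 64 bytes to 4 KB. */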
static unsigned eg_tile_split(unsigned tile_split)
{
    switch (tile_split) {
    case 0:     tile_split = 64;    break;
    case 1:     tile_split = 128;   break;
    case 2:     tile_split = 256;   break;
    case 3:     tile_split = 512;   break;
    default:
    case 4:     tile_split = 1024;  break;
    case 5:     tile_split = 2048;  break;
    case 6:     tile_split = 4096;  break;
    }
    return tile_split;
}

static void radeon_bo_get_tiling(struct pb_buffer *_buf,
                                 enum radeon_bo_layout *microtiled,
                                 enum radeon_bo_layout *macrotiled,
                                 unsigned *bankw, unsigned *bankh,
                                 unsigned *tile_split,
                                 unsigned *stencil_tile_split,
                                 unsigned *mtilea)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_GET_TILING,
                        &args,
                        sizeof(args));

    *microtiled = RADEON_LAYOUT_LINEAR;
    *macrotiled = RADEON_LAYOUT_LINEAR;
    if (args.tiling_flags & RADEON_BO_FLAGS_MICRO_TILE)
        *microtiled = RADEON_LAYOUT_TILED;

    if (args.tiling_flags & RADEON_BO_FLAGS_MACRO_TILE)
        *macrotiled = RADEON_LAYOUT_TILED;
    if (bankw && bankh && tile_split && stencil_tile_split && mtilea) {
        *bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
        *bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
        *tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
        *stencil_tile_split = (args.tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
        *mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
        *tile_split = eg_tile_split(*tile_split);
    }
}

static void radeon_bo_set_tiling(struct pb_buffer *_buf,
                                 struct radeon_winsys_cs *rcs,
                                 enum radeon_bo_layout microtiled,
                                 enum radeon_bo_layout macrotiled,
                                 uint32_t pitch)
{
    struct radeon_bo *bo = get_radeon_bo(_buf);
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct drm_radeon_gem_set_tiling args;

    memset(&args, 0, sizeof(args));

    /* Tiling determines how DRM treats the buffer data.
     * We must flush CS when changing it if the buffer is referenced. */
    if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
        cs->flush_cs(cs->flush_data, 0);
    }

    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    if (microtiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE;
    else if (microtiled == RADEON_LAYOUT_SQUARETILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE_SQUARE;

    if (macrotiled == RADEON_LAYOUT_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MACRO_TILE;

    args.handle = bo->handle;
    args.pitch = pitch;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_SET_TILING,
                        &args,
                        sizeof(args));
}

static struct radeon_winsys_cs_handle *radeon_drm_get_cs_handle(
        struct pb_buffer *_buf)
{
    /* return radeon_bo. */
    return (struct radeon_winsys_cs_handle*)get_radeon_bo(_buf);
}

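/* Buffers that are frequently created and destroyed (vertex, index and
 * constant buffers) go through the caching manager "cman", which recycles
 * freed buffers to avoid create/destroy ioctls; everything else is
 * allocated directly from the kernel manager "kman". */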
static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
                        unsigned size,
                        unsigned alignment,
                        unsigned bind,
                        enum radeon_bo_domain domain)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo_desc desc;
    struct pb_manager *provider;
    struct pb_buffer *buffer;

    memset(&desc, 0, sizeof(desc));
    desc.base.alignment = alignment;

    /* Additional criteria for the cache manager. */
    desc.base.usage = domain;
    desc.initial_domains = domain;

    /* Assign a buffer manager. */
    if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER |
                PIPE_BIND_CONSTANT_BUFFER | PIPE_BIND_CUSTOM))
        provider = ws->cman;
    else
        provider = ws->kman;

    buffer = provider->create_buffer(provider, size, &desc.base);
    if (!buffer)
        return NULL;

    return (struct pb_buffer*)buffer;
}

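/* Import a buffer from a flink name: open the GEM object, wrap it in a
 * radeon_bo, and remember the <handle, bo> pair so that importing the
 * same name again returns the same BO. */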
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
                                                      struct winsys_handle *whandle,
                                                      unsigned *stride)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct drm_gem_open open_arg = {};
    int r;

    memset(&open_arg, 0, sizeof(open_arg));

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(mgr->bo_handles_mutex);

    /* First check if there already is an existing bo for the handle. */
    bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)whandle->handle);
    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo) {
        goto fail;
    }

    /* Open the BO. */
    open_arg.name = whandle->handle;
    if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
        FREE(bo);
        goto fail;
    }
    bo->handle = open_arg.handle;
    bo->name = whandle->handle;

    /* Initialize it. */
    pipe_reference_init(&bo->base.reference, 1);
    bo->base.alignment = 0;
    bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.size = open_arg.size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->va = 0;
    pipe_mutex_init(bo->map_mutex);

    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)whandle->handle, bo);

done:
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;

    if (mgr->va) {
        struct drm_radeon_gem_va va;

        bo->va_size = align(bo->base.size, 4096);
        bo->va = radeon_bomgr_find_va(mgr, bo->va_size, 1 << 20);

        va.handle = bo->handle;
        va.operation = RADEON_VA_MAP;
        va.vm_id = 0;
        va.offset = bo->va;
        va.flags = RADEON_VM_PAGE_READABLE |
                   RADEON_VM_PAGE_WRITEABLE |
                   RADEON_VM_PAGE_SNOOPED;
        r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
        if (r && va.operation == RADEON_VA_RESULT_ERROR) {
            fprintf(stderr, "radeon: Failed to assign virtual address space\n");
            radeon_bo_destroy(&bo->base);
            return NULL;
        }
        if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
            radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
            bo->va = va.offset;
            radeon_bomgr_force_va(mgr, bo->va, bo->va_size);
        }
    }

    return (struct pb_buffer*)bo;

fail:
    pipe_mutex_unlock(mgr->bo_handles_mutex);
    return NULL;
}

static boolean radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
                                           unsigned stride,
                                           struct winsys_handle *whandle)
{
    struct drm_gem_flink flink;
    struct radeon_bo *bo = get_radeon_bo(buffer);

    memset(&flink, 0, sizeof(flink));

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        if (!bo->flinked) {
            flink.handle = bo->handle;

            if (ioctl(bo->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
                return FALSE;
            }

            bo->flinked = TRUE;
            bo->flink = flink.name;
        }
        whandle->handle = bo->flink;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = bo->handle;
    }

    whandle->stride = stride;
    return TRUE;
}

static uint64_t radeon_winsys_bo_va(struct radeon_winsys_cs_handle *buf)
{
    return ((struct radeon_bo*)buf)->va;
}

void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
    ws->base.buffer_set_tiling = radeon_bo_set_tiling;
    ws->base.buffer_get_tiling = radeon_bo_get_tiling;
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = pb_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
    ws->base.buffer_is_busy = radeon_bo_is_busy;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
    ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
}