/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#define _FILE_OFFSET_BITS 64
#include "radeon_drm_cs.h"

#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/u_simple_list.h"
#include "os/os_thread.h"

#include "state_tracker/drm_driver.h"

#include <sys/ioctl.h>

#define RADEON_BO_FLAGS_MACRO_TILE        1
#define RADEON_BO_FLAGS_MICRO_TILE        2
#define RADEON_BO_FLAGS_MICRO_TILE_SQUARE 0x20
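/* The RADEON_BO_FLAGS_* values above are the tiling bits exchanged with the
 * kernel in drm_radeon_gem_set_tiling::tiling_flags; they are used by the
 * get_tiling/set_tiling helpers further below. */
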
extern const struct pb_vtbl radeon_bo_vtbl;

static INLINE struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
    assert(bo->vtbl == &radeon_bo_vtbl);
    return (struct radeon_bo *)bo;
}

struct radeon_bomgr {
    /* Base class. */
    struct pb_manager base;

    /* Winsys. */
    struct radeon_drm_winsys *rws;

    /* List of buffer handles and its mutex. */
    struct util_hash_table *bo_handles;
    pipe_mutex bo_handles_mutex;
};

static INLINE struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
{
    return (struct radeon_bomgr *)mgr;
}

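/* A pb_buffer handed to the winsys is either a radeon_bo itself or a buffer
 * from a wrapping manager (e.g. the cache bufmgr); in the latter case
 * pb_get_base_buffer() is used to reach the underlying radeon_bo. */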
static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = NULL;

    if (_buf->vtbl == &radeon_bo_vtbl) {
        bo = radeon_bo(_buf);
    } else {
        struct pb_buffer *base_buf;
        pb_size offset;

        pb_get_base_buffer(_buf, &base_buf, &offset);

        if (base_buf->vtbl == &radeon_bo_vtbl)
            bo = radeon_bo(base_buf);
    }

    return bo;
}

void radeon_bo_unref(struct radeon_bo *bo)
{
    struct drm_gem_close args = {};

    if (!p_atomic_dec_zero(&bo->ref_count))
        return;

    if (bo->name) {
        pipe_mutex_lock(bo->mgr->bo_handles_mutex);
        util_hash_table_remove(bo->mgr->bo_handles,
                               (void*)(uintptr_t)bo->name);
        pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
    }

    if (bo->ptr)
        munmap(bo->ptr, bo->size);

    /* Close the GEM object. */
    args.handle = bo->handle;
    drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args);
    pipe_mutex_destroy(bo->map_mutex);
    FREE(bo);
}

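/* Wait for the BO to become idle: first let in-flight ioctls referencing the
 * BO complete, then block in the kernel; DRM_RADEON_GEM_WAIT_IDLE is retried
 * for as long as it returns -EBUSY. */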
static void radeon_bo_wait(struct r300_winsys_bo *_buf)
{
    struct radeon_bo *bo = get_radeon_bo(pb_buffer(_buf));
    struct drm_radeon_gem_wait_idle args = {};

    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    args.handle = bo->handle;
    while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
                               &args, sizeof(args)) == -EBUSY);
}

static boolean radeon_bo_is_busy(struct r300_winsys_bo *_buf)
{
    struct radeon_bo *bo = get_radeon_bo(pb_buffer(_buf));
    struct drm_radeon_gem_busy args = {};

    if (p_atomic_read(&bo->num_active_ioctls)) {
        return TRUE;
    }

    args.handle = bo->handle;
    return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
                               &args, sizeof(args)) != 0;
}

static void radeon_bo_destroy(struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    radeon_bo_unref(bo);
}

static unsigned get_pb_usage_from_transfer_flags(enum pipe_transfer_usage usage)
{
    unsigned res = 0;

    if (usage & PIPE_TRANSFER_DONTBLOCK)
        res |= PB_USAGE_DONTBLOCK;

    if (usage & PIPE_TRANSFER_UNSYNCHRONIZED)
        res |= PB_USAGE_UNSYNCHRONIZED;

    return res;
}

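/* CPU mapping. Unless PB_USAGE_UNSYNCHRONIZED is set, any CS referencing the
 * BO is flushed first and the BO is waited on (or NULL is returned when
 * PB_USAGE_DONTBLOCK is set and the BO is still busy). The mapping obtained
 * through DRM_RADEON_GEM_MMAP is cached in bo->ptr under map_mutex. */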
static void *radeon_bo_map_internal(struct pb_buffer *_buf,
                                    unsigned flags, void *flush_ctx)
{
    struct radeon_bo *bo = radeon_bo(_buf);
    struct radeon_drm_cs *cs = flush_ctx;
    struct drm_radeon_gem_mmap args = {};
    void *ptr;

    /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
    if (!(flags & PB_USAGE_UNSYNCHRONIZED)) {
        /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
        if (flags & PB_USAGE_DONTBLOCK) {
            if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                cs->flush_cs(cs->flush_data, R300_FLUSH_ASYNC);
                return NULL;
            }

            if (radeon_bo_is_busy((struct r300_winsys_bo*)bo)) {
                return NULL;
            }
        } else {
            if (radeon_bo_is_referenced_by_cs(cs, bo)) {
                cs->flush_cs(cs->flush_data, 0);
            }

            /* Try to avoid busy-waiting in radeon_bo_wait. */
            if (p_atomic_read(&bo->num_active_ioctls))
                radeon_drm_cs_sync_flush(cs);

            radeon_bo_wait((struct r300_winsys_bo*)bo);
        }
    }

    /* Return the pointer if it's already mapped. */
    if (bo->ptr)
        return bo->ptr;

    /* Map the buffer. */
    pipe_mutex_lock(bo->map_mutex);
    args.handle = bo->handle;
    args.size = (uint64_t)bo->size;
    if (drmCommandWriteRead(bo->rws->fd,
                            DRM_RADEON_GEM_MMAP,
                            &args,
                            sizeof(args))) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
                bo, bo->handle);
        return NULL;
    }

    ptr = mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
               bo->rws->fd, args.addr_ptr);
    if (ptr == MAP_FAILED) {
        pipe_mutex_unlock(bo->map_mutex);
        fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
        return NULL;
    }
    bo->ptr = ptr;
    pipe_mutex_unlock(bo->map_mutex);

    return bo->ptr;
}

static void radeon_bo_unmap_internal(struct pb_buffer *_buf)
{
    /* NOP: the mapping is cached in bo->ptr and released in radeon_bo_unref. */
}

static void radeon_bo_get_base_buffer(struct pb_buffer *buf,
                                      struct pb_buffer **base_buf,
                                      pb_size *offset)
{
    *base_buf = buf;
    *offset = 0;
}

static enum pipe_error radeon_bo_validate(struct pb_buffer *_buf,
                                          struct pb_validate *vl,
                                          unsigned flags)
{
    /* Always pinned. */
    return PIPE_OK;
}

static void radeon_bo_fence(struct pb_buffer *buf,
                            struct pipe_fence_handle *fence)
{
}

const struct pb_vtbl radeon_bo_vtbl = {
    radeon_bo_destroy,
    radeon_bo_map_internal,
    radeon_bo_unmap_internal,
    radeon_bo_validate,
    radeon_bo_fence,
    radeon_bo_get_base_buffer,
};

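/* Allocate a new GEM buffer through DRM_RADEON_GEM_CREATE. The initial
 * domain (GTT and/or VRAM) is derived from the pb_desc usage flags. */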
static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
                                                pb_size size,
                                                const struct pb_desc *desc)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    struct radeon_drm_winsys *rws = mgr->rws;
    struct radeon_bo *bo;
    struct drm_radeon_gem_create args = {};

    args.size = size;
    args.alignment = desc->alignment;
    args.initial_domain =
        (desc->usage & RADEON_PB_USAGE_DOMAIN_GTT ?
         RADEON_GEM_DOMAIN_GTT : 0) |
        (desc->usage & RADEON_PB_USAGE_DOMAIN_VRAM ?
         RADEON_GEM_DOMAIN_VRAM : 0);

    if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
                            &args, sizeof(args))) {
        fprintf(stderr, "Failed to allocate :\n");
        fprintf(stderr, "   size      : %d bytes\n", size);
        fprintf(stderr, "   alignment : %d bytes\n", desc->alignment);
        fprintf(stderr, "   domains   : %d\n", args.initial_domain);
        return NULL;
    }

    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        return NULL;

    pipe_reference_init(&bo->base.base.reference, 1);
    bo->base.base.alignment = desc->alignment;
    bo->base.base.usage = desc->usage;
    bo->base.base.size = size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    bo->handle = args.handle;
    bo->size = size;
    bo->ref_count = 1;
    pipe_mutex_init(bo->map_mutex);

    return &bo->base;
}

static void radeon_bomgr_flush(struct pb_manager *mgr)
{
    /* NOP */
}

/* This is for the cache bufmgr. */
static boolean radeon_bomgr_is_buffer_busy(struct pb_manager *_mgr,
                                           struct pb_buffer *_buf)
{
    struct radeon_bo *bo = radeon_bo(_buf);

    if (radeon_bo_is_referenced_by_any_cs(bo)) {
        return TRUE;
    }

    if (radeon_bo_is_busy((struct r300_winsys_bo*)bo)) {
        return TRUE;
    }

    return FALSE;
}

static void radeon_bomgr_destroy(struct pb_manager *_mgr)
{
    struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
    util_hash_table_destroy(mgr->bo_handles);
    pipe_mutex_destroy(mgr->bo_handles_mutex);
    FREE(mgr);
}

#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))

static unsigned handle_hash(void *key)
{
    return PTR_TO_UINT(key);
}

static int handle_compare(void *key1, void *key2)
{
    return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}
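
/* The bo_handles table maps a GEM name (whandle->handle) to its radeon_bo,
 * so the integer key itself serves as the hash value and equality is a plain
 * comparison of the two keys. */
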
struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws)
{
    struct radeon_bomgr *mgr;

    mgr = CALLOC_STRUCT(radeon_bomgr);
    if (!mgr)
        return NULL;

    mgr->base.destroy = radeon_bomgr_destroy;
    mgr->base.create_buffer = radeon_bomgr_create_bo;
    mgr->base.flush = radeon_bomgr_flush;
    mgr->base.is_buffer_busy = radeon_bomgr_is_buffer_busy;

    mgr->rws = rws;
    mgr->bo_handles = util_hash_table_create(handle_hash, handle_compare);
    pipe_mutex_init(mgr->bo_handles_mutex);
    return &mgr->base;
}

static void *radeon_bo_map(struct r300_winsys_bo *buf,
                           struct r300_winsys_cs *cs,
                           enum pipe_transfer_usage usage)
{
    struct pb_buffer *_buf = pb_buffer(buf);

    return pb_map(_buf, get_pb_usage_from_transfer_flags(usage), cs);
}

static void radeon_bo_get_tiling(struct r300_winsys_bo *_buf,
                                 enum r300_buffer_tiling *microtiled,
                                 enum r300_buffer_tiling *macrotiled)
{
    struct radeon_bo *bo = get_radeon_bo(pb_buffer(_buf));
    struct drm_radeon_gem_set_tiling args = {};

    args.handle = bo->handle;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_GET_TILING,
                        &args,
                        sizeof(args));

    *microtiled = R300_BUFFER_LINEAR;
    *macrotiled = R300_BUFFER_LINEAR;
    if (args.tiling_flags & RADEON_BO_FLAGS_MICRO_TILE)
        *microtiled = R300_BUFFER_TILED;

    if (args.tiling_flags & RADEON_BO_FLAGS_MACRO_TILE)
        *macrotiled = R300_BUFFER_TILED;
}

static void radeon_bo_set_tiling(struct r300_winsys_bo *_buf,
                                 struct r300_winsys_cs *rcs,
                                 enum r300_buffer_tiling microtiled,
                                 enum r300_buffer_tiling macrotiled,
                                 uint32_t pitch)
{
    struct radeon_bo *bo = get_radeon_bo(pb_buffer(_buf));
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct drm_radeon_gem_set_tiling args = {};

    /* Tiling determines how DRM treats the buffer data.
     * We must flush CS when changing it if the buffer is referenced. */
    if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
        cs->flush_cs(cs->flush_data, 0);
    }

    while (p_atomic_read(&bo->num_active_ioctls)) {
        sched_yield();
    }

    if (microtiled == R300_BUFFER_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE;
    else if (microtiled == R300_BUFFER_SQUARETILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE_SQUARE;

    if (macrotiled == R300_BUFFER_TILED)
        args.tiling_flags |= RADEON_BO_FLAGS_MACRO_TILE;

    args.handle = bo->handle;
    args.pitch = pitch;

    drmCommandWriteRead(bo->rws->fd,
                        DRM_RADEON_GEM_SET_TILING,
                        &args,
                        sizeof(args));
}

static struct r300_winsys_cs_handle *radeon_drm_get_cs_handle(
        struct r300_winsys_bo *_buf)
{
    /* return radeon_bo. */
    return (struct r300_winsys_cs_handle*)
        get_radeon_bo(pb_buffer(_buf));
}

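/* Translate gallium bind/usage flags and the r300 buffer domain into pb_desc
 * usage bits; vertex and index buffers additionally request the cached
 * (sub-allocating) manager via RADEON_PB_USAGE_CACHE. */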
static unsigned get_pb_usage_from_create_flags(unsigned bind, unsigned usage,
                                               enum r300_buffer_domain domain)
{
    unsigned res = 0;

    if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
        res |= RADEON_PB_USAGE_CACHE;

    if (domain & R300_DOMAIN_GTT)
        res |= RADEON_PB_USAGE_DOMAIN_GTT;

    if (domain & R300_DOMAIN_VRAM)
        res |= RADEON_PB_USAGE_DOMAIN_VRAM;

    return res;
}

static struct r300_winsys_bo *
radeon_winsys_bo_create(struct r300_winsys_screen *rws,
                        unsigned size,
                        unsigned alignment,
                        unsigned bind,
                        unsigned usage,
                        enum r300_buffer_domain domain)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct pb_desc desc;
    struct pb_manager *provider;
    struct pb_buffer *buffer;

    memset(&desc, 0, sizeof(desc));
    desc.alignment = alignment;
    desc.usage = get_pb_usage_from_create_flags(bind, usage, domain);

    /* Assign a buffer manager. */
    if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
        provider = ws->cman;
    else
        provider = ws->kman;

    buffer = provider->create_buffer(provider, size, &desc);
    if (!buffer)
        return NULL;

    return (struct r300_winsys_bo*)buffer;
}

static struct r300_winsys_bo *radeon_winsys_bo_from_handle(struct r300_winsys_screen *rws,
                                                           struct winsys_handle *whandle,
                                                           unsigned *stride,
                                                           unsigned *size)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_bo *bo;
    struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
    struct drm_gem_open open_arg = {};

    /* We must maintain a list of pairs <handle, bo>, so that we always return
     * the same BO for one particular handle. If we didn't do that and created
     * more than one BO for the same handle and then relocated them in a CS,
     * we would hit a deadlock in the kernel.
     *
     * The list of pairs is guarded by a mutex, of course. */
    pipe_mutex_lock(mgr->bo_handles_mutex);

    /* First check if there already is an existing bo for the handle. */
    bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)whandle->handle);
    if (bo) {
        /* Increase the refcount. */
        struct pb_buffer *b = NULL;
        pb_reference(&b, &bo->base);
        goto done;
    }

    /* There isn't, create a new one. */
    bo = CALLOC_STRUCT(radeon_bo);
    if (!bo)
        goto fail;

    /* Open the GEM object. */
    open_arg.name = whandle->handle;
    if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
        FREE(bo);
        goto fail;
    }
    bo->handle = open_arg.handle;
    bo->size = open_arg.size;
    bo->name = whandle->handle;

    /* Initialize it. */
    pipe_reference_init(&bo->base.base.reference, 1);
    bo->base.base.alignment = 0;
    bo->base.base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
    bo->base.base.size = bo->size;
    bo->base.vtbl = &radeon_bo_vtbl;
    bo->mgr = mgr;
    bo->rws = mgr->rws;
    pipe_mutex_init(bo->map_mutex);

    util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)whandle->handle, bo);

done:
    pipe_mutex_unlock(mgr->bo_handles_mutex);

    if (stride)
        *stride = whandle->stride;
    if (size)
        *size = bo->base.base.size;

    return (struct r300_winsys_bo*)bo;

fail:
    pipe_mutex_unlock(mgr->bo_handles_mutex);
    return NULL;
}

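/* Export a BO: for DRM_API_HANDLE_TYPE_SHARED the BO is flinked and the GEM
 * name is returned, for DRM_API_HANDLE_TYPE_KMS the raw GEM handle is
 * returned directly. */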
static boolean radeon_winsys_bo_get_handle(struct r300_winsys_bo *buffer,
                                           unsigned stride,
                                           struct winsys_handle *whandle)
{
    struct drm_gem_flink flink = {};
    struct radeon_bo *bo = get_radeon_bo(pb_buffer(buffer));

    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
        flink.handle = bo->handle;

        if (ioctl(bo->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
            return FALSE;
        }

        bo->flink = flink.name;
        whandle->handle = bo->flink;
    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
        whandle->handle = bo->handle;
    }

    whandle->stride = stride;
    return TRUE;
}

void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
    ws->base.buffer_set_tiling = radeon_bo_set_tiling;
    ws->base.buffer_get_tiling = radeon_bo_get_tiling;
    ws->base.buffer_map = radeon_bo_map;
    ws->base.buffer_unmap = pb_unmap;
    ws->base.buffer_wait = radeon_bo_wait;
    ws->base.buffer_is_busy = radeon_bo_is_busy;
    ws->base.buffer_create = radeon_winsys_bo_create;
    ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
    ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
}