1 #include "radeon_drm_buffer.h"
2 #include "radeon_drm_cs.h"
4 #include "util/u_hash_table.h"
5 #include "util/u_memory.h"
6 #include "util/u_simple_list.h"
7 #include "pipebuffer/pb_bufmgr.h"
8 #include "os/os_thread.h"
10 #include "state_tracker/drm_driver.h"
12 #include <radeon_drm.h>
13 #include <radeon_bo_gem.h>
14 #include <sys/ioctl.h>
/* Forward declaration; the manager struct is defined further below. */
16 struct radeon_drm_bufmgr
;
/* A winsys buffer wrapping a libdrm radeon_bo.
 * NOTE(review): this view of the file is missing the members that the
 * original declares between "mgr" and "next" (presumably the radeon_bo
 * pointer and flink bookkeeping, given their use below) -- confirm
 * against the repository copy. */
18 struct radeon_drm_buffer
{
/* Base class so the buffer can be handled by pipebuffer managers. */
19 struct pb_buffer base
;
/* Owning manager; provides the map-list/handle mutexes used below. */
20 struct radeon_drm_bufmgr
*mgr
;
/* u_simple_list links for mgr->buffer_map_list. */
27 struct radeon_drm_buffer
*next
, *prev
;
/* Shared vtbl for all radeon_drm_buffer instances (defined in this file). */
30 extern const struct pb_vtbl radeon_drm_buffer_vtbl
;
33 static INLINE
struct radeon_drm_buffer
*
34 radeon_drm_buffer(struct pb_buffer
*buf
)
37 assert(buf
->vtbl
== &radeon_drm_buffer_vtbl
);
38 return (struct radeon_drm_buffer
*)buf
;
/* The radeon DRM buffer manager: a pb_manager plus the shared state used
 * to cache mappings and deduplicate buffers imported by GEM name.
 * NOTE(review): the closing "};" is outside this view. */
41 struct radeon_drm_bufmgr
{
/* Base class; entry points are filled in by radeon_drm_bufmgr_create(). */
43 struct pb_manager base
;
/* Back-pointer to the owning winsys (bo manager, fd, ...). */
46 struct radeon_drm_winsys
*rws
;
48 /* List of mapped buffers and its mutex. */
49 struct radeon_drm_buffer buffer_map_list
;
50 pipe_mutex buffer_map_list_mutex
;
52 /* List of buffer handles and its mutex. */
53 struct util_hash_table
*buffer_handles
;
54 pipe_mutex buffer_handles_mutex
;
57 static INLINE
struct radeon_drm_bufmgr
*
58 radeon_drm_bufmgr(struct pb_manager
*mgr
)
61 return (struct radeon_drm_bufmgr
*)mgr
;
/* pb_vtbl destroy hook: drop a cached mapping, remove the buffer's GEM
 * name from the handle table, and release the underlying bo.
 * NOTE(review): the "static void" line, the declaration of "name", the
 * guard around the hash removal and the final FREE(buf) are missing from
 * this view -- confirm against the repository copy. */
65 radeon_drm_buffer_destroy(struct pb_buffer
*_buf
)
67 struct radeon_drm_buffer
*buf
= radeon_drm_buffer(_buf
);
/* Cheap unlocked check first; re-checked under the mutex below. */
70 if (buf
->bo
->ptr
!= NULL
) {
71 pipe_mutex_lock(buf
->mgr
->buffer_map_list_mutex
);
72 /* Now test it again inside the mutex. */
73 if (buf
->bo
->ptr
!= NULL
) {
/* Still mapped: drop it from the cached-mapping list and unmap. */
74 remove_from_list(buf
);
75 radeon_bo_unmap(buf
->bo
);
78 pipe_mutex_unlock(buf
->mgr
->buffer_map_list_mutex
);
/* If the bo was flinked, forget its name -> buffer association. */
80 name
= radeon_gem_name_bo(buf
->bo
);
82 pipe_mutex_lock(buf
->mgr
->buffer_handles_mutex
);
83 util_hash_table_remove(buf
->mgr
->buffer_handles
,
84 (void*)(uintptr_t)name
);
85 pipe_mutex_unlock(buf
->mgr
->buffer_handles_mutex
);
/* Release our reference on the libdrm bo. */
87 radeon_bo_unref(buf
->bo
);
92 static unsigned get_pb_usage_from_transfer_flags(enum pipe_transfer_usage usage
)
96 if (usage
& PIPE_TRANSFER_READ
)
97 res
|= PB_USAGE_CPU_READ
;
99 if (usage
& PIPE_TRANSFER_WRITE
)
100 res
|= PB_USAGE_CPU_WRITE
;
102 if (usage
& PIPE_TRANSFER_DONTBLOCK
)
103 res
|= PB_USAGE_DONTBLOCK
;
105 if (usage
& PIPE_TRANSFER_UNSYNCHRONIZED
)
106 res
|= PB_USAGE_UNSYNCHRONIZED
;
/* pb_vtbl map hook: map the bo for CPU access, flushing or failing first
 * as the PB_USAGE_* flags demand.  flush_ctx is the CS (may be NULL when
 * called from buffer_create, which has no context).
 * NOTE(review): the return type line, the "write"/"domain" declarations
 * and every early "return NULL" / final "return buf->bo->ptr" are missing
 * from this view -- confirm against the repository copy. */
112 radeon_drm_buffer_map_internal(struct pb_buffer
*_buf
,
113 unsigned flags
, void *flush_ctx
)
115 struct radeon_drm_buffer
*buf
= radeon_drm_buffer(_buf
);
116 struct radeon_drm_cs
*cs
= flush_ctx
;
119 /* Note how we use radeon_bo_is_referenced_by_cs here. There are
120 * basically two places this map function can be called from:
122 * - create_buffer (in the buffer reuse case)
124 * Since pb managers are per-winsys managers, not per-context managers,
125 * and we shouldn't reuse buffers if they are in-use in any context,
126 * we simply ask: is this buffer referenced by *any* CS?
128 * The problem with buffer_create is that it comes from pipe_screen,
129 * so we have no CS to look at, though luckily the following code
130 * is sufficient to tell whether the buffer is in use. */
131 if (flags
& PB_USAGE_DONTBLOCK
) {
132 if (_buf
->base
.usage
& RADEON_PB_USAGE_VERTEX
)
133 if (radeon_bo_is_referenced_by_cs(buf
->bo
, NULL
))
/* Fast path: the bo is already mapped (cached mapping). */
137 if (buf
->bo
->ptr
!= NULL
) {
138 pipe_mutex_lock(buf
->mgr
->buffer_map_list_mutex
);
139 /* Now test ptr again inside the mutex. We might have gotten a race
140 * during the first test. */
141 if (buf
->bo
->ptr
!= NULL
) {
/* Take it off the cached-mapping list while the caller holds the map. */
142 remove_from_list(buf
);
144 pipe_mutex_unlock(buf
->mgr
->buffer_map_list_mutex
);
/* Non-blocking map request: bail out if the hardware still uses the bo. */
148 if (flags
& PB_USAGE_DONTBLOCK
) {
150 if (radeon_bo_is_busy(buf
->bo
, &domain
))
154 /* If we don't have any CS and the buffer is referenced,
155 * we cannot flush. */
156 assert(cs
|| !radeon_bo_is_referenced_by_cs(buf
->bo
, NULL
));
/* Flush any CS that still references the bo before blocking on the map. */
158 if (cs
&& radeon_bo_is_referenced_by_cs(buf
->bo
, NULL
)) {
159 cs
->flush_cs(cs
->flush_data
);
162 if (flags
& PB_USAGE_CPU_WRITE
) {
/* Blocking map through libdrm; fails with non-zero on error. */
166 if (radeon_bo_map(buf
->bo
, write
)) {
170 pipe_mutex_lock(buf
->mgr
->buffer_map_list_mutex
);
171 remove_from_list(buf
);
172 pipe_mutex_unlock(buf
->mgr
->buffer_map_list_mutex
);
177 radeon_drm_buffer_unmap_internal(struct pb_buffer
*_buf
)
179 struct radeon_drm_buffer
*buf
= radeon_drm_buffer(_buf
);
180 pipe_mutex_lock(buf
->mgr
->buffer_map_list_mutex
);
181 if (is_empty_list(buf
)) { /* = is not inserted... */
182 insert_at_tail(&buf
->mgr
->buffer_map_list
, buf
);
184 pipe_mutex_unlock(buf
->mgr
->buffer_map_list_mutex
);
/* pb_vtbl get_base_buffer hook.
 * NOTE(review): only part of the signature is visible here; the remaining
 * parameter (presumably the offset out-pointer) and the body are missing
 * from this view -- confirm against the repository copy. */
188 radeon_drm_buffer_get_base_buffer(struct pb_buffer
*buf
,
189 struct pb_buffer
**base_buf
,
/* pb_vtbl validate hook.
 * NOTE(review): the trailing parameter(s) and the body are missing from
 * this view -- confirm against the repository copy. */
197 static enum pipe_error
198 radeon_drm_buffer_validate(struct pb_buffer
*_buf
,
199 struct pb_validate
*vl
,
/* pb_vtbl fence hook.
 * NOTE(review): the return-type line and the body are missing from this
 * view -- confirm against the repository copy. */
207 radeon_drm_buffer_fence(struct pb_buffer
*buf
,
208 struct pipe_fence_handle
*fence
)
/* vtbl for radeon_drm_buffer; entry order must match struct pb_vtbl
 * (destroy, map, unmap, validate, fence, get_base_buffer).
 * NOTE(review): the closing "};" is outside this view. */
212 const struct pb_vtbl radeon_drm_buffer_vtbl
= {
213 radeon_drm_buffer_destroy
,
214 radeon_drm_buffer_map_internal
,
215 radeon_drm_buffer_unmap_internal
,
216 radeon_drm_buffer_validate
,
217 radeon_drm_buffer_fence
,
218 radeon_drm_buffer_get_base_buffer
,
/* Import a buffer by GEM name, deduplicating through mgr->buffer_handles.
 * "Unsafe" = caller must already hold buffer_handles_mutex (see the
 * locked wrapper below).
 * NOTE(review): the handle parameter line, the NULL/error checks after
 * util_hash_table_get / radeon_bo_open / CALLOC_STRUCT, the mgr/bo field
 * assignments and the final return are missing from this view -- confirm
 * against the repository copy. */
221 static struct pb_buffer
*
222 radeon_drm_bufmgr_create_buffer_from_handle_unsafe(struct pb_manager
*_mgr
,
225 struct radeon_drm_bufmgr
*mgr
= radeon_drm_bufmgr(_mgr
);
226 struct radeon_drm_winsys
*rws
= mgr
->rws
;
227 struct radeon_drm_buffer
*buf
;
228 struct radeon_bo
*bo
;
/* Already imported?  Then just add a reference and return it. */
230 buf
= util_hash_table_get(mgr
->buffer_handles
, (void*)(uintptr_t)handle
);
233 struct pb_buffer
*b
= NULL
;
234 pb_reference(&b
, &buf
->base
);
/* First import: open the bo by name through libdrm. */
238 bo
= radeon_bo_open(rws
->bom
, handle
, 0,
243 buf
= CALLOC_STRUCT(radeon_drm_buffer
);
/* Not on the cached-mapping list yet. */
249 make_empty_list(buf
);
/* Fill in the pb_buffer base: imported buffers get GPU-only usage. */
251 pipe_reference_init(&buf
->base
.base
.reference
, 1);
252 buf
->base
.base
.alignment
= 0;
253 buf
->base
.base
.usage
= PB_USAGE_GPU_WRITE
| PB_USAGE_GPU_READ
;
254 buf
->base
.base
.size
= bo
->size
;
255 buf
->base
.vtbl
= &radeon_drm_buffer_vtbl
;
/* Remember the name -> buffer mapping for future imports. */
260 util_hash_table_set(mgr
->buffer_handles
, (void*)(uintptr_t)handle
, buf
);
/* Locked wrapper around the _unsafe import above: serializes lookups and
 * insertions in mgr->buffer_handles.
 * NOTE(review): the return-type line, the handle parameter declaration
 * and the final "return pb;" are missing from this view -- confirm
 * against the repository copy. */
266 radeon_drm_bufmgr_create_buffer_from_handle(struct pb_manager
*_mgr
,
269 struct radeon_drm_bufmgr
*mgr
= radeon_drm_bufmgr(_mgr
);
270 struct pb_buffer
*pb
;
272 pipe_mutex_lock(mgr
->buffer_handles_mutex
);
273 pb
= radeon_drm_bufmgr_create_buffer_from_handle_unsafe(_mgr
, handle
);
274 pipe_mutex_unlock(mgr
->buffer_handles_mutex
);
/* pb_manager create_buffer hook: allocate a fresh bo in the GTT/VRAM
 * domain(s) requested via desc->usage.
 * NOTE(review): the size parameter line, the NULL checks after
 * CALLOC_STRUCT / radeon_bo_open, the "domain =" left-hand side, the
 * mgr/bo field assignments and the final return are missing from this
 * view -- confirm against the repository copy. */
279 static struct pb_buffer
*
280 radeon_drm_bufmgr_create_buffer(struct pb_manager
*_mgr
,
282 const struct pb_desc
*desc
)
284 struct radeon_drm_bufmgr
*mgr
= radeon_drm_bufmgr(_mgr
);
285 struct radeon_drm_winsys
*rws
= mgr
->rws
;
286 struct radeon_drm_buffer
*buf
;
289 buf
= CALLOC_STRUCT(radeon_drm_buffer
);
/* Fill in the pb_buffer base from the descriptor. */
293 pipe_reference_init(&buf
->base
.base
.reference
, 1);
294 buf
->base
.base
.alignment
= desc
->alignment
;
295 buf
->base
.base
.usage
= desc
->usage
;
296 buf
->base
.base
.size
= size
;
297 buf
->base
.vtbl
= &radeon_drm_buffer_vtbl
;
/* Not on the cached-mapping list yet. */
300 make_empty_list(buf
);
/* Map the RADEON_PB_USAGE domain bits to GEM domain flags. */
303 (desc
->usage
& RADEON_PB_USAGE_DOMAIN_GTT
? RADEON_GEM_DOMAIN_GTT
: 0) |
304 (desc
->usage
& RADEON_PB_USAGE_DOMAIN_VRAM
? RADEON_GEM_DOMAIN_VRAM
: 0);
306 buf
->bo
= radeon_bo_open(rws
->bom
, 0, size
,
307 desc
->alignment
, domain
, 0);
/* pb_manager flush hook.
 * NOTE(review): the return-type line and the body (apparently a no-op)
 * are missing from this view -- confirm against the repository copy. */
320 radeon_drm_bufmgr_flush(struct pb_manager
*mgr
)
326 radeon_drm_bufmgr_destroy(struct pb_manager
*_mgr
)
328 struct radeon_drm_bufmgr
*mgr
= radeon_drm_bufmgr(_mgr
);
329 util_hash_table_destroy(mgr
->buffer_handles
);
330 pipe_mutex_destroy(mgr
->buffer_map_list_mutex
);
331 pipe_mutex_destroy(mgr
->buffer_handles_mutex
);
/* Hash callback for mgr->buffer_handles.  Keys are GEM names stored as
 * (void*)(uintptr_t)handle, so the handle itself is the hash.
 * Fix: cast through uintptr_t instead of straight to unsigned -- a direct
 * pointer-to-int cast of different size is a diagnosable conversion on
 * LP64 targets. */
static unsigned handle_hash(void *key)
{
    return (unsigned)(uintptr_t)key;
}
/* Equality callback for mgr->buffer_handles; returns 0 when the keys are
 * equal, per util_hash_table convention.
 * Fix: compare the full pointer values instead of truncating each key to
 * int -- on LP64 the old (int) casts could falsely equate keys that
 * differ only in the high bits (and are a diagnosable conversion). */
static int handle_compare(void *key1, void *key2)
{
    return key1 != key2;
}
/* Construct the radeon DRM buffer manager for a winsys.
 * NOTE(review): the return-type line, the NULL check after CALLOC_STRUCT,
 * the "mgr->rws = rws;" assignment and the final return are missing from
 * this view -- confirm against the repository copy. */
346 radeon_drm_bufmgr_create(struct radeon_drm_winsys
*rws
)
348 struct radeon_drm_bufmgr
*mgr
;
350 mgr
= CALLOC_STRUCT(radeon_drm_bufmgr
);
/* Wire the pb_manager entry points to the hooks defined above. */
354 mgr
->base
.destroy
= radeon_drm_bufmgr_destroy
;
355 mgr
->base
.create_buffer
= radeon_drm_bufmgr_create_buffer
;
356 mgr
->base
.flush
= radeon_drm_bufmgr_flush
;
/* Initialize the cached-mapping list and the GEM-name dedup table. */
359 make_empty_list(&mgr
->buffer_map_list
);
360 mgr
->buffer_handles
= util_hash_table_create(handle_hash
, handle_compare
);
361 pipe_mutex_init(mgr
->buffer_map_list_mutex
);
362 pipe_mutex_init(mgr
->buffer_handles_mutex
);
366 static struct radeon_drm_buffer
*get_drm_buffer(struct pb_buffer
*_buf
)
368 struct radeon_drm_buffer
*buf
= NULL
;
370 if (_buf
->vtbl
== &radeon_drm_buffer_vtbl
) {
371 buf
= radeon_drm_buffer(_buf
);
373 struct pb_buffer
*base_buf
;
375 pb_get_base_buffer(_buf
, &base_buf
, &offset
);
377 if (base_buf
->vtbl
== &radeon_drm_buffer_vtbl
)
378 buf
= radeon_drm_buffer(base_buf
);
384 static void *radeon_drm_buffer_map(struct r300_winsys_screen
*ws
,
385 struct r300_winsys_buffer
*buf
,
386 struct r300_winsys_cs
*cs
,
387 enum pipe_transfer_usage usage
)
389 struct pb_buffer
*_buf
= radeon_pb_buffer(buf
);
391 return pb_map(_buf
, get_pb_usage_from_transfer_flags(usage
), radeon_drm_cs(cs
));
/* r300 winsys buffer_unmap entry point: forward to pb_unmap.
 * Fix: the visible body fetched the pb_buffer but the pb_unmap call was
 * absent, which would leave the pipebuffer map count unbalanced; restore
 * the forwarding call. */
static void radeon_drm_buffer_unmap(struct r300_winsys_screen *ws,
                                    struct r300_winsys_buffer *buf)
{
    struct pb_buffer *_buf = radeon_pb_buffer(buf);

    pb_unmap(_buf);
}
/* Export a buffer handle for sharing: flink the bo for SHARED handles, or
 * hand out the raw GEM handle for KMS.
 * NOTE(review): the "if (!buf->flinked)" guard around the ioctl, the
 * "buf->flinked = TRUE" assignment, the early FALSE return on ioctl
 * failure and the final return are missing from this view -- confirm
 * against the repository copy. */
402 boolean
radeon_drm_bufmgr_get_handle(struct pb_buffer
*_buf
,
403 struct winsys_handle
*whandle
)
405 struct drm_gem_flink flink
;
406 struct radeon_drm_buffer
*buf
= get_drm_buffer(_buf
);
408 if (whandle
->type
== DRM_API_HANDLE_TYPE_SHARED
) {
/* Name the bo via the GEM flink ioctl and cache the result. */
410 flink
.handle
= buf
->bo
->handle
;
412 if (ioctl(buf
->mgr
->rws
->fd
, DRM_IOCTL_GEM_FLINK
, &flink
)) {
417 buf
->flink
= flink
.name
;
419 whandle
->handle
= buf
->flink
;
420 } else if (whandle
->type
== DRM_API_HANDLE_TYPE_KMS
) {
/* KMS consumers want the raw GEM handle, no flink needed. */
421 whandle
->handle
= buf
->bo
->handle
;
426 static void radeon_drm_buffer_get_tiling(struct r300_winsys_screen
*ws
,
427 struct r300_winsys_buffer
*_buf
,
428 enum r300_buffer_tiling
*microtiled
,
429 enum r300_buffer_tiling
*macrotiled
)
431 struct radeon_drm_buffer
*buf
= get_drm_buffer(radeon_pb_buffer(_buf
));
432 uint32_t flags
= 0, pitch
;
434 radeon_bo_get_tiling(buf
->bo
, &flags
, &pitch
);
436 *microtiled
= R300_BUFFER_LINEAR
;
437 *macrotiled
= R300_BUFFER_LINEAR
;
438 if (flags
& RADEON_BO_FLAGS_MICRO_TILE
)
439 *microtiled
= R300_BUFFER_TILED
;
441 if (flags
& RADEON_BO_FLAGS_MACRO_TILE
)
442 *macrotiled
= R300_BUFFER_TILED
;
/* r300 winsys buffer_set_tiling: translate the r300 tiling enums into
 * libdrm RADEON_BO_FLAGS_* and apply them to the bo.
 * NOTE(review): the trailing pitch parameter line, the "flags"
 * declaration and the #endif matching the #ifdef below are missing from
 * this view -- confirm against the repository copy. */
445 static void radeon_drm_buffer_set_tiling(struct r300_winsys_screen
*ws
,
446 struct r300_winsys_buffer
*_buf
,
447 enum r300_buffer_tiling microtiled
,
448 enum r300_buffer_tiling macrotiled
,
451 struct radeon_drm_buffer
*buf
= get_drm_buffer(radeon_pb_buffer(_buf
));
453 if (microtiled
== R300_BUFFER_TILED
)
454 flags
|= RADEON_BO_FLAGS_MICRO_TILE
;
455 /* XXX Remove this ifdef when libdrm version 2.4.19 becomes mandatory. */
456 #ifdef RADEON_BO_FLAGS_MICRO_TILE_SQUARE
457 else if (microtiled
== R300_BUFFER_SQUARETILED
)
458 flags
|= RADEON_BO_FLAGS_MICRO_TILE_SQUARE
;
460 if (macrotiled
== R300_BUFFER_TILED
)
461 flags
|= RADEON_BO_FLAGS_MACRO_TILE
;
/* Push the computed flags (and caller-supplied pitch) down to libdrm. */
463 radeon_bo_set_tiling(buf
->bo
, flags
, pitch
);
466 static struct r300_winsys_cs_buffer
*radeon_drm_get_cs_handle(
467 struct r300_winsys_screen
*rws
,
468 struct r300_winsys_buffer
*_buf
)
470 /* return pure radeon_bo. */
471 return (struct r300_winsys_cs_buffer
*)
472 get_drm_buffer(radeon_pb_buffer(_buf
))->bo
;
475 static boolean
radeon_drm_is_buffer_referenced(struct r300_winsys_cs
*rcs
,
476 struct r300_winsys_cs_buffer
*_buf
,
477 enum r300_reference_domain domain
)
479 struct radeon_bo
*bo
= (struct radeon_bo
*)_buf
;
482 if (domain
& R300_REF_CS
) {
483 if (radeon_bo_is_referenced_by_cs(bo
, NULL
)) {
488 if (domain
& R300_REF_HW
) {
489 if (radeon_bo_is_busy(bo
, &tmp
)) {
/* Drop every cached mapping held on mgr->buffer_map_list (see
 * radeon_drm_buffer_unmap_internal, which parks buffers there instead of
 * unmapping them).
 * NOTE(review): a line inside the loop between radeon_bo_unmap and
 * remove_from_list (likely clearing rpb->bo->ptr) is missing from this
 * view -- confirm against the repository copy. */
497 void radeon_drm_bufmgr_flush_maps(struct pb_manager
*_mgr
)
499 struct radeon_drm_bufmgr
*mgr
= radeon_drm_bufmgr(_mgr
);
500 struct radeon_drm_buffer
*rpb
, *t_rpb
;
502 pipe_mutex_lock(mgr
->buffer_map_list_mutex
);
/* foreach_s is the deletion-safe iteration from u_simple_list. */
504 foreach_s(rpb
, t_rpb
, &mgr
->buffer_map_list
) {
505 radeon_bo_unmap(rpb
->bo
);
507 remove_from_list(rpb
);
510 make_empty_list(&mgr
->buffer_map_list
);
512 pipe_mutex_unlock(mgr
->buffer_map_list_mutex
);
515 static void radeon_drm_buffer_wait(struct r300_winsys_screen
*ws
,
516 struct r300_winsys_buffer
*_buf
)
518 struct radeon_drm_buffer
*buf
= get_drm_buffer(radeon_pb_buffer(_buf
));
520 radeon_bo_wait(buf
->bo
);
523 void radeon_drm_bufmgr_init_functions(struct radeon_drm_winsys
*ws
)
525 ws
->base
.buffer_get_cs_handle
= radeon_drm_get_cs_handle
;
526 ws
->base
.buffer_set_tiling
= radeon_drm_buffer_set_tiling
;
527 ws
->base
.buffer_get_tiling
= radeon_drm_buffer_get_tiling
;
528 ws
->base
.buffer_map
= radeon_drm_buffer_map
;
529 ws
->base
.buffer_unmap
= radeon_drm_buffer_unmap
;
530 ws
->base
.buffer_wait
= radeon_drm_buffer_wait
;
531 ws
->base
.cs_is_buffer_referenced
= radeon_drm_is_buffer_referenced
;