/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "amdgpu_cs.h"

#include "os/os_time.h"
#include "state_tracker/drm_driver.h"
#include <amdgpu_drm.h>
#include <stdio.h>

static inline struct amdgpu_winsys_bo *amdgpu_winsys_bo(struct pb_buffer *bo)
{
   return (struct amdgpu_winsys_bo *)bo;
}

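/* Wait until the buffer is idle for the given usage, or until the timeout
 * expires; returns true if the buffer ended up idle. Shared buffers go
 * through amdgpu_bo_wait_for_idle, because the per-ring user fences in
 * bo->fence[] are only meaningful within this process. For non-shared
 * buffers, a zero timeout just polls the fences, while a non-zero timeout
 * waits on a snapshot of the fences taken under bo_fence_lock.
 */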
static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
                           enum radeon_bo_usage usage)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_winsys *ws = bo->ws;
   int i;

   if (bo->is_shared) {
      /* We can't use user fences for shared buffers, because user fences
       * are local to this process only. If we want to wait for all buffer
       * uses in all processes, we have to use amdgpu_bo_wait_for_idle.
       */
      bool buffer_busy = true;
      int r;

      r = amdgpu_bo_wait_for_idle(bo->bo, timeout, &buffer_busy);
      if (r)
         fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__,
                 r);
      return !buffer_busy;
   }

   if (timeout == 0) {
      /* Timeout == 0 is quite simple. */
      pipe_mutex_lock(ws->bo_fence_lock);
      for (i = 0; i < RING_LAST; i++)
         if (bo->fence[i]) {
            if (amdgpu_fence_wait(bo->fence[i], 0, false)) {
               /* Release the idle fence to avoid checking it again later. */
               amdgpu_fence_reference(&bo->fence[i], NULL);
            } else {
               pipe_mutex_unlock(ws->bo_fence_lock);
               return false;
            }
         }
      pipe_mutex_unlock(ws->bo_fence_lock);
      return true;
   } else {
      struct pipe_fence_handle *fence[RING_LAST] = {};
      bool fence_idle[RING_LAST] = {};
      bool buffer_idle = true;
      int64_t abs_timeout = os_time_get_absolute_timeout(timeout);

      /* Take references to all fences, so that we can wait for them
       * without the lock. */
      pipe_mutex_lock(ws->bo_fence_lock);
      for (i = 0; i < RING_LAST; i++)
         amdgpu_fence_reference(&fence[i], bo->fence[i]);
      pipe_mutex_unlock(ws->bo_fence_lock);

      /* Now wait for the fences. */
      for (i = 0; i < RING_LAST; i++) {
         if (fence[i]) {
            if (amdgpu_fence_wait(fence[i], abs_timeout, true))
               fence_idle[i] = true;
            else
               buffer_idle = false;
         }
      }

      /* Release idle fences to avoid checking them again later. */
      pipe_mutex_lock(ws->bo_fence_lock);
      for (i = 0; i < RING_LAST; i++) {
         if (fence[i] == bo->fence[i] && fence_idle[i])
            amdgpu_fence_reference(&bo->fence[i], NULL);

         amdgpu_fence_reference(&fence[i], NULL);
      }
      pipe_mutex_unlock(ws->bo_fence_lock);

      return buffer_idle;
   }
}

static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
      struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo *)buf)->initial_domain;
}

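/* Destroy the buffer immediately: remove it from the global BO list, unmap
 * its GPU virtual address, free the kernel handle, drop all fence
 * references and update the VRAM/GTT accounting. Buffers from the reusable
 * pool normally go through amdgpu_bo_destroy_or_cache below instead.
 */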
void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   int i;

   pipe_mutex_lock(bo->ws->global_bo_list_lock);
   LIST_DEL(&bo->global_list_item);
   bo->ws->num_buffers--;
   pipe_mutex_unlock(bo->ws->global_bo_list_lock);

   amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
   amdgpu_va_range_free(bo->va_handle);
   amdgpu_bo_free(bo->bo);

   for (i = 0; i < RING_LAST; i++)
      amdgpu_fence_reference(&bo->fence[i], NULL);

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      bo->ws->allocated_vram -= align(bo->base.size, bo->ws->gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      bo->ws->allocated_gtt -= align(bo->base.size, bo->ws->gart_page_size);
   FREE(bo);
}

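/* pb_buffer destroy callback: reusable buffers are returned to the cache
 * so later allocations can reclaim them; everything else is destroyed for
 * real.
 */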
static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   if (bo->use_reusable_pool)
      pb_cache_add_buffer(&bo->cache_entry);
   else
      amdgpu_bo_destroy(_buf);
}

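/* CPU-map the buffer, honoring the PIPE_TRANSFER_* synchronization flags:
 * UNSYNCHRONIZED maps without waiting, DONTBLOCK kicks off an asynchronous
 * flush and returns NULL instead of stalling while the buffer is busy, and
 * the default path flushes and blocks until the GPU is done. Read-only
 * mappings only wait for GPU writers, since concurrent GPU reads are
 * harmless.
 *
 * Typical use from a driver (illustrative sketch only; "data" and "size"
 * are placeholders, and the CS argument may be NULL):
 *
 *    void *ptr = ws->base.buffer_map(buf, NULL, PIPE_TRANSFER_WRITE);
 *    if (ptr) {
 *       memcpy(ptr, data, size);
 *       ws->base.buffer_unmap(buf);
 *    }
 */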
static void *amdgpu_bo_map(struct pb_buffer *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo *)buf;
   struct amdgpu_cs *cs = (struct amdgpu_cs *)rcs;
   int r;
   void *cpu = NULL;

   /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
      if (usage & PIPE_TRANSFER_DONTBLOCK) {
         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer *)bo, 0,
                                RADEON_USAGE_WRITE)) {
               return NULL;
            }
         } else {
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer *)bo, 0,
                                RADEON_USAGE_READWRITE)) {
               return NULL;
            }
         }
      } else {
         uint64_t time = os_time_get_nano();

         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
               cs->flush_cs(cs->flush_data, 0, NULL);
            }

            amdgpu_bo_wait((struct pb_buffer *)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_WRITE);
         } else {
            /* Mapping for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo))
               cs->flush_cs(cs->flush_data, 0, NULL);

            amdgpu_bo_wait((struct pb_buffer *)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_READWRITE);
         }

         bo->ws->buffer_wait_time += os_time_get_nano() - time;
      }
   }

   /* If the buffer is created from user memory, return the user pointer. */
   if (bo->user_ptr)
      return bo->user_ptr;

   r = amdgpu_bo_cpu_map(bo->bo, &cpu);
   if (r) {
      /* Clear the cache and try again. */
      pb_cache_release_all_buffers(&bo->ws->bo_cache);
      r = amdgpu_bo_cpu_map(bo->bo, &cpu);
   }
   return r ? NULL : cpu;
}

static void amdgpu_bo_unmap(struct pb_buffer *buf)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo *)buf;

   amdgpu_bo_cpu_unmap(bo->bo);
}

static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
   amdgpu_bo_destroy_or_cache
   /* other functions are never called */
};

static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_winsys *ws = bo->ws;

   pipe_mutex_lock(ws->global_bo_list_lock);
   LIST_ADDTAIL(&bo->global_list_item, &ws->global_bo_list);
   ws->num_buffers++;
   pipe_mutex_unlock(ws->global_bo_list_lock);
}

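/* Allocate a new buffer: create the kernel BO, allocate a GPU virtual
 * address range, map the BO into it, and fill in the winsys bookkeeping
 * (reference count, size, domains, unique id, memory accounting). Each
 * step is unwound through the error labels at the end on failure.
 */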
static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
                                                 unsigned size,
                                                 unsigned alignment,
                                                 unsigned usage,
                                                 enum radeon_bo_domain initial_domain,
                                                 unsigned flags)
{
   struct amdgpu_bo_alloc_request request = {0};
   amdgpu_bo_handle buf_handle;
   uint64_t va = 0;
   struct amdgpu_winsys_bo *bo;
   amdgpu_va_handle va_handle;
   int r;

   assert(initial_domain & RADEON_DOMAIN_VRAM_GTT);
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   pb_cache_init_entry(&ws->bo_cache, &bo->cache_entry, &bo->base);
   request.alloc_size = size;
   request.phys_alignment = alignment;

   if (initial_domain & RADEON_DOMAIN_VRAM)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
   if (initial_domain & RADEON_DOMAIN_GTT)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;

   if (flags & RADEON_FLAG_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
   if (flags & RADEON_FLAG_NO_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
   if (flags & RADEON_FLAG_GTT_WC)
      request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;

   r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
      fprintf(stderr, "amdgpu: size      : %d bytes\n", size);
      fprintf(stderr, "amdgpu: alignment : %d bytes\n", alignment);
      fprintf(stderr, "amdgpu: domains   : %d\n", initial_domain);
      goto error_bo_alloc;
   }

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size, alignment, 0, &va, &va_handle, 0);
   if (r)
      goto error_va_alloc;

   r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = alignment;
   bo->base.usage = usage;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->bo = buf_handle;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = initial_domain;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   if (initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align(size, ws->gart_page_size);
   else if (initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align(size, ws->gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error_bo_alloc:
   FREE(bo);
   return NULL;
}

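/* pb_cache callback: a buffer sitting in the cache may only be reclaimed
 * for reuse if no command stream references it and it is already idle.
 */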
bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
      return false;
   }

   return amdgpu_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
}

static unsigned eg_tile_split(unsigned tile_split)
{
   switch (tile_split) {
   case 0:     tile_split = 64;    break;
   case 1:     tile_split = 128;   break;
   case 2:     tile_split = 256;   break;
   case 3:     tile_split = 512;   break;
   default:
   case 4:     tile_split = 1024;  break;
   case 5:     tile_split = 2048;  break;
   case 6:     tile_split = 4096;  break;
   }
   return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
   switch (eg_tile_split) {
   case 64:    return 0;
   case 128:   return 1;
   case 256:   return 2;
   case 512:   return 3;
   default:
   case 1024:  return 4;
   case 2048:  return 5;
   case 4096:  return 6;
   }
}

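/* Convert the kernel's tiling_info bitfield into the driver-independent
 * radeon_bo_metadata layout and back. TILE_SPLIT is stored as an index
 * (0 = 64 bytes ... 6 = 4096 bytes), hence the eg_tile_split()/
 * eg_tile_split_rev() helpers above.
 */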
static void amdgpu_buffer_get_metadata(struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_info info = {0};
   uint32_t tiling_flags;
   int r;

   r = amdgpu_bo_query_info(bo->bo, &info);
   if (r)
      return;

   tiling_flags = info.metadata.tiling_info;

   md->microtile = RADEON_LAYOUT_LINEAR;
   md->macrotile = RADEON_LAYOUT_LINEAR;

   if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4)  /* 2D_TILED_THIN1 */
      md->macrotile = RADEON_LAYOUT_TILED;
   else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
      md->microtile = RADEON_LAYOUT_TILED;

   md->bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
   md->bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
   md->tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
   md->mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
   md->scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */

   md->size_metadata = info.metadata.size_metadata;
   memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
}

static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_metadata metadata = {0};
   uint32_t tiling_flags = 0;

   if (md->macrotile == RADEON_LAYOUT_TILED)
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
   else if (md->microtile == RADEON_LAYOUT_TILED)
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
   else
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */

   tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, md->pipe_config);
   tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(md->bankw));
   tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(md->bankh));
   if (md->tile_split)
      tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(md->tile_split));
   tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(md->mtilea));
   tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(md->num_banks)-1);

   if (md->scanout)
      tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
   else
      tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */

   metadata.tiling_info = tiling_flags;
   metadata.size_metadata = md->size_metadata;
   memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));

   amdgpu_bo_set_metadata(bo->bo, &metadata);
}

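/* Public buffer_create entry point. It falls back from VRAM to GTT on
 * nearly VRAM-less GPUs, page-aligns the size, and packs the domain and
 * flags into a single "usage" bitmask so that pb_cache only reuses buffers
 * created with exactly the same domain/flags combination. A cached buffer
 * is tried first; otherwise a new one is allocated, retrying once after
 * emptying the cache.
 *
 * Illustrative call (sketch only; the sizes and flags are placeholders):
 *
 *    struct pb_buffer *buf =
 *       ws->base.buffer_create(&ws->base, 64 * 1024, 4096, TRUE,
 *                              RADEON_DOMAIN_VRAM, RADEON_FLAG_CPU_ACCESS);
 */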
static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
                 unsigned size,
                 unsigned alignment,
                 boolean use_reusable_pool,
                 enum radeon_bo_domain domain,
                 enum radeon_bo_flag flags)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   unsigned usage = 0;

   /* Don't use VRAM if the GPU doesn't have much. This is only the initial
    * domain. The kernel is free to move the buffer if it wants to.
    *
    * 64MB means no VRAM by today's standards.
    */
   if (domain & RADEON_DOMAIN_VRAM && ws->info.vram_size <= 64*1024*1024) {
      domain = RADEON_DOMAIN_GTT;
      flags = RADEON_FLAG_GTT_WC;
   }

   /* Align size to page size. This is the minimum alignment for normal
    * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
    * like constant/uniform buffers, can benefit from better and more reuse.
    */
   size = align(size, ws->gart_page_size);

   /* Only set one usage bit each for domains and flags, or the cache manager
    * might consider different sets of domains / flags compatible.
    */
   if (domain == RADEON_DOMAIN_VRAM_GTT)
      usage = 1 << 2;
   else
      usage = domain >> 1;

   assert(flags < sizeof(usage) * 8 - 3);
   usage |= 1 << (flags + 3);

   /* Get a buffer from the cache. */
   if (use_reusable_pool) {
      bo = (struct amdgpu_winsys_bo *)
           pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment,
                                   usage);
      if (bo)
         return &bo->base;
   }

   /* Create a new one. */
   bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags);
   if (!bo) {
      /* Clear the cache and try again. */
      pb_cache_release_all_buffers(&ws->bo_cache);
      bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags);
      if (!bo)
         return NULL;
   }

   bo->use_reusable_pool = use_reusable_pool;
   return &bo->base;
}

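/* Import a buffer shared by another process or device, either from a GEM
 * flink name (DRM_API_HANDLE_TYPE_SHARED) or a dma-buf fd
 * (DRM_API_HANDLE_TYPE_FD). The buffer gets its own GPU virtual address
 * and is marked is_shared, so amdgpu_bo_wait uses kernel waits for it
 * instead of per-process user fences.
 */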
static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
                                               struct winsys_handle *whandle,
                                               unsigned *stride,
                                               unsigned *offset)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   enum amdgpu_bo_handle_type type;
   struct amdgpu_bo_import_result result = {0};
   uint64_t va;
   amdgpu_va_handle va_handle;
   struct amdgpu_bo_info info = {0};
   enum radeon_bo_domain initial = 0;
   int r;

   /* Initialize the structure. */
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   default:
      goto error;
   }

   r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
   if (r)
      goto error;

   /* Get initial domains. */
   r = amdgpu_bo_query_info(result.buf_handle, &info);
   if (r)
      goto error_query;

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             result.alloc_size, 1 << 20, 0, &va, &va_handle, 0);
   if (r)
      goto error_query;

   r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
      initial |= RADEON_DOMAIN_VRAM;
   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
      initial |= RADEON_DOMAIN_GTT;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = info.phys_alignment;
   bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
   bo->bo = result.buf_handle;
   bo->base.size = result.alloc_size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = initial;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
   bo->is_shared = true;

   if (stride)
      *stride = whandle->stride;
   if (offset)
      *offset = whandle->offset;

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align(bo->base.size, ws->gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align(bo->base.size, ws->gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return &bo->base;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_query:
   amdgpu_bo_free(result.buf_handle);

error:
   FREE(bo);
   return NULL;
}

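/* Export the buffer as a flink name, dma-buf fd or KMS handle for sharing.
 * Exported buffers are excluded from the reusable pool and marked
 * is_shared for the same reason as imported ones.
 */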
static boolean amdgpu_bo_get_handle(struct pb_buffer *buffer,
                                    unsigned stride, unsigned offset,
                                    unsigned slice_size,
                                    struct winsys_handle *whandle)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
   enum amdgpu_bo_handle_type type;
   int r;

   bo->use_reusable_pool = false;

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   case DRM_API_HANDLE_TYPE_KMS:
      type = amdgpu_bo_handle_type_kms;
      break;
   default:
      return FALSE;
   }

   r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
   if (r)
      return FALSE;

   whandle->stride = stride;
   whandle->offset = offset;
   whandle->offset += slice_size * whandle->layer;
   bo->is_shared = true;
   return TRUE;
}

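/* Wrap an existing CPU allocation (userptr) in a GTT buffer. The pointer
 * is remembered in bo->user_ptr so amdgpu_bo_map can return it directly
 * without creating a kernel CPU mapping.
 */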
static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
                                            void *pointer, unsigned size)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   amdgpu_bo_handle buf_handle;
   struct amdgpu_winsys_bo *bo;
   uint64_t va;
   amdgpu_va_handle va_handle;

   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo)
      return NULL;

   if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle))
      goto error;

   if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size, 1 << 12, 0, &va, &va_handle, 0))
      goto error_va_alloc;

   if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP))
      goto error_va_map;

   pipe_reference_init(&bo->base.reference, 1);
   bo->bo = buf_handle;
   bo->base.alignment = 0;
   bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->user_ptr = pointer;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = RADEON_DOMAIN_GTT;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   ws->allocated_gtt += align(bo->base.size, ws->gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return (struct pb_buffer *)bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error:
   FREE(bo);
   return NULL;
}

static bool amdgpu_bo_is_user_ptr(struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo *)buf)->user_ptr != NULL;
}

static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo *)buf)->va;
}

void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
   ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
   ws->base.buffer_map = amdgpu_bo_map;
   ws->base.buffer_unmap = amdgpu_bo_unmap;
   ws->base.buffer_wait = amdgpu_bo_wait;
   ws->base.buffer_create = amdgpu_bo_create;
   ws->base.buffer_from_handle = amdgpu_bo_from_handle;
   ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
   ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
   ws->base.buffer_get_handle = amdgpu_bo_get_handle;
   ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
   ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
}