/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */
#include "amdgpu_cs.h"

#include "os/os_time.h"
#include "state_tracker/drm_driver.h"
#include <amdgpu_drm.h>
#include <stdio.h>
static inline struct amdgpu_winsys_bo *amdgpu_winsys_bo(struct pb_buffer *bo)
{
   return (struct amdgpu_winsys_bo *)bo;
}
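
/* Wait until the buffer is idle for the given usage, or until the timeout
 * expires. Shared buffers are waited on through the kernel (user fences are
 * local to this process); non-shared buffers are waited on via the per-ring
 * fences stored in the BO. Returns true if the buffer is idle. */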
static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
                           enum radeon_bo_usage usage)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_winsys *ws = bo->rws;
   int i;

   if (bo->is_shared) {
      /* We can't use user fences for shared buffers, because user fences
       * are local to this process only. If we want to wait for all buffer
       * uses in all processes, we have to use amdgpu_bo_wait_for_idle.
       */
      bool buffer_busy = true;
      int r;

      r = amdgpu_bo_wait_for_idle(bo->bo, timeout, &buffer_busy);
      if (r)
         fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__,
                 r);
      return !buffer_busy;
   }

   if (timeout == 0) {
      /* Timeout == 0 is quite simple. */
      pipe_mutex_lock(ws->bo_fence_lock);
      for (i = 0; i < RING_LAST; i++)
         if (bo->fence[i]) {
            if (amdgpu_fence_wait(bo->fence[i], 0, false)) {
               /* Release the idle fence to avoid checking it again later. */
               amdgpu_fence_reference(&bo->fence[i], NULL);
            } else {
               pipe_mutex_unlock(ws->bo_fence_lock);
               return false;
            }
         }
      pipe_mutex_unlock(ws->bo_fence_lock);
      return true;

   } else {
      struct pipe_fence_handle *fence[RING_LAST] = {};
      bool fence_idle[RING_LAST] = {};
      bool buffer_idle = true;
      int64_t abs_timeout = os_time_get_absolute_timeout(timeout);

      /* Take references to all fences, so that we can wait for them
       * without the lock. */
      pipe_mutex_lock(ws->bo_fence_lock);
      for (i = 0; i < RING_LAST; i++)
         amdgpu_fence_reference(&fence[i], bo->fence[i]);
      pipe_mutex_unlock(ws->bo_fence_lock);
      /* Now wait for the fences. */
      for (i = 0; i < RING_LAST; i++) {
         if (fence[i]) {
            if (amdgpu_fence_wait(fence[i], abs_timeout, true))
               fence_idle[i] = true;
            else
               buffer_idle = false;
         }
      }
      /* Release idle fences to avoid checking them again later. */
      pipe_mutex_lock(ws->bo_fence_lock);
      for (i = 0; i < RING_LAST; i++) {
         if (fence[i] == bo->fence[i] && fence_idle[i])
            amdgpu_fence_reference(&bo->fence[i], NULL);

         amdgpu_fence_reference(&fence[i], NULL);
      }
      pipe_mutex_unlock(ws->bo_fence_lock);

      return buffer_idle;
   }
}
static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
      struct radeon_winsys_cs_handle *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
}
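
/* Unmap the GPU virtual address, release the kernel BO handle and all fence
 * references, and update the VRAM/GTT accounting before freeing the struct. */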
void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   int i;

   amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
   amdgpu_va_range_free(bo->va_handle);
   amdgpu_bo_free(bo->bo);

   for (i = 0; i < RING_LAST; i++)
      amdgpu_fence_reference(&bo->fence[i], NULL);

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      bo->rws->allocated_vram -= align(bo->base.size, bo->rws->gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      bo->rws->allocated_gtt -= align(bo->base.size, bo->rws->gart_page_size);
   FREE(bo);
}
static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   if (bo->use_reusable_pool)
      pb_cache_add_buffer(&bo->cache_entry);
   else
      amdgpu_bo_destroy(_buf);
}
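
/* CPU mapping with implicit synchronization:
 * - PIPE_TRANSFER_UNSYNCHRONIZED maps immediately,
 * - PIPE_TRANSFER_DONTBLOCK flushes the CS asynchronously and fails
 *   (returns NULL) if the buffer is busy,
 * - otherwise the CS is flushed if it references the buffer, and the call
 *   blocks until the relevant GPU usage (write, or read-write) finishes. */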
static void *amdgpu_bo_map(struct radeon_winsys_cs_handle *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   int r;
   void *cpu = NULL;

   /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
      if (usage & PIPE_TRANSFER_DONTBLOCK) {
         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_WRITE)) {
               return NULL;
            }
         } else {
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_READWRITE)) {
               return NULL;
            }
         }
      } else {
         uint64_t time = os_time_get_nano();

         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
               cs->flush_cs(cs->flush_data, 0, NULL);
            }
            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_WRITE);
         } else {
            /* Mapping for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo))
               cs->flush_cs(cs->flush_data, 0, NULL);

            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_READWRITE);
         }

         bo->rws->buffer_wait_time += os_time_get_nano() - time;
      }
   }

   /* If the buffer is created from user memory, return the user pointer. */
   if (bo->user_ptr)
      return bo->user_ptr;

   r = amdgpu_bo_cpu_map(bo->bo, &cpu);
   return r ? NULL : cpu;
}
static void amdgpu_bo_unmap(struct radeon_winsys_cs_handle *buf)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;

   amdgpu_bo_cpu_unmap(bo->bo);
}
static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
   amdgpu_bo_destroy_or_cache
   /* other functions are never called */
};
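
/* Allocate a new BO through libdrm_amdgpu, allocate a GPU virtual address
 * range for it, map the BO into that range, and account the size against
 * the VRAM or GTT totals. All sizes are in bytes. */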
static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *rws,
                                                 unsigned size,
                                                 unsigned alignment,
                                                 unsigned usage,
                                                 enum radeon_bo_domain initial_domain,
                                                 unsigned flags)
{
   struct amdgpu_bo_alloc_request request = {0};
   amdgpu_bo_handle buf_handle;
   uint64_t va = 0;
   struct amdgpu_winsys_bo *bo;
   amdgpu_va_handle va_handle;
   int r;

   assert(initial_domain & RADEON_DOMAIN_VRAM_GTT);
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   pb_cache_init_entry(&rws->bo_cache, &bo->cache_entry, &bo->base);
   request.alloc_size = size;
   request.phys_alignment = alignment;

   if (initial_domain & RADEON_DOMAIN_VRAM) {
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
      if (flags & RADEON_FLAG_CPU_ACCESS)
         request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
   }
   if (initial_domain & RADEON_DOMAIN_GTT) {
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;
      if (flags & RADEON_FLAG_GTT_WC)
         request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
   }

   r = amdgpu_bo_alloc(rws->dev, &request, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
      fprintf(stderr, "amdgpu:    size      : %d bytes\n", size);
      fprintf(stderr, "amdgpu:    alignment : %d bytes\n", alignment);
      fprintf(stderr, "amdgpu:    domains   : %d\n", initial_domain);
      goto error_bo_alloc;
   }

   r = amdgpu_va_range_alloc(rws->dev, amdgpu_gpu_va_range_general,
                             size, alignment, 0, &va, &va_handle, 0);
   if (r)
      goto error_va_alloc;

   r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = alignment;
   bo->base.usage = usage;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->rws = rws;
   bo->bo = buf_handle;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = initial_domain;
   bo->unique_id = __sync_fetch_and_add(&rws->next_bo_unique_id, 1);

   if (initial_domain & RADEON_DOMAIN_VRAM)
      rws->allocated_vram += align(size, rws->gart_page_size);
   else if (initial_domain & RADEON_DOMAIN_GTT)
      rws->allocated_gtt += align(size, rws->gart_page_size);

   return bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error_bo_alloc:
   FREE(bo);
   return NULL;
}
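
/* Reclaim check for the buffer cache: a cached buffer may be reused only
 * if no CS references it and it is already idle. */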
bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
      return false;
   }

   return amdgpu_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
}
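
/* Convert the 3-bit TILE_SPLIT field of the tiling flags to a tile-split
 * size in bytes (64 << n), and back again below. */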
static unsigned eg_tile_split(unsigned tile_split)
{
   switch (tile_split) {
   case 0:     tile_split = 64;    break;
   case 1:     tile_split = 128;   break;
   case 2:     tile_split = 256;   break;
   case 3:     tile_split = 512;   break;
   default:
   case 4:     tile_split = 1024;  break;
   case 5:     tile_split = 2048;  break;
   case 6:     tile_split = 4096;  break;
   }
   return tile_split;
}
static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
   switch (eg_tile_split) {
   case 64:    return 0;
   case 128:   return 1;
   case 256:   return 2;
   case 512:   return 3;
   default:
   case 1024:  return 4;
   case 2048:  return 5;
   case 4096:  return 6;
   }
}
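
/* Decode the tiling metadata stored in the kernel BO into the layout
 * description used by the radeon winsys interface. */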
static void amdgpu_bo_get_tiling(struct pb_buffer *_buf,
                                 enum radeon_bo_layout *microtiled,
                                 enum radeon_bo_layout *macrotiled,
                                 unsigned *bankw, unsigned *bankh,
                                 unsigned *tile_split,
                                 unsigned *stencil_tile_split,
                                 unsigned *mtilea,
                                 bool *scanout)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_info info = {0};
   uint32_t tiling_flags;
   int r;

   r = amdgpu_bo_query_info(bo->bo, &info);
   if (r)
      return;

   tiling_flags = info.metadata.tiling_info;

   *microtiled = RADEON_LAYOUT_LINEAR;
   *macrotiled = RADEON_LAYOUT_LINEAR;

   if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4)  /* 2D_TILED_THIN1 */
      *macrotiled = RADEON_LAYOUT_TILED;
   else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
      *microtiled = RADEON_LAYOUT_TILED;

   if (bankw && bankh && tile_split && mtilea) {
      *bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
      *bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
      *tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
      *mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
   }
   if (scanout)
      *scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
}
static void amdgpu_bo_set_tiling(struct pb_buffer *_buf,
                                 struct radeon_winsys_cs *rcs,
                                 enum radeon_bo_layout microtiled,
                                 enum radeon_bo_layout macrotiled,
                                 unsigned pipe_config,
                                 unsigned bankw, unsigned bankh,
                                 unsigned tile_split,
                                 unsigned stencil_tile_split,
                                 unsigned mtilea, unsigned num_banks,
                                 uint32_t pitch,
                                 bool scanout)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_metadata metadata = {0};
   uint32_t tiling_flags = 0;

   if (macrotiled == RADEON_LAYOUT_TILED)
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
   else if (microtiled == RADEON_LAYOUT_TILED)
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
   else
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */

   tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, pipe_config);
   tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(bankw));
   tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(bankh));
   if (tile_split)
      tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(tile_split));
   tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(mtilea));
   tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(num_banks)-1);

   if (scanout)
      tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
   else
      tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */

   metadata.tiling_info = tiling_flags;

   amdgpu_bo_set_metadata(bo->bo, &metadata);
}
static struct radeon_winsys_cs_handle *amdgpu_get_cs_handle(struct pb_buffer *_buf)
{
   /* return a direct pointer to amdgpu_winsys_bo. */
   return (struct radeon_winsys_cs_handle*)_buf;
}
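
/* Buffer creation entry point. The "usage" value computed here serves only
 * as a cache key for pb_cache: it encodes the domain and flags so that
 * buffers with different placements are never treated as compatible. */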
static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
                 unsigned size,
                 unsigned alignment,
                 boolean use_reusable_pool,
                 enum radeon_bo_domain domain,
                 enum radeon_bo_flag flags)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   unsigned usage = 0;

   /* Don't use VRAM if the GPU doesn't have much. This is only the initial
    * domain. The kernel is free to move the buffer if it wants to.
    *
    * 64MB means no VRAM by today's standards.
    */
   if (domain & RADEON_DOMAIN_VRAM && ws->info.vram_size <= 64*1024*1024) {
      domain = RADEON_DOMAIN_GTT;
      flags = RADEON_FLAG_GTT_WC;
   }

   /* Align size to page size. This is the minimum alignment for normal
    * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
    * like constant/uniform buffers, can benefit from better and more reuse.
    */
   size = align(size, ws->gart_page_size);

   /* Only set one usage bit each for domains and flags, or the cache manager
    * might consider different sets of domains / flags compatible.
    */
   if (domain == RADEON_DOMAIN_VRAM_GTT)
      usage = 1 << 2;
   else
      usage = domain >> 1;
   assert(flags < sizeof(usage) * 8 - 3);
   usage |= 1 << (flags + 3);

   /* Get a buffer from the cache. */
   if (use_reusable_pool) {
      bo = (struct amdgpu_winsys_bo*)
           pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment,
                                   usage);
      if (bo)
         return &bo->base;
   }

   /* Create a new one. */
   bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags);
   if (!bo)
      return NULL;

   bo->use_reusable_pool = use_reusable_pool;
   return &bo->base;
}
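
/* Import a BO shared by another process (GEM flink name or dma-buf fd),
 * assign a GPU virtual address to it, and pick up its preferred heap as
 * the initial domain for memory accounting. */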
static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
                                               struct winsys_handle *whandle,
                                               unsigned *stride)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   enum amdgpu_bo_handle_type type;
   struct amdgpu_bo_import_result result = {0};
   uint64_t va;
   amdgpu_va_handle va_handle;
   struct amdgpu_bo_info info = {0};
   enum radeon_bo_domain initial = 0;
   int r;

   /* Initialize the structure. */
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   default:
      return NULL;
   }

   r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
   if (r)
      goto error;

   /* Get initial domains. */
   r = amdgpu_bo_query_info(result.buf_handle, &info);
   if (r)
      goto error_query;

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             result.alloc_size, 1 << 20, 0, &va, &va_handle, 0);
   if (r)
      goto error_query;

   r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
      initial |= RADEON_DOMAIN_VRAM;
   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
      initial |= RADEON_DOMAIN_GTT;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = info.phys_alignment;
   bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
   bo->bo = result.buf_handle;
   bo->base.size = result.alloc_size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->rws = ws;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = initial;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
   bo->is_shared = true;

   if (stride)
      *stride = whandle->stride;

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align(bo->base.size, ws->gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align(bo->base.size, ws->gart_page_size);

   return &bo->base;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_query:
   amdgpu_bo_free(result.buf_handle);

error:
   FREE(bo);
   return NULL;
}
static boolean amdgpu_bo_get_handle(struct pb_buffer *buffer,
                                    unsigned stride,
                                    struct winsys_handle *whandle)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
   enum amdgpu_bo_handle_type type;
   int r;

   bo->use_reusable_pool = false;

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   case DRM_API_HANDLE_TYPE_KMS:
      type = amdgpu_bo_handle_type_kms;
      break;
   default:
      return FALSE;
   }

   r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
   if (r)
      return FALSE;

   whandle->stride = stride;
   bo->is_shared = true;
   return TRUE;
}
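
/* Wrap user memory in a GTT BO (userptr). The virtual address range is
 * page-aligned (1 << 12), and amdgpu_bo_map returns the user pointer
 * directly for such buffers. */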
static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
                                            void *pointer, unsigned size)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   amdgpu_bo_handle buf_handle;
   struct amdgpu_winsys_bo *bo;
   uint64_t va;
   amdgpu_va_handle va_handle;

   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo)
      return NULL;

   if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle))
      goto error;

   if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size, 1 << 12, 0, &va, &va_handle, 0))
      goto error_va_alloc;

   if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP))
      goto error_va_map;

   /* Initialize it. */
   pipe_reference_init(&bo->base.reference, 1);
   bo->bo = buf_handle;
   bo->base.alignment = 0;
   bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->rws = ws;
   bo->user_ptr = pointer;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = RADEON_DOMAIN_GTT;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   ws->allocated_gtt += align(bo->base.size, ws->gart_page_size);

   return (struct pb_buffer*)bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error:
   FREE(bo);
   return NULL;
}
static uint64_t amdgpu_bo_get_va(struct radeon_winsys_cs_handle *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->va;
}
void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.buffer_get_cs_handle = amdgpu_get_cs_handle;
   ws->base.buffer_set_tiling = amdgpu_bo_set_tiling;
   ws->base.buffer_get_tiling = amdgpu_bo_get_tiling;
   ws->base.buffer_map = amdgpu_bo_map;
   ws->base.buffer_unmap = amdgpu_bo_unmap;
   ws->base.buffer_wait = amdgpu_bo_wait;
   ws->base.buffer_create = amdgpu_bo_create;
   ws->base.buffer_from_handle = amdgpu_bo_from_handle;
   ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
   ws->base.buffer_get_handle = amdgpu_bo_get_handle;
   ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
   ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
}