/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "amdgpu_cs.h"

#include "os/os_time.h"
#include "state_tracker/drm_driver.h"
#include <amdgpu_drm.h>
#include <stdio.h>
#include <inttypes.h>
struct amdgpu_sparse_backing_chunk {
   uint32_t begin, end;
};
static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
                 uint64_t size,
                 unsigned alignment,
                 enum radeon_bo_domain domain,
                 enum radeon_bo_flag flags);
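
/* Wait for all GPU work referencing the buffer to finish, up to "timeout"
 * nanoseconds (0 = just poll). Returns true if the buffer is idle. Shared
 * buffers fall back to amdgpu_bo_wait_for_idle, because user fences only
 * track submissions made by this process.
 */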
static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
                           enum radeon_bo_usage usage)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_winsys *ws = bo->ws;
   int64_t abs_timeout;

   if (timeout == 0) {
      if (p_atomic_read(&bo->num_active_ioctls))
         return false;

   } else {
      abs_timeout = os_time_get_absolute_timeout(timeout);

      /* Wait if any ioctl is being submitted with this buffer. */
      if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
         return false;
   }

   if (bo->is_shared) {
      /* We can't use user fences for shared buffers, because user fences
       * are local to this process only. If we want to wait for all buffer
       * uses in all processes, we have to use amdgpu_bo_wait_for_idle.
       */
      bool buffer_busy = true;
      int r;

      r = amdgpu_bo_wait_for_idle(bo->bo, timeout, &buffer_busy);
      if (r)
         fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__,
                 r);
      return !buffer_busy;
   }

   if (timeout == 0) {
      unsigned idle_fences;
      bool buffer_idle;

      mtx_lock(&ws->bo_fence_lock);

      for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
         if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
            break;
      }

      /* Release the idle fences to avoid checking them again later. */
      for (unsigned i = 0; i < idle_fences; ++i)
         amdgpu_fence_reference(&bo->fences[i], NULL);

      memmove(&bo->fences[0], &bo->fences[idle_fences],
              (bo->num_fences - idle_fences) * sizeof(*bo->fences));
      bo->num_fences -= idle_fences;

      buffer_idle = !bo->num_fences;
      mtx_unlock(&ws->bo_fence_lock);

      return buffer_idle;
   } else {
      bool buffer_idle = true;

      mtx_lock(&ws->bo_fence_lock);
      while (bo->num_fences && buffer_idle) {
         struct pipe_fence_handle *fence = NULL;
         bool fence_idle = false;

         amdgpu_fence_reference(&fence, bo->fences[0]);

         /* Wait for the fence. */
         mtx_unlock(&ws->bo_fence_lock);
         if (amdgpu_fence_wait(fence, abs_timeout, true))
            fence_idle = true;
         else
            buffer_idle = false;
         mtx_lock(&ws->bo_fence_lock);

         /* Release an idle fence to avoid checking it again later, keeping in
          * mind that the fence array may have been modified by other threads.
          */
         if (fence_idle && bo->num_fences && bo->fences[0] == fence) {
            amdgpu_fence_reference(&bo->fences[0], NULL);
            memmove(&bo->fences[0], &bo->fences[1],
                    (bo->num_fences - 1) * sizeof(*bo->fences));
            bo->num_fences--;
         }

         amdgpu_fence_reference(&fence, NULL);
      }
      mtx_unlock(&ws->bo_fence_lock);

      return buffer_idle;
   }
}
static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
      struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
}
static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
{
   for (unsigned i = 0; i < bo->num_fences; ++i)
      amdgpu_fence_reference(&bo->fences[i], NULL);

   FREE(bo->fences);
   bo->num_fences = 0;
   bo->max_fences = 0;
}
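
/* Destroy a "real" buffer (never a slab entry): unlink it from the global
 * BO list, unmap its GPU virtual address, free the kernel handle and update
 * the VRAM/GTT accounting.
 */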
void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   assert(bo->bo && "must not be called for slab entries");

   mtx_lock(&bo->ws->global_bo_list_lock);
   LIST_DEL(&bo->u.real.global_list_item);
   bo->ws->num_buffers--;
   mtx_unlock(&bo->ws->global_bo_list_lock);

   amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
   amdgpu_va_range_free(bo->u.real.va_handle);
   amdgpu_bo_free(bo->bo);

   amdgpu_bo_remove_fences(bo);

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->info.gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->info.gart_page_size);

   if (bo->u.real.map_count >= 1) {
      if (bo->initial_domain & RADEON_DOMAIN_VRAM)
         bo->ws->mapped_vram -= bo->base.size;
      else if (bo->initial_domain & RADEON_DOMAIN_GTT)
         bo->ws->mapped_gtt -= bo->base.size;
      bo->ws->num_mapped_buffers--;
   }

   FREE(bo);
}
static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   assert(bo->bo); /* slab buffers have a separate vtbl */

   if (bo->u.real.use_reusable_pool)
      pb_cache_add_buffer(&bo->u.real.cache_entry);
   else
      amdgpu_bo_destroy(_buf);
}
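
/* Map a buffer for CPU access. Unless PIPE_TRANSFER_UNSYNCHRONIZED is set,
 * this flushes any command stream referencing the buffer and then waits:
 * read-only maps only wait for GPU writers, write maps wait for all GPU use.
 * With PIPE_TRANSFER_DONTBLOCK the function returns NULL instead of waiting.
 */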
static void *amdgpu_bo_map(struct pb_buffer *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_winsys_bo *real;
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   int r;
   void *cpu = NULL;
   uint64_t offset = 0;

   /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
      if (usage & PIPE_TRANSFER_DONTBLOCK) {
         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_WRITE)) {
               return NULL;
            }
         } else {
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_READWRITE)) {
               return NULL;
            }
         }
      } else {
         uint64_t time = os_time_get_nano();

         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs) {
               if (amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                            RADEON_USAGE_WRITE)) {
                  cs->flush_cs(cs->flush_data, 0, NULL);
               } else {
                  /* Try to avoid busy-waiting in amdgpu_bo_wait. */
                  if (p_atomic_read(&bo->num_active_ioctls))
                     amdgpu_cs_sync_flush(rcs);
               }
            }

            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_WRITE);
         } else {
            /* Mapping for write. */
            if (cs) {
               if (amdgpu_bo_is_referenced_by_cs(cs, bo)) {
                  cs->flush_cs(cs->flush_data, 0, NULL);
               } else {
                  /* Try to avoid busy-waiting in amdgpu_bo_wait. */
                  if (p_atomic_read(&bo->num_active_ioctls))
                     amdgpu_cs_sync_flush(rcs);
               }
            }

            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_READWRITE);
         }

         bo->ws->buffer_wait_time += os_time_get_nano() - time;
      }
   }

   /* If the buffer is created from user memory, return the user pointer. */
   if (bo->user_ptr)
      return bo->user_ptr;

   if (bo->bo) {
      real = bo;
   } else {
      real = bo->u.slab.real;
      offset = bo->va - real->va;
   }

   r = amdgpu_bo_cpu_map(real->bo, &cpu);
   if (r) {
      /* Clear the cache and try again. */
      pb_cache_release_all_buffers(&real->ws->bo_cache);
      r = amdgpu_bo_cpu_map(real->bo, &cpu);
      if (r)
         return NULL;
   }

   if (p_atomic_inc_return(&real->u.real.map_count) == 1) {
      if (real->initial_domain & RADEON_DOMAIN_VRAM)
         real->ws->mapped_vram += real->base.size;
      else if (real->initial_domain & RADEON_DOMAIN_GTT)
         real->ws->mapped_gtt += real->base.size;
      real->ws->num_mapped_buffers++;
   }
   return (uint8_t*)cpu + offset;
}
static void amdgpu_bo_unmap(struct pb_buffer *buf)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_winsys_bo *real;

   if (bo->user_ptr)
      return;

   real = bo->bo ? bo : bo->u.slab.real;

   if (p_atomic_dec_zero(&real->u.real.map_count)) {
      if (real->initial_domain & RADEON_DOMAIN_VRAM)
         real->ws->mapped_vram -= real->base.size;
      else if (real->initial_domain & RADEON_DOMAIN_GTT)
         real->ws->mapped_gtt -= real->base.size;
      real->ws->num_mapped_buffers--;
   }

   amdgpu_bo_cpu_unmap(real->bo);
}
static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
   amdgpu_bo_destroy_or_cache
   /* other functions are never called */
};
static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_winsys *ws = bo->ws;

   mtx_lock(&ws->global_bo_list_lock);
   LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
   ws->num_buffers++;
   mtx_unlock(&ws->global_bo_list_lock);
}
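
/* Allocate a new kernel buffer object, assign it a GPU virtual address and
 * fill in the winsys wrapper. This is the slow path used when neither the
 * slab allocator nor the pb_cache can satisfy a request.
 */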
static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
                                                 uint64_t size,
                                                 unsigned alignment,
                                                 unsigned usage,
                                                 enum radeon_bo_domain initial_domain,
                                                 unsigned flags,
                                                 unsigned pb_cache_bucket)
{
   struct amdgpu_bo_alloc_request request = {0};
   amdgpu_bo_handle buf_handle;
   uint64_t va = 0;
   struct amdgpu_winsys_bo *bo;
   amdgpu_va_handle va_handle;
   unsigned va_gap_size;
   int r;

   assert(initial_domain & RADEON_DOMAIN_VRAM_GTT);
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo)
      return NULL;

   pb_cache_init_entry(&ws->bo_cache, &bo->u.real.cache_entry, &bo->base,
                       pb_cache_bucket);
   request.alloc_size = size;
   request.phys_alignment = alignment;

   if (initial_domain & RADEON_DOMAIN_VRAM)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
   if (initial_domain & RADEON_DOMAIN_GTT)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;

   if (flags & RADEON_FLAG_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
   if (flags & RADEON_FLAG_NO_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
   if (flags & RADEON_FLAG_GTT_WC)
      request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;

   r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
      fprintf(stderr, "amdgpu:    size      : %"PRIu64" bytes\n", size);
      fprintf(stderr, "amdgpu:    alignment : %u bytes\n", alignment);
      fprintf(stderr, "amdgpu:    domains   : %u\n", initial_domain);
      goto error_bo_alloc;
   }

   va_gap_size = ws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size + va_gap_size, alignment, 0, &va, &va_handle, 0);
   if (r)
      goto error_va_alloc;

   r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = alignment;
   bo->base.usage = usage;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->bo = buf_handle;
   bo->va = va;
   bo->u.real.va_handle = va_handle;
   bo->initial_domain = initial_domain;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   if (initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align64(size, ws->info.gart_page_size);
   else if (initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align64(size, ws->info.gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error_bo_alloc:
   FREE(bo);
   return NULL;
}
bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
      return false;
   }

   return amdgpu_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
}
bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
{
   struct amdgpu_winsys_bo *bo = NULL; /* fix container_of */
   bo = container_of(entry, bo, u.slab.entry);

   return amdgpu_bo_can_reclaim(&bo->base);
}
static void amdgpu_bo_slab_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   pb_slab_free(&bo->ws->bo_slabs, &bo->u.slab.entry);
}

static const struct pb_vtbl amdgpu_winsys_bo_slab_vtbl = {
   amdgpu_bo_slab_destroy
   /* other functions are never called */
};
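
/* Allocate one slab: a 64 KB buffer that is carved into "entry_size"-sized
 * sub-allocations handed out by the pb_slabs machinery. The "heap" index
 * selects the buffer flags and domain, mirroring the encoding used in
 * amdgpu_bo_create.
 */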
struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
                                     unsigned entry_size,
                                     unsigned group_index)
{
   struct amdgpu_winsys *ws = priv;
   struct amdgpu_slab *slab = CALLOC_STRUCT(amdgpu_slab);
   enum radeon_bo_domain domains;
   enum radeon_bo_flag flags = 0;
   uint32_t base_id;

   if (!slab)
      return NULL;

   if (heap & 1)
      flags |= RADEON_FLAG_GTT_WC;
   if (heap & 2)
      flags |= RADEON_FLAG_CPU_ACCESS;

   switch (heap >> 2) {
   case 0:
      domains = RADEON_DOMAIN_VRAM;
      break;
   default:
   case 1:
      domains = RADEON_DOMAIN_VRAM_GTT;
      break;
   case 2:
      domains = RADEON_DOMAIN_GTT;
      break;
   }

   slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(&ws->base,
                                                    64 * 1024, 64 * 1024,
                                                    domains, flags));
   if (!slab->buffer)
      goto fail;

   assert(slab->buffer->bo);

   slab->base.num_entries = slab->buffer->base.size / entry_size;
   slab->base.num_free = slab->base.num_entries;
   slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
   if (!slab->entries)
      goto fail_buffer;

   LIST_INITHEAD(&slab->base.free);

   base_id = __sync_fetch_and_add(&ws->next_bo_unique_id, slab->base.num_entries);

   for (unsigned i = 0; i < slab->base.num_entries; ++i) {
      struct amdgpu_winsys_bo *bo = &slab->entries[i];

      bo->base.alignment = entry_size;
      bo->base.usage = slab->buffer->base.usage;
      bo->base.size = entry_size;
      bo->base.vtbl = &amdgpu_winsys_bo_slab_vtbl;
      bo->ws = ws;
      bo->va = slab->buffer->va + i * entry_size;
      bo->initial_domain = domains;
      bo->unique_id = base_id + i;
      bo->u.slab.entry.slab = &slab->base;
      bo->u.slab.entry.group_index = group_index;
      bo->u.slab.real = slab->buffer;

      LIST_ADDTAIL(&bo->u.slab.entry.head, &slab->base.free);
   }

   return &slab->base;

fail_buffer:
   amdgpu_winsys_bo_reference(&slab->buffer, NULL);
fail:
   FREE(slab);
   return NULL;
}
void amdgpu_bo_slab_free(void *priv, struct pb_slab *pslab)
{
   struct amdgpu_slab *slab = amdgpu_slab(pslab);

   for (unsigned i = 0; i < slab->base.num_entries; ++i)
      amdgpu_bo_remove_fences(&slab->entries[i]);

   FREE(slab->entries);
   amdgpu_winsys_bo_reference(&slab->buffer, NULL);
   FREE(slab);
}
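
/* Convert the TILE_SPLIT field of the kernel tiling flags (an encoded value
 * in the 0-6 range) to a size in bytes, and back.
 */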
static unsigned eg_tile_split(unsigned tile_split)
{
   switch (tile_split) {
   case 0:     tile_split = 64;    break;
   case 1:     tile_split = 128;   break;
   case 2:     tile_split = 256;   break;
   case 3:     tile_split = 512;   break;
   default:
   case 4:     tile_split = 1024;  break;
   case 5:     tile_split = 2048;  break;
   case 6:     tile_split = 4096;  break;
   }
   return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
   switch (eg_tile_split) {
   case 64:    return 0;
   case 128:   return 1;
   case 256:   return 2;
   case 512:   return 3;
   default:
   case 1024:  return 4;
   case 2048:  return 5;
   case 4096:  return 6;
   }
}
static void amdgpu_buffer_get_metadata(struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_info info = {0};
   uint64_t tiling_flags;
   int r;

   assert(bo->bo && "must not be called for slab entries");

   r = amdgpu_bo_query_info(bo->bo, &info);
   if (r)
      return;

   tiling_flags = info.metadata.tiling_info;

   if (bo->ws->info.chip_class >= GFX9) {
      md->u.gfx9.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
   } else {
      md->u.legacy.microtile = RADEON_LAYOUT_LINEAR;
      md->u.legacy.macrotile = RADEON_LAYOUT_LINEAR;

      if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4)  /* 2D_TILED_THIN1 */
         md->u.legacy.macrotile = RADEON_LAYOUT_TILED;
      else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
         md->u.legacy.microtile = RADEON_LAYOUT_TILED;

      md->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
      md->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
      md->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
      md->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
      md->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
      md->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
      md->u.legacy.scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
   }

   md->size_metadata = info.metadata.size_metadata;
   memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
}
static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_metadata metadata = {0};
   uint64_t tiling_flags = 0;

   assert(bo->bo && "must not be called for slab entries");

   if (bo->ws->info.chip_class >= GFX9) {
      tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, md->u.gfx9.swizzle_mode);
   } else {
      if (md->u.legacy.macrotile == RADEON_LAYOUT_TILED)
         tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
      else if (md->u.legacy.microtile == RADEON_LAYOUT_TILED)
         tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
      else
         tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */

      tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, md->u.legacy.pipe_config);
      tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(md->u.legacy.bankw));
      tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(md->u.legacy.bankh));
      if (md->u.legacy.tile_split)
         tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(md->u.legacy.tile_split));
      tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(md->u.legacy.mtilea));
      tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(md->u.legacy.num_banks)-1);

      if (md->u.legacy.scanout)
         tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
      else
         tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
   }

   metadata.tiling_info = tiling_flags;
   metadata.size_metadata = md->size_metadata;
   memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));

   amdgpu_bo_set_metadata(bo->bo, &metadata);
}
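
/* Generic buffer allocation entry point. Small buffers are sub-allocated
 * from slabs; everything else is first looked up in the pb_cache and only
 * allocated from the kernel when the cache has no suitable buffer.
 */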
static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
                 uint64_t size,
                 unsigned alignment,
                 enum radeon_bo_domain domain,
                 enum radeon_bo_flag flags)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   unsigned usage = 0, pb_cache_bucket;

   /* Sub-allocate small buffers from slabs. */
   if (!(flags & RADEON_FLAG_HANDLE) &&
       size <= (1 << AMDGPU_SLAB_MAX_SIZE_LOG2) &&
       alignment <= MAX2(1 << AMDGPU_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
      struct pb_slab_entry *entry;
      unsigned heap = 0;

      if (flags & RADEON_FLAG_GTT_WC)
         heap |= 1;
      if (flags & RADEON_FLAG_CPU_ACCESS)
         heap |= 2;
      if (flags & ~(RADEON_FLAG_GTT_WC | RADEON_FLAG_CPU_ACCESS))
         goto no_slab;

      switch (domain) {
      case RADEON_DOMAIN_VRAM:
         heap |= 0 * 4;
         break;
      case RADEON_DOMAIN_VRAM_GTT:
         heap |= 1 * 4;
         break;
      case RADEON_DOMAIN_GTT:
         heap |= 2 * 4;
         break;
      default:
         goto no_slab;
      }

      entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
      if (!entry) {
         /* Clear the cache and try again. */
         pb_cache_release_all_buffers(&ws->bo_cache);

         entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
      }
      if (!entry)
         return NULL;

      bo = container_of(entry, bo, u.slab.entry);

      pipe_reference_init(&bo->base.reference, 1);

      return &bo->base;
   }
no_slab:

   /* This flag is irrelevant for the cache. */
   flags &= ~RADEON_FLAG_HANDLE;

   /* Align size to page size. This is the minimum alignment for normal
    * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
    * like constant/uniform buffers, can benefit from better and more reuse.
    */
   size = align64(size, ws->info.gart_page_size);
   alignment = align(alignment, ws->info.gart_page_size);

   /* Only set one usage bit each for domains and flags, or the cache manager
    * might consider different sets of domains / flags compatible
    */
   if (domain == RADEON_DOMAIN_VRAM_GTT)
      usage = 1 << 2;
   else
      usage = (unsigned)domain >> 1;
   assert(flags < sizeof(usage) * 8 - 3);
   usage |= 1 << (flags + 3);

   /* Determine the pb_cache bucket for minimizing pb_cache misses. */
   pb_cache_bucket = 0;
   if (domain & RADEON_DOMAIN_VRAM) /* VRAM or VRAM+GTT */
      pb_cache_bucket += 1;
   if (flags == RADEON_FLAG_GTT_WC) /* WC */
      pb_cache_bucket += 2;
   assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));

   /* Get a buffer from the cache. */
   bo = (struct amdgpu_winsys_bo*)
        pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage,
                                pb_cache_bucket);
   if (bo)
      return &bo->base;

   /* Create a new one. */
   bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
                         pb_cache_bucket);
   if (!bo) {
      /* Clear the cache and try again. */
      pb_slabs_reclaim(&ws->bo_slabs);
      pb_cache_release_all_buffers(&ws->bo_cache);
      bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
                            pb_cache_bucket);
      if (!bo)
         return NULL;
   }

   bo->u.real.use_reusable_pool = true;
   return &bo->base;
}
static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
                                               struct winsys_handle *whandle,
                                               unsigned *stride,
                                               unsigned *offset)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   enum amdgpu_bo_handle_type type;
   struct amdgpu_bo_import_result result = {0};
   uint64_t va;
   amdgpu_va_handle va_handle;
   struct amdgpu_bo_info info = {0};
   enum radeon_bo_domain initial = 0;
   int r;

   /* Initialize the structure. */
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo)
      return NULL;

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   default:
      return NULL;
   }

   r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
   if (r)
      goto error;

   /* Get initial domains. */
   r = amdgpu_bo_query_info(result.buf_handle, &info);
   if (r)
      goto error_query;

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             result.alloc_size, 1 << 20, 0, &va, &va_handle, 0);
   if (r)
      goto error_query;

   r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
      initial |= RADEON_DOMAIN_VRAM;
   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
      initial |= RADEON_DOMAIN_GTT;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = info.phys_alignment;
   bo->bo = result.buf_handle;
   bo->base.size = result.alloc_size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->va = va;
   bo->u.real.va_handle = va_handle;
   bo->initial_domain = initial;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
   bo->is_shared = true;

   if (stride)
      *stride = whandle->stride;
   if (offset)
      *offset = whandle->offset;

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align64(bo->base.size, ws->info.gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return &bo->base;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_query:
   amdgpu_bo_free(result.buf_handle);

error:
   FREE(bo);
   return NULL;
}
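
/* Export a buffer as a flink name, dma-buf fd or KMS handle. The buffer is
 * marked as shared and excluded from the reusable pool so the cache never
 * hands it out again.
 */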
static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
                                 unsigned stride, unsigned offset,
                                 unsigned slice_size,
                                 struct winsys_handle *whandle)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
   enum amdgpu_bo_handle_type type;
   int r;

   if (!bo->bo) {
      offset += bo->va - bo->u.slab.real->va;
      bo = bo->u.slab.real;
   }

   bo->u.real.use_reusable_pool = false;

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   case DRM_API_HANDLE_TYPE_KMS:
      type = amdgpu_bo_handle_type_kms;
      break;
   default:
      return false;
   }

   r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
   if (r)
      return false;

   whandle->stride = stride;
   whandle->offset = offset;
   whandle->offset += slice_size * whandle->layer;
   bo->is_shared = true;
   return true;
}
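
/* Wrap an existing user memory allocation in a buffer object. The memory is
 * registered with the kernel via amdgpu_create_bo_from_user_mem and mapped
 * into the GPU address space; bo_map returns the original pointer for such
 * buffers.
 */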
static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
                                            void *pointer, uint64_t size)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   amdgpu_bo_handle buf_handle;
   struct amdgpu_winsys_bo *bo;
   uint64_t va;
   amdgpu_va_handle va_handle;

   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo)
      return NULL;

   if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle))
      goto error;

   if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size, 1 << 12, 0, &va, &va_handle, 0))
      goto error_va_alloc;

   if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP))
      goto error_va_map;

   /* Initialize it. */
   pipe_reference_init(&bo->base.reference, 1);
   bo->bo = buf_handle;
   bo->base.alignment = 0;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->user_ptr = pointer;
   bo->va = va;
   bo->u.real.va_handle = va_handle;
   bo->initial_domain = RADEON_DOMAIN_GTT;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return (struct pb_buffer*)bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error:
   FREE(bo);
   return NULL;
}
static bool amdgpu_bo_is_user_ptr(struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->user_ptr != NULL;
}

static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->va;
}
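
/* Plug the buffer management functions into the winsys vtable. */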
void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
   ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
   ws->base.buffer_map = amdgpu_bo_map;
   ws->base.buffer_unmap = amdgpu_bo_unmap;
   ws->base.buffer_wait = amdgpu_bo_wait;
   ws->base.buffer_create = amdgpu_bo_create;
   ws->base.buffer_from_handle = amdgpu_bo_from_handle;
   ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
   ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
   ws->base.buffer_get_handle = amdgpu_bo_get_handle;
   ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
   ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
}