/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based on amdgpu winsys.
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdio.h>
#include <inttypes.h>
#include <pthread.h>
#include <unistd.h>

#include "radv_amdgpu_bo.h"

#include <amdgpu.h>
#include <amdgpu_drm.h>

#include "util/u_atomic.h"

static void radv_amdgpu_winsys_bo_destroy(struct radeon_winsys_bo *_bo);
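
/* Shared helper for all GPU VA map/unmap operations: derives the VM page
 * flags implied by the radeon_bo flags and forwards everything to
 * amdgpu_bo_va_op_raw(). */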
static int
radv_amdgpu_bo_va_op(struct radv_amdgpu_winsys *ws,
		     amdgpu_bo_handle bo,
		     uint64_t offset,
		     uint64_t size,
		     uint64_t addr,
		     uint32_t bo_flags,
		     uint32_t ops)
{
	uint64_t flags = AMDGPU_VM_PAGE_READABLE |
			 AMDGPU_VM_PAGE_EXECUTABLE;

	if ((bo_flags & RADEON_FLAG_VA_UNCACHED) && ws->info.chip_class >= GFX9)
		flags |= AMDGPU_VM_MTYPE_UC;

	if (!(bo_flags & RADEON_FLAG_READ_ONLY))
		flags |= AMDGPU_VM_PAGE_WRITEABLE;

	size = ALIGN(size, getpagesize());

	return amdgpu_bo_va_op_raw(ws->dev, bo, offset, size, addr,
				   flags, ops);
}
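
/* Map one backing range of a virtual BO into its VA space and take a
 * reference on the backing BO. Ranges without a backing BO would need PRT
 * support and are skipped for now. */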
static void
radv_amdgpu_winsys_virtual_map(struct radv_amdgpu_winsys_bo *bo,
			       const struct radv_amdgpu_map_range *range)
{
	assert(range->size);

	if (!range->bo)
		return; /* TODO: PRT mapping */

	p_atomic_inc(&range->bo->ref_count);
	int r = radv_amdgpu_bo_va_op(bo->ws, range->bo->bo, range->bo_offset,
				     range->size, range->offset + bo->base.va,
				     0, AMDGPU_VA_OP_MAP);
	if (r)
		abort();
}
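
/* Inverse of radv_amdgpu_winsys_virtual_map(): unmap the range and drop the
 * reference on the backing BO via the ref-counted destroy. */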
static void
radv_amdgpu_winsys_virtual_unmap(struct radv_amdgpu_winsys_bo *bo,
				 const struct radv_amdgpu_map_range *range)
{
	assert(range->size);

	if (!range->bo)
		return; /* TODO: PRT mapping */

	int r = radv_amdgpu_bo_va_op(bo->ws, range->bo->bo, range->bo_offset,
				     range->size, range->offset + bo->base.va,
				     0, AMDGPU_VA_OP_UNMAP);
	if (r)
		abort();
	/* Releases the reference taken in radv_amdgpu_winsys_virtual_map(). */
	radv_amdgpu_winsys_bo_destroy((struct radeon_winsys_bo *)range->bo);
}
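
/* qsort comparator ordering BOs by pointer value so that duplicates end up
 * adjacent. */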
static int bo_comparator(const void *ap, const void *bp) {
	struct radv_amdgpu_bo *a = *(struct radv_amdgpu_bo *const *)ap;
	struct radv_amdgpu_bo *b = *(struct radv_amdgpu_bo *const *)bp;
	return (a > b) ? 1 : (a < b) ? -1 : 0;
}
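
/* Rebuild the deduplicated list of backing BOs of a virtual BO from its
 * ranges; this flat list is what submissions reference. */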
static void
radv_amdgpu_winsys_rebuild_bo_list(struct radv_amdgpu_winsys_bo *bo)
{
	if (bo->bo_capacity < bo->range_count) {
		uint32_t new_count = MAX2(bo->bo_capacity * 2, bo->range_count);
		bo->bos = realloc(bo->bos, new_count * sizeof(struct radv_amdgpu_winsys_bo *));
		bo->bo_capacity = new_count;
	}

	uint32_t temp_bo_count = 0;
	for (uint32_t i = 0; i < bo->range_count; ++i)
		if (bo->ranges[i].bo)
			bo->bos[temp_bo_count++] = bo->ranges[i].bo;

	qsort(bo->bos, temp_bo_count, sizeof(struct radv_amdgpu_winsys_bo *), &bo_comparator);

	/* After sorting, duplicates are adjacent; compact them away. Guard the
	 * empty case so we do not report one BO when there are none. */
	uint32_t final_bo_count = temp_bo_count ? 1 : 0;
	for (uint32_t i = 1; i < temp_bo_count; ++i)
		if (bo->bos[i] != bo->bos[i - 1])
			bo->bos[final_bo_count++] = bo->bos[i];

	bo->bo_count = final_bo_count;
}
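
/* Bind a backing BO (or NULL to unbind) to [offset, offset + size) of a
 * virtual BO. Overlapping ranges are unmapped or split as needed, adjacent
 * compatible ranges are merged, and the flat range list stays sorted by
 * offset. */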
static void
radv_amdgpu_winsys_bo_virtual_bind(struct radeon_winsys_bo *_parent,
				   uint64_t offset, uint64_t size,
				   struct radeon_winsys_bo *_bo, uint64_t bo_offset)
{
	struct radv_amdgpu_winsys_bo *parent = (struct radv_amdgpu_winsys_bo *)_parent;
	struct radv_amdgpu_winsys_bo *bo = (struct radv_amdgpu_winsys_bo *)_bo;
	int range_count_delta, new_idx;
	int first = 0, last;
	struct radv_amdgpu_map_range new_first, new_last;

	assert(parent->is_virtual);
	assert(!bo || !bo->is_virtual);

	if (!size)
		return;

	/* We have at most 2 new ranges (1 by the bind, and another one by splitting
	 * a range that contains the newly bound range). */
	if (parent->range_capacity - parent->range_count < 2) {
		parent->range_capacity += 2;
		parent->ranges = realloc(parent->ranges,
		                         parent->range_capacity * sizeof(struct radv_amdgpu_map_range));
	}

	/*
	 * [first, last] is exactly the range of ranges that either overlap the
	 * new range, or are adjacent to it. This corresponds to the bind ranges
	 * that may change.
	 */
	while (first + 1 < parent->range_count &&
	       parent->ranges[first].offset + parent->ranges[first].size < offset)
		++first;

	last = first;
	while (last + 1 < parent->range_count &&
	       parent->ranges[last].offset <= offset + size)
		++last;

	/* Whether the first or last range are going to be totally removed or just
	 * resized/left alone. Note that in the case of first == last, we will split
	 * this into a part before and after the new range. The remove flag is then
	 * whether to not create the corresponding split part. */
	bool remove_first = parent->ranges[first].offset == offset;
	bool remove_last = parent->ranges[last].offset + parent->ranges[last].size == offset + size;
	bool unmapped_first = false;

	assert(parent->ranges[first].offset <= offset);
	assert(parent->ranges[last].offset + parent->ranges[last].size >= offset + size);

	/* Try to merge the new range with the first range. */
	if (parent->ranges[first].bo == bo &&
	    (!bo || offset - bo_offset == parent->ranges[first].offset - parent->ranges[first].bo_offset)) {
		size += offset - parent->ranges[first].offset;
		offset = parent->ranges[first].offset;
		bo_offset = parent->ranges[first].bo_offset;
		remove_first = true;
	}

	/* Try to merge the new range with the last range. */
	if (parent->ranges[last].bo == bo &&
	    (!bo || offset - bo_offset == parent->ranges[last].offset - parent->ranges[last].bo_offset)) {
		size = parent->ranges[last].offset + parent->ranges[last].size - offset;
		remove_last = true;
	}

	range_count_delta = 1 - (last - first + 1) + !remove_first + !remove_last;
	new_idx = first + !remove_first;

	/* Any range between first and last is going to be entirely covered by the
	 * new range, so just unmap them. */
	for (int i = first + 1; i < last; ++i)
		radv_amdgpu_winsys_virtual_unmap(parent, parent->ranges + i);

	/* If the first/last range are not left alone we unmap them and optionally
	 * map them again after modifications. Note that this implicitly can do the
	 * splitting if first == last. */
	new_first = parent->ranges[first];
	new_last = parent->ranges[last];

	if (parent->ranges[first].offset + parent->ranges[first].size > offset || remove_first) {
		radv_amdgpu_winsys_virtual_unmap(parent, parent->ranges + first);
		unmapped_first = true;

		if (!remove_first) {
			new_first.size = offset - new_first.offset;
			radv_amdgpu_winsys_virtual_map(parent, &new_first);
		}
	}

	if (parent->ranges[last].offset < offset + size || remove_last) {
		if (first != last || !unmapped_first)
			radv_amdgpu_winsys_virtual_unmap(parent, parent->ranges + last);

		if (!remove_last) {
			new_last.size -= offset + size - new_last.offset;
			new_last.offset = offset + size;
			radv_amdgpu_winsys_virtual_map(parent, &new_last);
		}
	}

	/* Moves the range list after last to account for the changed number of ranges. */
	memmove(parent->ranges + last + 1 + range_count_delta, parent->ranges + last + 1,
	        sizeof(struct radv_amdgpu_map_range) * (parent->range_count - last - 1));

	if (!remove_first)
		parent->ranges[first] = new_first;

	if (!remove_last)
		parent->ranges[new_idx + 1] = new_last;

	/* Actually set up the new range. */
	parent->ranges[new_idx].offset = offset;
	parent->ranges[new_idx].size = size;
	parent->ranges[new_idx].bo = bo;
	parent->ranges[new_idx].bo_offset = bo_offset;

	radv_amdgpu_winsys_virtual_map(parent, parent->ranges + new_idx);

	parent->range_count += range_count_delta;

	radv_amdgpu_winsys_rebuild_bo_list(parent);
}
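
/* Drop a reference; on the last one, unmap everything and release the BO. */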
static void radv_amdgpu_winsys_bo_destroy(struct radeon_winsys_bo *_bo)
{
	struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);

	if (p_atomic_dec_return(&bo->ref_count))
		return;
	if (bo->is_virtual) {
		for (uint32_t i = 0; i < bo->range_count; ++i) {
			radv_amdgpu_winsys_virtual_unmap(bo, bo->ranges + i);
		}
		free(bo->bos);
		free(bo->ranges);
	} else {
		if (bo->ws->debug_all_bos) {
			pthread_mutex_lock(&bo->ws->global_bo_list_lock);
			LIST_DEL(&bo->global_list_item);
			bo->ws->num_buffers--;
			pthread_mutex_unlock(&bo->ws->global_bo_list_lock);
		}
		radv_amdgpu_bo_va_op(bo->ws, bo->bo, 0, bo->size, bo->base.va,
				     0, AMDGPU_VA_OP_UNMAP);
		amdgpu_bo_free(bo->bo);
	}
	amdgpu_va_range_free(bo->va_handle);
	FREE(bo);
}
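
/* With debug_all_bos, every BO is kept on a global list so submissions can
 * reference all of them for debugging. */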
static void radv_amdgpu_add_buffer_to_global_list(struct radv_amdgpu_winsys_bo *bo)
{
	struct radv_amdgpu_winsys *ws = bo->ws;

	if (bo->ws->debug_all_bos) {
		pthread_mutex_lock(&ws->global_bo_list_lock);
		LIST_ADDTAIL(&bo->global_list_item, &ws->global_bo_list);
		ws->num_buffers++;
		pthread_mutex_unlock(&ws->global_bo_list_lock);
	}
}
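
/* Allocate a BO: either a purely virtual VA range (RADEON_FLAG_VIRTUAL) or a
 * real allocation that is then mapped at the allocated VA. */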
static struct radeon_winsys_bo *
radv_amdgpu_winsys_bo_create(struct radeon_winsys *_ws,
			     uint64_t size,
			     unsigned alignment,
			     enum radeon_bo_domain initial_domain,
			     unsigned flags)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
	struct radv_amdgpu_winsys_bo *bo;
	struct amdgpu_bo_alloc_request request = {0};
	amdgpu_bo_handle buf_handle;
	uint64_t va = 0;
	amdgpu_va_handle va_handle;
	int r;
	bo = CALLOC_STRUCT(radv_amdgpu_winsys_bo);
	if (!bo) {
		return NULL;
	}

	unsigned virt_alignment = alignment;
	if (size >= ws->info.pte_fragment_size)
		virt_alignment = MAX2(virt_alignment, ws->info.pte_fragment_size);

	r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
				  size, virt_alignment, 0, &va, &va_handle,
				  (flags & RADEON_FLAG_32BIT ? AMDGPU_VA_RANGE_32_BIT : 0) |
				  AMDGPU_VA_RANGE_HIGH);
	if (r)
		goto error_va_alloc;

	bo->base.va = va;
	bo->va_handle = va_handle;
	bo->size = size;
	bo->ws = ws;
	bo->is_virtual = !!(flags & RADEON_FLAG_VIRTUAL);
	bo->ref_count = 1;

	if (flags & RADEON_FLAG_VIRTUAL) {
		bo->ranges = realloc(NULL, sizeof(struct radv_amdgpu_map_range));
		bo->range_count = 1;
		bo->range_capacity = 1;

		bo->ranges[0].offset = 0;
		bo->ranges[0].size = size;
		bo->ranges[0].bo = NULL;
		bo->ranges[0].bo_offset = 0;

		radv_amdgpu_winsys_virtual_map(bo, bo->ranges);
		return (struct radeon_winsys_bo *)bo;
	}

	request.alloc_size = size;
	request.phys_alignment = alignment;

	if (initial_domain & RADEON_DOMAIN_VRAM)
		request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
	if (initial_domain & RADEON_DOMAIN_GTT)
		request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;

	if (flags & RADEON_FLAG_CPU_ACCESS)
		request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	if (flags & RADEON_FLAG_NO_CPU_ACCESS)
		request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	if (flags & RADEON_FLAG_GTT_WC)
		request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	if (!(flags & RADEON_FLAG_IMPLICIT_SYNC) && ws->info.drm_minor >= 22)
		request.flags |= AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
	if (flags & RADEON_FLAG_NO_INTERPROCESS_SHARING &&
	    ws->info.has_local_buffers && ws->use_local_bos) {
		bo->base.is_local = true;
		request.flags |= AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;
	}

	/* this won't do anything on pre 4.9 kernels */
	if (ws->zero_all_vram_allocs && (initial_domain & RADEON_DOMAIN_VRAM))
		request.flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
	r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
	if (r) {
		fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
		fprintf(stderr, "amdgpu: size      : %"PRIu64" bytes\n", size);
		fprintf(stderr, "amdgpu: alignment : %u bytes\n", alignment);
		fprintf(stderr, "amdgpu: domains   : %u\n", initial_domain);
		goto error_bo_alloc;
	}

	r = radv_amdgpu_bo_va_op(ws, buf_handle, 0, size, va, flags,
				 AMDGPU_VA_OP_MAP);
	if (r)
		goto error_va_map;

	bo->bo = buf_handle;
	bo->initial_domain = initial_domain;
	bo->is_shared = false;
	radv_amdgpu_add_buffer_to_global_list(bo);
	return (struct radeon_winsys_bo *)bo;
error_va_map:
	amdgpu_bo_free(buf_handle);

error_bo_alloc:
	amdgpu_va_range_free(va_handle);

error_va_alloc:
	FREE(bo);
	return NULL;
}
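
/* CPU map/unmap: thin wrappers around the amdgpu handle. */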
static void *
radv_amdgpu_winsys_bo_map(struct radeon_winsys_bo *_bo)
{
	struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
	int ret;
	void *data;
	ret = amdgpu_bo_cpu_map(bo->bo, &data);
	if (ret)
		return NULL;
	return data;
}

static void
radv_amdgpu_winsys_bo_unmap(struct radeon_winsys_bo *_bo)
{
	struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
	amdgpu_bo_cpu_unmap(bo->bo);
}
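
/* Import user memory as a BO (userptr) and map it at a fresh VA. */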
static struct radeon_winsys_bo *
radv_amdgpu_winsys_bo_from_ptr(struct radeon_winsys *_ws,
			       void *pointer,
			       uint64_t size)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
	amdgpu_bo_handle buf_handle;
	struct radv_amdgpu_winsys_bo *bo;
	uint64_t va;
	amdgpu_va_handle va_handle;

	bo = CALLOC_STRUCT(radv_amdgpu_winsys_bo);
	if (!bo)
		return NULL;

	if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle))
		goto error;

	if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
				  size, 1 << 12, 0, &va, &va_handle,
				  AMDGPU_VA_RANGE_HIGH))
		goto error_va_alloc;

	if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP))
		goto error_va_map;

	bo->base.va = va;
	bo->va_handle = va_handle;
	bo->size = size;
	bo->ref_count = 1;
	bo->ws = ws;
	bo->bo = buf_handle;
	bo->initial_domain = RADEON_DOMAIN_GTT;

	radv_amdgpu_add_buffer_to_global_list(bo);
	return (struct radeon_winsys_bo *)bo;

error_va_map:
	amdgpu_va_range_free(va_handle);

error_va_alloc:
	amdgpu_bo_free(buf_handle);

error:
	FREE(bo);
	return NULL;
}
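
/* Import a BO from a dma-buf fd, query its size and preferred heap, and map
 * it at a fresh VA. */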
static struct radeon_winsys_bo *
radv_amdgpu_winsys_bo_from_fd(struct radeon_winsys *_ws,
			      int fd, unsigned *stride,
			      unsigned *offset)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
	struct radv_amdgpu_winsys_bo *bo;
	uint64_t va;
	amdgpu_va_handle va_handle;
	enum amdgpu_bo_handle_type type = amdgpu_bo_handle_type_dma_buf_fd;
	struct amdgpu_bo_import_result result = {0};
	struct amdgpu_bo_info info = {0};
	enum radeon_bo_domain initial = 0;
	int r;
	bo = CALLOC_STRUCT(radv_amdgpu_winsys_bo);
	if (!bo)
		return NULL;

	r = amdgpu_bo_import(ws->dev, type, fd, &result);
	if (r)
		goto error;

	r = amdgpu_bo_query_info(result.buf_handle, &info);
	if (r)
		goto error_query;

	r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
				  result.alloc_size, 1 << 20, 0, &va, &va_handle,
				  AMDGPU_VA_RANGE_HIGH);
	if (r)
		goto error_query;

	r = radv_amdgpu_bo_va_op(ws, result.buf_handle, 0, result.alloc_size,
				 va, 0, AMDGPU_VA_OP_MAP);
	if (r)
		goto error_va_map;

	if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
		initial |= RADEON_DOMAIN_VRAM;
	if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
		initial |= RADEON_DOMAIN_GTT;

	bo->bo = result.buf_handle;
	bo->base.va = va;
	bo->va_handle = va_handle;
	bo->initial_domain = initial;
	bo->size = result.alloc_size;
	bo->is_shared = true;
	bo->ws = ws;
	bo->ref_count = 1;

	radv_amdgpu_add_buffer_to_global_list(bo);
	return (struct radeon_winsys_bo *)bo;

error_va_map:
	amdgpu_va_range_free(va_handle);

error_query:
	amdgpu_bo_free(result.buf_handle);

error:
	FREE(bo);
	return NULL;
}
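
/* Export a BO as a dma-buf fd and mark it as shared. */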
static bool
radv_amdgpu_winsys_get_fd(struct radeon_winsys *_ws,
			  struct radeon_winsys_bo *_bo,
			  int *fd)
{
	struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
	enum amdgpu_bo_handle_type type = amdgpu_bo_handle_type_dma_buf_fd;
	int r;
	unsigned handle;
	r = amdgpu_bo_export(bo->bo, type, &handle);
	if (r)
		return false;

	*fd = (int)handle;
	bo->is_shared = true;
	return true;
}
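
/* Convert an eg tile split in bytes back to the 0-6 encoding used in the
 * AMDGPU tiling flags. */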
static unsigned radv_eg_tile_split_rev(unsigned eg_tile_split)
{
	switch (eg_tile_split) {
	case 64:    return 0;
	case 128:   return 1;
	case 256:   return 2;
	case 512:   return 3;
	default:
	case 1024:  return 4;
	case 2048:  return 5;
	case 4096:  return 6;
	}
}
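
/* Translate the winsys tiling metadata into AMDGPU_TILING_* flags and store
 * them on the BO, for consumers of the shared handle. */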
static void
radv_amdgpu_winsys_bo_set_metadata(struct radeon_winsys_bo *_bo,
				   struct radeon_bo_metadata *md)
{
	struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
	struct amdgpu_bo_metadata metadata = {0};
	uint32_t tiling_flags = 0;

	if (bo->ws->info.chip_class >= GFX9) {
		tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, md->u.gfx9.swizzle_mode);
	} else {
		if (md->u.legacy.macrotile == RADEON_LAYOUT_TILED)
			tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
		else if (md->u.legacy.microtile == RADEON_LAYOUT_TILED)
			tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
		else
			tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */

		tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, md->u.legacy.pipe_config);
		tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(md->u.legacy.bankw));
		tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(md->u.legacy.bankh));
		if (md->u.legacy.tile_split)
			tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, radv_eg_tile_split_rev(md->u.legacy.tile_split));
		tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(md->u.legacy.mtilea));
		tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(md->u.legacy.num_banks) - 1);

		if (md->u.legacy.scanout)
			tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
		else
			tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
	}

	metadata.tiling_info = tiling_flags;
	metadata.size_metadata = md->size_metadata;
	memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));

	amdgpu_bo_set_metadata(bo->bo, &metadata);
}
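
/* Plug the buffer functions into the winsys vtable. */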
void radv_amdgpu_bo_init_functions(struct radv_amdgpu_winsys *ws)
{
	ws->base.buffer_create = radv_amdgpu_winsys_bo_create;
	ws->base.buffer_destroy = radv_amdgpu_winsys_bo_destroy;
	ws->base.buffer_map = radv_amdgpu_winsys_bo_map;
	ws->base.buffer_unmap = radv_amdgpu_winsys_bo_unmap;
	ws->base.buffer_from_ptr = radv_amdgpu_winsys_bo_from_ptr;
	ws->base.buffer_from_fd = radv_amdgpu_winsys_bo_from_fd;
	ws->base.buffer_get_fd = radv_amdgpu_winsys_get_fd;
	ws->base.buffer_set_metadata = radv_amdgpu_winsys_bo_set_metadata;
	ws->base.buffer_virtual_bind = radv_amdgpu_winsys_bo_virtual_bind;
}