winsys/amdgpu: remove hack for low VRAM configuration
mesa.git: src/gallium/winsys/amdgpu/drm/amdgpu_bo.c

/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "amdgpu_cs.h"

#include "os/os_time.h"
#include "state_tracker/drm_driver.h"
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdio.h>
#include <inttypes.h>

static inline struct amdgpu_winsys_bo *amdgpu_winsys_bo(struct pb_buffer *bo)
{
   return (struct amdgpu_winsys_bo *)bo;
}

static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
                           enum radeon_bo_usage usage)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_winsys *ws = bo->ws;
   int i;

   if (bo->is_shared) {
      /* We can't use user fences for shared buffers, because user fences
       * are local to this process only. If we want to wait for all buffer
       * uses in all processes, we have to use amdgpu_bo_wait_for_idle.
       */
      bool buffer_busy = true;
      int r;

      r = amdgpu_bo_wait_for_idle(bo->bo, timeout, &buffer_busy);
      if (r)
         fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__,
                 r);
      return !buffer_busy;
   }

   if (timeout == 0) {
      /* Timeout == 0 is quite simple. */
      pipe_mutex_lock(ws->bo_fence_lock);
      for (i = 0; i < RING_LAST; i++)
         if (bo->fence[i]) {
            if (amdgpu_fence_wait(bo->fence[i], 0, false)) {
               /* Release the idle fence to avoid checking it again later. */
               amdgpu_fence_reference(&bo->fence[i], NULL);
            } else {
               pipe_mutex_unlock(ws->bo_fence_lock);
               return false;
            }
         }
      pipe_mutex_unlock(ws->bo_fence_lock);
      return true;

   } else {
      struct pipe_fence_handle *fence[RING_LAST] = {};
      bool fence_idle[RING_LAST] = {};
      bool buffer_idle = true;
      int64_t abs_timeout = os_time_get_absolute_timeout(timeout);

      /* Take references to all fences, so that we can wait for them
       * without the lock. */
      pipe_mutex_lock(ws->bo_fence_lock);
      for (i = 0; i < RING_LAST; i++)
         amdgpu_fence_reference(&fence[i], bo->fence[i]);
      pipe_mutex_unlock(ws->bo_fence_lock);

      /* Now wait for the fences. */
      for (i = 0; i < RING_LAST; i++) {
         if (fence[i]) {
            if (amdgpu_fence_wait(fence[i], abs_timeout, true))
               fence_idle[i] = true;
            else
               buffer_idle = false;
         }
      }

      /* Release idle fences to avoid checking them again later. */
      pipe_mutex_lock(ws->bo_fence_lock);
      for (i = 0; i < RING_LAST; i++) {
         if (fence[i] == bo->fence[i] && fence_idle[i])
            amdgpu_fence_reference(&bo->fence[i], NULL);

         amdgpu_fence_reference(&fence[i], NULL);
      }
      pipe_mutex_unlock(ws->bo_fence_lock);

      return buffer_idle;
   }
}
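
/*
 * Usage sketch (illustrative only): how a caller might exercise the two wait
 * modes above through the winsys vtable, assuming "rws" and "buf" come from
 * the caller's context. A zero timeout only polls the per-ring fences; a
 * non-zero timeout (nanoseconds, as it feeds os_time_get_absolute_timeout())
 * blocks until the deadline or until the buffer goes idle.
 *
 *    // Non-blocking busy check: returns immediately.
 *    if (rws->buffer_wait(buf, 0, RADEON_USAGE_READWRITE)) {
 *       // Buffer is idle; it can be reused or mapped without stalling.
 *    }
 *
 *    // Blocking wait (up to ~1 second) for pending GPU work on the buffer.
 *    rws->buffer_wait(buf, 1000000000, RADEON_USAGE_READWRITE);
 */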

static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
      struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
}

void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   int i;

   pipe_mutex_lock(bo->ws->global_bo_list_lock);
   LIST_DEL(&bo->global_list_item);
   bo->ws->num_buffers--;
   pipe_mutex_unlock(bo->ws->global_bo_list_lock);

   amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
   amdgpu_va_range_free(bo->va_handle);
   amdgpu_bo_free(bo->bo);

   for (i = 0; i < RING_LAST; i++)
      amdgpu_fence_reference(&bo->fence[i], NULL);

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->gart_page_size);
   FREE(bo);
}

static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   if (bo->use_reusable_pool)
      pb_cache_add_buffer(&bo->cache_entry);
   else
      amdgpu_bo_destroy(_buf);
}
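
/*
 * Usage sketch (illustrative only): destruction is driven through the
 * pb_buffer reference count rather than called directly. Dropping the last
 * reference invokes the vtbl destroy hook, which either returns the buffer
 * to the cache or frees it as shown above.
 *
 *    struct pb_buffer *buf = ...;   // an amdgpu winsys BO from the caller
 *    pb_reference(&buf, NULL);      // last ref gone -> destroy_or_cache
 */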

static void *amdgpu_bo_map(struct pb_buffer *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   int r;
   void *cpu = NULL;

   /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
      if (usage & PIPE_TRANSFER_DONTBLOCK) {
         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_WRITE)) {
               return NULL;
            }
         } else {
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_READWRITE)) {
               return NULL;
            }
         }
      } else {
         uint64_t time = os_time_get_nano();

         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
               cs->flush_cs(cs->flush_data, 0, NULL);
            }
            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_WRITE);
         } else {
            /* Mapping for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo))
               cs->flush_cs(cs->flush_data, 0, NULL);

            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_READWRITE);
         }

         bo->ws->buffer_wait_time += os_time_get_nano() - time;
      }
   }

   /* If the buffer is created from user memory, return the user pointer. */
   if (bo->user_ptr)
      return bo->user_ptr;

   r = amdgpu_bo_cpu_map(bo->bo, &cpu);
   if (r) {
      /* Clear the cache and try again. */
      pb_cache_release_all_buffers(&bo->ws->bo_cache);
      r = amdgpu_bo_cpu_map(bo->bo, &cpu);
   }
   return r ? NULL : cpu;
}
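
/*
 * Usage sketch (illustrative only): what the transfer flags mean to a caller
 * of buffer_map(), assuming "rws", "rcs" and "buf" come from the caller's
 * context. UNSYNCHRONIZED skips all flushing and fence waits, DONTBLOCK turns
 * a would-be stall into a NULL return (after kicking off an async flush), and
 * the default path flushes and waits until the buffer is idle for the
 * requested access.
 *
 *    // May stall until pending GPU writes to "buf" have completed.
 *    void *ptr = rws->buffer_map(buf, rcs, PIPE_TRANSFER_READ);
 *
 *    // Never stalls; returns NULL if the GPU is still using the buffer.
 *    ptr = rws->buffer_map(buf, rcs,
 *                          PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DONTBLOCK);
 *
 *    // Never stalls and never flushes; the caller guarantees there is no
 *    // conflicting GPU access.
 *    ptr = rws->buffer_map(buf, NULL,
 *                          PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED);
 */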

static void amdgpu_bo_unmap(struct pb_buffer *buf)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;

   amdgpu_bo_cpu_unmap(bo->bo);
}

static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
   amdgpu_bo_destroy_or_cache
   /* other functions are never called */
};

static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_winsys *ws = bo->ws;

   pipe_mutex_lock(ws->global_bo_list_lock);
   LIST_ADDTAIL(&bo->global_list_item, &ws->global_bo_list);
   ws->num_buffers++;
   pipe_mutex_unlock(ws->global_bo_list_lock);
}
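
/*
 * Sketch (illustrative only): code that needs to visit every live BO is
 * expected to walk the global list with the same lock held, mirroring the
 * add path above and the removal in amdgpu_bo_destroy().
 *
 *    struct amdgpu_winsys_bo *bo;
 *
 *    pipe_mutex_lock(ws->global_bo_list_lock);
 *    LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
 *       // inspect or record "bo"; don't drop the lock while iterating
 *    }
 *    pipe_mutex_unlock(ws->global_bo_list_lock);
 */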

static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
                                                 uint64_t size,
                                                 unsigned alignment,
                                                 unsigned usage,
                                                 enum radeon_bo_domain initial_domain,
                                                 unsigned flags)
{
   struct amdgpu_bo_alloc_request request = {0};
   amdgpu_bo_handle buf_handle;
   uint64_t va = 0;
   struct amdgpu_winsys_bo *bo;
   amdgpu_va_handle va_handle;
   int r;

   assert(initial_domain & RADEON_DOMAIN_VRAM_GTT);
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   pb_cache_init_entry(&ws->bo_cache, &bo->cache_entry, &bo->base);
   request.alloc_size = size;
   request.phys_alignment = alignment;

   if (initial_domain & RADEON_DOMAIN_VRAM)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
   if (initial_domain & RADEON_DOMAIN_GTT)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;

   if (flags & RADEON_FLAG_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
   if (flags & RADEON_FLAG_NO_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
   if (flags & RADEON_FLAG_GTT_WC)
      request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;

   r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
      fprintf(stderr, "amdgpu:    size      : %"PRIu64" bytes\n", size);
      fprintf(stderr, "amdgpu:    alignment : %u bytes\n", alignment);
      fprintf(stderr, "amdgpu:    domains   : %u\n", initial_domain);
      goto error_bo_alloc;
   }

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size, alignment, 0, &va, &va_handle, 0);
   if (r)
      goto error_va_alloc;

   r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = alignment;
   bo->base.usage = usage;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->bo = buf_handle;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = initial_domain;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   if (initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align64(size, ws->gart_page_size);
   else if (initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align64(size, ws->gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error_bo_alloc:
   FREE(bo);
   return NULL;
}
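
/*
 * Worked example (illustrative only) of the domain/flag translation above:
 * a VRAM buffer that the CPU never needs to touch, marked write-combined if
 * it ever ends up in GTT, would be requested roughly like this:
 *
 *    initial_domain = RADEON_DOMAIN_VRAM;
 *    flags          = RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_GTT_WC;
 *
 *    // resulting amdgpu_bo_alloc_request:
 *    //    preferred_heap = AMDGPU_GEM_DOMAIN_VRAM
 *    //    flags          = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
 *    //                     AMDGPU_GEM_CREATE_CPU_GTT_USWC
 *
 * Note that the error labels unwind in reverse order of creation
 * (va_op -> va_range_alloc -> bo_alloc), since the VA mapping depends on
 * both the VA range and the BO handle.
 */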

bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
      return false;
   }

   return amdgpu_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
}

static unsigned eg_tile_split(unsigned tile_split)
{
   switch (tile_split) {
   case 0:     tile_split = 64;    break;
   case 1:     tile_split = 128;   break;
   case 2:     tile_split = 256;   break;
   case 3:     tile_split = 512;   break;
   default:
   case 4:     tile_split = 1024;  break;
   case 5:     tile_split = 2048;  break;
   case 6:     tile_split = 4096;  break;
   }
   return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
   switch (eg_tile_split) {
   case 64:    return 0;
   case 128:   return 1;
   case 256:   return 2;
   case 512:   return 3;
   default:
   case 1024:  return 4;
   case 2048:  return 5;
   case 4096:  return 6;
   }
}
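
/*
 * Note (illustrative only): the two helpers above are inverses, converting
 * between the 0-6 hardware encoding of TILE_SPLIT and the split size in
 * bytes, e.g.:
 *
 *    eg_tile_split(3)       == 512
 *    eg_tile_split_rev(512) == 3
 *
 * Unrecognized values fall back to the 1024-byte / encoding-4 case in both
 * directions.
 */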

static void amdgpu_buffer_get_metadata(struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_info info = {0};
   uint32_t tiling_flags;
   int r;

   r = amdgpu_bo_query_info(bo->bo, &info);
   if (r)
      return;

   tiling_flags = info.metadata.tiling_info;

   md->microtile = RADEON_LAYOUT_LINEAR;
   md->macrotile = RADEON_LAYOUT_LINEAR;

   if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4)  /* 2D_TILED_THIN1 */
      md->macrotile = RADEON_LAYOUT_TILED;
   else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
      md->microtile = RADEON_LAYOUT_TILED;

   md->bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
   md->bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
   md->tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
   md->mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
   md->scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */

   md->size_metadata = info.metadata.size_metadata;
   memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
}

static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_metadata metadata = {0};
   uint32_t tiling_flags = 0;

   if (md->macrotile == RADEON_LAYOUT_TILED)
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
   else if (md->microtile == RADEON_LAYOUT_TILED)
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
   else
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */

   tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, md->pipe_config);
   tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(md->bankw));
   tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(md->bankh));
   if (md->tile_split)
      tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(md->tile_split));
   tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(md->mtilea));
   tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(md->num_banks)-1);

   if (md->scanout)
      tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
   else
      tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */

   metadata.tiling_info = tiling_flags;
   metadata.size_metadata = md->size_metadata;
   memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));

   amdgpu_bo_set_metadata(bo->bo, &metadata);
}
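
/*
 * Worked example (illustrative only) of how the get/set pair round-trips.
 * For a 2D-tiled scanout surface with bankw = 1, bankh = 2, tile_split = 512
 * and mtilea = 2, set_metadata() packs (PIPE_CONFIG/NUM_BANKS omitted):
 *
 *    ARRAY_MODE        = 4   (2D_TILED_THIN1)
 *    BANK_WIDTH        = 0   (log2(1))
 *    BANK_HEIGHT       = 1   (log2(2))
 *    TILE_SPLIT        = 3   (eg_tile_split_rev(512))
 *    MACRO_TILE_ASPECT = 1   (log2(2))
 *    MICRO_TILE_MODE   = 0   (DISPLAY)
 *
 * and get_metadata() on the importing side decodes the same fields back into
 * radeon_bo_metadata, which is how tiling information travels with a shared
 * buffer between processes.
 */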

static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
                 uint64_t size,
                 unsigned alignment,
                 boolean use_reusable_pool,
                 enum radeon_bo_domain domain,
                 enum radeon_bo_flag flags)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   unsigned usage = 0;

   /* Align size to page size. This is the minimum alignment for normal
    * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
    * like constant/uniform buffers, can benefit from better and more reuse.
    */
   size = align64(size, ws->gart_page_size);

   /* Only set one usage bit each for domains and flags, or the cache manager
    * might consider different sets of domains / flags compatible
    */
   if (domain == RADEON_DOMAIN_VRAM_GTT)
      usage = 1 << 2;
   else
      usage = domain >> 1;
   assert(flags < sizeof(usage) * 8 - 3);
   usage |= 1 << (flags + 3);

   /* Get a buffer from the cache. */
   if (use_reusable_pool) {
      bo = (struct amdgpu_winsys_bo*)
           pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment,
                                   usage);
      if (bo)
         return &bo->base;
   }

   /* Create a new one. */
   bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags);
   if (!bo) {
      /* Clear the cache and try again. */
      pb_cache_release_all_buffers(&ws->bo_cache);
      bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags);
      if (!bo)
         return NULL;
   }

   bo->use_reusable_pool = use_reusable_pool;
   return &bo->base;
}
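
/*
 * Worked example (illustrative only) of the cache "usage" key computed above,
 * assuming the radeon_winsys.h values RADEON_DOMAIN_GTT = 0x2,
 * RADEON_DOMAIN_VRAM = 0x4 and RADEON_FLAG_GTT_WC = 0x1. Every distinct
 * domain/flags combination lands in its own bit pattern, so
 * pb_cache_reclaim_buffer() never returns an incompatible buffer:
 *
 *    domain = RADEON_DOMAIN_VRAM, flags = RADEON_FLAG_GTT_WC
 *       usage = (0x4 >> 1) | (1 << (0x1 + 3)) = 0x02 | 0x10 = 0x12
 *
 *    domain = RADEON_DOMAIN_VRAM_GTT, flags = 0
 *       usage = (1 << 2)   | (1 << (0x0 + 3)) = 0x04 | 0x08 = 0x0c
 *
 * The assert guards against a flags value large enough to shift past the
 * width of "usage".
 */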

static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
                                               struct winsys_handle *whandle,
                                               unsigned *stride,
                                               unsigned *offset)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   enum amdgpu_bo_handle_type type;
   struct amdgpu_bo_import_result result = {0};
   uint64_t va;
   amdgpu_va_handle va_handle;
   struct amdgpu_bo_info info = {0};
   enum radeon_bo_domain initial = 0;
   int r;

   /* Initialize the structure. */
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   default:
      /* Don't leak the freshly allocated bo on an unknown handle type. */
      goto error;
   }

   r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
   if (r)
      goto error;

   /* Get initial domains. */
   r = amdgpu_bo_query_info(result.buf_handle, &info);
   if (r)
      goto error_query;

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             result.alloc_size, 1 << 20, 0, &va, &va_handle, 0);
   if (r)
      goto error_query;

   r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
      initial |= RADEON_DOMAIN_VRAM;
   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
      initial |= RADEON_DOMAIN_GTT;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = info.phys_alignment;
   bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
   bo->bo = result.buf_handle;
   bo->base.size = result.alloc_size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = initial;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
   bo->is_shared = true;

   if (stride)
      *stride = whandle->stride;
   if (offset)
      *offset = whandle->offset;

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align64(bo->base.size, ws->gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align64(bo->base.size, ws->gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return &bo->base;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_query:
   amdgpu_bo_free(result.buf_handle);

error:
   FREE(bo);
   return NULL;
}

static boolean amdgpu_bo_get_handle(struct pb_buffer *buffer,
                                    unsigned stride, unsigned offset,
                                    unsigned slice_size,
                                    struct winsys_handle *whandle)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
   enum amdgpu_bo_handle_type type;
   int r;

   bo->use_reusable_pool = false;

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   case DRM_API_HANDLE_TYPE_KMS:
      type = amdgpu_bo_handle_type_kms;
      break;
   default:
      return FALSE;
   }

   r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
   if (r)
      return FALSE;

   whandle->stride = stride;
   whandle->offset = offset;
   whandle->offset += slice_size * whandle->layer;
   bo->is_shared = true;
   return TRUE;
}
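
/*
 * Usage sketch (illustrative only): a dma-buf export/import round trip
 * between two processes via the functions above, assuming "rws", "buf" and
 * the surrounding plumbing come from the caller's context.
 *
 *    // Exporting process:
 *    struct winsys_handle out = { .type = DRM_API_HANDLE_TYPE_FD };
 *    if (rws->buffer_get_handle(buf, stride, 0, 0, &out)) {
 *       // out.handle now holds a dma-buf fd; send it to the other process.
 *    }
 *
 *    // Importing process:
 *    struct winsys_handle in = { .type = DRM_API_HANDLE_TYPE_FD,
 *                                .handle = received_fd };
 *    unsigned import_stride, import_offset;
 *    struct pb_buffer *shared =
 *       rws->buffer_from_handle(rws, &in, &import_stride, &import_offset);
 *
 * Once exported, the buffer is marked is_shared and excluded from the
 * reusable pool, and waits on it go through amdgpu_bo_wait_for_idle().
 */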

static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
                                            void *pointer, uint64_t size)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   amdgpu_bo_handle buf_handle;
   struct amdgpu_winsys_bo *bo;
   uint64_t va;
   amdgpu_va_handle va_handle;

   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo)
      return NULL;

   if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle))
      goto error;

   if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size, 1 << 12, 0, &va, &va_handle, 0))
      goto error_va_alloc;

   if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP))
      goto error_va_map;

   /* Initialize it. */
   pipe_reference_init(&bo->base.reference, 1);
   bo->bo = buf_handle;
   bo->base.alignment = 0;
   bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->user_ptr = pointer;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = RADEON_DOMAIN_GTT;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   ws->allocated_gtt += align64(bo->base.size, ws->gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return (struct pb_buffer*)bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error:
   FREE(bo);
   return NULL;
}
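
/*
 * Usage sketch (illustrative only): wrapping application memory as a GTT
 * buffer, assuming "rws" comes from the caller's context. The kernel's
 * userptr path generally requires a page-aligned pointer and size, so a
 * typical caller allocates with os_malloc_aligned() (or similar) first.
 *
 *    void *mem = os_malloc_aligned(buf_size, 4096);
 *    struct pb_buffer *userptr_bo = rws->buffer_from_ptr(rws, mem, buf_size);
 *
 *    // buffer_map() on such a BO simply returns "mem" again (see the
 *    // user_ptr early-out in amdgpu_bo_map()).
 */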

static bool amdgpu_bo_is_user_ptr(struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->user_ptr != NULL;
}

static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->va;
}

void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
   ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
   ws->base.buffer_map = amdgpu_bo_map;
   ws->base.buffer_unmap = amdgpu_bo_unmap;
   ws->base.buffer_wait = amdgpu_bo_wait;
   ws->base.buffer_create = amdgpu_bo_create;
   ws->base.buffer_from_handle = amdgpu_bo_from_handle;
   ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
   ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
   ws->base.buffer_get_handle = amdgpu_bo_get_handle;
   ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
   ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
}
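
/*
 * Usage sketch (illustrative only): a driver-side view of the vtable
 * populated above, going through a typical create/map/write/unmap sequence.
 * "rws" is assumed to be the struct radeon_winsys pointer the driver got
 * from the winsys screen; sizes and flags are placeholders.
 *
 *    struct pb_buffer *buf =
 *       rws->buffer_create(rws, 64 * 1024, 4096, TRUE,
 *                          RADEON_DOMAIN_VRAM, RADEON_FLAG_GTT_WC);
 *    uint64_t gpu_address = rws->buffer_get_virtual_address(buf);
 *
 *    char *map = rws->buffer_map(buf, NULL, PIPE_TRANSFER_WRITE);
 *    if (map) {
 *       memset(map, 0, 64 * 1024);
 *       rws->buffer_unmap(buf);
 *    }
 *
 *    pb_reference(&buf, NULL);   // drop the reference when done
 */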