src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "amdgpu_cs.h"

#include "os/os_time.h"
#include "state_tracker/drm_driver.h"
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdio.h>

static inline struct amdgpu_winsys_bo *amdgpu_winsys_bo(struct pb_buffer *bo)
{
   return (struct amdgpu_winsys_bo *)bo;
}

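/* Check or wait until the buffer is idle for the given usage; returns true
 * if the buffer is idle within the timeout.
 *
 * Shared (imported/exported) buffers go through the kernel via
 * amdgpu_bo_wait_for_idle, because the user fences in bo->fence[] only
 * track submissions from this process. For non-shared buffers, the
 * per-ring fences are polled (timeout == 0) or waited on (timeout != 0),
 * and fences that have already signaled are released so they are not
 * checked again.
 */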
static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
                           enum radeon_bo_usage usage)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_winsys *ws = bo->ws;
   int i;

   if (bo->is_shared) {
      /* We can't use user fences for shared buffers, because user fences
       * are local to this process only. If we want to wait for all buffer
       * uses in all processes, we have to use amdgpu_bo_wait_for_idle.
       */
      bool buffer_busy = true;
      int r;

      r = amdgpu_bo_wait_for_idle(bo->bo, timeout, &buffer_busy);
      if (r)
         fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__,
                 r);
      return !buffer_busy;
   }

   if (timeout == 0) {
      /* Timeout == 0 is quite simple. */
      pipe_mutex_lock(ws->bo_fence_lock);
      for (i = 0; i < RING_LAST; i++)
         if (bo->fence[i]) {
            if (amdgpu_fence_wait(bo->fence[i], 0, false)) {
               /* Release the idle fence to avoid checking it again later. */
               amdgpu_fence_reference(&bo->fence[i], NULL);
            } else {
               pipe_mutex_unlock(ws->bo_fence_lock);
               return false;
            }
         }
      pipe_mutex_unlock(ws->bo_fence_lock);
      return true;

   } else {
      struct pipe_fence_handle *fence[RING_LAST] = {};
      bool fence_idle[RING_LAST] = {};
      bool buffer_idle = true;
      int64_t abs_timeout = os_time_get_absolute_timeout(timeout);

      /* Take references to all fences, so that we can wait for them
       * without the lock. */
      pipe_mutex_lock(ws->bo_fence_lock);
      for (i = 0; i < RING_LAST; i++)
         amdgpu_fence_reference(&fence[i], bo->fence[i]);
      pipe_mutex_unlock(ws->bo_fence_lock);

      /* Now wait for the fences. */
      for (i = 0; i < RING_LAST; i++) {
         if (fence[i]) {
            if (amdgpu_fence_wait(fence[i], abs_timeout, true))
               fence_idle[i] = true;
            else
               buffer_idle = false;
         }
      }

      /* Release idle fences to avoid checking them again later. */
      pipe_mutex_lock(ws->bo_fence_lock);
      for (i = 0; i < RING_LAST; i++) {
         if (fence[i] == bo->fence[i] && fence_idle[i])
            amdgpu_fence_reference(&bo->fence[i], NULL);

         amdgpu_fence_reference(&fence[i], NULL);
      }
      pipe_mutex_unlock(ws->bo_fence_lock);

      return buffer_idle;
   }
}

static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
      struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
}

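/* Free the buffer: unmap its GPU virtual address range, release the VA
 * range and the kernel BO, drop any remaining fence references, and update
 * the VRAM/GTT accounting. amdgpu_bo_destroy_or_cache below returns
 * reusable buffers to the pb_cache instead of destroying them immediately.
 */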
void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   int i;

   pipe_mutex_lock(bo->ws->global_bo_list_lock);
   LIST_DEL(&bo->global_list_item);
   bo->ws->num_buffers--;
   pipe_mutex_unlock(bo->ws->global_bo_list_lock);

   amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
   amdgpu_va_range_free(bo->va_handle);
   amdgpu_bo_free(bo->bo);

   for (i = 0; i < RING_LAST; i++)
      amdgpu_fence_reference(&bo->fence[i], NULL);

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      bo->ws->allocated_vram -= align(bo->base.size, bo->ws->gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      bo->ws->allocated_gtt -= align(bo->base.size, bo->ws->gart_page_size);
   FREE(bo);
}

static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   if (bo->use_reusable_pool)
      pb_cache_add_buffer(&bo->cache_entry);
   else
      amdgpu_bo_destroy(_buf);
}

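/* CPU-map the buffer. Unless PIPE_TRANSFER_UNSYNCHRONIZED is set, this
 * first synchronizes with the GPU: a command stream that references the
 * buffer is flushed, and the buffer is waited on (write access only when
 * mapping for read, any access when mapping for write). With
 * PIPE_TRANSFER_DONTBLOCK, NULL is returned instead of waiting. Buffers
 * created from user memory simply return the user pointer.
 */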
static void *amdgpu_bo_map(struct pb_buffer *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   int r;
   void *cpu = NULL;

   /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
      if (usage & PIPE_TRANSFER_DONTBLOCK) {
         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_WRITE)) {
               return NULL;
            }
         } else {
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_READWRITE)) {
               return NULL;
            }
         }
      } else {
         uint64_t time = os_time_get_nano();

         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
               cs->flush_cs(cs->flush_data, 0, NULL);
            }
            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_WRITE);
         } else {
            /* Mapping for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo))
               cs->flush_cs(cs->flush_data, 0, NULL);

            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_READWRITE);
         }

         bo->ws->buffer_wait_time += os_time_get_nano() - time;
      }
   }

   /* If the buffer is created from user memory, return the user pointer. */
   if (bo->user_ptr)
      return bo->user_ptr;

   r = amdgpu_bo_cpu_map(bo->bo, &cpu);
   if (r) {
      /* Clear the cache and try again. */
      pb_cache_release_all_buffers(&bo->ws->bo_cache);
      r = amdgpu_bo_cpu_map(bo->bo, &cpu);
   }
   return r ? NULL : cpu;
}

static void amdgpu_bo_unmap(struct pb_buffer *buf)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;

   amdgpu_bo_cpu_unmap(bo->bo);
}

static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
   amdgpu_bo_destroy_or_cache
   /* other functions are never called */
};

static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_winsys *ws = bo->ws;

   pipe_mutex_lock(ws->global_bo_list_lock);
   LIST_ADDTAIL(&bo->global_list_item, &ws->global_bo_list);
   ws->num_buffers++;
   pipe_mutex_unlock(ws->global_bo_list_lock);
}

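/* Allocate a new kernel BO and map it into the GPU virtual address space.
 * The radeon domains and flags are translated into AMDGPU_GEM_DOMAIN_* and
 * AMDGPU_GEM_CREATE_* request fields; on success the buffer is added to
 * the global BO list and to the VRAM/GTT accounting.
 */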
static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
                                                 unsigned size,
                                                 unsigned alignment,
                                                 unsigned usage,
                                                 enum radeon_bo_domain initial_domain,
                                                 unsigned flags)
{
   struct amdgpu_bo_alloc_request request = {0};
   amdgpu_bo_handle buf_handle;
   uint64_t va = 0;
   struct amdgpu_winsys_bo *bo;
   amdgpu_va_handle va_handle;
   int r;

   assert(initial_domain & RADEON_DOMAIN_VRAM_GTT);
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   pb_cache_init_entry(&ws->bo_cache, &bo->cache_entry, &bo->base);
   request.alloc_size = size;
   request.phys_alignment = alignment;

   if (initial_domain & RADEON_DOMAIN_VRAM)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
   if (initial_domain & RADEON_DOMAIN_GTT)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;

   if (flags & RADEON_FLAG_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
   if (flags & RADEON_FLAG_NO_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
   if (flags & RADEON_FLAG_GTT_WC)
      request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;

   r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
      fprintf(stderr, "amdgpu: size : %d bytes\n", size);
      fprintf(stderr, "amdgpu: alignment : %d bytes\n", alignment);
      fprintf(stderr, "amdgpu: domains : %d\n", initial_domain);
      goto error_bo_alloc;
   }

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size, alignment, 0, &va, &va_handle, 0);
   if (r)
      goto error_va_alloc;

   r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = alignment;
   bo->base.usage = usage;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->bo = buf_handle;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = initial_domain;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   if (initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align(size, ws->gart_page_size);
   else if (initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align(size, ws->gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error_bo_alloc:
   FREE(bo);
   return NULL;
}

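/* Called by the cache manager to check whether a cached buffer can be
 * reused: it must not be referenced by any command stream and must be idle
 * for both reads and writes.
 */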
bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
      return false;
   }

   return amdgpu_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
}

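/* Convert between the TILE_SPLIT field of the tiling flags and the tile
 * split size in bytes. The encoding is tile_split = 64 << field
 * (0 -> 64, 1 -> 128, ..., 6 -> 4096); unknown values map to 1024 and to
 * field 4, respectively.
 */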
static unsigned eg_tile_split(unsigned tile_split)
{
   switch (tile_split) {
   case 0: tile_split = 64; break;
   case 1: tile_split = 128; break;
   case 2: tile_split = 256; break;
   case 3: tile_split = 512; break;
   default:
   case 4: tile_split = 1024; break;
   case 5: tile_split = 2048; break;
   case 6: tile_split = 4096; break;
   }
   return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
   switch (eg_tile_split) {
   case 64: return 0;
   case 128: return 1;
   case 256: return 2;
   case 512: return 3;
   default:
   case 1024: return 4;
   case 2048: return 5;
   case 4096: return 6;
   }
}

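/* Read the tiling metadata stored in the kernel BO and decode it into
 * radeon_bo_metadata: the array mode selects micro/macro tiling, and bank
 * width/height, tile split, macro tile aspect and the scanout bit come
 * from the remaining AMDGPU_TILING_* fields.
 */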
static void amdgpu_buffer_get_metadata(struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_info info = {0};
   uint32_t tiling_flags;
   int r;

   r = amdgpu_bo_query_info(bo->bo, &info);
   if (r)
      return;

   tiling_flags = info.metadata.tiling_info;

   md->microtile = RADEON_LAYOUT_LINEAR;
   md->macrotile = RADEON_LAYOUT_LINEAR;

   if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4)  /* 2D_TILED_THIN1 */
      md->macrotile = RADEON_LAYOUT_TILED;
   else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
      md->microtile = RADEON_LAYOUT_TILED;

   md->bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
   md->bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
   md->tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
   md->mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
   md->scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */

   md->size_metadata = info.metadata.size_metadata;
   memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
}

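/* The inverse of amdgpu_buffer_get_metadata: pack radeon_bo_metadata into
 * AMDGPU_TILING_* flags and store them in the kernel BO, so that other
 * processes importing the buffer see the same layout.
 */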
static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_metadata metadata = {0};
   uint32_t tiling_flags = 0;

   if (md->macrotile == RADEON_LAYOUT_TILED)
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
   else if (md->microtile == RADEON_LAYOUT_TILED)
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
   else
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */

   tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, md->pipe_config);
   tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(md->bankw));
   tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(md->bankh));
   if (md->tile_split)
      tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(md->tile_split));
   tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(md->mtilea));
   tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(md->num_banks)-1);

   if (md->scanout)
      tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
   else
      tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */

   metadata.tiling_info = tiling_flags;
   metadata.size_metadata = md->size_metadata;
   memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));

   amdgpu_bo_set_metadata(bo->bo, &metadata);
}

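/* Main buffer allocation entry point. The initial domain is demoted to GTT
 * on GPUs with almost no VRAM, the size is aligned to the GART page size,
 * and the domain and flags each contribute a single, distinct bit to the
 * pb_cache usage key so the cache never treats buffers with different
 * domains or flags as compatible. Reusable buffers are taken from the
 * cache when possible; otherwise a new BO is created, retrying once after
 * flushing the cache.
 */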
static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
                 unsigned size,
                 unsigned alignment,
                 boolean use_reusable_pool,
                 enum radeon_bo_domain domain,
                 enum radeon_bo_flag flags)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   unsigned usage = 0;

   /* Don't use VRAM if the GPU doesn't have much. This is only the initial
    * domain. The kernel is free to move the buffer if it wants to.
    *
    * 64MB means no VRAM by today's standards.
    */
   if (domain & RADEON_DOMAIN_VRAM && ws->info.vram_size <= 64*1024*1024) {
      domain = RADEON_DOMAIN_GTT;
      flags = RADEON_FLAG_GTT_WC;
   }

   /* Align size to page size. This is the minimum alignment for normal
    * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
    * like constant/uniform buffers, can benefit from better and more reuse.
    */
   size = align(size, ws->gart_page_size);

   /* Only set one usage bit each for domains and flags, or the cache manager
    * might consider different sets of domains / flags compatible.
    */
   if (domain == RADEON_DOMAIN_VRAM_GTT)
      usage = 1 << 2;
   else
      usage = domain >> 1;
   assert(flags < sizeof(usage) * 8 - 3);
   usage |= 1 << (flags + 3);

   /* Get a buffer from the cache. */
   if (use_reusable_pool) {
      bo = (struct amdgpu_winsys_bo*)
           pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment,
                                   usage);
      if (bo)
         return &bo->base;
   }

   /* Create a new one. */
   bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags);
   if (!bo) {
      /* Clear the cache and try again. */
      pb_cache_release_all_buffers(&ws->bo_cache);
      bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags);
      if (!bo)
         return NULL;
   }

   bo->use_reusable_pool = use_reusable_pool;
   return &bo->base;
}

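/* Import a buffer shared by another process from a GEM flink name or a
 * dma-buf file descriptor. The kernel BO is queried for its size and
 * preferred heap, mapped into this process's GPU virtual address space,
 * and marked is_shared so that waits go through the kernel rather than
 * the local user fences.
 */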
static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
                                               struct winsys_handle *whandle,
                                               unsigned *stride)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   enum amdgpu_bo_handle_type type;
   struct amdgpu_bo_import_result result = {0};
   uint64_t va;
   amdgpu_va_handle va_handle;
   struct amdgpu_bo_info info = {0};
   enum radeon_bo_domain initial = 0;
   int r;

   /* Initialize the structure. */
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   default:
      goto error;
   }

   r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
   if (r)
      goto error;

   /* Get initial domains. */
   r = amdgpu_bo_query_info(result.buf_handle, &info);
   if (r)
      goto error_query;

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             result.alloc_size, 1 << 20, 0, &va, &va_handle, 0);
   if (r)
      goto error_query;

   r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
      initial |= RADEON_DOMAIN_VRAM;
   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
      initial |= RADEON_DOMAIN_GTT;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = info.phys_alignment;
   bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
   bo->bo = result.buf_handle;
   bo->base.size = result.alloc_size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = initial;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
   bo->is_shared = true;

   if (stride)
      *stride = whandle->stride;

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align(bo->base.size, ws->gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align(bo->base.size, ws->gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return &bo->base;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_query:
   amdgpu_bo_free(result.buf_handle);

error:
   FREE(bo);
   return NULL;
}

static boolean amdgpu_bo_get_handle(struct pb_buffer *buffer,
                                    unsigned stride,
                                    struct winsys_handle *whandle)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
   enum amdgpu_bo_handle_type type;
   int r;

   bo->use_reusable_pool = false;

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   case DRM_API_HANDLE_TYPE_KMS:
      type = amdgpu_bo_handle_type_kms;
      break;
   default:
      return FALSE;
   }

   r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
   if (r)
      return FALSE;

   whandle->stride = stride;
   bo->is_shared = true;
   return TRUE;
}

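/* Wrap an existing CPU allocation in a GTT buffer via the kernel's userptr
 * support. The memory is mapped into the GPU virtual address space, and
 * bo->user_ptr is remembered so amdgpu_bo_map can hand back the original
 * pointer without a separate CPU mapping.
 */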
static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
                                            void *pointer, unsigned size)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   amdgpu_bo_handle buf_handle;
   struct amdgpu_winsys_bo *bo;
   uint64_t va;
   amdgpu_va_handle va_handle;

   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo)
      return NULL;

   if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle))
      goto error;

   if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size, 1 << 12, 0, &va, &va_handle, 0))
      goto error_va_alloc;

   if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP))
      goto error_va_map;

   /* Initialize it. */
   pipe_reference_init(&bo->base.reference, 1);
   bo->bo = buf_handle;
   bo->base.alignment = 0;
   bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->user_ptr = pointer;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = RADEON_DOMAIN_GTT;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   ws->allocated_gtt += align(bo->base.size, ws->gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return (struct pb_buffer*)bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error:
   FREE(bo);
   return NULL;
}

static bool amdgpu_bo_is_user_ptr(struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->user_ptr != NULL;
}

static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->va;
}

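/* Plug the buffer functions into the winsys vtable. Drivers reach them
 * through struct radeon_winsys; a minimal, purely illustrative sketch of
 * how a caller might use them (rws stands for a radeon_winsys pointer and
 * cs for a radeon_winsys_cs pointer; the real call sites live in the
 * radeonsi driver):
 *
 *    struct pb_buffer *buf =
 *       rws->buffer_create(rws, size, 4096, TRUE,
 *                          RADEON_DOMAIN_VRAM, RADEON_FLAG_GTT_WC);
 *    void *map = rws->buffer_map(buf, cs, PIPE_TRANSFER_WRITE);
 *    ...
 *    rws->buffer_unmap(buf);
 *    pb_reference(&buf, NULL);
 */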
void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
   ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
   ws->base.buffer_map = amdgpu_bo_map;
   ws->base.buffer_unmap = amdgpu_bo_unmap;
   ws->base.buffer_wait = amdgpu_bo_wait;
   ws->base.buffer_create = amdgpu_bo_create;
   ws->base.buffer_from_handle = amdgpu_bo_from_handle;
   ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
   ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
   ws->base.buffer_get_handle = amdgpu_bo_get_handle;
   ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
   ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
}