winsys/amdgpu: pass PIPE_CONFIG to addrlib on texture import
[mesa.git] src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "amdgpu_cs.h"

#include "os/os_time.h"
#include "state_tracker/drm_driver.h"
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdio.h>
#include <inttypes.h>

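/* Wait for all GPU usage of a buffer to finish, or just poll it when the
 * timeout is 0. Shared (imported/exported) buffers can be busy in other
 * processes, so those always go through the kernel wait; otherwise only the
 * per-ring user fences attached to this BO are checked.
 */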
static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
                           enum radeon_bo_usage usage)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_winsys *ws = bo->ws;
   int i;

   if (bo->is_shared) {
      /* We can't use user fences for shared buffers, because user fences
       * are local to this process only. If we want to wait for all buffer
       * uses in all processes, we have to use amdgpu_bo_wait_for_idle.
       */
      bool buffer_busy = true;
      int r;

      r = amdgpu_bo_wait_for_idle(bo->bo, timeout, &buffer_busy);
      if (r)
         fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__,
                 r);
      return !buffer_busy;
   }

   if (timeout == 0) {
      /* Timeout == 0 is quite simple. */
      pipe_mutex_lock(ws->bo_fence_lock);
      for (i = 0; i < RING_LAST; i++)
         if (bo->fence[i]) {
            if (amdgpu_fence_wait(bo->fence[i], 0, false)) {
               /* Release the idle fence to avoid checking it again later. */
               amdgpu_fence_reference(&bo->fence[i], NULL);
            } else {
               pipe_mutex_unlock(ws->bo_fence_lock);
               return false;
            }
         }
      pipe_mutex_unlock(ws->bo_fence_lock);
      return true;

   } else {
      struct pipe_fence_handle *fence[RING_LAST] = {};
      bool fence_idle[RING_LAST] = {};
      bool buffer_idle = true;
      int64_t abs_timeout = os_time_get_absolute_timeout(timeout);

      /* Take references to all fences, so that we can wait for them
       * without the lock. */
      pipe_mutex_lock(ws->bo_fence_lock);
      for (i = 0; i < RING_LAST; i++)
         amdgpu_fence_reference(&fence[i], bo->fence[i]);
      pipe_mutex_unlock(ws->bo_fence_lock);

      /* Now wait for the fences. */
      for (i = 0; i < RING_LAST; i++) {
         if (fence[i]) {
            if (amdgpu_fence_wait(fence[i], abs_timeout, true))
               fence_idle[i] = true;
            else
               buffer_idle = false;
         }
      }

      /* Release idle fences to avoid checking them again later. */
      pipe_mutex_lock(ws->bo_fence_lock);
      for (i = 0; i < RING_LAST; i++) {
         if (fence[i] == bo->fence[i] && fence_idle[i])
            amdgpu_fence_reference(&bo->fence[i], NULL);

         amdgpu_fence_reference(&fence[i], NULL);
      }
      pipe_mutex_unlock(ws->bo_fence_lock);

      return buffer_idle;
   }
}

static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
      struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
}

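/* Really destroy the buffer: drop it from the global BO list, unmap and
 * release its virtual address range, free the kernel handle, release any
 * remaining per-ring fences, and update the VRAM/GTT usage counters.
 */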
void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   int i;

   pipe_mutex_lock(bo->ws->global_bo_list_lock);
   LIST_DEL(&bo->global_list_item);
   bo->ws->num_buffers--;
   pipe_mutex_unlock(bo->ws->global_bo_list_lock);

   amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
   amdgpu_va_range_free(bo->va_handle);
   amdgpu_bo_free(bo->bo);

   for (i = 0; i < RING_LAST; i++)
      amdgpu_fence_reference(&bo->fence[i], NULL);

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->gart_page_size);
   FREE(bo);
}

static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   if (bo->use_reusable_pool)
      pb_cache_add_buffer(&bo->cache_entry);
   else
      amdgpu_bo_destroy(_buf);
}

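/* Map a buffer for CPU access. Unless the caller asked for an unsynchronized
 * mapping, this first flushes the current CS if it references the buffer and
 * then waits (or, with PIPE_TRANSFER_DONTBLOCK, merely polls) until the GPU
 * is done with it; read-only mappings only have to wait for GPU writes.
 */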
static void *amdgpu_bo_map(struct pb_buffer *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   int r;
   void *cpu = NULL;

   /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
      if (usage & PIPE_TRANSFER_DONTBLOCK) {
         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_WRITE)) {
               return NULL;
            }
         } else {
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_READWRITE)) {
               return NULL;
            }
         }
      } else {
         uint64_t time = os_time_get_nano();

         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
               cs->flush_cs(cs->flush_data, 0, NULL);
            }
            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_WRITE);
         } else {
            /* Mapping for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo))
               cs->flush_cs(cs->flush_data, 0, NULL);

            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_READWRITE);
         }

         bo->ws->buffer_wait_time += os_time_get_nano() - time;
      }
   }

   /* If the buffer is created from user memory, return the user pointer. */
   if (bo->user_ptr)
      return bo->user_ptr;

   r = amdgpu_bo_cpu_map(bo->bo, &cpu);
   if (r) {
      /* Clear the cache and try again. */
      pb_cache_release_all_buffers(&bo->ws->bo_cache);
      r = amdgpu_bo_cpu_map(bo->bo, &cpu);
   }
   return r ? NULL : cpu;
}

static void amdgpu_bo_unmap(struct pb_buffer *buf)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;

   amdgpu_bo_cpu_unmap(bo->bo);
}

static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
   amdgpu_bo_destroy_or_cache
   /* other functions are never called */
};

static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_winsys *ws = bo->ws;

   pipe_mutex_lock(ws->global_bo_list_lock);
   LIST_ADDTAIL(&bo->global_list_item, &ws->global_bo_list);
   ws->num_buffers++;
   pipe_mutex_unlock(ws->global_bo_list_lock);
}

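/* Allocate a fresh buffer object: ask the kernel for the memory, reserve a
 * GPU virtual address range, map the buffer at that address, and then fill
 * in the winsys wrapper and the memory-usage counters. Each step that fails
 * unwinds the previous ones via the error labels below.
 */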
static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
                                                 uint64_t size,
                                                 unsigned alignment,
                                                 unsigned usage,
                                                 enum radeon_bo_domain initial_domain,
                                                 unsigned flags)
{
   struct amdgpu_bo_alloc_request request = {0};
   amdgpu_bo_handle buf_handle;
   uint64_t va = 0;
   struct amdgpu_winsys_bo *bo;
   amdgpu_va_handle va_handle;
   int r;

   assert(initial_domain & RADEON_DOMAIN_VRAM_GTT);
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   pb_cache_init_entry(&ws->bo_cache, &bo->cache_entry, &bo->base);
   request.alloc_size = size;
   request.phys_alignment = alignment;

   if (initial_domain & RADEON_DOMAIN_VRAM)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
   if (initial_domain & RADEON_DOMAIN_GTT)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;

   if (flags & RADEON_FLAG_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
   if (flags & RADEON_FLAG_NO_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
   if (flags & RADEON_FLAG_GTT_WC)
      request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;

   r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
      fprintf(stderr, "amdgpu: size : %"PRIu64" bytes\n", size);
      fprintf(stderr, "amdgpu: alignment : %u bytes\n", alignment);
      fprintf(stderr, "amdgpu: domains : %u\n", initial_domain);
      goto error_bo_alloc;
   }

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size, alignment, 0, &va, &va_handle, 0);
   if (r)
      goto error_va_alloc;

   r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = alignment;
   bo->base.usage = usage;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->bo = buf_handle;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = initial_domain;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   if (initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align64(size, ws->gart_page_size);
   else if (initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align64(size, ws->gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error_bo_alloc:
   FREE(bo);
   return NULL;
}

bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
      return false;
   }

   return amdgpu_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
}

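/* Translate the 3-bit TILE_SPLIT field of the tiling flags into a tile-split
 * size in bytes and back. For example, eg_tile_split(2) returns 256 and
 * eg_tile_split_rev(256) returns 2; out-of-range values fall back to the
 * 1024-byte / level-4 case.
 */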
static unsigned eg_tile_split(unsigned tile_split)
{
   switch (tile_split) {
   case 0:  tile_split = 64;   break;
   case 1:  tile_split = 128;  break;
   case 2:  tile_split = 256;  break;
   case 3:  tile_split = 512;  break;
   default:
   case 4:  tile_split = 1024; break;
   case 5:  tile_split = 2048; break;
   case 6:  tile_split = 4096; break;
   }
   return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
   switch (eg_tile_split) {
   case 64:    return 0;
   case 128:   return 1;
   case 256:   return 2;
   case 512:   return 3;
   default:
   case 1024:  return 4;
   case 2048:  return 5;
   case 4096:  return 6;
   }
}

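/* Buffer metadata is shared with other processes through the kernel: the
 * tiling parameters are packed into the AMDGPU_TILING_* bitfield and the
 * rest is stored in the opaque umd_metadata blob. PIPE_CONFIG is included
 * so that the importing side can pass it to addrlib when it recomputes the
 * texture layout.
 */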
static void amdgpu_buffer_get_metadata(struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_info info = {0};
   uint32_t tiling_flags;
   int r;

   r = amdgpu_bo_query_info(bo->bo, &info);
   if (r)
      return;

   tiling_flags = info.metadata.tiling_info;

   md->microtile = RADEON_LAYOUT_LINEAR;
   md->macrotile = RADEON_LAYOUT_LINEAR;

   if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4)  /* 2D_TILED_THIN1 */
      md->macrotile = RADEON_LAYOUT_TILED;
   else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
      md->microtile = RADEON_LAYOUT_TILED;

   md->pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
   md->bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
   md->bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
   md->tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
   md->mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
   md->num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
   md->scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */

   md->size_metadata = info.metadata.size_metadata;
   memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
}

static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_metadata metadata = {0};
   uint32_t tiling_flags = 0;

   if (md->macrotile == RADEON_LAYOUT_TILED)
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
   else if (md->microtile == RADEON_LAYOUT_TILED)
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
   else
      tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */

   tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, md->pipe_config);
   tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(md->bankw));
   tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(md->bankh));
   if (md->tile_split)
      tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(md->tile_split));
   tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(md->mtilea));
   tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(md->num_banks)-1);

   if (md->scanout)
      tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
   else
      tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */

   metadata.tiling_info = tiling_flags;
   metadata.size_metadata = md->size_metadata;
   memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));

   amdgpu_bo_set_metadata(bo->bo, &metadata);
}

static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
                 uint64_t size,
                 unsigned alignment,
                 enum radeon_bo_domain domain,
                 enum radeon_bo_flag flags)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   unsigned usage = 0;

   /* Align size to page size. This is the minimum alignment for normal
    * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
    * like constant/uniform buffers, can benefit from better and more reuse.
    */
   size = align64(size, ws->gart_page_size);

   /* Only set one usage bit each for domains and flags, or the cache manager
    * might consider different sets of domains / flags compatible
    */
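   /* The packing below folds the domain into the low bits of "usage" (with a
    * dedicated bit for VRAM_GTT) and gives every RADEON_FLAG_* value its own
    * bit above them, so the pb_cache only reuses a buffer when both the
    * domain and the flags match exactly.
    */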
   if (domain == RADEON_DOMAIN_VRAM_GTT)
      usage = 1 << 2;
   else
      usage = domain >> 1;
   assert(flags < sizeof(usage) * 8 - 3);
   usage |= 1 << (flags + 3);

   /* Get a buffer from the cache. */
   bo = (struct amdgpu_winsys_bo*)
        pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage);
   if (bo)
      return &bo->base;

   /* Create a new one. */
   bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags);
   if (!bo) {
      /* Clear the cache and try again. */
      pb_cache_release_all_buffers(&ws->bo_cache);
      bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags);
      if (!bo)
         return NULL;
   }

   bo->use_reusable_pool = true;
   return &bo->base;
}

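/* Import a buffer shared by another process via a GEM flink name or a
 * dma-buf fd. The buffer gets its own GPU virtual address in this process,
 * its initial domain is taken from the kernel's query, and it is marked
 * is_shared so that waits go through the kernel instead of user fences.
 */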
static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
                                               struct winsys_handle *whandle,
                                               unsigned *stride,
                                               unsigned *offset)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   enum amdgpu_bo_handle_type type;
   struct amdgpu_bo_import_result result = {0};
   uint64_t va;
   amdgpu_va_handle va_handle;
   struct amdgpu_bo_info info = {0};
   enum radeon_bo_domain initial = 0;
   int r;

   /* Initialize the structure. */
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   default:
      /* Unknown handle type: free the winsys wrapper we just allocated
       * instead of leaking it. */
      goto error;
   }

   r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
   if (r)
      goto error;

   /* Get initial domains. */
   r = amdgpu_bo_query_info(result.buf_handle, &info);
   if (r)
      goto error_query;

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             result.alloc_size, 1 << 20, 0, &va, &va_handle, 0);
   if (r)
      goto error_query;

   r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
      initial |= RADEON_DOMAIN_VRAM;
   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
      initial |= RADEON_DOMAIN_GTT;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = info.phys_alignment;
   bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
   bo->bo = result.buf_handle;
   bo->base.size = result.alloc_size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = initial;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
   bo->is_shared = true;

   if (stride)
      *stride = whandle->stride;
   if (offset)
      *offset = whandle->offset;

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align64(bo->base.size, ws->gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align64(bo->base.size, ws->gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return &bo->base;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_query:
   amdgpu_bo_free(result.buf_handle);

error:
   FREE(bo);
   return NULL;
}

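/* Export a buffer as a GEM flink name, dma-buf fd or KMS handle so that
 * another process (or the display server) can use it. Exported buffers are
 * excluded from the reusable cache and flagged is_shared from then on.
 */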
static boolean amdgpu_bo_get_handle(struct pb_buffer *buffer,
                                    unsigned stride, unsigned offset,
                                    unsigned slice_size,
                                    struct winsys_handle *whandle)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
   enum amdgpu_bo_handle_type type;
   int r;

   bo->use_reusable_pool = false;

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   case DRM_API_HANDLE_TYPE_KMS:
      type = amdgpu_bo_handle_type_kms;
      break;
   default:
      return FALSE;
   }

   r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
   if (r)
      return FALSE;

   whandle->stride = stride;
   whandle->offset = offset;
   whandle->offset += slice_size * whandle->layer;
   bo->is_shared = true;
   return TRUE;
}

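/* Wrap an existing CPU allocation (userptr) in a buffer object. The memory
 * remains owned by the caller and is accessed by the GPU through GTT; CPU
 * maps simply return the original pointer, which is why bo->user_ptr is
 * kept around.
 */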
static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
                                            void *pointer, uint64_t size)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   amdgpu_bo_handle buf_handle;
   struct amdgpu_winsys_bo *bo;
   uint64_t va;
   amdgpu_va_handle va_handle;

   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo)
      return NULL;

   if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle))
      goto error;

   if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size, 1 << 12, 0, &va, &va_handle, 0))
      goto error_va_alloc;

   if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP))
      goto error_va_map;

   /* Initialize it. */
   pipe_reference_init(&bo->base.reference, 1);
   bo->bo = buf_handle;
   bo->base.alignment = 0;
   bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->user_ptr = pointer;
   bo->va = va;
   bo->va_handle = va_handle;
   bo->initial_domain = RADEON_DOMAIN_GTT;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   ws->allocated_gtt += align64(bo->base.size, ws->gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return (struct pb_buffer*)bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error:
   FREE(bo);
   return NULL;
}

static bool amdgpu_bo_is_user_ptr(struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->user_ptr != NULL;
}

static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->va;
}

void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
   ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
   ws->base.buffer_map = amdgpu_bo_map;
   ws->base.buffer_unmap = amdgpu_bo_unmap;
   ws->base.buffer_wait = amdgpu_bo_wait;
   ws->base.buffer_create = amdgpu_bo_create;
   ws->base.buffer_from_handle = amdgpu_bo_from_handle;
   ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
   ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
   ws->base.buffer_get_handle = amdgpu_bo_get_handle;
   ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
   ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
}