winsys/amdgpu: add sparse buffer data structures
[mesa.git] / src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
/*
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "amdgpu_cs.h"

#include "os/os_time.h"
#include "state_tracker/drm_driver.h"
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdio.h>
#include <inttypes.h>


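/* A contiguous range of pages within a sparse buffer's backing allocation.
 * This is one of the sparse buffer data structures introduced by this
 * change; begin/end are presumably page indices into a backing buffer
 * (the code that uses them is added in a follow-up change).
 */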
struct amdgpu_sparse_backing_chunk {
   uint32_t begin, end;
};

static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
                 uint64_t size,
                 unsigned alignment,
                 enum radeon_bo_domain domain,
                 enum radeon_bo_flag flags);

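/* Check whether the buffer is idle (timeout == 0) or wait up to the given
 * timeout for it to become idle. Returns true if the buffer is idle within
 * the timeout. Shared buffers are checked through the kernel
 * (amdgpu_bo_wait_for_idle); everything else is checked against the
 * per-buffer fence list guarded by bo_fence_lock.
 */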
static bool amdgpu_bo_wait(struct pb_buffer *_buf, uint64_t timeout,
                           enum radeon_bo_usage usage)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_winsys *ws = bo->ws;
   int64_t abs_timeout;

   if (timeout == 0) {
      if (p_atomic_read(&bo->num_active_ioctls))
         return false;

   } else {
      abs_timeout = os_time_get_absolute_timeout(timeout);

      /* Wait if any ioctl is being submitted with this buffer. */
      if (!os_wait_until_zero_abs_timeout(&bo->num_active_ioctls, abs_timeout))
         return false;
   }

   if (bo->is_shared) {
      /* We can't use user fences for shared buffers, because user fences
       * are local to this process only. If we want to wait for all buffer
       * uses in all processes, we have to use amdgpu_bo_wait_for_idle.
       */
      bool buffer_busy = true;
      int r;

      r = amdgpu_bo_wait_for_idle(bo->bo, timeout, &buffer_busy);
      if (r)
         fprintf(stderr, "%s: amdgpu_bo_wait_for_idle failed %i\n", __func__,
                 r);
      return !buffer_busy;
   }

   if (timeout == 0) {
      unsigned idle_fences;
      bool buffer_idle;

      mtx_lock(&ws->bo_fence_lock);

      for (idle_fences = 0; idle_fences < bo->num_fences; ++idle_fences) {
         if (!amdgpu_fence_wait(bo->fences[idle_fences], 0, false))
            break;
      }

      /* Release the idle fences to avoid checking them again later. */
      for (unsigned i = 0; i < idle_fences; ++i)
         amdgpu_fence_reference(&bo->fences[i], NULL);

      memmove(&bo->fences[0], &bo->fences[idle_fences],
              (bo->num_fences - idle_fences) * sizeof(*bo->fences));
      bo->num_fences -= idle_fences;

      buffer_idle = !bo->num_fences;
      mtx_unlock(&ws->bo_fence_lock);

      return buffer_idle;
   } else {
      bool buffer_idle = true;

      mtx_lock(&ws->bo_fence_lock);
      while (bo->num_fences && buffer_idle) {
         struct pipe_fence_handle *fence = NULL;
         bool fence_idle = false;

         amdgpu_fence_reference(&fence, bo->fences[0]);

         /* Wait for the fence. */
         mtx_unlock(&ws->bo_fence_lock);
         if (amdgpu_fence_wait(fence, abs_timeout, true))
            fence_idle = true;
         else
            buffer_idle = false;
         mtx_lock(&ws->bo_fence_lock);

         /* Release an idle fence to avoid checking it again later, keeping in
          * mind that the fence array may have been modified by other threads.
          */
         if (fence_idle && bo->num_fences && bo->fences[0] == fence) {
            amdgpu_fence_reference(&bo->fences[0], NULL);
            memmove(&bo->fences[0], &bo->fences[1],
                    (bo->num_fences - 1) * sizeof(*bo->fences));
            bo->num_fences--;
         }

         amdgpu_fence_reference(&fence, NULL);
      }
      mtx_unlock(&ws->bo_fence_lock);

      return buffer_idle;
   }
}

static enum radeon_bo_domain amdgpu_bo_get_initial_domain(
      struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->initial_domain;
}

static void amdgpu_bo_remove_fences(struct amdgpu_winsys_bo *bo)
{
   for (unsigned i = 0; i < bo->num_fences; ++i)
      amdgpu_fence_reference(&bo->fences[i], NULL);

   FREE(bo->fences);
   bo->num_fences = 0;
   bo->max_fences = 0;
}

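/* Destroy a buffer that owns a real kernel BO (not a slab entry): unlink it
 * from the winsys-wide buffer list, unmap and free its GPU virtual address
 * range, drop its fences, and update the VRAM/GTT accounting.
 */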
void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   assert(bo->bo && "must not be called for slab entries");

   mtx_lock(&bo->ws->global_bo_list_lock);
   LIST_DEL(&bo->u.real.global_list_item);
   bo->ws->num_buffers--;
   mtx_unlock(&bo->ws->global_bo_list_lock);

   amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
   amdgpu_va_range_free(bo->u.real.va_handle);
   amdgpu_bo_free(bo->bo);

   amdgpu_bo_remove_fences(bo);

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->info.gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->info.gart_page_size);

   if (bo->u.real.map_count >= 1) {
      if (bo->initial_domain & RADEON_DOMAIN_VRAM)
         bo->ws->mapped_vram -= bo->base.size;
      else if (bo->initial_domain & RADEON_DOMAIN_GTT)
         bo->ws->mapped_gtt -= bo->base.size;
      bo->ws->num_mapped_buffers--;
   }

   FREE(bo);
}

static void amdgpu_bo_destroy_or_cache(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   assert(bo->bo); /* slab buffers have a separate vtbl */

   if (bo->u.real.use_reusable_pool)
      pb_cache_add_buffer(&bo->u.real.cache_entry);
   else
      amdgpu_bo_destroy(_buf);
}

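/* CPU-map a buffer. Unless PIPE_TRANSFER_UNSYNCHRONIZED is set, this first
 * synchronizes with the GPU: the current CS is flushed if it references the
 * buffer with the relevant usage, and then amdgpu_bo_wait is called either
 * with a zero timeout (DONTBLOCK) or an infinite one. User-pointer buffers
 * return the user pointer directly; slab entries are mapped through their
 * parent buffer at the proper offset.
 */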
static void *amdgpu_bo_map(struct pb_buffer *buf,
                           struct radeon_winsys_cs *rcs,
                           enum pipe_transfer_usage usage)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_winsys_bo *real;
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   int r;
   void *cpu = NULL;
   uint64_t offset = 0;

   assert(!bo->sparse);

   /* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
   if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
      /* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
      if (usage & PIPE_TRANSFER_DONTBLOCK) {
         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs && amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                               RADEON_USAGE_WRITE)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_WRITE)) {
               return NULL;
            }
         } else {
            if (cs && amdgpu_bo_is_referenced_by_cs(cs, bo)) {
               cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
               return NULL;
            }

            if (!amdgpu_bo_wait((struct pb_buffer*)bo, 0,
                                RADEON_USAGE_READWRITE)) {
               return NULL;
            }
         }
      } else {
         uint64_t time = os_time_get_nano();

         if (!(usage & PIPE_TRANSFER_WRITE)) {
            /* Mapping for read.
             *
             * Since we are mapping for read, we don't need to wait
             * if the GPU is using the buffer for read too
             * (neither one is changing it).
             *
             * Only check whether the buffer is being used for write. */
            if (cs) {
               if (amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo,
                                                            RADEON_USAGE_WRITE)) {
                  cs->flush_cs(cs->flush_data, 0, NULL);
               } else {
                  /* Try to avoid busy-waiting in amdgpu_bo_wait. */
                  if (p_atomic_read(&bo->num_active_ioctls))
                     amdgpu_cs_sync_flush(rcs);
               }
            }

            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_WRITE);
         } else {
            /* Mapping for write. */
            if (cs) {
               if (amdgpu_bo_is_referenced_by_cs(cs, bo)) {
                  cs->flush_cs(cs->flush_data, 0, NULL);
               } else {
                  /* Try to avoid busy-waiting in amdgpu_bo_wait. */
                  if (p_atomic_read(&bo->num_active_ioctls))
                     amdgpu_cs_sync_flush(rcs);
               }
            }

            amdgpu_bo_wait((struct pb_buffer*)bo, PIPE_TIMEOUT_INFINITE,
                           RADEON_USAGE_READWRITE);
         }

         bo->ws->buffer_wait_time += os_time_get_nano() - time;
      }
   }

   /* If the buffer is created from user memory, return the user pointer. */
   if (bo->user_ptr)
      return bo->user_ptr;

   if (bo->bo) {
      real = bo;
   } else {
      real = bo->u.slab.real;
      offset = bo->va - real->va;
   }

   r = amdgpu_bo_cpu_map(real->bo, &cpu);
   if (r) {
      /* Clear the cache and try again. */
      pb_cache_release_all_buffers(&real->ws->bo_cache);
      r = amdgpu_bo_cpu_map(real->bo, &cpu);
      if (r)
         return NULL;
   }

   if (p_atomic_inc_return(&real->u.real.map_count) == 1) {
      if (real->initial_domain & RADEON_DOMAIN_VRAM)
         real->ws->mapped_vram += real->base.size;
      else if (real->initial_domain & RADEON_DOMAIN_GTT)
         real->ws->mapped_gtt += real->base.size;
      real->ws->num_mapped_buffers++;
   }
   return (uint8_t*)cpu + offset;
}

static void amdgpu_bo_unmap(struct pb_buffer *buf)
{
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_winsys_bo *real;

   assert(!bo->sparse);

   if (bo->user_ptr)
      return;

   real = bo->bo ? bo : bo->u.slab.real;

   if (p_atomic_dec_zero(&real->u.real.map_count)) {
      if (real->initial_domain & RADEON_DOMAIN_VRAM)
         real->ws->mapped_vram -= real->base.size;
      else if (real->initial_domain & RADEON_DOMAIN_GTT)
         real->ws->mapped_gtt -= real->base.size;
      real->ws->num_mapped_buffers--;
   }

   amdgpu_bo_cpu_unmap(real->bo);
}

static const struct pb_vtbl amdgpu_winsys_bo_vtbl = {
   amdgpu_bo_destroy_or_cache
   /* other functions are never called */
};

static void amdgpu_add_buffer_to_global_list(struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_winsys *ws = bo->ws;

   assert(bo->bo);

   mtx_lock(&ws->global_bo_list_lock);
   LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
   ws->num_buffers++;
   mtx_unlock(&ws->global_bo_list_lock);
}

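/* Allocate a new kernel BO and bind it into the GPU virtual address space:
 * translate the winsys domains/flags into amdgpu_bo_alloc_request fields,
 * allocate a VA range (padded with a gap when VM checking is enabled), map
 * the BO there, then fill in the pb_buffer fields and memory accounting.
 */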
static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
                                                 uint64_t size,
                                                 unsigned alignment,
                                                 unsigned usage,
                                                 enum radeon_bo_domain initial_domain,
                                                 unsigned flags,
                                                 unsigned pb_cache_bucket)
{
   struct amdgpu_bo_alloc_request request = {0};
   amdgpu_bo_handle buf_handle;
   uint64_t va = 0;
   struct amdgpu_winsys_bo *bo;
   amdgpu_va_handle va_handle;
   unsigned va_gap_size;
   int r;

   assert(initial_domain & RADEON_DOMAIN_VRAM_GTT);
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   pb_cache_init_entry(&ws->bo_cache, &bo->u.real.cache_entry, &bo->base,
                       pb_cache_bucket);
   request.alloc_size = size;
   request.phys_alignment = alignment;

   if (initial_domain & RADEON_DOMAIN_VRAM)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_VRAM;
   if (initial_domain & RADEON_DOMAIN_GTT)
      request.preferred_heap |= AMDGPU_GEM_DOMAIN_GTT;

   if (flags & RADEON_FLAG_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
   if (flags & RADEON_FLAG_NO_CPU_ACCESS)
      request.flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
   if (flags & RADEON_FLAG_GTT_WC)
      request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;

   r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
      fprintf(stderr, "amdgpu:    size      : %"PRIu64" bytes\n", size);
      fprintf(stderr, "amdgpu:    alignment : %u bytes\n", alignment);
      fprintf(stderr, "amdgpu:    domains   : %u\n", initial_domain);
      goto error_bo_alloc;
   }

   va_gap_size = ws->check_vm ? MAX2(4 * alignment, 64 * 1024) : 0;
   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size + va_gap_size, alignment, 0, &va, &va_handle, 0);
   if (r)
      goto error_va_alloc;

   r = amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = alignment;
   bo->base.usage = usage;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->bo = buf_handle;
   bo->va = va;
   bo->u.real.va_handle = va_handle;
   bo->initial_domain = initial_domain;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   if (initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align64(size, ws->info.gart_page_size);
   else if (initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align64(size, ws->info.gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error_bo_alloc:
   FREE(bo);
   return NULL;
}

bool amdgpu_bo_can_reclaim(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   if (amdgpu_bo_is_referenced_by_any_cs(bo)) {
      return false;
   }

   return amdgpu_bo_wait(_buf, 0, RADEON_USAGE_READWRITE);
}

bool amdgpu_bo_can_reclaim_slab(void *priv, struct pb_slab_entry *entry)
{
   struct amdgpu_winsys_bo *bo = NULL; /* fix container_of */
   bo = container_of(entry, bo, u.slab.entry);

   return amdgpu_bo_can_reclaim(&bo->base);
}

static void amdgpu_bo_slab_destroy(struct pb_buffer *_buf)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);

   assert(!bo->bo);

   pb_slab_free(&bo->ws->bo_slabs, &bo->u.slab.entry);
}

static const struct pb_vtbl amdgpu_winsys_bo_slab_vtbl = {
   amdgpu_bo_slab_destroy
   /* other functions are never called */
};

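/* pb_slabs backend: allocate one 64 KB buffer and carve it into equally
 * sized entries. The "heap" index encodes the allocation parameters:
 * bit 0 = RADEON_FLAG_GTT_WC, bit 1 = RADEON_FLAG_CPU_ACCESS, and
 * heap >> 2 selects VRAM, VRAM_GTT or GTT (matching the encoding used by
 * amdgpu_bo_create below).
 */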
struct pb_slab *amdgpu_bo_slab_alloc(void *priv, unsigned heap,
                                     unsigned entry_size,
                                     unsigned group_index)
{
   struct amdgpu_winsys *ws = priv;
   struct amdgpu_slab *slab = CALLOC_STRUCT(amdgpu_slab);
   enum radeon_bo_domain domains;
   enum radeon_bo_flag flags = 0;
   uint32_t base_id;

   if (!slab)
      return NULL;

   if (heap & 1)
      flags |= RADEON_FLAG_GTT_WC;
   if (heap & 2)
      flags |= RADEON_FLAG_CPU_ACCESS;

   switch (heap >> 2) {
   case 0:
      domains = RADEON_DOMAIN_VRAM;
      break;
   default:
   case 1:
      domains = RADEON_DOMAIN_VRAM_GTT;
      break;
   case 2:
      domains = RADEON_DOMAIN_GTT;
      break;
   }

   slab->buffer = amdgpu_winsys_bo(amdgpu_bo_create(&ws->base,
                                                    64 * 1024, 64 * 1024,
                                                    domains, flags));
   if (!slab->buffer)
      goto fail;

   assert(slab->buffer->bo);

   slab->base.num_entries = slab->buffer->base.size / entry_size;
   slab->base.num_free = slab->base.num_entries;
   slab->entries = CALLOC(slab->base.num_entries, sizeof(*slab->entries));
   if (!slab->entries)
      goto fail_buffer;

   LIST_INITHEAD(&slab->base.free);

   base_id = __sync_fetch_and_add(&ws->next_bo_unique_id, slab->base.num_entries);

   for (unsigned i = 0; i < slab->base.num_entries; ++i) {
      struct amdgpu_winsys_bo *bo = &slab->entries[i];

      bo->base.alignment = entry_size;
      bo->base.usage = slab->buffer->base.usage;
      bo->base.size = entry_size;
      bo->base.vtbl = &amdgpu_winsys_bo_slab_vtbl;
      bo->ws = ws;
      bo->va = slab->buffer->va + i * entry_size;
      bo->initial_domain = domains;
      bo->unique_id = base_id + i;
      bo->u.slab.entry.slab = &slab->base;
      bo->u.slab.entry.group_index = group_index;
      bo->u.slab.real = slab->buffer;

      LIST_ADDTAIL(&bo->u.slab.entry.head, &slab->base.free);
   }

   return &slab->base;

fail_buffer:
   amdgpu_winsys_bo_reference(&slab->buffer, NULL);
fail:
   FREE(slab);
   return NULL;
}

void amdgpu_bo_slab_free(void *priv, struct pb_slab *pslab)
{
   struct amdgpu_slab *slab = amdgpu_slab(pslab);

   for (unsigned i = 0; i < slab->base.num_entries; ++i)
      amdgpu_bo_remove_fences(&slab->entries[i]);

   FREE(slab->entries);
   amdgpu_winsys_bo_reference(&slab->buffer, NULL);
   FREE(slab);
}

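/* Convert the hardware TILE_SPLIT enum (0..6) to a split size in bytes and
 * back. Values outside the known range fall back to 1024 bytes / enum 4.
 */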
static unsigned eg_tile_split(unsigned tile_split)
{
   switch (tile_split) {
   case 0:     tile_split = 64;    break;
   case 1:     tile_split = 128;   break;
   case 2:     tile_split = 256;   break;
   case 3:     tile_split = 512;   break;
   default:
   case 4:     tile_split = 1024;  break;
   case 5:     tile_split = 2048;  break;
   case 6:     tile_split = 4096;  break;
   }
   return tile_split;
}

static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
   switch (eg_tile_split) {
   case 64:    return 0;
   case 128:   return 1;
   case 256:   return 2;
   case 512:   return 3;
   default:
   case 1024:  return 4;
   case 2048:  return 5;
   case 4096:  return 6;
   }
}

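/* Read back the kernel's tiling metadata for a buffer and translate it into
 * radeon_bo_metadata: GFX9+ only carries a swizzle mode, older chips use the
 * legacy array-mode/bank/tile-split fields. The set_metadata counterpart
 * below performs the inverse translation.
 */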
static void amdgpu_buffer_get_metadata(struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_info info = {0};
   uint64_t tiling_flags;
   int r;

   assert(bo->bo && "must not be called for slab entries");

   r = amdgpu_bo_query_info(bo->bo, &info);
   if (r)
      return;

   tiling_flags = info.metadata.tiling_info;

   if (bo->ws->info.chip_class >= GFX9) {
      md->u.gfx9.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
   } else {
      md->u.legacy.microtile = RADEON_LAYOUT_LINEAR;
      md->u.legacy.macrotile = RADEON_LAYOUT_LINEAR;

      if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4)  /* 2D_TILED_THIN1 */
         md->u.legacy.macrotile = RADEON_LAYOUT_TILED;
      else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
         md->u.legacy.microtile = RADEON_LAYOUT_TILED;

      md->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
      md->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
      md->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
      md->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
      md->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
      md->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
      md->u.legacy.scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
   }

   md->size_metadata = info.metadata.size_metadata;
   memcpy(md->metadata, info.metadata.umd_metadata, sizeof(md->metadata));
}

static void amdgpu_buffer_set_metadata(struct pb_buffer *_buf,
                                       struct radeon_bo_metadata *md)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
   struct amdgpu_bo_metadata metadata = {0};
   uint64_t tiling_flags = 0;

   assert(bo->bo && "must not be called for slab entries");

   if (bo->ws->info.chip_class >= GFX9) {
      tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, md->u.gfx9.swizzle_mode);
   } else {
      if (md->u.legacy.macrotile == RADEON_LAYOUT_TILED)
         tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
      else if (md->u.legacy.microtile == RADEON_LAYOUT_TILED)
         tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
      else
         tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */

      tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, md->u.legacy.pipe_config);
      tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(md->u.legacy.bankw));
      tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(md->u.legacy.bankh));
      if (md->u.legacy.tile_split)
         tiling_flags |= AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(md->u.legacy.tile_split));
      tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(md->u.legacy.mtilea));
      tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(md->u.legacy.num_banks)-1);

      if (md->u.legacy.scanout)
         tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
      else
         tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
   }

   metadata.tiling_info = tiling_flags;
   metadata.size_metadata = md->size_metadata;
   memcpy(metadata.umd_metadata, md->metadata, sizeof(md->metadata));

   amdgpu_bo_set_metadata(bo->bo, &metadata);
}

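/* Main buffer allocation entry point. Small allocations (without
 * RADEON_FLAG_HANDLE) are sub-allocated from slabs; everything else is
 * page-aligned, looked up in the pb_cache (the usage bits and bucket encode
 * the domain/flags so only compatible buffers are reused), and finally
 * created from scratch, retrying once after emptying the slab and cache
 * pools.
 */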
static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
                 uint64_t size,
                 unsigned alignment,
                 enum radeon_bo_domain domain,
                 enum radeon_bo_flag flags)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   unsigned usage = 0, pb_cache_bucket;

   /* Sub-allocate small buffers from slabs. */
   if (!(flags & RADEON_FLAG_HANDLE) &&
       size <= (1 << AMDGPU_SLAB_MAX_SIZE_LOG2) &&
       alignment <= MAX2(1 << AMDGPU_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
      struct pb_slab_entry *entry;
      unsigned heap = 0;

      if (flags & RADEON_FLAG_GTT_WC)
         heap |= 1;
      if (flags & RADEON_FLAG_CPU_ACCESS)
         heap |= 2;
      if (flags & ~(RADEON_FLAG_GTT_WC | RADEON_FLAG_CPU_ACCESS))
         goto no_slab;

      switch (domain) {
      case RADEON_DOMAIN_VRAM:
         heap |= 0 * 4;
         break;
      case RADEON_DOMAIN_VRAM_GTT:
         heap |= 1 * 4;
         break;
      case RADEON_DOMAIN_GTT:
         heap |= 2 * 4;
         break;
      default:
         goto no_slab;
      }

      entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
      if (!entry) {
         /* Clear the cache and try again. */
         pb_cache_release_all_buffers(&ws->bo_cache);

         entry = pb_slab_alloc(&ws->bo_slabs, size, heap);
      }
      if (!entry)
         return NULL;

      bo = NULL;
      bo = container_of(entry, bo, u.slab.entry);

      pipe_reference_init(&bo->base.reference, 1);

      return &bo->base;
   }
no_slab:

   /* This flag is irrelevant for the cache. */
   flags &= ~RADEON_FLAG_HANDLE;

   /* Align size to page size. This is the minimum alignment for normal
    * BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
    * like constant/uniform buffers, can benefit from better and more reuse.
    */
   size = align64(size, ws->info.gart_page_size);
   alignment = align(alignment, ws->info.gart_page_size);

   /* Only set one usage bit each for domains and flags, or the cache manager
    * might consider different sets of domains / flags compatible
    */
   if (domain == RADEON_DOMAIN_VRAM_GTT)
      usage = 1 << 2;
   else
      usage = domain >> 1;
   assert(flags < sizeof(usage) * 8 - 3);
   usage |= 1 << (flags + 3);

   /* Determine the pb_cache bucket for minimizing pb_cache misses. */
   pb_cache_bucket = 0;
   if (domain & RADEON_DOMAIN_VRAM) /* VRAM or VRAM+GTT */
      pb_cache_bucket += 1;
   if (flags == RADEON_FLAG_GTT_WC) /* WC */
      pb_cache_bucket += 2;
   assert(pb_cache_bucket < ARRAY_SIZE(ws->bo_cache.buckets));

   /* Get a buffer from the cache. */
   bo = (struct amdgpu_winsys_bo*)
        pb_cache_reclaim_buffer(&ws->bo_cache, size, alignment, usage,
                                pb_cache_bucket);
   if (bo)
      return &bo->base;

   /* Create a new one. */
   bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
                         pb_cache_bucket);
   if (!bo) {
      /* Clear the cache and try again. */
      pb_slabs_reclaim(&ws->bo_slabs);
      pb_cache_release_all_buffers(&ws->bo_cache);
      bo = amdgpu_create_bo(ws, size, alignment, usage, domain, flags,
                            pb_cache_bucket);
      if (!bo)
         return NULL;
   }

   bo->u.real.use_reusable_pool = true;
   return &bo->base;
}

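/* Import a buffer shared by another process or driver (GEM flink name or
 * dma-buf fd): query its size and preferred heap, allocate and map a VA
 * range for it, and wrap it in an amdgpu_winsys_bo marked as shared so
 * later waits go through the kernel.
 */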
static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
                                               struct winsys_handle *whandle,
                                               unsigned *stride,
                                               unsigned *offset)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_winsys_bo *bo;
   enum amdgpu_bo_handle_type type;
   struct amdgpu_bo_import_result result = {0};
   uint64_t va;
   amdgpu_va_handle va_handle;
   struct amdgpu_bo_info info = {0};
   enum radeon_bo_domain initial = 0;
   int r;

   /* Initialize the structure. */
   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo) {
      return NULL;
   }

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   default:
      goto error;
   }

   r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
   if (r)
      goto error;

   /* Get initial domains. */
   r = amdgpu_bo_query_info(result.buf_handle, &info);
   if (r)
      goto error_query;

   r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             result.alloc_size, 1 << 20, 0, &va, &va_handle, 0);
   if (r)
      goto error_query;

   r = amdgpu_bo_va_op(result.buf_handle, 0, result.alloc_size, va, 0, AMDGPU_VA_OP_MAP);
   if (r)
      goto error_va_map;

   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
      initial |= RADEON_DOMAIN_VRAM;
   if (info.preferred_heap & AMDGPU_GEM_DOMAIN_GTT)
      initial |= RADEON_DOMAIN_GTT;


   pipe_reference_init(&bo->base.reference, 1);
   bo->base.alignment = info.phys_alignment;
   bo->bo = result.buf_handle;
   bo->base.size = result.alloc_size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->va = va;
   bo->u.real.va_handle = va_handle;
   bo->initial_domain = initial;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
   bo->is_shared = true;

   if (stride)
      *stride = whandle->stride;
   if (offset)
      *offset = whandle->offset;

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      ws->allocated_vram += align64(bo->base.size, ws->info.gart_page_size);
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return &bo->base;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_query:
   amdgpu_bo_free(result.buf_handle);

error:
   FREE(bo);
   return NULL;
}

static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
                                 unsigned stride, unsigned offset,
                                 unsigned slice_size,
                                 struct winsys_handle *whandle)
{
   struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
   enum amdgpu_bo_handle_type type;
   int r;

   if (!bo->bo) {
      offset += bo->va - bo->u.slab.real->va;
      bo = bo->u.slab.real;
   }

   bo->u.real.use_reusable_pool = false;

   switch (whandle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      type = amdgpu_bo_handle_type_gem_flink_name;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      type = amdgpu_bo_handle_type_dma_buf_fd;
      break;
   case DRM_API_HANDLE_TYPE_KMS:
      type = amdgpu_bo_handle_type_kms;
      break;
   default:
      return false;
   }

   r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
   if (r)
      return false;

   whandle->stride = stride;
   whandle->offset = offset;
   whandle->offset += slice_size * whandle->layer;
   bo->is_shared = true;
   return true;
}

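/* Wrap an existing user memory allocation as a GTT buffer: register the
 * pointer with the kernel, allocate and map a VA range, and remember the
 * user pointer so amdgpu_bo_map can return it directly.
 */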
static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
                                            void *pointer, uint64_t size)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   amdgpu_bo_handle buf_handle;
   struct amdgpu_winsys_bo *bo;
   uint64_t va;
   amdgpu_va_handle va_handle;

   bo = CALLOC_STRUCT(amdgpu_winsys_bo);
   if (!bo)
      return NULL;

   if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle))
      goto error;

   if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                             size, 1 << 12, 0, &va, &va_handle, 0))
      goto error_va_alloc;

   if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP))
      goto error_va_map;

   /* Initialize it. */
   pipe_reference_init(&bo->base.reference, 1);
   bo->bo = buf_handle;
   bo->base.alignment = 0;
   bo->base.size = size;
   bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
   bo->ws = ws;
   bo->user_ptr = pointer;
   bo->va = va;
   bo->u.real.va_handle = va_handle;
   bo->initial_domain = RADEON_DOMAIN_GTT;
   bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);

   ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);

   amdgpu_add_buffer_to_global_list(bo);

   return (struct pb_buffer*)bo;

error_va_map:
   amdgpu_va_range_free(va_handle);

error_va_alloc:
   amdgpu_bo_free(buf_handle);

error:
   FREE(bo);
   return NULL;
}

static bool amdgpu_bo_is_user_ptr(struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->user_ptr != NULL;
}

static uint64_t amdgpu_bo_get_va(struct pb_buffer *buf)
{
   return ((struct amdgpu_winsys_bo*)buf)->va;
}

void amdgpu_bo_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.buffer_set_metadata = amdgpu_buffer_set_metadata;
   ws->base.buffer_get_metadata = amdgpu_buffer_get_metadata;
   ws->base.buffer_map = amdgpu_bo_map;
   ws->base.buffer_unmap = amdgpu_bo_unmap;
   ws->base.buffer_wait = amdgpu_bo_wait;
   ws->base.buffer_create = amdgpu_bo_create;
   ws->base.buffer_from_handle = amdgpu_bo_from_handle;
   ws->base.buffer_from_ptr = amdgpu_bo_from_ptr;
   ws->base.buffer_is_user_ptr = amdgpu_bo_is_user_ptr;
   ws->base.buffer_get_handle = amdgpu_bo_get_handle;
   ws->base.buffer_get_virtual_address = amdgpu_bo_get_va;
   ws->base.buffer_get_initial_domain = amdgpu_bo_get_initial_domain;
}