i965/bufmgr: Garbage-collect vma cache/pruning
mesa.git: src/mesa/drivers/dri/i965/brw_bufmgr.c
1 /**************************************************************************
2 *
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007-2012 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
26 * of the Software.
27 *
28 *
29 **************************************************************************/
30 /*
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
35 */
36
37 #ifdef HAVE_CONFIG_H
38 #include "config.h"
39 #endif
40
41 #include <xf86drm.h>
42 #include <util/u_atomic.h>
43 #include <fcntl.h>
44 #include <stdio.h>
45 #include <stdlib.h>
46 #include <string.h>
47 #include <unistd.h>
48 #include <assert.h>
49 #include <pthread.h>
50 #include <sys/ioctl.h>
51 #include <sys/stat.h>
52 #include <sys/types.h>
53 #include <stdbool.h>
54
55 #include <errno.h>
56 #ifndef ETIME
57 #define ETIME ETIMEDOUT
58 #endif
59 #include "common/gen_debug.h"
60 #include "common/gen_device_info.h"
61 #include "libdrm_macros.h"
62 #include "main/macros.h"
63 #include "util/macros.h"
64 #include "util/hash_table.h"
65 #include "util/list.h"
66 #include "brw_bufmgr.h"
68
69 #include "i915_drm.h"
70
71 #ifdef HAVE_VALGRIND
72 #include <valgrind.h>
73 #include <memcheck.h>
74 #define VG(x) x
75 #else
76 #define VG(x)
77 #endif
78
79 #define memclear(s) memset(&s, 0, sizeof(s))
80
81 #define FILE_DEBUG_FLAG DEBUG_BUFMGR
82
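/* Atomically add "add" to *v unless *v equals "unless".  Returns true when
 * *v equaled "unless" and was therefore left untouched.  Used by
 * brw_bo_unreference() to drop a reference without taking the bufmgr lock
 * unless the refcount might reach zero.
 */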
83 static inline int
84 atomic_add_unless(int *v, int add, int unless)
85 {
86 int c, old;
87 c = p_atomic_read(v);
88 while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
89 c = old;
90 return c == unless;
91 }
92
93 struct bo_cache_bucket {
94 struct list_head head;
95 unsigned long size;
96 };
97
98 struct brw_bufmgr {
99 int fd;
100
101 pthread_mutex_t lock;
102
103 /** Array of lists of cached gem objects of power-of-two sizes */
104 struct bo_cache_bucket cache_bucket[14 * 4];
105 int num_buckets;
106 time_t time;
107
108 struct hash_table *name_table;
109 struct hash_table *handle_table;
110
111 unsigned int has_llc:1;
112 unsigned int bo_reuse:1;
113 };
114
115 static int bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode,
116 uint32_t stride);
117
118 static void bo_free(struct brw_bo *bo);
119
120 static uint32_t
121 key_hash_uint(const void *key)
122 {
123 return _mesa_hash_data(key, 4);
124 }
125
126 static bool
127 key_uint_equal(const void *a, const void *b)
128 {
129 return *((unsigned *) a) == *((unsigned *) b);
130 }
131
132 static struct brw_bo *
133 hash_find_bo(struct hash_table *ht, unsigned int key)
134 {
135 struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
136 return entry ? (struct brw_bo *) entry->data : NULL;
137 }
138
139 static unsigned long
140 bo_tile_size(struct brw_bufmgr *bufmgr, unsigned long size,
141 uint32_t *tiling_mode)
142 {
143 if (*tiling_mode == I915_TILING_NONE)
144 return size;
145
146 /* 965+ just need multiples of page size for tiling */
147 return ALIGN(size, 4096);
148 }
149
150 /*
151 * Round a given pitch up to the minimum required for X tiling on a
152 * given chip. We use 512 as the minimum to allow for a later tiling
153 * change.
154 */
155 static unsigned long
156 bo_tile_pitch(struct brw_bufmgr *bufmgr,
157 unsigned long pitch, uint32_t *tiling_mode)
158 {
159 unsigned long tile_width;
160
161 /* If untiled, then just align it so that we can do rendering
162 * to it with the 3D engine.
163 */
164 if (*tiling_mode == I915_TILING_NONE)
165 return ALIGN(pitch, 64);
166
167 if (*tiling_mode == I915_TILING_X)
168 tile_width = 512;
169 else
170 tile_width = 128;
171
172 /* 965 is flexible */
173 return ALIGN(pitch, tile_width);
174 }
175
176 static struct bo_cache_bucket *
177 bucket_for_size(struct brw_bufmgr *bufmgr, unsigned long size)
178 {
179 int i;
180
181 for (i = 0; i < bufmgr->num_buckets; i++) {
182 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
183 if (bucket->size >= size) {
184 return bucket;
185 }
186 }
187
188 return NULL;
189 }
190
191 inline void
192 brw_bo_reference(struct brw_bo *bo)
193 {
194 p_atomic_inc(&bo->refcount);
195 }
196
197 int
198 brw_bo_busy(struct brw_bo *bo)
199 {
200 struct brw_bufmgr *bufmgr = bo->bufmgr;
201 struct drm_i915_gem_busy busy;
202 int ret;
203
204 memclear(busy);
205 busy.handle = bo->gem_handle;
206
207 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
208 if (ret == 0) {
209 bo->idle = !busy.busy;
210 return busy.busy;
211 } else {
212 return false;
213 }
215 }
216
217 int
218 brw_bo_madvise(struct brw_bo *bo, int state)
219 {
220 struct drm_i915_gem_madvise madv;
221
222 memclear(madv);
223 madv.handle = bo->gem_handle;
224 madv.madv = state;
225 madv.retained = 1;
226 drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
227
228 return madv.retained;
229 }
230
231 /* drop the oldest entries that have been purged by the kernel */
232 static void
233 brw_bo_cache_purge_bucket(struct brw_bufmgr *bufmgr,
234 struct bo_cache_bucket *bucket)
235 {
236 list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
237 if (brw_bo_madvise(bo, I915_MADV_DONTNEED))
238 break;
239
240 list_del(&bo->head);
241 bo_free(bo);
242 }
243 }
244
245 static struct brw_bo *
246 bo_alloc_internal(struct brw_bufmgr *bufmgr,
247 const char *name,
248 unsigned long size,
249 unsigned long flags,
250 uint32_t tiling_mode,
251 unsigned long stride, unsigned int alignment)
252 {
253 struct brw_bo *bo;
254 unsigned int page_size = getpagesize();
255 int ret;
256 struct bo_cache_bucket *bucket;
257 bool alloc_from_cache;
258 unsigned long bo_size;
259 bool for_render = false;
260
261 if (flags & BO_ALLOC_FOR_RENDER)
262 for_render = true;
263
264 /* Round the allocated size up to the nearest cache bucket size. */
265 bucket = bucket_for_size(bufmgr, size);
266
267 /* If we don't have caching at this size, don't actually round the
268 * allocation up.
269 */
270 if (bucket == NULL) {
271 bo_size = size;
272 if (bo_size < page_size)
273 bo_size = page_size;
274 } else {
275 bo_size = bucket->size;
276 }
277
278 pthread_mutex_lock(&bufmgr->lock);
279 /* Get a buffer out of the cache if available */
280 retry:
281 alloc_from_cache = false;
282 if (bucket != NULL && !list_empty(&bucket->head)) {
283 if (for_render) {
284 /* Allocate new render-target BOs from the tail (MRU)
285 * of the list, as it will likely be hot in the GPU
286 * cache and in the aperture for us.
287 */
288 bo = LIST_ENTRY(struct brw_bo, bucket->head.prev, head);
289 list_del(&bo->head);
290 alloc_from_cache = true;
291 bo->align = alignment;
292 } else {
293 assert(alignment == 0);
294 /* For non-render-target BOs (where we're probably
295 * going to map it first thing in order to fill it
296 * with data), check if the last BO in the cache is
297 * unbusy, and only reuse in that case. Otherwise,
298 * allocating a new buffer is probably faster than
299 * waiting for the GPU to finish.
300 */
301 bo = LIST_ENTRY(struct brw_bo, bucket->head.next, head);
302 if (!brw_bo_busy(bo)) {
303 alloc_from_cache = true;
304 list_del(&bo->head);
305 }
306 }
307
308 if (alloc_from_cache) {
309 if (!brw_bo_madvise(bo, I915_MADV_WILLNEED)) {
310 bo_free(bo);
311 brw_bo_cache_purge_bucket(bufmgr, bucket);
312 goto retry;
313 }
314
315 if (bo_set_tiling_internal(bo, tiling_mode, stride)) {
316 bo_free(bo);
317 goto retry;
318 }
319 }
320 }
321
322 if (!alloc_from_cache) {
323 struct drm_i915_gem_create create;
324
325 bo = calloc(1, sizeof(*bo));
326 if (!bo)
327 goto err;
328
329 bo->size = bo_size;
330
331 memclear(create);
332 create.size = bo_size;
333
334 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
335 if (ret != 0) {
336 free(bo);
337 goto err;
338 }
339
340 bo->gem_handle = create.handle;
341 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
342
343 bo->bufmgr = bufmgr;
344 bo->align = alignment;
345
346 bo->tiling_mode = I915_TILING_NONE;
347 bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
348 bo->stride = 0;
349
350 if (bo_set_tiling_internal(bo, tiling_mode, stride))
351 goto err_free;
352 }
353
354 bo->name = name;
355 p_atomic_set(&bo->refcount, 1);
356 bo->reusable = true;
357
358 pthread_mutex_unlock(&bufmgr->lock);
359
360 DBG("bo_create: buf %d (%s) %ldb\n", bo->gem_handle, bo->name, size);
361
362 return bo;
363
364 err_free:
365 bo_free(bo);
366 err:
367 pthread_mutex_unlock(&bufmgr->lock);
368 return NULL;
369 }
370
371 struct brw_bo *
372 brw_bo_alloc(struct brw_bufmgr *bufmgr,
373 const char *name, unsigned long size, unsigned int alignment)
374 {
375 return bo_alloc_internal(bufmgr, name, size, 0, I915_TILING_NONE, 0, 0);
376 }
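/* Usage sketch (illustrative only, not part of this file): allocate a
 * linear buffer, fill it through a CPU mapping, and drop the reference.
 * Assumes a valid bufmgr from brw_bufmgr_init().
 *
 *    struct brw_bo *bo = brw_bo_alloc(bufmgr, "scratch", 4096, 0);
 *    if (bo && brw_bo_map(bo, true) == 0) {
 *       memset(bo->virtual, 0, bo->size);
 *       brw_bo_unmap(bo);
 *    }
 *    brw_bo_unreference(bo);
 */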
377
378 struct brw_bo *
379 brw_bo_alloc_tiled(struct brw_bufmgr *bufmgr, const char *name,
380 int x, int y, int cpp, uint32_t *tiling_mode,
381 unsigned long *pitch, unsigned long flags)
382 {
383 unsigned long size, stride;
384 uint32_t tiling;
385
386 do {
387 unsigned long aligned_y, height_alignment;
388
389 tiling = *tiling_mode;
390
391 /* If we're tiled, our allocations are in 8 or 32-row blocks,
392 * so failure to align our height means that we won't allocate
393 * enough pages.
394 *
395 * If we're untiled, we still have to align to 2 rows high
396 * because the data port accesses 2x2 blocks even if the
397 * bottom row isn't to be rendered, so failure to align means
398 * we could walk off the end of the GTT and fault. This is
399 * documented on 965, and may be the case on older chipsets
400 * too so we try to be careful.
401 */
402 aligned_y = y;
403 height_alignment = 2;
404
405 if (tiling == I915_TILING_X)
406 height_alignment = 8;
407 else if (tiling == I915_TILING_Y)
408 height_alignment = 32;
409 aligned_y = ALIGN(y, height_alignment);
410
411 stride = x * cpp;
412 stride = bo_tile_pitch(bufmgr, stride, tiling_mode);
413 size = stride * aligned_y;
414 size = bo_tile_size(bufmgr, size, tiling_mode);
415 } while (*tiling_mode != tiling);
416 *pitch = stride;
417
418 if (tiling == I915_TILING_NONE)
419 stride = 0;
420
421 return bo_alloc_internal(bufmgr, name, size, flags, tiling, stride, 0);
422 }
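/* Usage sketch (illustrative): the tiling mode is passed by pointer and may
 * be adjusted, so callers typically re-check it along with the returned
 * pitch.  "width" and "height" are hypothetical; cpp == 4 assumes 32bpp.
 *
 *    uint32_t tiling = I915_TILING_Y;
 *    unsigned long pitch;
 *    struct brw_bo *bo =
 *       brw_bo_alloc_tiled(bufmgr, "miptree", width, height, 4,
 *                          &tiling, &pitch, BO_ALLOC_FOR_RENDER);
 */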
423
424 /**
425 * Returns a brw_bo wrapping the given buffer object handle.
426 *
427 * This can be used when one application needs to pass a buffer object
428 * to another.
429 */
430 struct brw_bo *
431 brw_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
432 const char *name, unsigned int handle)
433 {
434 struct brw_bo *bo;
435 int ret;
436 struct drm_gem_open open_arg;
437 struct drm_i915_gem_get_tiling get_tiling;
438
439 /* At the moment most applications only have a few named bo.
440 * For instance, in a DRI client only the render buffers passed
441 * between X and the client are named.  Look the name up in our
442 * table first so that an already-known buffer is returned with
443 * its reference count bumped instead of being wrapped twice.
444 */
445 pthread_mutex_lock(&bufmgr->lock);
446 bo = hash_find_bo(bufmgr->name_table, handle);
447 if (bo) {
448 brw_bo_reference(bo);
449 goto out;
450 }
451
452 memclear(open_arg);
453 open_arg.name = handle;
454 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
455 if (ret != 0) {
456 DBG("Couldn't reference %s handle 0x%08x: %s\n",
457 name, handle, strerror(errno));
458 bo = NULL;
459 goto out;
460 }
461 /* Now see if someone has used a prime handle to get this
462 * object from the kernel before, by looking the gem handle up
463 * in our handle table.
464 */
465 bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
466 if (bo) {
467 brw_bo_reference(bo);
468 goto out;
469 }
470
471 bo = calloc(1, sizeof(*bo));
472 if (!bo)
473 goto out;
474
475 p_atomic_set(&bo->refcount, 1);
476
477 bo->size = open_arg.size;
478 bo->offset64 = 0;
479 bo->virtual = NULL;
480 bo->bufmgr = bufmgr;
481 bo->gem_handle = open_arg.handle;
482 bo->name = name;
483 bo->global_name = handle;
484 bo->reusable = false;
485
486 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
487 _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
488
489 memclear(get_tiling);
490 get_tiling.handle = bo->gem_handle;
491 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
492 if (ret != 0)
493 goto err_unref;
494
495 bo->tiling_mode = get_tiling.tiling_mode;
496 bo->swizzle_mode = get_tiling.swizzle_mode;
497 /* XXX stride is unknown */
498 DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);
499
500 out:
501 pthread_mutex_unlock(&bufmgr->lock);
502 return bo;
503
504 err_unref:
505 bo_free(bo);
506 pthread_mutex_unlock(&bufmgr->lock);
507 return NULL;
508 }
509
510 static void
511 bo_free(struct brw_bo *bo)
512 {
513 struct brw_bufmgr *bufmgr = bo->bufmgr;
514 struct drm_gem_close close;
515 struct hash_entry *entry;
516 int ret;
517
518 if (bo->mem_virtual) {
519 VG(VALGRIND_FREELIKE_BLOCK(bo->mem_virtual, 0));
520 drm_munmap(bo->mem_virtual, bo->size);
521 }
522 if (bo->wc_virtual) {
523 VG(VALGRIND_FREELIKE_BLOCK(bo->wc_virtual, 0));
524 drm_munmap(bo->wc_virtual, bo->size);
525 }
526 if (bo->gtt_virtual) {
527 drm_munmap(bo->gtt_virtual, bo->size);
528 }
529
530 if (bo->global_name) {
531 entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
532 _mesa_hash_table_remove(bufmgr->name_table, entry);
533 }
534 entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
535 _mesa_hash_table_remove(bufmgr->handle_table, entry);
536
537 /* Close this object */
538 memclear(close);
539 close.handle = bo->gem_handle;
540 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
541 if (ret != 0) {
542 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
543 bo->gem_handle, bo->name, strerror(errno));
544 }
545 free(bo);
546 }
547
548 static void
549 bo_mark_mmaps_incoherent(struct brw_bo *bo)
550 {
551 #ifdef HAVE_VALGRIND
552 if (bo->mem_virtual)
553 VALGRIND_MAKE_MEM_NOACCESS(bo->mem_virtual, bo->size);
554
555 if (bo->wc_virtual)
556 VALGRIND_MAKE_MEM_NOACCESS(bo->wc_virtual, bo->size);
557
558 if (bo->gtt_virtual)
559 VALGRIND_MAKE_MEM_NOACCESS(bo->gtt_virtual, bo->size);
560 #endif
561 }
562
563 /** Frees all cached buffers significantly older than @time. */
564 static void
565 cleanup_bo_cache(struct brw_bufmgr *bufmgr, time_t time)
566 {
567 int i;
568
569 if (bufmgr->time == time)
570 return;
571
572 for (i = 0; i < bufmgr->num_buckets; i++) {
573 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
574
575 list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
576 if (time - bo->free_time <= 1)
577 break;
578
579 list_del(&bo->head);
580
581 bo_free(bo);
582 }
583 }
584
585 bufmgr->time = time;
586 }
587
588 static void
589 bo_unreference_final(struct brw_bo *bo, time_t time)
590 {
591 struct brw_bufmgr *bufmgr = bo->bufmgr;
592 struct bo_cache_bucket *bucket;
593
594 DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);
595
596 /* Clear any left-over mappings */
597 if (bo->map_count) {
598 DBG("bo freed with non-zero map-count %d\n", bo->map_count);
599 bo->map_count = 0;
600 bo_mark_mmaps_incoherent(bo);
601 }
602
603 bucket = bucket_for_size(bufmgr, bo->size);
604 /* Put the buffer into our internal cache for reuse if we can. */
605 if (bufmgr->bo_reuse && bo->reusable && bucket != NULL &&
606 brw_bo_madvise(bo, I915_MADV_DONTNEED)) {
607 bo->free_time = time;
608
609 bo->name = NULL;
610
611 list_addtail(&bo->head, &bucket->head);
612 } else {
613 bo_free(bo);
614 }
615 }
616
617 void
618 brw_bo_unreference(struct brw_bo *bo)
619 {
620 if (bo == NULL)
621 return;
622
623 assert(p_atomic_read(&bo->refcount) > 0);
624
625 if (atomic_add_unless(&bo->refcount, -1, 1)) {
626 struct brw_bufmgr *bufmgr = bo->bufmgr;
627 struct timespec time;
628
629 clock_gettime(CLOCK_MONOTONIC, &time);
630
631 pthread_mutex_lock(&bufmgr->lock);
632
633 if (p_atomic_dec_zero(&bo->refcount)) {
634 bo_unreference_final(bo, time.tv_sec);
635 cleanup_bo_cache(bufmgr, time.tv_sec);
636 }
637
638 pthread_mutex_unlock(&bufmgr->lock);
639 }
640 }
641
642 int
643 brw_bo_map(struct brw_bo *bo, int write_enable)
644 {
645 struct brw_bufmgr *bufmgr = bo->bufmgr;
646 struct drm_i915_gem_set_domain set_domain;
647 int ret;
648
649 pthread_mutex_lock(&bufmgr->lock);
650
651 if (!bo->mem_virtual) {
652 struct drm_i915_gem_mmap mmap_arg;
653
654 DBG("bo_map: %d (%s), map_count=%d\n",
655 bo->gem_handle, bo->name, bo->map_count);
656
657 memclear(mmap_arg);
658 mmap_arg.handle = bo->gem_handle;
659 mmap_arg.size = bo->size;
660 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
661 if (ret != 0) {
662 ret = -errno;
663 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
664 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
665 pthread_mutex_unlock(&bufmgr->lock);
666 return ret;
667 }
668 bo->map_count++;
669 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
670 bo->mem_virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
671 }
672 DBG("bo_map: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->mem_virtual);
673 bo->virtual = bo->mem_virtual;
674
675 memclear(set_domain);
676 set_domain.handle = bo->gem_handle;
677 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
678 if (write_enable)
679 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
680 else
681 set_domain.write_domain = 0;
682 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
683 if (ret != 0) {
684 DBG("%s:%d: Error setting to CPU domain %d: %s\n",
685 __FILE__, __LINE__, bo->gem_handle, strerror(errno));
686 }
687
688 bo_mark_mmaps_incoherent(bo);
689 VG(VALGRIND_MAKE_MEM_DEFINED(bo->mem_virtual, bo->size));
690 pthread_mutex_unlock(&bufmgr->lock);
691
692 return 0;
693 }
694
695 static int
696 map_gtt(struct brw_bo *bo)
697 {
698 struct brw_bufmgr *bufmgr = bo->bufmgr;
699 int ret;
700
701 /* Get a mapping of the buffer if we haven't before. */
702 if (bo->gtt_virtual == NULL) {
703 struct drm_i915_gem_mmap_gtt mmap_arg;
704
705 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
706 bo->gem_handle, bo->name, bo->map_count);
707
708 memclear(mmap_arg);
709 mmap_arg.handle = bo->gem_handle;
710
711 /* Get the fake offset back... */
712 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
713 if (ret != 0) {
714 ret = -errno;
715 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
716 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
717 return ret;
718 }
719
720 /* and mmap it */
721 bo->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
722 MAP_SHARED, bufmgr->fd, mmap_arg.offset);
723 if (bo->gtt_virtual == MAP_FAILED) {
724 bo->gtt_virtual = NULL;
725 ret = -errno;
726 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
727 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
728 return ret;
729 }
730 }
731
732 bo->map_count++;
733 bo->virtual = bo->gtt_virtual;
734
735 DBG("bo_map_gtt: %d (%s) -> %p\n", bo->gem_handle, bo->name,
736 bo->gtt_virtual);
737
738 return 0;
739 }
740
741 int
742 brw_bo_map_gtt(struct brw_bo *bo)
743 {
744 struct brw_bufmgr *bufmgr = bo->bufmgr;
745 struct drm_i915_gem_set_domain set_domain;
746 int ret;
747
748 pthread_mutex_lock(&bufmgr->lock);
749
750 ret = map_gtt(bo);
751 if (ret) {
752 pthread_mutex_unlock(&bufmgr->lock);
753 return ret;
754 }
755
756 /* Now move it to the GTT domain so that the GPU and CPU
757 * caches are flushed and the GPU isn't actively using the
758 * buffer.
759 *
760 * The pagefault handler does this domain change for us when
761 * it has unbound the BO from the GTT, but it's up to us to
762 * tell it when we're about to use things if we had done
763 * rendering and it still happens to be bound to the GTT.
764 */
765 memclear(set_domain);
766 set_domain.handle = bo->gem_handle;
767 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
768 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
769 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
770 if (ret != 0) {
771 DBG("%s:%d: Error setting domain %d: %s\n",
772 __FILE__, __LINE__, bo->gem_handle, strerror(errno));
773 }
774
775 bo_mark_mmaps_incoherent(bo);
776 VG(VALGRIND_MAKE_MEM_DEFINED(bo->gtt_virtual, bo->size));
777 pthread_mutex_unlock(&bufmgr->lock);
778
779 return 0;
780 }
781
782 /**
783 * Performs a mapping of the buffer object like the normal GTT
784 * mapping, but avoids waiting for the GPU to be done reading from or
785 * rendering to the buffer.
786 *
787 * This is used in the implementation of GL_ARB_map_buffer_range: The
788 * user asks to create a buffer, then does a mapping, fills some
789 * space, runs a drawing command, then asks to map it again without
790 * synchronizing because it guarantees that it won't write over the
791 * data that the GPU is busy using (or, more specifically, that if it
792 * does write over the data, it acknowledges that rendering is
793 * undefined).
794 */
795
796 int
797 brw_bo_map_unsynchronized(struct brw_bo *bo)
798 {
799 struct brw_bufmgr *bufmgr = bo->bufmgr;
800 int ret;
801
802 /* If the CPU cache isn't coherent with the GTT, then use a
803 * regular synchronized mapping. The problem is that we don't
804 * track where the buffer was last used on the CPU side in
805 * terms of brw_bo_map vs brw_bo_map_gtt, so
806 * we would potentially corrupt the buffer even when the user
807 * does reasonable things.
808 */
809 if (!bufmgr->has_llc)
810 return brw_bo_map_gtt(bo);
811
812 pthread_mutex_lock(&bufmgr->lock);
813
814 ret = map_gtt(bo);
815 if (ret == 0) {
816 bo_mark_mmaps_incoherent(bo);
817 VG(VALGRIND_MAKE_MEM_DEFINED(bo->gtt_virtual, bo->size));
818 }
819
820 pthread_mutex_unlock(&bufmgr->lock);
821
822 return ret;
823 }
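/* Usage sketch (illustrative): an unsynchronized map does not stall on the
 * GPU, so the caller must only touch ranges it knows the GPU is done with.
 * "free_offset", "data" and "size" are hypothetical.
 *
 *    if (brw_bo_map_unsynchronized(bo) == 0) {
 *       memcpy((char *) bo->virtual + free_offset, data, size);
 *       brw_bo_unmap(bo);
 *    }
 */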
824
825 int
826 brw_bo_unmap(struct brw_bo *bo)
827 {
828 struct brw_bufmgr *bufmgr = bo->bufmgr;
829 int ret = 0;
830
831 if (bo == NULL)
832 return 0;
833
834 pthread_mutex_lock(&bufmgr->lock);
835
836 if (bo->map_count <= 0) {
837 DBG("attempted to unmap an unmapped bo\n");
838 pthread_mutex_unlock(&bufmgr->lock);
839 /* Preserve the old behaviour of just treating this as a
840 * no-op rather than reporting the error.
841 */
842 return 0;
843 }
844
845 if (--bo->map_count == 0) {
846 bo_mark_mmaps_incoherent(bo);
847 bo->virtual = NULL;
848 }
849 pthread_mutex_unlock(&bufmgr->lock);
850
851 return ret;
852 }
853
854 int
855 brw_bo_subdata(struct brw_bo *bo, unsigned long offset,
856 unsigned long size, const void *data)
857 {
858 struct brw_bufmgr *bufmgr = bo->bufmgr;
859 struct drm_i915_gem_pwrite pwrite;
860 int ret;
861
862 memclear(pwrite);
863 pwrite.handle = bo->gem_handle;
864 pwrite.offset = offset;
865 pwrite.size = size;
866 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
867 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
868 if (ret != 0) {
869 ret = -errno;
870 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
871 __FILE__, __LINE__, bo->gem_handle, (int) offset,
872 (int) size, strerror(errno));
873 }
874
875 return ret;
876 }
877
878 int
879 brw_bo_get_subdata(struct brw_bo *bo, unsigned long offset,
880 unsigned long size, void *data)
881 {
882 struct brw_bufmgr *bufmgr = bo->bufmgr;
883 struct drm_i915_gem_pread pread;
884 int ret;
885
886 memclear(pread);
887 pread.handle = bo->gem_handle;
888 pread.offset = offset;
889 pread.size = size;
890 pread.data_ptr = (uint64_t) (uintptr_t) data;
891 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
892 if (ret != 0) {
893 ret = -errno;
894 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
895 __FILE__, __LINE__, bo->gem_handle, (int) offset,
896 (int) size, strerror(errno));
897 }
898
899 return ret;
900 }
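/* Usage sketch (illustrative): pwrite/pread transfer data without mapping
 * the BO, which suits small one-shot uploads such as constant data.
 *
 *    static const uint32_t zeroes[4] = { 0, 0, 0, 0 };
 *    brw_bo_subdata(bo, 0, sizeof(zeroes), zeroes);
 */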
901
902 /** Waits for all GPU rendering with the object to have completed. */
903 void
904 brw_bo_wait_rendering(struct brw_bo *bo)
905 {
906 struct brw_bufmgr *bufmgr = bo->bufmgr;
907 struct drm_i915_gem_set_domain set_domain;
908 int ret;
909
910 memclear(set_domain);
911 set_domain.handle = bo->gem_handle;
912 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
913 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
914 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
915 if (ret != 0) {
916 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
917 __FILE__, __LINE__, bo->gem_handle,
918 set_domain.read_domains, set_domain.write_domain, strerror(errno));
919 }
920 }
921
922 /**
923 * Waits on a BO for the given amount of time.
924 *
925 * @bo: buffer object to wait for
926 * @timeout_ns: amount of time to wait in nanoseconds.
927 * If value is less than 0, an infinite wait will occur.
928 *
929 * Returns 0 if the wait was successful, i.e. the last batch referencing the
930 * object has completed within the allotted time. Otherwise, a negative return
931 * value describes the error; of particular interest is -ETIME when the wait
932 * has failed to yield the desired result.
933 *
934 * Similar to brw_bo_wait_rendering, except that a timeout parameter allows
935 * the operation to give up after a certain amount of time. Another subtle
936 * difference is that the locking semantics differ: this variant does not
937 * hold the lock for the duration of the wait, which makes the wait subject
938 * to a larger userspace race window.
939 *
940 * The implementation shall wait until the object is no longer actively
941 * referenced within a batch buffer at the time of the call. The wait does
942 * not guarantee that the buffer has not been re-issued by another thread or
943 * through a flinked handle. Userspace must ensure this race does not occur
944 * if such precision is important.
945 *
946 * Note that some kernels have broken the promise of an infinite wait for
947 * negative values; upgrade to the latest stable kernel if this is the case.
948 */
949 int
950 brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns)
951 {
952 struct brw_bufmgr *bufmgr = bo->bufmgr;
953 struct drm_i915_gem_wait wait;
954 int ret;
955
956 memclear(wait);
957 wait.bo_handle = bo->gem_handle;
958 wait.timeout_ns = timeout_ns;
959 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
960 if (ret == -1)
961 return -errno;
962
963 return ret;
964 }
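/* Usage sketch (illustrative): bound the wait to one millisecond and treat
 * -ETIME as "still busy" rather than a hard failure.
 *
 *    int err = brw_bo_wait(bo, 1000 * 1000);
 *    if (err == -ETIME) {
 *       ...   still busy, try again later
 *    } else if (err != 0) {
 *       ...   genuine error
 *    }
 */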
965
966 void
967 brw_bufmgr_destroy(struct brw_bufmgr *bufmgr)
968 {
969 pthread_mutex_destroy(&bufmgr->lock);
970
971 /* Free any cached buffer objects we were going to reuse */
972 for (int i = 0; i < bufmgr->num_buckets; i++) {
973 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
974
975 list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
976 list_del(&bo->head);
977
978 bo_free(bo);
979 }
980 }
981
982 _mesa_hash_table_destroy(bufmgr->name_table, NULL);
983 _mesa_hash_table_destroy(bufmgr->handle_table, NULL);
984
985 free(bufmgr);
986 }
987
988 static int
989 bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode,
990 uint32_t stride)
991 {
992 struct brw_bufmgr *bufmgr = bo->bufmgr;
993 struct drm_i915_gem_set_tiling set_tiling;
994 int ret;
995
996 if (bo->global_name == 0 &&
997 tiling_mode == bo->tiling_mode && stride == bo->stride)
998 return 0;
999
1000 memset(&set_tiling, 0, sizeof(set_tiling));
1001 do {
1002 /* set_tiling is slightly broken and overwrites the
1003 * input on the error path, so we have to open code
1004 * drmIoctl.
1005 */
1006 set_tiling.handle = bo->gem_handle;
1007 set_tiling.tiling_mode = tiling_mode;
1008 set_tiling.stride = stride;
1009
1010 ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
1011 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
1012 if (ret == -1)
1013 return -errno;
1014
1015 bo->tiling_mode = set_tiling.tiling_mode;
1016 bo->swizzle_mode = set_tiling.swizzle_mode;
1017 bo->stride = set_tiling.stride;
1018 return 0;
1019 }
1020
1021 int
1022 brw_bo_get_tiling(struct brw_bo *bo, uint32_t *tiling_mode,
1023 uint32_t *swizzle_mode)
1024 {
1025 *tiling_mode = bo->tiling_mode;
1026 *swizzle_mode = bo->swizzle_mode;
1027 return 0;
1028 }
1029
1030 struct brw_bo *
1031 brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr, int prime_fd,
1032 int size)
1033 {
1034 int ret;
1035 uint32_t handle;
1036 struct brw_bo *bo;
1037 struct drm_i915_gem_get_tiling get_tiling;
1038
1039 pthread_mutex_lock(&bufmgr->lock);
1040 ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
1041 if (ret) {
1042 DBG("create_from_prime: failed to obtain handle from fd: %s\n",
1043 strerror(errno));
1044 pthread_mutex_unlock(&bufmgr->lock);
1045 return NULL;
1046 }
1047
1048 /*
1049 * See if the kernel has already returned this buffer to us. Just as
1050 * for named buffers, we must not create two bo's pointing at the same
1051 * kernel object
1052 */
1053 bo = hash_find_bo(bufmgr->handle_table, handle);
1054 if (bo) {
1055 brw_bo_reference(bo);
1056 goto out;
1057 }
1058
1059 bo = calloc(1, sizeof(*bo));
1060 if (!bo)
1061 goto out;
1062
1063 p_atomic_set(&bo->refcount, 1);
1064
1065 /* Determine size of bo. The fd-to-handle ioctl really should
1066 * return the size, but it doesn't. If we have kernel 3.12 or
1067 * later, we can lseek on the prime fd to get the size. Older
1068 * kernels will just fail, in which case we fall back to the
1069 * provided (estimated or guessed) size. */
1070 ret = lseek(prime_fd, 0, SEEK_END);
1071 if (ret != -1)
1072 bo->size = ret;
1073 else
1074 bo->size = size;
1075
1076 bo->bufmgr = bufmgr;
1077
1078 bo->gem_handle = handle;
1079 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
1080
1081 bo->name = "prime";
1082 bo->reusable = false;
1083
1084 memclear(get_tiling);
1085 get_tiling.handle = bo->gem_handle;
1086 if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
1087 goto err;
1088
1089 bo->tiling_mode = get_tiling.tiling_mode;
1090 bo->swizzle_mode = get_tiling.swizzle_mode;
1091 /* XXX stride is unknown */
1092
1093 out:
1094 pthread_mutex_unlock(&bufmgr->lock);
1095 return bo;
1096
1097 err:
1098 bo_free(bo);
1099 pthread_mutex_unlock(&bufmgr->lock);
1100 return NULL;
1101 }
1102
1103 int
1104 brw_bo_gem_export_to_prime(struct brw_bo *bo, int *prime_fd)
1105 {
1106 struct brw_bufmgr *bufmgr = bo->bufmgr;
1107
1108 if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
1109 DRM_CLOEXEC, prime_fd) != 0)
1110 return -errno;
1111
1112 bo->reusable = false;
1113
1114 return 0;
1115 }
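/* Usage sketch (illustrative): dma-buf sharing.  The exported fd can be
 * sent to another process (e.g. over a unix socket) and re-imported there
 * with brw_bo_gem_create_from_prime().
 *
 *    int fd;
 *    if (brw_bo_gem_export_to_prime(bo, &fd) == 0) {
 *       struct brw_bo *imported =
 *          brw_bo_gem_create_from_prime(bufmgr, fd, bo->size);
 *       ...
 *       close(fd);
 *    }
 */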
1116
1117 int
1118 brw_bo_flink(struct brw_bo *bo, uint32_t *name)
1119 {
1120 struct brw_bufmgr *bufmgr = bo->bufmgr;
1121
1122 if (!bo->global_name) {
1123 struct drm_gem_flink flink;
1124
1125 memclear(flink);
1126 flink.handle = bo->gem_handle;
1127 if (drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
1128 return -errno;
1129
1130 pthread_mutex_lock(&bufmgr->lock);
1131 if (!bo->global_name) {
1132 bo->global_name = flink.name;
1133 bo->reusable = false;
1134
1135 _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
1136 }
1137 pthread_mutex_unlock(&bufmgr->lock);
1138 }
1139
1140 *name = bo->global_name;
1141 return 0;
1142 }
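/* Usage sketch (illustrative): sharing via a global (flink) name.  The
 * exporter publishes the name, the importer wraps it with
 * brw_bo_gem_create_from_name().  "send_name_to_peer" is hypothetical.
 *
 *    uint32_t name;
 *    if (brw_bo_flink(bo, &name) == 0)
 *       send_name_to_peer(name);
 *
 *    struct brw_bo *shared =
 *       brw_bo_gem_create_from_name(bufmgr, "shared", name);
 */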
1143
1144 /**
1145 * Enables unlimited caching of buffer objects for reuse.
1146 *
1147 * This is potentially very memory expensive, as the cache at each bucket
1148 * size is only bounded by how many buffers of that size we've managed to have
1149 * in flight at once.
1150 */
1151 void
1152 brw_bufmgr_enable_reuse(struct brw_bufmgr *bufmgr)
1153 {
1154 bufmgr->bo_reuse = true;
1155 }
1156
1157 static void
1158 add_bucket(struct brw_bufmgr *bufmgr, int size)
1159 {
1160 unsigned int i = bufmgr->num_buckets;
1161
1162 assert(i < ARRAY_SIZE(bufmgr->cache_bucket));
1163
1164 list_inithead(&bufmgr->cache_bucket[i].head);
1165 bufmgr->cache_bucket[i].size = size;
1166 bufmgr->num_buckets++;
1167 }
1168
1169 static void
1170 init_cache_buckets(struct brw_bufmgr *bufmgr)
1171 {
1172 unsigned long size, cache_max_size = 64 * 1024 * 1024;
1173
1174 /* OK, so power of two buckets was too wasteful of memory.
1175 * Give 3 other sizes between each power of two, to hopefully
1176 * cover things accurately enough. (The alternative is
1177 * probably to just go for exact matching of sizes, and assume
1178 * that for things like composited window resize the tiled
1179 * width/height alignment and rounding of sizes to pages will
1180 * get us useful cache hit rates anyway)
1181 */
1182 add_bucket(bufmgr, 4096);
1183 add_bucket(bufmgr, 4096 * 2);
1184 add_bucket(bufmgr, 4096 * 3);
1185
1186 /* Initialize the linked lists for BO reuse cache. */
1187 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
1188 add_bucket(bufmgr, size);
1189
1190 add_bucket(bufmgr, size + size * 1 / 4);
1191 add_bucket(bufmgr, size + size * 2 / 4);
1192 add_bucket(bufmgr, size + size * 3 / 4);
1193 }
1194 }
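/* For illustration, init_cache_buckets() above produces this bucket
 * progression: 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112,
 * 128 KiB and so on (each power of two plus three quarter-steps) up to the
 * 64 MiB cache_max_size.
 */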
1195
1196 uint32_t
1197 brw_create_hw_context(struct brw_bufmgr *bufmgr)
1198 {
1199 struct drm_i915_gem_context_create create;
1200 int ret;
1201
1202 memclear(create);
1203 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
1204 if (ret != 0) {
1205 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", strerror(errno));
1206 return 0;
1207 }
1208
1209 return create.ctx_id;
1210 }
1211
1212 void
1213 brw_destroy_hw_context(struct brw_bufmgr *bufmgr, uint32_t ctx_id)
1214 {
1215 struct drm_i915_gem_context_destroy d = {.ctx_id = ctx_id };
1216
1217 if (ctx_id != 0 &&
1218 drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
1219 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
1220 strerror(errno));
1221 }
1222 }
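/* Usage sketch (illustrative): a hardware context is just an id; 0 signals
 * failure, and passing 0 to brw_destroy_hw_context() is a no-op.
 *
 *    uint32_t ctx_id = brw_create_hw_context(bufmgr);
 *    if (ctx_id == 0) {
 *       ...   run on the default context instead
 *    }
 *    ...
 *    brw_destroy_hw_context(bufmgr, ctx_id);
 */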
1223
1224 int
1225 brw_reg_read(struct brw_bufmgr *bufmgr, uint32_t offset, uint64_t *result)
1226 {
1227 struct drm_i915_reg_read reg_read;
1228 int ret;
1229
1230 memclear(reg_read);
1231 reg_read.offset = offset;
1232
1233 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
1234
1235 *result = reg_read.val;
1236 return ret;
1237 }
1238
1239 void *
1240 brw_bo_map__gtt(struct brw_bo *bo)
1241 {
1242 struct brw_bufmgr *bufmgr = bo->bufmgr;
1243
1244 if (bo->gtt_virtual)
1245 return bo->gtt_virtual;
1246
1247 pthread_mutex_lock(&bufmgr->lock);
1248 if (bo->gtt_virtual == NULL) {
1249 struct drm_i915_gem_mmap_gtt mmap_arg;
1250 void *ptr;
1251
1252 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1253 bo->gem_handle, bo->name, bo->map_count);
1254
1255 memclear(mmap_arg);
1256 mmap_arg.handle = bo->gem_handle;
1257
1258 /* Get the fake offset back... */
1259 ptr = MAP_FAILED;
1260 if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg) == 0) {
1261 /* and mmap it */
1262 ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
1263 MAP_SHARED, bufmgr->fd, mmap_arg.offset);
1264 }
1265 if (ptr == MAP_FAILED)
1266 ptr = NULL;
1269
1270 bo->gtt_virtual = ptr;
1271 }
1272 pthread_mutex_unlock(&bufmgr->lock);
1273
1274 return bo->gtt_virtual;
1275 }
1276
1277 void *
1278 brw_bo_map__cpu(struct brw_bo *bo)
1279 {
1280 struct brw_bufmgr *bufmgr = bo->bufmgr;
1281
1282 if (bo->mem_virtual)
1283 return bo->mem_virtual;
1284
1285 pthread_mutex_lock(&bufmgr->lock);
1286 if (!bo->mem_virtual) {
1287 struct drm_i915_gem_mmap mmap_arg;
1288
1289 DBG("bo_map: %d (%s), map_count=%d\n",
1290 bo->gem_handle, bo->name, bo->map_count);
1291
1292 memclear(mmap_arg);
1293 mmap_arg.handle = bo->gem_handle;
1294 mmap_arg.size = bo->size;
1295 if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
1296 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1297 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1298 } else {
1299 bo->map_count++;
1300 VG(VALGRIND_MALLOCLIKE_BLOCK
1301 (mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1302 bo->mem_virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
1303 }
1304 }
1305 pthread_mutex_unlock(&bufmgr->lock);
1306
1307 return bo->mem_virtual;
1308 }
1309
1310 void *
1311 brw_bo_map__wc(struct brw_bo *bo)
1312 {
1313 struct brw_bufmgr *bufmgr = bo->bufmgr;
1314
1315 if (bo->wc_virtual)
1316 return bo->wc_virtual;
1317
1318 pthread_mutex_lock(&bufmgr->lock);
1319 if (!bo->wc_virtual) {
1320 struct drm_i915_gem_mmap mmap_arg;
1321
1322 DBG("bo_map: %d (%s), map_count=%d\n",
1323 bo->gem_handle, bo->name, bo->map_count);
1324
1325 memclear(mmap_arg);
1326 mmap_arg.handle = bo->gem_handle;
1327 mmap_arg.size = bo->size;
1328 mmap_arg.flags = I915_MMAP_WC;
1329 if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
1330 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1331 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1332 } else {
1333 bo->map_count++;
1334 VG(VALGRIND_MALLOCLIKE_BLOCK
1335 (mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1336 bo->wc_virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
1337 }
1338 }
1339 pthread_mutex_unlock(&bufmgr->lock);
1340
1341 return bo->wc_virtual;
1342 }
1343
1344 /**
1345 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
1346 * and manage buffer objects.
1347 *
1348 * \param fd File descriptor of the opened DRM device.
1349 */
1350 struct brw_bufmgr *
1351 brw_bufmgr_init(struct gen_device_info *devinfo, int fd, int batch_size)
1352 {
1353 struct brw_bufmgr *bufmgr;
1354
1355 bufmgr = calloc(1, sizeof(*bufmgr));
1356 if (bufmgr == NULL)
1357 return NULL;
1358
1359 /* Handles to buffer objects belong to the device fd and are not
1360 * reference counted by the kernel. If the same fd is used by
1361 * multiple parties (threads sharing the same screen bufmgr, or
1362 * even worse the same device fd passed to multiple libraries)
1363 * ownership of those handles is shared by those independent parties.
1364 *
1365 * Don't do this! Ensure that each library/bufmgr has its own device
1366 * fd so that its namespace does not clash with another.
1367 */
1368 bufmgr->fd = fd;
1369
1370 if (pthread_mutex_init(&bufmgr->lock, NULL) != 0) {
1371 free(bufmgr);
1372 return NULL;
1373 }
1374
1375 bufmgr->has_llc = devinfo->has_llc;
1376
1377 init_cache_buckets(bufmgr);
1378
1379 bufmgr->name_table =
1380 _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
1381 bufmgr->handle_table =
1382 _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
1383
1384 return bufmgr;
1385 }
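/* Usage sketch (illustrative): typical bring-up from screen creation.
 * "devinfo" and "fd" come from the caller; BATCH_SZ is hypothetical.
 *
 *    struct brw_bufmgr *bufmgr = brw_bufmgr_init(devinfo, fd, BATCH_SZ);
 *    if (bufmgr == NULL)
 *       return false;
 *    brw_bufmgr_enable_reuse(bufmgr);
 *    ...
 *    brw_bufmgr_destroy(bufmgr);
 */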