i965/drm: Make alignment parameter a uint64_t.
[mesa.git] src/mesa/drivers/dri/i965/brw_bufmgr.c
1 /**************************************************************************
2 *
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007-2012 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
26 * of the Software.
27 *
28 *
29 **************************************************************************/
30 /*
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
35 */
36
37 #ifdef HAVE_CONFIG_H
38 #include "config.h"
39 #endif
40
41 #include <xf86drm.h>
42 #include <util/u_atomic.h>
43 #include <fcntl.h>
44 #include <stdio.h>
45 #include <stdlib.h>
46 #include <string.h>
47 #include <unistd.h>
48 #include <assert.h>
49 #include <pthread.h>
50 #include <sys/ioctl.h>
51 #include <sys/stat.h>
52 #include <sys/types.h>
53 #include <stdbool.h>
54
55 #include "errno.h"
56 #ifndef ETIME
57 #define ETIME ETIMEDOUT
58 #endif
59 #include "common/gen_debug.h"
60 #include "common/gen_device_info.h"
61 #include "libdrm_macros.h"
62 #include "main/macros.h"
63 #include "util/macros.h"
64 #include "util/hash_table.h"
65 #include "util/list.h"
66 #include "brw_bufmgr.h"
67 #include "brw_context.h"
68 #include "string.h"
69
70 #include "i915_drm.h"
71
72 #ifdef HAVE_VALGRIND
73 #include <valgrind.h>
74 #include <memcheck.h>
75 #define VG(x) x
76 #else
77 #define VG(x)
78 #endif
79
80 #define memclear(s) memset(&s, 0, sizeof(s))
81
82 #define FILE_DEBUG_FLAG DEBUG_BUFMGR
83
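/* Atomically add @add to @v unless the current value equals @unless.
 * Returns nonzero if the value already equaled @unless and the add was
 * skipped; used below to detect when the last BO reference is being dropped.
 */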
84 static inline int
85 atomic_add_unless(int *v, int add, int unless)
86 {
87 int c, old;
88 c = p_atomic_read(v);
89 while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
90 c = old;
91 return c == unless;
92 }
93
94 struct bo_cache_bucket {
95 struct list_head head;
96 unsigned long size;
97 };
98
99 struct brw_bufmgr {
100 int fd;
101
102 pthread_mutex_t lock;
103
104 /** Array of lists of cached gem objects of power-of-two sizes */
105 struct bo_cache_bucket cache_bucket[14 * 4];
106 int num_buckets;
107 time_t time;
108
109 struct hash_table *name_table;
110 struct hash_table *handle_table;
111
112 unsigned int has_llc:1;
113 unsigned int bo_reuse:1;
114 };
115
116 static int bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode,
117 uint32_t stride);
118
119 static void bo_free(struct brw_bo *bo);
120
121 static uint32_t
122 key_hash_uint(const void *key)
123 {
124 return _mesa_hash_data(key, 4);
125 }
126
127 static bool
128 key_uint_equal(const void *a, const void *b)
129 {
130 return *((unsigned *) a) == *((unsigned *) b);
131 }
132
133 static struct brw_bo *
134 hash_find_bo(struct hash_table *ht, unsigned int key)
135 {
136 struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
137 return entry ? (struct brw_bo *) entry->data : NULL;
138 }
139
140 static unsigned long
141 bo_tile_size(struct brw_bufmgr *bufmgr, unsigned long size, uint32_t tiling)
142 {
143 if (tiling == I915_TILING_NONE)
144 return size;
145
146 /* 965+ just need multiples of page size for tiling */
147 return ALIGN(size, 4096);
148 }
149
150 /*
151 * Round a given pitch up to the minimum required for X tiling on a
152 * given chip. We use 512 as the minimum to allow for a later tiling
153 * change.
154 */
155 static uint32_t
156 bo_tile_pitch(struct brw_bufmgr *bufmgr, uint32_t pitch, uint32_t tiling)
157 {
158 unsigned long tile_width;
159
160 /* If untiled, then just align it so that we can do rendering
161 * to it with the 3D engine.
162 */
163 if (tiling == I915_TILING_NONE)
164 return ALIGN(pitch, 64);
165
166 if (tiling == I915_TILING_X)
167 tile_width = 512;
168 else
169 tile_width = 128;
170
171 /* 965 is flexible */
172 return ALIGN(pitch, tile_width);
173 }
174
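/* Return the smallest cache bucket that can hold @size, or NULL if the
 * request is larger than the biggest bucket; in that case the allocation is
 * not rounded up and the resulting BO is never cached for reuse.
 */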
175 static struct bo_cache_bucket *
176 bucket_for_size(struct brw_bufmgr *bufmgr, unsigned long size)
177 {
178 int i;
179
180 for (i = 0; i < bufmgr->num_buckets; i++) {
181 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
182 if (bucket->size >= size) {
183 return bucket;
184 }
185 }
186
187 return NULL;
188 }
189
190 inline void
191 brw_bo_reference(struct brw_bo *bo)
192 {
193 p_atomic_inc(&bo->refcount);
194 }
195
196 int
197 brw_bo_busy(struct brw_bo *bo)
198 {
199 struct brw_bufmgr *bufmgr = bo->bufmgr;
200 struct drm_i915_gem_busy busy;
201 int ret;
202
203 memclear(busy);
204 busy.handle = bo->gem_handle;
205
206 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
207 if (ret == 0) {
208 bo->idle = !busy.busy;
209 return busy.busy;
210 } else {
211 return false;
212 }
214 }
215
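/* Tell the kernel whether the BO's backing pages may be reclaimed under
 * memory pressure (I915_MADV_DONTNEED) or will be needed again
 * (I915_MADV_WILLNEED).  Returns nonzero if the pages are still resident.
 */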
216 int
217 brw_bo_madvise(struct brw_bo *bo, int state)
218 {
219 struct drm_i915_gem_madvise madv;
220
221 memclear(madv);
222 madv.handle = bo->gem_handle;
223 madv.madv = state;
224 madv.retained = 1;
225 drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
226
227 return madv.retained;
228 }
229
230 /* drop the oldest entries that have been purged by the kernel */
231 static void
232 brw_bo_cache_purge_bucket(struct brw_bufmgr *bufmgr,
233 struct bo_cache_bucket *bucket)
234 {
235 list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
236 if (brw_bo_madvise(bo, I915_MADV_DONTNEED))
237 break;
238
239 list_del(&bo->head);
240 bo_free(bo);
241 }
242 }
243
244 static struct brw_bo *
245 bo_alloc_internal(struct brw_bufmgr *bufmgr,
246 const char *name,
247 unsigned long size,
248 unsigned long flags,
249 uint32_t tiling_mode,
250 uint32_t stride, uint64_t alignment)
251 {
252 struct brw_bo *bo;
253 unsigned int page_size = getpagesize();
254 int ret;
255 struct bo_cache_bucket *bucket;
256 bool alloc_from_cache;
257 unsigned long bo_size;
258 bool for_render = false;
259
260 if (flags & BO_ALLOC_FOR_RENDER)
261 for_render = true;
262
263 /* Round the allocated size up to the nearest cache bucket size. */
264 bucket = bucket_for_size(bufmgr, size);
265
266 /* If we don't have caching at this size, don't actually round the
267 * allocation up.
268 */
269 if (bucket == NULL) {
270 bo_size = size;
271 if (bo_size < page_size)
272 bo_size = page_size;
273 } else {
274 bo_size = bucket->size;
275 }
276
277 pthread_mutex_lock(&bufmgr->lock);
278 /* Get a buffer out of the cache if available */
279 retry:
280 alloc_from_cache = false;
281 if (bucket != NULL && !list_empty(&bucket->head)) {
282 if (for_render) {
283 /* Allocate new render-target BOs from the tail (MRU)
284 * of the list, as it will likely be hot in the GPU
285 * cache and in the aperture for us.
286 */
287 bo = LIST_ENTRY(struct brw_bo, bucket->head.prev, head);
288 list_del(&bo->head);
289 alloc_from_cache = true;
290 bo->align = alignment;
291 } else {
292 assert(alignment == 0);
293 /* For non-render-target BOs (where we're probably
294 * going to map it first thing in order to fill it
295 * with data), check if the last BO in the cache is
296 * unbusy, and only reuse in that case. Otherwise,
297 * allocating a new buffer is probably faster than
298 * waiting for the GPU to finish.
299 */
300 bo = LIST_ENTRY(struct brw_bo, bucket->head.next, head);
301 if (!brw_bo_busy(bo)) {
302 alloc_from_cache = true;
303 list_del(&bo->head);
304 }
305 }
306
307 if (alloc_from_cache) {
308 if (!brw_bo_madvise(bo, I915_MADV_WILLNEED)) {
309 bo_free(bo);
310 brw_bo_cache_purge_bucket(bufmgr, bucket);
311 goto retry;
312 }
313
314 if (bo_set_tiling_internal(bo, tiling_mode, stride)) {
315 bo_free(bo);
316 goto retry;
317 }
318 }
319 }
320
321 if (!alloc_from_cache) {
322 struct drm_i915_gem_create create;
323
324 bo = calloc(1, sizeof(*bo));
325 if (!bo)
326 goto err;
327
328 bo->size = bo_size;
329
330 memclear(create);
331 create.size = bo_size;
332
333 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
334 if (ret != 0) {
335 free(bo);
336 goto err;
337 }
338
339 bo->gem_handle = create.handle;
340 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
341
342 bo->bufmgr = bufmgr;
343 bo->align = alignment;
344
345 bo->tiling_mode = I915_TILING_NONE;
346 bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
347 bo->stride = 0;
348
349 if (bo_set_tiling_internal(bo, tiling_mode, stride))
350 goto err_free;
351 }
352
353 bo->name = name;
354 p_atomic_set(&bo->refcount, 1);
355 bo->reusable = true;
356
357 pthread_mutex_unlock(&bufmgr->lock);
358
359 DBG("bo_create: buf %d (%s) %ldb\n", bo->gem_handle, bo->name, size);
360
361 return bo;
362
363 err_free:
364 bo_free(bo);
365 err:
366 pthread_mutex_unlock(&bufmgr->lock);
367 return NULL;
368 }
369
370 struct brw_bo *
371 brw_bo_alloc(struct brw_bufmgr *bufmgr,
372 const char *name, unsigned long size, uint64_t alignment)
373 {
374 return bo_alloc_internal(bufmgr, name, size, 0, I915_TILING_NONE, 0, 0);
375 }
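/* Illustrative only (not part of the driver): a minimal sketch of how a
 * caller might combine brw_bo_alloc() with the mapping functions below,
 * assuming a valid bufmgr and brw_context and omitting error handling:
 *
 *    struct brw_bo *bo = brw_bo_alloc(bufmgr, "scratch", 4096, 0);
 *    if (brw_bo_map(brw, bo, true) == 0) {
 *       memset(bo->virtual, 0, bo->size);
 *       brw_bo_unmap(bo);
 *    }
 *    brw_bo_unreference(bo);
 */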
376
377 struct brw_bo *
378 brw_bo_alloc_tiled(struct brw_bufmgr *bufmgr, const char *name,
379 int x, int y, int cpp, uint32_t tiling,
380 uint32_t *pitch, unsigned long flags)
381 {
382 unsigned long size;
383 uint32_t stride;
384 unsigned long aligned_y, height_alignment;
385
386 /* If we're tiled, our allocations are in 8 or 32-row blocks,
387 * so failure to align our height means that we won't allocate
388 * enough pages.
389 *
390 * If we're untiled, we still have to align to 2 rows high
391 * because the data port accesses 2x2 blocks even if the
392 * bottom row isn't to be rendered, so failure to align means
393 * we could walk off the end of the GTT and fault. This is
394 * documented on 965, and may be the case on older chipsets
395 * too so we try to be careful.
396 */
397 aligned_y = y;
398 height_alignment = 2;
399
400 if (tiling == I915_TILING_X)
401 height_alignment = 8;
402 else if (tiling == I915_TILING_Y)
403 height_alignment = 32;
404 aligned_y = ALIGN(y, height_alignment);
405
406 stride = x * cpp;
407 stride = bo_tile_pitch(bufmgr, stride, tiling);
408 size = stride * aligned_y;
409 size = bo_tile_size(bufmgr, size, tiling);
410 *pitch = stride;
411
412 if (tiling == I915_TILING_NONE)
413 stride = 0;
414
415 return bo_alloc_internal(bufmgr, name, size, flags, tiling, stride, 0);
416 }
417
418 /**
419 * Returns a brw_bo wrapping the given buffer object handle.
420 *
421 * This can be used when one application needs to pass a buffer object
422 * to another.
423 */
424 struct brw_bo *
425 brw_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
426 const char *name, unsigned int handle)
427 {
428 struct brw_bo *bo;
429 int ret;
430 struct drm_gem_open open_arg;
431 struct drm_i915_gem_get_tiling get_tiling;
432
433 /* At the moment most applications only have a few named BOs.
434 * For instance, in a DRI client only the render buffers passed
435 * between X and the client are named. And since X returns the
436 * alternating names for the front/back buffer a linear search
437 * provides a sufficiently fast match.
438 */
439 pthread_mutex_lock(&bufmgr->lock);
440 bo = hash_find_bo(bufmgr->name_table, handle);
441 if (bo) {
442 brw_bo_reference(bo);
443 goto out;
444 }
445
446 memclear(open_arg);
447 open_arg.name = handle;
448 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
449 if (ret != 0) {
450 DBG("Couldn't reference %s handle 0x%08x: %s\n",
451 name, handle, strerror(errno));
452 bo = NULL;
453 goto out;
454 }
455 /* Now see if someone has used a prime handle to get this
456 * object from the kernel before by looking through the list
457 * again for a matching gem_handle
458 */
459 bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
460 if (bo) {
461 brw_bo_reference(bo);
462 goto out;
463 }
464
465 bo = calloc(1, sizeof(*bo));
466 if (!bo)
467 goto out;
468
469 p_atomic_set(&bo->refcount, 1);
470
471 bo->size = open_arg.size;
472 bo->offset64 = 0;
473 bo->virtual = NULL;
474 bo->bufmgr = bufmgr;
475 bo->gem_handle = open_arg.handle;
476 bo->name = name;
477 bo->global_name = handle;
478 bo->reusable = false;
479
480 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
481 _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
482
483 memclear(get_tiling);
484 get_tiling.handle = bo->gem_handle;
485 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
486 if (ret != 0)
487 goto err_unref;
488
489 bo->tiling_mode = get_tiling.tiling_mode;
490 bo->swizzle_mode = get_tiling.swizzle_mode;
491 /* XXX stride is unknown */
492 DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);
493
494 out:
495 pthread_mutex_unlock(&bufmgr->lock);
496 return bo;
497
498 err_unref:
499 bo_free(bo);
500 pthread_mutex_unlock(&bufmgr->lock);
501 return NULL;
502 }
503
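/* Unmap any CPU/WC/GTT mappings, drop the BO from the handle and name
 * tables, close its GEM handle and free the wrapper.  Called with
 * bufmgr->lock held on every path except final bufmgr teardown.
 */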
504 static void
505 bo_free(struct brw_bo *bo)
506 {
507 struct brw_bufmgr *bufmgr = bo->bufmgr;
508 struct drm_gem_close close;
509 struct hash_entry *entry;
510 int ret;
511
512 if (bo->mem_virtual) {
513 VG(VALGRIND_FREELIKE_BLOCK(bo->mem_virtual, 0));
514 drm_munmap(bo->mem_virtual, bo->size);
515 }
516 if (bo->wc_virtual) {
517 VG(VALGRIND_FREELIKE_BLOCK(bo->wc_virtual, 0));
518 drm_munmap(bo->wc_virtual, bo->size);
519 }
520 if (bo->gtt_virtual) {
521 drm_munmap(bo->gtt_virtual, bo->size);
522 }
523
524 if (bo->global_name) {
525 entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
526 _mesa_hash_table_remove(bufmgr->name_table, entry);
527 }
528 entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
529 _mesa_hash_table_remove(bufmgr->handle_table, entry);
530
531 /* Close this object */
532 memclear(close);
533 close.handle = bo->gem_handle;
534 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
535 if (ret != 0) {
536 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
537 bo->gem_handle, bo->name, strerror(errno));
538 }
539 free(bo);
540 }
541
542 static void
543 bo_mark_mmaps_incoherent(struct brw_bo *bo)
544 {
545 #if HAVE_VALGRIND
546 if (bo->mem_virtual)
547 VALGRIND_MAKE_MEM_NOACCESS(bo->mem_virtual, bo->size);
548
549 if (bo->wc_virtual)
550 VALGRIND_MAKE_MEM_NOACCESS(bo->wc_virtual, bo->size);
551
552 if (bo->gtt_virtual)
553 VALGRIND_MAKE_MEM_NOACCESS(bo->gtt_virtual, bo->size);
554 #endif
555 }
556
557 /** Frees all cached buffers significantly older than @time. */
558 static void
559 cleanup_bo_cache(struct brw_bufmgr *bufmgr, time_t time)
560 {
561 int i;
562
563 if (bufmgr->time == time)
564 return;
565
566 for (i = 0; i < bufmgr->num_buckets; i++) {
567 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
568
569 list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
570 if (time - bo->free_time <= 1)
571 break;
572
573 list_del(&bo->head);
574
575 bo_free(bo);
576 }
577 }
578
579 bufmgr->time = time;
580 }
581
582 static void
583 bo_unreference_final(struct brw_bo *bo, time_t time)
584 {
585 struct brw_bufmgr *bufmgr = bo->bufmgr;
586 struct bo_cache_bucket *bucket;
587
588 DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);
589
590 /* Clear any left-over mappings */
591 if (bo->map_count) {
592 DBG("bo freed with non-zero map-count %d\n", bo->map_count);
593 bo->map_count = 0;
594 bo_mark_mmaps_incoherent(bo);
595 }
596
597 bucket = bucket_for_size(bufmgr, bo->size);
598 /* Put the buffer into our internal cache for reuse if we can. */
599 if (bufmgr->bo_reuse && bo->reusable && bucket != NULL &&
600 brw_bo_madvise(bo, I915_MADV_DONTNEED)) {
601 bo->free_time = time;
602
603 bo->name = NULL;
604
605 list_addtail(&bo->head, &bucket->head);
606 } else {
607 bo_free(bo);
608 }
609 }
610
611 void
612 brw_bo_unreference(struct brw_bo *bo)
613 {
614 if (bo == NULL)
615 return;
616
617 assert(p_atomic_read(&bo->refcount) > 0);
618
619 if (atomic_add_unless(&bo->refcount, -1, 1)) {
620 struct brw_bufmgr *bufmgr = bo->bufmgr;
621 struct timespec time;
622
623 clock_gettime(CLOCK_MONOTONIC, &time);
624
625 pthread_mutex_lock(&bufmgr->lock);
626
627 if (p_atomic_dec_zero(&bo->refcount)) {
628 bo_unreference_final(bo, time.tv_sec);
629 cleanup_bo_cache(bufmgr, time.tv_sec);
630 }
631
632 pthread_mutex_unlock(&bufmgr->lock);
633 }
634 }
635
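/* Issue DRM_IOCTL_I915_GEM_SET_DOMAIN, which waits for any pending GPU
 * access to @bo and moves it into the requested read/write domains.  When
 * brw->perf_debug is set, stalls longer than 0.01 ms are reported with the
 * given @action string.
 */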
636 static void
637 set_domain(struct brw_context *brw, const char *action,
638 struct brw_bo *bo, uint32_t read_domains, uint32_t write_domain)
639 {
640 struct drm_i915_gem_set_domain sd = {
641 .handle = bo->gem_handle,
642 .read_domains = read_domains,
643 .write_domain = write_domain,
644 };
645
646 double elapsed = unlikely(brw && brw->perf_debug) ? -get_time() : 0.0;
647
648 if (drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0) {
649 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s.\n",
650 __FILE__, __LINE__, bo->gem_handle, read_domains, write_domain,
651 strerror(errno));
652 }
653
654 if (unlikely(brw && brw->perf_debug)) {
655 elapsed += get_time();
656 if (elapsed > 1e-5) /* 0.01ms */
657 perf_debug("%s a busy \"%s\" BO stalled and took %.03f ms.\n",
658 action, bo->name, elapsed * 1000);
659 }
660 }
661
662 int
663 brw_bo_map(struct brw_context *brw, struct brw_bo *bo, int write_enable)
664 {
665 struct brw_bufmgr *bufmgr = bo->bufmgr;
666 int ret;
667
668 pthread_mutex_lock(&bufmgr->lock);
669
670 if (!bo->mem_virtual) {
671 struct drm_i915_gem_mmap mmap_arg;
672
673 DBG("bo_map: %d (%s), map_count=%d\n",
674 bo->gem_handle, bo->name, bo->map_count);
675
676 memclear(mmap_arg);
677 mmap_arg.handle = bo->gem_handle;
678 mmap_arg.size = bo->size;
679 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
680 if (ret != 0) {
681 ret = -errno;
682 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
683 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
684 pthread_mutex_unlock(&bufmgr->lock);
685 return ret;
686 }
687 bo->map_count++;
688 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
689 bo->mem_virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
690 }
691 DBG("bo_map: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->mem_virtual);
692 bo->virtual = bo->mem_virtual;
693
694 set_domain(brw, "CPU mapping", bo, I915_GEM_DOMAIN_CPU,
695 write_enable ? I915_GEM_DOMAIN_CPU : 0);
696
697 bo_mark_mmaps_incoherent(bo);
698 VG(VALGRIND_MAKE_MEM_DEFINED(bo->mem_virtual, bo->size));
699 pthread_mutex_unlock(&bufmgr->lock);
700
701 return 0;
702 }
703
704 static int
705 map_gtt(struct brw_bo *bo)
706 {
707 struct brw_bufmgr *bufmgr = bo->bufmgr;
708 int ret;
709
710 /* Get a mapping of the buffer if we haven't before. */
711 if (bo->gtt_virtual == NULL) {
712 struct drm_i915_gem_mmap_gtt mmap_arg;
713
714 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
715 bo->gem_handle, bo->name, bo->map_count);
716
717 memclear(mmap_arg);
718 mmap_arg.handle = bo->gem_handle;
719
720 /* Get the fake offset back... */
721 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
722 if (ret != 0) {
723 ret = -errno;
724 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
725 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
726 return ret;
727 }
728
729 /* and mmap it */
730 bo->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
731 MAP_SHARED, bufmgr->fd, mmap_arg.offset);
732 if (bo->gtt_virtual == MAP_FAILED) {
733 bo->gtt_virtual = NULL;
734 ret = -errno;
735 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
736 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
737 return ret;
738 }
739 }
740
741 bo->map_count++;
742 bo->virtual = bo->gtt_virtual;
743
744 DBG("bo_map_gtt: %d (%s) -> %p\n", bo->gem_handle, bo->name,
745 bo->gtt_virtual);
746
747 return 0;
748 }
749
750 int
751 brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo)
752 {
753 struct brw_bufmgr *bufmgr = bo->bufmgr;
754 int ret;
755
756 pthread_mutex_lock(&bufmgr->lock);
757
758 ret = map_gtt(bo);
759 if (ret) {
760 pthread_mutex_unlock(&bufmgr->lock);
761 return ret;
762 }
763
764 /* Now move it to the GTT domain so that the GPU and CPU
765 * caches are flushed and the GPU isn't actively using the
766 * buffer.
767 *
768 * The pagefault handler does this domain change for us when
769 * it has unbound the BO from the GTT, but it's up to us to
770 * tell it when we're about to use things if we had done
771 * rendering and it still happens to be bound to the GTT.
772 */
773 set_domain(brw, "GTT mapping", bo,
774 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
775
776 bo_mark_mmaps_incoherent(bo);
777 VG(VALGRIND_MAKE_MEM_DEFINED(bo->gtt_virtual, bo->size));
778 pthread_mutex_unlock(&bufmgr->lock);
779
780 return 0;
781 }
782
783 /**
784 * Performs a mapping of the buffer object like the normal GTT
785 * mapping, but avoids waiting for the GPU to be done reading from or
786 * rendering to the buffer.
787 *
788 * This is used in the implementation of GL_ARB_map_buffer_range: The
789 * user asks to create a buffer, then does a mapping, fills some
790 * space, runs a drawing command, then asks to map it again without
791 * synchronizing because it guarantees that it won't write over the
792 * data that the GPU is busy using (or, more specifically, that if it
793 * does write over the data, it acknowledges that rendering is
794 * undefined).
795 */
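/* For reference, the GL-level pattern this serves looks roughly like the
 * following application (not driver) code:
 *
 *    ptr = glMapBufferRange(GL_ARRAY_BUFFER, 0, size,
 *                           GL_MAP_WRITE_BIT | GL_MAP_UNSYNCHRONIZED_BIT);
 *    memcpy(ptr, vertices, size);   // a region the GPU is not reading
 *    glUnmapBuffer(GL_ARRAY_BUFFER);
 */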
796
797 int
798 brw_bo_map_unsynchronized(struct brw_context *brw, struct brw_bo *bo)
799 {
800 struct brw_bufmgr *bufmgr = bo->bufmgr;
801 int ret;
802
803 /* If the CPU cache isn't coherent with the GTT, then use a
804 * regular synchronized mapping. The problem is that we don't
805 * track where the buffer was last used on the CPU side in
806 * terms of brw_bo_map vs brw_bo_map_gtt, so
807 * we would potentially corrupt the buffer even when the user
808 * does reasonable things.
809 */
810 if (!bufmgr->has_llc)
811 return brw_bo_map_gtt(brw, bo);
812
813 pthread_mutex_lock(&bufmgr->lock);
814
815 ret = map_gtt(bo);
816 if (ret == 0) {
817 bo_mark_mmaps_incoherent(bo);
818 VG(VALGRIND_MAKE_MEM_DEFINED(bo->gtt_virtual, bo->size));
819 }
820
821 pthread_mutex_unlock(&bufmgr->lock);
822
823 return ret;
824 }
825
826 int
827 brw_bo_unmap(struct brw_bo *bo)
828 {
829 struct brw_bufmgr *bufmgr = bo->bufmgr;
830 int ret = 0;
831
832 if (bo == NULL)
833 return 0;
834
835 pthread_mutex_lock(&bufmgr->lock);
836
837 if (bo->map_count <= 0) {
838 DBG("attempted to unmap an unmapped bo\n");
839 pthread_mutex_unlock(&bufmgr->lock);
840 /* Preserve the old behaviour of just treating this as a
841 * no-op rather than reporting the error.
842 */
843 return 0;
844 }
845
846 if (--bo->map_count == 0) {
847 bo_mark_mmaps_incoherent(bo);
848 bo->virtual = NULL;
849 }
850 pthread_mutex_unlock(&bufmgr->lock);
851
852 return ret;
853 }
854
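/* Copy @size bytes from @data into the BO at byte @offset using the pwrite
 * ioctl, without requiring a CPU mapping.  Returns 0 on success or -errno.
 */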
855 int
856 brw_bo_subdata(struct brw_bo *bo, uint64_t offset,
857 uint64_t size, const void *data)
858 {
859 struct brw_bufmgr *bufmgr = bo->bufmgr;
860 struct drm_i915_gem_pwrite pwrite;
861 int ret;
862
863 memclear(pwrite);
864 pwrite.handle = bo->gem_handle;
865 pwrite.offset = offset;
866 pwrite.size = size;
867 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
868 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
869 if (ret != 0) {
870 ret = -errno;
871 DBG("%s:%d: Error writing data to buffer %d: "
872 "(%"PRIu64" %"PRIu64") %s .\n",
873 __FILE__, __LINE__, bo->gem_handle, offset, size, strerror(errno));
874 }
875
876 return ret;
877 }
878
879 int
880 brw_bo_get_subdata(struct brw_bo *bo, uint64_t offset,
881 uint64_t size, void *data)
882 {
883 struct brw_bufmgr *bufmgr = bo->bufmgr;
884 struct drm_i915_gem_pread pread;
885 int ret;
886
887 memclear(pread);
888 pread.handle = bo->gem_handle;
889 pread.offset = offset;
890 pread.size = size;
891 pread.data_ptr = (uint64_t) (uintptr_t) data;
892 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
893 if (ret != 0) {
894 ret = -errno;
895 DBG("%s:%d: Error reading data from buffer %d: "
896 "(%"PRIu64" %"PRIu64") %s .\n",
897 __FILE__, __LINE__, bo->gem_handle, offset, size, strerror(errno));
898 }
899
900 return ret;
901 }
902
903 /** Waits for all GPU rendering with the object to have completed. */
904 void
905 brw_bo_wait_rendering(struct brw_context *brw, struct brw_bo *bo)
906 {
907 set_domain(brw, "waiting for",
908 bo, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
909 }
910
911 /**
912 * Waits on a BO for the given amount of time.
913 *
914 * @bo: buffer object to wait for
915 * @timeout_ns: amount of time to wait in nanoseconds.
916 * If value is less than 0, an infinite wait will occur.
917 *
918 * Returns 0 if the wait was successful, i.e. the last batch referencing the
919 * object has completed within the allotted time. Otherwise some negative return
920 * value describes the error. Of particular interest is -ETIME when the wait has
921 * failed to yield the desired result.
922 *
923 * Similar to brw_bo_wait_rendering except a timeout parameter allows
924 * the operation to give up after a certain amount of time. Another subtle
925 * difference is that the internal locking semantics differ (this variant does
926 * not hold the lock for the duration of the wait). This makes the wait subject
927 * to a larger userspace race window.
928 *
929 * The implementation shall wait until the object is no longer actively
930 * referenced within a batch buffer at the time of the call. The wait will
931 * not guard against the buffer being re-issued via another thread or a
932 * flinked handle. Userspace must make sure this race does not occur if such
933 * precision is important.
934 *
935 * Note that some kernels have broken the infinite wait for negative values
936 * promise; upgrade to the latest stable kernel if this is the case.
937 */
938 int
939 brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns)
940 {
941 struct brw_bufmgr *bufmgr = bo->bufmgr;
942 struct drm_i915_gem_wait wait;
943 int ret;
944
945 memclear(wait);
946 wait.bo_handle = bo->gem_handle;
947 wait.timeout_ns = timeout_ns;
948 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
949 if (ret == -1)
950 return -errno;
951
952 return ret;
953 }
954
955 void
956 brw_bufmgr_destroy(struct brw_bufmgr *bufmgr)
957 {
958 pthread_mutex_destroy(&bufmgr->lock);
959
960 /* Free any cached buffer objects we were going to reuse */
961 for (int i = 0; i < bufmgr->num_buckets; i++) {
962 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
963
964 list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
965 list_del(&bo->head);
966
967 bo_free(bo);
968 }
969 }
970
971 _mesa_hash_table_destroy(bufmgr->name_table, NULL);
972 _mesa_hash_table_destroy(bufmgr->handle_table, NULL);
973
974 free(bufmgr);
975 }
976
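/* Apply @tiling_mode and @stride to the BO via the set_tiling ioctl when
 * they differ from the cached state.  Flinked (named) BOs are always
 * re-checked because another process may have changed the tiling.
 */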
977 static int
978 bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode,
979 uint32_t stride)
980 {
981 struct brw_bufmgr *bufmgr = bo->bufmgr;
982 struct drm_i915_gem_set_tiling set_tiling;
983 int ret;
984
985 if (bo->global_name == 0 &&
986 tiling_mode == bo->tiling_mode && stride == bo->stride)
987 return 0;
988
989 memset(&set_tiling, 0, sizeof(set_tiling));
990 do {
991 /* set_tiling is slightly broken and overwrites the
992 * input on the error path, so we have to open code
993 * drmIoctl.
994 */
995 set_tiling.handle = bo->gem_handle;
996 set_tiling.tiling_mode = tiling_mode;
997 set_tiling.stride = stride;
998
999 ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
1000 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
1001 if (ret == -1)
1002 return -errno;
1003
1004 bo->tiling_mode = set_tiling.tiling_mode;
1005 bo->swizzle_mode = set_tiling.swizzle_mode;
1006 bo->stride = set_tiling.stride;
1007 return 0;
1008 }
1009
1010 int
1011 brw_bo_get_tiling(struct brw_bo *bo, uint32_t *tiling_mode,
1012 uint32_t *swizzle_mode)
1013 {
1014 *tiling_mode = bo->tiling_mode;
1015 *swizzle_mode = bo->swizzle_mode;
1016 return 0;
1017 }
1018
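/* Import a dma-buf file descriptor as a brw_bo (PRIME import).  If the
 * kernel object is already known to this bufmgr, the existing BO is returned
 * with an extra reference rather than creating a second wrapper.
 */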
1019 struct brw_bo *
1020 brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr, int prime_fd,
1021 int size)
1022 {
1023 int ret;
1024 uint32_t handle;
1025 struct brw_bo *bo;
1026 struct drm_i915_gem_get_tiling get_tiling;
1027
1028 pthread_mutex_lock(&bufmgr->lock);
1029 ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
1030 if (ret) {
1031 DBG("create_from_prime: failed to obtain handle from fd: %s\n",
1032 strerror(errno));
1033 pthread_mutex_unlock(&bufmgr->lock);
1034 return NULL;
1035 }
1036
1037 /*
1038 * See if the kernel has already returned this buffer to us. Just as
1039 * for named buffers, we must not create two BOs pointing at the same
1040 * kernel object
1041 */
1042 bo = hash_find_bo(bufmgr->handle_table, handle);
1043 if (bo) {
1044 brw_bo_reference(bo);
1045 goto out;
1046 }
1047
1048 bo = calloc(1, sizeof(*bo));
1049 if (!bo)
1050 goto out;
1051
1052 p_atomic_set(&bo->refcount, 1);
1053
1054 /* Determine size of bo. The fd-to-handle ioctl really should
1055 * return the size, but it doesn't. If we have kernel 3.12 or
1056 * later, we can lseek on the prime fd to get the size. Older
1057 * kernels will just fail, in which case we fall back to the
1058 * provided (estimated or guessed) size. */
1059 ret = lseek(prime_fd, 0, SEEK_END);
1060 if (ret != -1)
1061 bo->size = ret;
1062 else
1063 bo->size = size;
1064
1065 bo->bufmgr = bufmgr;
1066
1067 bo->gem_handle = handle;
1068 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
1069
1070 bo->name = "prime";
1071 bo->reusable = false;
1072
1073 memclear(get_tiling);
1074 get_tiling.handle = bo->gem_handle;
1075 if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
1076 goto err;
1077
1078 bo->tiling_mode = get_tiling.tiling_mode;
1079 bo->swizzle_mode = get_tiling.swizzle_mode;
1080 /* XXX stride is unknown */
1081
1082 out:
1083 pthread_mutex_unlock(&bufmgr->lock);
1084 return bo;
1085
1086 err:
1087 bo_free(bo);
1088 pthread_mutex_unlock(&bufmgr->lock);
1089 return NULL;
1090 }
1091
1092 int
1093 brw_bo_gem_export_to_prime(struct brw_bo *bo, int *prime_fd)
1094 {
1095 struct brw_bufmgr *bufmgr = bo->bufmgr;
1096
1097 if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
1098 DRM_CLOEXEC, prime_fd) != 0)
1099 return -errno;
1100
1101 bo->reusable = false;
1102
1103 return 0;
1104 }
1105
1106 int
1107 brw_bo_flink(struct brw_bo *bo, uint32_t *name)
1108 {
1109 struct brw_bufmgr *bufmgr = bo->bufmgr;
1110
1111 if (!bo->global_name) {
1112 struct drm_gem_flink flink;
1113
1114 memclear(flink);
1115 flink.handle = bo->gem_handle;
1116 if (drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
1117 return -errno;
1118
1119 pthread_mutex_lock(&bufmgr->lock);
1120 if (!bo->global_name) {
1121 bo->global_name = flink.name;
1122 bo->reusable = false;
1123
1124 _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
1125 }
1126 pthread_mutex_unlock(&bufmgr->lock);
1127 }
1128
1129 *name = bo->global_name;
1130 return 0;
1131 }
1132
1133 /**
1134 * Enables unlimited caching of buffer objects for reuse.
1135 *
1136 * This is potentially very memory expensive, as the cache at each bucket
1137 * size is only bounded by how many buffers of that size we've managed to have
1138 * in flight at once.
1139 */
1140 void
1141 brw_bufmgr_enable_reuse(struct brw_bufmgr *bufmgr)
1142 {
1143 bufmgr->bo_reuse = true;
1144 }
1145
1146 static void
1147 add_bucket(struct brw_bufmgr *bufmgr, int size)
1148 {
1149 unsigned int i = bufmgr->num_buckets;
1150
1151 assert(i < ARRAY_SIZE(bufmgr->cache_bucket));
1152
1153 list_inithead(&bufmgr->cache_bucket[i].head);
1154 bufmgr->cache_bucket[i].size = size;
1155 bufmgr->num_buckets++;
1156 }
1157
1158 static void
1159 init_cache_buckets(struct brw_bufmgr *bufmgr)
1160 {
1161 unsigned long size, cache_max_size = 64 * 1024 * 1024;
1162
1163 /* OK, so power of two buckets was too wasteful of memory.
1164 * Give 3 other sizes between each power of two, to hopefully
1165 * cover things accurately enough. (The alternative is
1166 * probably to just go for exact matching of sizes, and assume
1167 * that for things like composited window resize the tiled
1168 * width/height alignment and rounding of sizes to pages will
1169 * get us useful cache hit rates anyway)
1170 */
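/* For illustration, the sizes produced by the calls below are: 4 KB, 8 KB,
 * 12 KB, then 16 KB, 20 KB, 24 KB, 28 KB, 32 KB, 40 KB, 48 KB, 56 KB, 64 KB,
 * and so on, doubling with three intermediate steps up to 64 MB, for a total
 * of 3 + 13 * 4 = 55 buckets, which fits the 14 * 4 entry cache_bucket array.
 */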
1171 add_bucket(bufmgr, 4096);
1172 add_bucket(bufmgr, 4096 * 2);
1173 add_bucket(bufmgr, 4096 * 3);
1174
1175 /* Initialize the linked lists for BO reuse cache. */
1176 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
1177 add_bucket(bufmgr, size);
1178
1179 add_bucket(bufmgr, size + size * 1 / 4);
1180 add_bucket(bufmgr, size + size * 2 / 4);
1181 add_bucket(bufmgr, size + size * 3 / 4);
1182 }
1183 }
1184
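/* Create a new hardware context on this fd; returns its id, or 0 on failure.
 * Each GL context gets its own hardware context so GPU state set by one
 * context is not clobbered by others between batches.
 */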
1185 uint32_t
1186 brw_create_hw_context(struct brw_bufmgr *bufmgr)
1187 {
1188 struct drm_i915_gem_context_create create;
1189 int ret;
1190
1191 memclear(create);
1192 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
1193 if (ret != 0) {
1194 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", strerror(errno));
1195 return 0;
1196 }
1197
1198 return create.ctx_id;
1199 }
1200
1201 void
1202 brw_destroy_hw_context(struct brw_bufmgr *bufmgr, uint32_t ctx_id)
1203 {
1204 struct drm_i915_gem_context_destroy d = {.ctx_id = ctx_id };
1205
1206 if (ctx_id != 0 &&
1207 drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
1208 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
1209 strerror(errno));
1210 }
1211 }
1212
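/* Read a 64-bit value from a whitelisted hardware register (e.g. the
 * timestamp counter) through the kernel's REG_READ ioctl.
 */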
1213 int
1214 brw_reg_read(struct brw_bufmgr *bufmgr, uint32_t offset, uint64_t *result)
1215 {
1216 struct drm_i915_reg_read reg_read;
1217 int ret;
1218
1219 memclear(reg_read);
1220 reg_read.offset = offset;
1221
1222 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
1223
1224 *result = reg_read.val;
1225 return ret;
1226 }
1227
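/* Lightweight mapping helpers: return the cached pointer if a mapping of the
 * requested type already exists, otherwise create a GTT, cached-CPU or
 * write-combining CPU mapping.  Unlike brw_bo_map()/brw_bo_map_gtt() these
 * do not issue a set_domain, so callers must handle synchronization
 * themselves.
 */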
1228 void *
1229 brw_bo_map__gtt(struct brw_bo *bo)
1230 {
1231 struct brw_bufmgr *bufmgr = bo->bufmgr;
1232
1233 if (bo->gtt_virtual)
1234 return bo->gtt_virtual;
1235
1236 pthread_mutex_lock(&bufmgr->lock);
1237 if (bo->gtt_virtual == NULL) {
1238 struct drm_i915_gem_mmap_gtt mmap_arg;
1239 void *ptr;
1240
1241 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1242 bo->gem_handle, bo->name, bo->map_count);
1243
1244 memclear(mmap_arg);
1245 mmap_arg.handle = bo->gem_handle;
1246
1247 /* Get the fake offset back... */
1248 ptr = MAP_FAILED;
1249 if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg) == 0) {
1250 /* and mmap it */
1251 ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
1252 MAP_SHARED, bufmgr->fd, mmap_arg.offset);
1253 }
1254 if (ptr == MAP_FAILED) {
1255 --bo->map_count;
1256 ptr = NULL;
1257 }
1258
1259 bo->gtt_virtual = ptr;
1260 }
1261 pthread_mutex_unlock(&bufmgr->lock);
1262
1263 return bo->gtt_virtual;
1264 }
1265
1266 void *
1267 brw_bo_map__cpu(struct brw_bo *bo)
1268 {
1269 struct brw_bufmgr *bufmgr = bo->bufmgr;
1270
1271 if (bo->mem_virtual)
1272 return bo->mem_virtual;
1273
1274 pthread_mutex_lock(&bufmgr->lock);
1275 if (!bo->mem_virtual) {
1276 struct drm_i915_gem_mmap mmap_arg;
1277
1278 DBG("bo_map: %d (%s), map_count=%d\n",
1279 bo->gem_handle, bo->name, bo->map_count);
1280
1281 memclear(mmap_arg);
1282 mmap_arg.handle = bo->gem_handle;
1283 mmap_arg.size = bo->size;
1284 if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
1285 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1286 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1287 } else {
1288 bo->map_count++;
1289 VG(VALGRIND_MALLOCLIKE_BLOCK
1290 (mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1291 bo->mem_virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
1292 }
1293 }
1294 pthread_mutex_unlock(&bufmgr->lock);
1295
1296 return bo->mem_virtual;
1297 }
1298
1299 void *
1300 brw_bo_map__wc(struct brw_bo *bo)
1301 {
1302 struct brw_bufmgr *bufmgr = bo->bufmgr;
1303
1304 if (bo->wc_virtual)
1305 return bo->wc_virtual;
1306
1307 pthread_mutex_lock(&bufmgr->lock);
1308 if (!bo->wc_virtual) {
1309 struct drm_i915_gem_mmap mmap_arg;
1310
1311 DBG("bo_map: %d (%s), map_count=%d\n",
1312 bo->gem_handle, bo->name, bo->map_count);
1313
1314 memclear(mmap_arg);
1315 mmap_arg.handle = bo->gem_handle;
1316 mmap_arg.size = bo->size;
1317 mmap_arg.flags = I915_MMAP_WC;
1318 if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
1319 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1320 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1321 } else {
1322 bo->map_count++;
1323 VG(VALGRIND_MALLOCLIKE_BLOCK
1324 (mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1325 bo->wc_virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
1326 }
1327 }
1328 pthread_mutex_unlock(&bufmgr->lock);
1329
1330 return bo->wc_virtual;
1331 }
1332
1333 /**
1334 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
1335 * and manage buffer objects.
1336 *
1337 * \param fd File descriptor of the opened DRM device.
1338 */
1339 struct brw_bufmgr *
1340 brw_bufmgr_init(struct gen_device_info *devinfo, int fd, int batch_size)
1341 {
1342 struct brw_bufmgr *bufmgr;
1343
1344 bufmgr = calloc(1, sizeof(*bufmgr));
1345 if (bufmgr == NULL)
1346 return NULL;
1347
1348 /* Handles to buffer objects belong to the device fd and are not
1349 * reference counted by the kernel. If the same fd is used by
1350 * multiple parties (threads sharing the same screen bufmgr, or
1351 * even worse the same device fd passed to multiple libraries)
1352 * ownership of those handles is shared by those independent parties.
1353 *
1354 * Don't do this! Ensure that each library/bufmgr has its own device
1355 * fd so that its namespace does not clash with another.
1356 */
1357 bufmgr->fd = fd;
1358
1359 if (pthread_mutex_init(&bufmgr->lock, NULL) != 0) {
1360 free(bufmgr);
1361 return NULL;
1362 }
1363
1364 bufmgr->has_llc = devinfo->has_llc;
1365
1366 init_cache_buckets(bufmgr);
1367
1368 bufmgr->name_table =
1369 _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
1370 bufmgr->handle_table =
1371 _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
1372
1373 return bufmgr;
1374 }