i965: Mark shader programs for capture in the error state.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_bufmgr.c
1 /**************************************************************************
2 *
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007-2012 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
26 * of the Software.
27 *
28 *
29 **************************************************************************/
30 /*
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
35 */
36
37 #ifdef HAVE_CONFIG_H
38 #include "config.h"
39 #endif
40
41 #include <xf86drm.h>
42 #include <util/u_atomic.h>
43 #include <fcntl.h>
44 #include <stdio.h>
45 #include <stdlib.h>
46 #include <string.h>
47 #include <unistd.h>
48 #include <assert.h>
49 #include <pthread.h>
50 #include <sys/ioctl.h>
51 #include <sys/stat.h>
52 #include <sys/types.h>
53 #include <stdbool.h>
54
55 #include "errno.h"
56 #ifndef ETIME
57 #define ETIME ETIMEDOUT
58 #endif
59 #include "common/gen_debug.h"
60 #include "common/gen_device_info.h"
61 #include "libdrm_macros.h"
62 #include "main/macros.h"
63 #include "util/macros.h"
64 #include "util/hash_table.h"
65 #include "util/list.h"
66 #include "brw_bufmgr.h"
67 #include "brw_context.h"
68 #include "string.h"
69
70 #include "i915_drm.h"
71
72 #ifdef HAVE_VALGRIND
73 #include <valgrind.h>
74 #include <memcheck.h>
75 #define VG(x) x
76 #else
77 #define VG(x)
78 #endif
79
80 #define memclear(s) memset(&s, 0, sizeof(s))
81
82 #define FILE_DEBUG_FLAG DEBUG_BUFMGR
83
84 static inline int
85 atomic_add_unless(int *v, int add, int unless)
86 {
87 int c, old;
88 c = p_atomic_read(v);
89 while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
90 c = old;
91 return c == unless;
92 }
93
94 struct bo_cache_bucket {
95 struct list_head head;
96 uint64_t size;
97 };
98
99 struct brw_bufmgr {
100 int fd;
101
102 pthread_mutex_t lock;
103
104 /** Array of lists of cached gem objects of power-of-two sizes */
105 struct bo_cache_bucket cache_bucket[14 * 4];
106 int num_buckets;
107 time_t time;
108
109 struct hash_table *name_table;
110 struct hash_table *handle_table;
111
112 bool has_llc:1;
113 bool bo_reuse:1;
114 };
115
116 static int bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode,
117 uint32_t stride);
118
119 static void bo_free(struct brw_bo *bo);
120
121 static uint32_t
122 key_hash_uint(const void *key)
123 {
124 return _mesa_hash_data(key, 4);
125 }
126
127 static bool
128 key_uint_equal(const void *a, const void *b)
129 {
130 return *((unsigned *) a) == *((unsigned *) b);
131 }
132
133 static struct brw_bo *
134 hash_find_bo(struct hash_table *ht, unsigned int key)
135 {
136 struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
137 return entry ? (struct brw_bo *) entry->data : NULL;
138 }
139
140 static uint64_t
141 bo_tile_size(struct brw_bufmgr *bufmgr, uint64_t size, uint32_t tiling)
142 {
143 if (tiling == I915_TILING_NONE)
144 return size;
145
146 /* 965+ just need multiples of page size for tiling */
147 return ALIGN(size, 4096);
148 }
149
150 /*
151 * Round a given pitch up to the minimum required for X tiling on a
152 * given chip. We use 512 as the minimum to allow for a later tiling
153 * change.
154 */
155 static uint32_t
156 bo_tile_pitch(struct brw_bufmgr *bufmgr, uint32_t pitch, uint32_t tiling)
157 {
158 unsigned long tile_width;
159
160 /* If untiled, then just align it so that we can do rendering
161 * to it with the 3D engine.
162 */
163 if (tiling == I915_TILING_NONE)
164 return ALIGN(pitch, 64);
165
166 if (tiling == I915_TILING_X)
167 tile_width = 512;
168 else
169 tile_width = 128;
170
171 /* 965 is flexible */
172 return ALIGN(pitch, tile_width);
173 }
174
175 static struct bo_cache_bucket *
176 bucket_for_size(struct brw_bufmgr *bufmgr, uint64_t size)
177 {
178 int i;
179
180 for (i = 0; i < bufmgr->num_buckets; i++) {
181 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
182 if (bucket->size >= size) {
183 return bucket;
184 }
185 }
186
187 return NULL;
188 }
189
190 inline void
191 brw_bo_reference(struct brw_bo *bo)
192 {
193 p_atomic_inc(&bo->refcount);
194 }
195
196 int
197 brw_bo_busy(struct brw_bo *bo)
198 {
199 struct brw_bufmgr *bufmgr = bo->bufmgr;
200 struct drm_i915_gem_busy busy;
201 int ret;
202
203 memclear(busy);
204 busy.handle = bo->gem_handle;
205
206 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
207 if (ret == 0) {
208 bo->idle = !busy.busy;
209 return busy.busy;
210 }
211 return false;
212 }
213
214 int
215 brw_bo_madvise(struct brw_bo *bo, int state)
216 {
217 struct drm_i915_gem_madvise madv;
218
219 memclear(madv);
220 madv.handle = bo->gem_handle;
221 madv.madv = state;
222 madv.retained = 1;
223 drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
224
225 return madv.retained;
226 }
227
228 /* drop the oldest entries that have been purged by the kernel */
229 static void
230 brw_bo_cache_purge_bucket(struct brw_bufmgr *bufmgr,
231 struct bo_cache_bucket *bucket)
232 {
233 list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
234 if (brw_bo_madvise(bo, I915_MADV_DONTNEED))
235 break;
236
237 list_del(&bo->head);
238 bo_free(bo);
239 }
240 }
241
242 static struct brw_bo *
243 bo_alloc_internal(struct brw_bufmgr *bufmgr,
244 const char *name,
245 uint64_t size,
246 unsigned flags,
247 uint32_t tiling_mode,
248 uint32_t stride, uint64_t alignment)
249 {
250 struct brw_bo *bo;
251 unsigned int page_size = getpagesize();
252 int ret;
253 struct bo_cache_bucket *bucket;
254 bool alloc_from_cache;
255 uint64_t bo_size;
256 bool for_render = false;
257
258 if (flags & BO_ALLOC_FOR_RENDER)
259 for_render = true;
260
261 /* Round the allocated size up to the nearest cache bucket size (if any). */
262 bucket = bucket_for_size(bufmgr, size);
263
264 /* If we don't have caching at this size, don't actually round the
265 * allocation up.
266 */
267 if (bucket == NULL) {
268 bo_size = size;
269 if (bo_size < page_size)
270 bo_size = page_size;
271 } else {
272 bo_size = bucket->size;
273 }
274
275 pthread_mutex_lock(&bufmgr->lock);
276 /* Get a buffer out of the cache if available */
277 retry:
278 alloc_from_cache = false;
279 if (bucket != NULL && !list_empty(&bucket->head)) {
280 if (for_render) {
281 /* Allocate new render-target BOs from the tail (MRU)
282 * of the list, as it will likely be hot in the GPU
283 * cache and in the aperture for us.
284 */
285 bo = LIST_ENTRY(struct brw_bo, bucket->head.prev, head);
286 list_del(&bo->head);
287 alloc_from_cache = true;
288 bo->align = alignment;
289 } else {
290 assert(alignment == 0);
291 /* For non-render-target BOs (where we're probably
292 * going to map it first thing in order to fill it
293 * with data), check if the last BO in the cache is
294 * unbusy, and only reuse in that case. Otherwise,
295 * allocating a new buffer is probably faster than
296 * waiting for the GPU to finish.
297 */
298 bo = LIST_ENTRY(struct brw_bo, bucket->head.next, head);
299 if (!brw_bo_busy(bo)) {
300 alloc_from_cache = true;
301 list_del(&bo->head);
302 }
303 }
304
305 if (alloc_from_cache) {
306 if (!brw_bo_madvise(bo, I915_MADV_WILLNEED)) {
307 bo_free(bo);
308 brw_bo_cache_purge_bucket(bufmgr, bucket);
309 goto retry;
310 }
311
312 if (bo_set_tiling_internal(bo, tiling_mode, stride)) {
313 bo_free(bo);
314 goto retry;
315 }
316 }
317 }
318
319 if (!alloc_from_cache) {
320 struct drm_i915_gem_create create;
321
322 bo = calloc(1, sizeof(*bo));
323 if (!bo)
324 goto err;
325
326 bo->size = bo_size;
327
328 memclear(create);
329 create.size = bo_size;
330
331 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
332 if (ret != 0) {
333 free(bo);
334 goto err;
335 }
336
337 bo->gem_handle = create.handle;
338 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
339
340 bo->bufmgr = bufmgr;
341 bo->align = alignment;
342
343 bo->tiling_mode = I915_TILING_NONE;
344 bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
345 bo->stride = 0;
346
347 if (bo_set_tiling_internal(bo, tiling_mode, stride))
348 goto err_free;
349 }
350
351 bo->name = name;
352 p_atomic_set(&bo->refcount, 1);
353 bo->reusable = true;
354
355 pthread_mutex_unlock(&bufmgr->lock);
356
357 DBG("bo_create: buf %d (%s) %" PRIu64 "b\n", bo->gem_handle, bo->name, size);
358
359 return bo;
360
361 err_free:
362 bo_free(bo);
363 err:
364 pthread_mutex_unlock(&bufmgr->lock);
365 return NULL;
366 }
367
368 struct brw_bo *
369 brw_bo_alloc(struct brw_bufmgr *bufmgr,
370 const char *name, uint64_t size, uint64_t alignment)
371 {
372 return bo_alloc_internal(bufmgr, name, size, 0, I915_TILING_NONE, 0, 0);
373 }
374
375 struct brw_bo *
376 brw_bo_alloc_tiled(struct brw_bufmgr *bufmgr, const char *name,
377 int x, int y, int cpp, uint32_t tiling,
378 uint32_t *pitch, unsigned flags)
379 {
380 uint64_t size;
381 uint32_t stride;
382 unsigned long aligned_y, height_alignment;
383
384 /* If we're tiled, our allocations are in 8 or 32-row blocks,
385 * so failure to align our height means that we won't allocate
386 * enough pages.
387 *
388 * If we're untiled, we still have to align to 2 rows high
389 * because the data port accesses 2x2 blocks even if the
390 * bottom row isn't to be rendered, so failure to align means
391 * we could walk off the end of the GTT and fault. This is
392 * documented on 965, and may be the case on older chipsets
393 * too so we try to be careful.
394 */
395 aligned_y = y;
396 height_alignment = 2;
397
398 if (tiling == I915_TILING_X)
399 height_alignment = 8;
400 else if (tiling == I915_TILING_Y)
401 height_alignment = 32;
402 aligned_y = ALIGN(y, height_alignment);
403
404 stride = x * cpp;
405 stride = bo_tile_pitch(bufmgr, stride, tiling);
406 size = stride * aligned_y;
407 size = bo_tile_size(bufmgr, size, tiling);
408 *pitch = stride;
409
410 if (tiling == I915_TILING_NONE)
411 stride = 0;
412
413 return bo_alloc_internal(bufmgr, name, size, flags, tiling, stride, 0);
414 }
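/* Illustrative sketch: a worked example of the pitch/size computation above,
 * under assumed parameters (a 256x128, 4-byte-per-pixel, X-tiled surface; the
 * "miptree" label and the BO_ALLOC_FOR_RENDER flag are plausible values for
 * the example, not taken from a caller in this file):
 *
 *    uint32_t pitch;
 *    struct brw_bo *bo =
 *       brw_bo_alloc_tiled(bufmgr, "miptree", 256, 128, 4, I915_TILING_X,
 *                          &pitch, BO_ALLOC_FOR_RENDER);
 *
 * Height is aligned to 8 rows for X tiling (128 already is), and the
 * 1024-byte row pitch is already a multiple of the 512-byte X-tile width,
 * so pitch = 1024 and the object size is 1024 * 128 = 128 KiB, which also
 * happens to be an exact cache-bucket size below.
 */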
415
416 /**
417 * Returns a brw_bo wrapping the given buffer object handle.
418 *
419 * This can be used when one application needs to pass a buffer object
420 * to another.
421 */
422 struct brw_bo *
423 brw_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
424 const char *name, unsigned int handle)
425 {
426 struct brw_bo *bo;
427 int ret;
428 struct drm_gem_open open_arg;
429 struct drm_i915_gem_get_tiling get_tiling;
430
431 /* At the moment most applications only have a few named BOs.
432 * For instance, in a DRI client only the render buffers passed
433 * between X and the client are named, and since X returns
434 * alternating names for the front/back buffer, the name table
435 * stays small and lookups stay cheap.
436 */
437 pthread_mutex_lock(&bufmgr->lock);
438 bo = hash_find_bo(bufmgr->name_table, handle);
439 if (bo) {
440 brw_bo_reference(bo);
441 goto out;
442 }
443
444 memclear(open_arg);
445 open_arg.name = handle;
446 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
447 if (ret != 0) {
448 DBG("Couldn't reference %s handle 0x%08x: %s\n",
449 name, handle, strerror(errno));
450 bo = NULL;
451 goto out;
452 }
453 /* Now see if someone has used a prime handle to get this
454 * object from the kernel before by looking through the list
455 * again for a matching gem_handle
456 */
457 bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
458 if (bo) {
459 brw_bo_reference(bo);
460 goto out;
461 }
462
463 bo = calloc(1, sizeof(*bo));
464 if (!bo)
465 goto out;
466
467 p_atomic_set(&bo->refcount, 1);
468
469 bo->size = open_arg.size;
470 bo->offset64 = 0;
471 bo->virtual = NULL;
472 bo->bufmgr = bufmgr;
473 bo->gem_handle = open_arg.handle;
474 bo->name = name;
475 bo->global_name = handle;
476 bo->reusable = false;
477
478 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
479 _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
480
481 memclear(get_tiling);
482 get_tiling.handle = bo->gem_handle;
483 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
484 if (ret != 0)
485 goto err_unref;
486
487 bo->tiling_mode = get_tiling.tiling_mode;
488 bo->swizzle_mode = get_tiling.swizzle_mode;
489 /* XXX stride is unknown */
490 DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);
491
492 out:
493 pthread_mutex_unlock(&bufmgr->lock);
494 return bo;
495
496 err_unref:
497 bo_free(bo);
498 pthread_mutex_unlock(&bufmgr->lock);
499 return NULL;
500 }
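/* Illustrative sketch: importing a flink name, e.g. one handed to a DRI
 * client by the X server (the "buffer->name" source and the label string
 * are assumptions for the example, not from this file):
 *
 *    struct brw_bo *bo =
 *       brw_bo_gem_create_from_name(bufmgr, "dri2 buffer", buffer->name);
 *    if (bo == NULL)
 *       return;   // stale name, or the GEM open failed
 *
 * The returned BO is marked non-reusable, so it is freed rather than cached
 * when its last reference goes away.
 */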
501
502 static void
503 bo_free(struct brw_bo *bo)
504 {
505 struct brw_bufmgr *bufmgr = bo->bufmgr;
506 struct drm_gem_close close;
507 struct hash_entry *entry;
508 int ret;
509
510 if (bo->mem_virtual) {
511 VG(VALGRIND_FREELIKE_BLOCK(bo->mem_virtual, 0));
512 drm_munmap(bo->mem_virtual, bo->size);
513 }
514 if (bo->wc_virtual) {
515 VG(VALGRIND_FREELIKE_BLOCK(bo->wc_virtual, 0));
516 drm_munmap(bo->wc_virtual, bo->size);
517 }
518 if (bo->gtt_virtual) {
519 drm_munmap(bo->gtt_virtual, bo->size);
520 }
521
522 if (bo->global_name) {
523 entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
524 _mesa_hash_table_remove(bufmgr->name_table, entry);
525 }
526 entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
527 _mesa_hash_table_remove(bufmgr->handle_table, entry);
528
529 /* Close this object */
530 memclear(close);
531 close.handle = bo->gem_handle;
532 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
533 if (ret != 0) {
534 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
535 bo->gem_handle, bo->name, strerror(errno));
536 }
537 free(bo);
538 }
539
540 static void
541 bo_mark_mmaps_incoherent(struct brw_bo *bo)
542 {
543 #ifdef HAVE_VALGRIND
544 if (bo->mem_virtual)
545 VALGRIND_MAKE_MEM_NOACCESS(bo->mem_virtual, bo->size);
546
547 if (bo->wc_virtual)
548 VALGRIND_MAKE_MEM_NOACCESS(bo->wc_virtual, bo->size);
549
550 if (bo->gtt_virtual)
551 VALGRIND_MAKE_MEM_NOACCESS(bo->gtt_virtual, bo->size);
552 #endif
553 }
554
555 /** Frees all cached buffers significantly older than @time. */
556 static void
557 cleanup_bo_cache(struct brw_bufmgr *bufmgr, time_t time)
558 {
559 int i;
560
561 if (bufmgr->time == time)
562 return;
563
564 for (i = 0; i < bufmgr->num_buckets; i++) {
565 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
566
567 list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
568 if (time - bo->free_time <= 1)
569 break;
570
571 list_del(&bo->head);
572
573 bo_free(bo);
574 }
575 }
576
577 bufmgr->time = time;
578 }
579
580 static void
581 bo_unreference_final(struct brw_bo *bo, time_t time)
582 {
583 struct brw_bufmgr *bufmgr = bo->bufmgr;
584 struct bo_cache_bucket *bucket;
585
586 DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);
587
588 /* Clear any left-over mappings */
589 if (bo->map_count) {
590 DBG("bo freed with non-zero map-count %d\n", bo->map_count);
591 bo->map_count = 0;
592 bo_mark_mmaps_incoherent(bo);
593 }
594
595 bucket = bucket_for_size(bufmgr, bo->size);
596 /* Put the buffer into our internal cache for reuse if we can. */
597 if (bufmgr->bo_reuse && bo->reusable && bucket != NULL &&
598 brw_bo_madvise(bo, I915_MADV_DONTNEED)) {
599 bo->free_time = time;
600
601 bo->name = NULL;
602 bo->kflags = 0;
603
604 list_addtail(&bo->head, &bucket->head);
605 } else {
606 bo_free(bo);
607 }
608 }
609
610 void
611 brw_bo_unreference(struct brw_bo *bo)
612 {
613 if (bo == NULL)
614 return;
615
616 assert(p_atomic_read(&bo->refcount) > 0);
617
618 if (atomic_add_unless(&bo->refcount, -1, 1)) {
619 struct brw_bufmgr *bufmgr = bo->bufmgr;
620 struct timespec time;
621
622 clock_gettime(CLOCK_MONOTONIC, &time);
623
624 pthread_mutex_lock(&bufmgr->lock);
625
626 if (p_atomic_dec_zero(&bo->refcount)) {
627 bo_unreference_final(bo, time.tv_sec);
628 cleanup_bo_cache(bufmgr, time.tv_sec);
629 }
630
631 pthread_mutex_unlock(&bufmgr->lock);
632 }
633 }
634
635 static void
636 set_domain(struct brw_context *brw, const char *action,
637 struct brw_bo *bo, uint32_t read_domains, uint32_t write_domain)
638 {
639 struct drm_i915_gem_set_domain sd = {
640 .handle = bo->gem_handle,
641 .read_domains = read_domains,
642 .write_domain = write_domain,
643 };
644
645 double elapsed = unlikely(brw && brw->perf_debug) ? -get_time() : 0.0;
646
647 if (drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0) {
648 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s.\n",
649 __FILE__, __LINE__, bo->gem_handle, read_domains, write_domain,
650 strerror(errno));
651 }
652
653 if (unlikely(brw && brw->perf_debug)) {
654 elapsed += get_time();
655 if (elapsed > 1e-5) /* 0.01ms */
656 perf_debug("%s a busy \"%s\" BO stalled and took %.03f ms.\n",
657 action, bo->name, elapsed * 1000);
658 }
659 }
660
661 int
662 brw_bo_map(struct brw_context *brw, struct brw_bo *bo, int write_enable)
663 {
664 struct brw_bufmgr *bufmgr = bo->bufmgr;
665 int ret;
666
667 pthread_mutex_lock(&bufmgr->lock);
668
669 if (!bo->mem_virtual) {
670 struct drm_i915_gem_mmap mmap_arg;
671
672 DBG("bo_map: %d (%s), map_count=%d\n",
673 bo->gem_handle, bo->name, bo->map_count);
674
675 memclear(mmap_arg);
676 mmap_arg.handle = bo->gem_handle;
677 mmap_arg.size = bo->size;
678 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
679 if (ret != 0) {
680 ret = -errno;
681 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
682 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
683 pthread_mutex_unlock(&bufmgr->lock);
684 return ret;
685 }
686 bo->map_count++;
687 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
688 bo->mem_virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
689 }
690 DBG("bo_map: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->mem_virtual);
691 bo->virtual = bo->mem_virtual;
692
693 set_domain(brw, "CPU mapping", bo, I915_GEM_DOMAIN_CPU,
694 write_enable ? I915_GEM_DOMAIN_CPU : 0);
695
696 bo_mark_mmaps_incoherent(bo);
697 VG(VALGRIND_MAKE_MEM_DEFINED(bo->mem_virtual, bo->size));
698 pthread_mutex_unlock(&bufmgr->lock);
699
700 return 0;
701 }
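/* Illustrative sketch: the usual CPU map/write/unmap cycle built on the
 * function above (the data and size variables are assumed caller state):
 *
 *    if (brw_bo_map(brw, bo, true) == 0) {
 *       memcpy(bo->virtual, data, size);
 *       brw_bo_unmap(bo);
 *    }
 *
 * The set_domain() call inside brw_bo_map() is what stalls on any pending
 * GPU work before the caller is allowed to touch bo->virtual.
 */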
702
703 static int
704 map_gtt(struct brw_bo *bo)
705 {
706 struct brw_bufmgr *bufmgr = bo->bufmgr;
707 int ret;
708
709 /* Get a mapping of the buffer if we haven't before. */
710 if (bo->gtt_virtual == NULL) {
711 struct drm_i915_gem_mmap_gtt mmap_arg;
712
713 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
714 bo->gem_handle, bo->name, bo->map_count);
715
716 memclear(mmap_arg);
717 mmap_arg.handle = bo->gem_handle;
718
719 /* Get the fake offset back... */
720 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
721 if (ret != 0) {
722 ret = -errno;
723 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
724 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
725 return ret;
726 }
727
728 /* and mmap it */
729 bo->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
730 MAP_SHARED, bufmgr->fd, mmap_arg.offset);
731 if (bo->gtt_virtual == MAP_FAILED) {
732 bo->gtt_virtual = NULL;
733 ret = -errno;
734 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
735 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
736 return ret;
737 }
738 }
739
740 bo->map_count++;
741 bo->virtual = bo->gtt_virtual;
742
743 DBG("bo_map_gtt: %d (%s) -> %p\n", bo->gem_handle, bo->name,
744 bo->gtt_virtual);
745
746 return 0;
747 }
748
749 int
750 brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo)
751 {
752 struct brw_bufmgr *bufmgr = bo->bufmgr;
753 int ret;
754
755 pthread_mutex_lock(&bufmgr->lock);
756
757 ret = map_gtt(bo);
758 if (ret) {
759 pthread_mutex_unlock(&bufmgr->lock);
760 return ret;
761 }
762
763 /* Now move it to the GTT domain so that the GPU and CPU
764 * caches are flushed and the GPU isn't actively using the
765 * buffer.
766 *
767 * The pagefault handler does this domain change for us when
768 * it has unbound the BO from the GTT, but it's up to us to
769 * tell it when we're about to use things if we had done
770 * rendering and it still happens to be bound to the GTT.
771 */
772 set_domain(brw, "GTT mapping", bo,
773 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
774
775 bo_mark_mmaps_incoherent(bo);
776 VG(VALGRIND_MAKE_MEM_DEFINED(bo->gtt_virtual, bo->size));
777 pthread_mutex_unlock(&bufmgr->lock);
778
779 return 0;
780 }
781
782 /**
783 * Performs a mapping of the buffer object like the normal GTT
784 * mapping, but avoids waiting for the GPU to be done reading from or
785 * rendering to the buffer.
786 *
787 * This is used in the implementation of GL_ARB_map_buffer_range: The
788 * user asks to create a buffer, then does a mapping, fills some
789 * space, runs a drawing command, then asks to map it again without
790 * synchronizing because it guarantees that it won't write over the
791 * data that the GPU is busy using (or, more specifically, that if it
792 * does write over the data, it acknowledges that rendering is
793 * undefined).
794 */
795
796 int
797 brw_bo_map_unsynchronized(struct brw_context *brw, struct brw_bo *bo)
798 {
799 struct brw_bufmgr *bufmgr = bo->bufmgr;
800 int ret;
801
802 /* If the CPU cache isn't coherent with the GTT, then use a
803 * regular synchronized mapping. The problem is that we don't
804 * track where the buffer was last used on the CPU side in
805 * terms of brw_bo_map vs brw_bo_map_gtt, so
806 * we would potentially corrupt the buffer even when the user
807 * does reasonable things.
808 */
809 if (!bufmgr->has_llc)
810 return brw_bo_map_gtt(brw, bo);
811
812 pthread_mutex_lock(&bufmgr->lock);
813
814 ret = map_gtt(bo);
815 if (ret == 0) {
816 bo_mark_mmaps_incoherent(bo);
817 VG(VALGRIND_MAKE_MEM_DEFINED(bo->gtt_virtual, bo->size));
818 }
819
820 pthread_mutex_unlock(&bufmgr->lock);
821
822 return ret;
823 }
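/* Illustrative sketch: how a GL_MAP_UNSYNCHRONIZED_BIT-style path might use
 * this (offset, length and data are assumed caller state, not from this
 * file):
 *
 *    if (brw_bo_map_unsynchronized(brw, bo) == 0) {
 *       memcpy((char *) bo->virtual + offset, data, length);
 *       brw_bo_unmap(bo);
 *    }
 *
 * The write lands through the GTT without waiting for the GPU, so the caller
 * is responsible for never touching ranges the GPU may still be reading.
 */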
824
825 int
826 brw_bo_unmap(struct brw_bo *bo)
827 {
828 struct brw_bufmgr *bufmgr = bo->bufmgr;
829 int ret = 0;
830
831 pthread_mutex_lock(&bufmgr->lock);
832
833 if (bo->map_count <= 0) {
834 DBG("attempted to unmap an unmapped bo\n");
835 pthread_mutex_unlock(&bufmgr->lock);
836 /* Preserve the old behaviour of just treating this as a
837 * no-op rather than reporting the error.
838 */
839 return 0;
840 }
841
842 if (--bo->map_count == 0) {
843 bo_mark_mmaps_incoherent(bo);
844 bo->virtual = NULL;
845 }
846 pthread_mutex_unlock(&bufmgr->lock);
847
848 return ret;
849 }
850
851 int
852 brw_bo_subdata(struct brw_bo *bo, uint64_t offset,
853 uint64_t size, const void *data)
854 {
855 struct brw_bufmgr *bufmgr = bo->bufmgr;
856 struct drm_i915_gem_pwrite pwrite;
857 int ret;
858
859 memclear(pwrite);
860 pwrite.handle = bo->gem_handle;
861 pwrite.offset = offset;
862 pwrite.size = size;
863 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
864 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
865 if (ret != 0) {
866 ret = -errno;
867 DBG("%s:%d: Error writing data to buffer %d: "
868 "(%"PRIu64" %"PRIu64") %s .\n",
869 __FILE__, __LINE__, bo->gem_handle, offset, size, strerror(errno));
870 }
871
872 return ret;
873 }
874
875 int
876 brw_bo_get_subdata(struct brw_bo *bo, uint64_t offset,
877 uint64_t size, void *data)
878 {
879 struct brw_bufmgr *bufmgr = bo->bufmgr;
880 struct drm_i915_gem_pread pread;
881 int ret;
882
883 memclear(pread);
884 pread.handle = bo->gem_handle;
885 pread.offset = offset;
886 pread.size = size;
887 pread.data_ptr = (uint64_t) (uintptr_t) data;
888 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
889 if (ret != 0) {
890 ret = -errno;
891 DBG("%s:%d: Error reading data from buffer %d: "
892 "(%"PRIu64" %"PRIu64") %s .\n",
893 __FILE__, __LINE__, bo->gem_handle, offset, size, strerror(errno));
894 }
895
896 return ret;
897 }
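/* Illustrative sketch: pwrite/pread let small amounts of data move without
 * mapping the BO at all (the "params_bo" name and 16-byte payload are
 * assumptions for the example):
 *
 *    uint32_t params[4] = { 0, };
 *    brw_bo_subdata(params_bo, 0, sizeof(params), params);      // upload
 *    brw_bo_get_subdata(params_bo, 0, sizeof(params), params);  // readback
 *
 * Both calls go through DRM_IOCTL_I915_GEM_PWRITE/PREAD, so the kernel takes
 * care of the needed cache-domain transitions for the copy.
 */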
898
899 /** Waits for all GPU rendering with the object to have completed. */
900 void
901 brw_bo_wait_rendering(struct brw_context *brw, struct brw_bo *bo)
902 {
903 set_domain(brw, "waiting for",
904 bo, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
905 }
906
907 /**
908 * Waits on a BO for the given amount of time.
909 *
910 * @bo: buffer object to wait for
911 * @timeout_ns: amount of time to wait in nanoseconds.
912 * If value is less than 0, an infinite wait will occur.
913 *
914 * Returns 0 if the wait was successful, i.e. the last batch referencing the
915 * object has completed within the allotted time. Otherwise a negative return
916 * value describes the error; of particular interest is -ETIME, which indicates
917 * that the wait timed out before the object became idle.
918 *
919 * Similar to brw_bo_wait_rendering, except that a timeout parameter allows
920 * the operation to give up after a certain amount of time. Another subtle
921 * difference is that the locking semantics differ (this variant does not
922 * hold the lock for the duration of the wait), which makes the wait subject
923 * to a larger userspace race window.
924 *
925 * The implementation waits until the object is no longer actively referenced
926 * by any batch buffer submitted at the time of the call. The wait does not
927 * guarantee that the buffer is not re-issued via another thread or a flinked
928 * handle; userspace must make sure this race does not occur if such precision
929 * is important.
930 *
931 * Note that some kernels have broken the promise of an infinite wait for
932 * negative values; upgrade to the latest stable kernel if this is the case.
933 */
934 int
935 brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns)
936 {
937 struct brw_bufmgr *bufmgr = bo->bufmgr;
938 struct drm_i915_gem_wait wait;
939 int ret;
940
941 memclear(wait);
942 wait.bo_handle = bo->gem_handle;
943 wait.timeout_ns = timeout_ns;
944 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
945 if (ret == -1)
946 return -errno;
947
948 return ret;
949 }
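/* Illustrative sketch: bounding a wait at roughly one millisecond and
 * falling back if the BO is still busy (the fallback itself is left
 * abstract):
 *
 *    int ret = brw_bo_wait(bo, 1000 * 1000);   // 1 ms, in nanoseconds
 *    if (ret == -ETIME) {
 *       // still busy: allocate a fresh BO, throttle, or keep going
 *    } else if (ret == 0) {
 *       // idle: safe to reuse or map without stalling
 *    }
 *
 * Passing a negative timeout_ns requests an infinite wait, as described in
 * the comment above.
 */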
950
951 void
952 brw_bufmgr_destroy(struct brw_bufmgr *bufmgr)
953 {
954 pthread_mutex_destroy(&bufmgr->lock);
955
956 /* Free any cached buffer objects we were going to reuse */
957 for (int i = 0; i < bufmgr->num_buckets; i++) {
958 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
959
960 list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
961 list_del(&bo->head);
962
963 bo_free(bo);
964 }
965 }
966
967 _mesa_hash_table_destroy(bufmgr->name_table, NULL);
968 _mesa_hash_table_destroy(bufmgr->handle_table, NULL);
969
970 free(bufmgr);
971 }
972
973 static int
974 bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode,
975 uint32_t stride)
976 {
977 struct brw_bufmgr *bufmgr = bo->bufmgr;
978 struct drm_i915_gem_set_tiling set_tiling;
979 int ret;
980
981 if (bo->global_name == 0 &&
982 tiling_mode == bo->tiling_mode && stride == bo->stride)
983 return 0;
984
985 memset(&set_tiling, 0, sizeof(set_tiling));
986 do {
987 /* set_tiling is slightly broken and overwrites the
988 * input on the error path, so we have to open code
989 * drmIoctl.
990 */
991 set_tiling.handle = bo->gem_handle;
992 set_tiling.tiling_mode = tiling_mode;
993 set_tiling.stride = stride;
994
995 ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
996 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
997 if (ret == -1)
998 return -errno;
999
1000 bo->tiling_mode = set_tiling.tiling_mode;
1001 bo->swizzle_mode = set_tiling.swizzle_mode;
1002 bo->stride = set_tiling.stride;
1003 return 0;
1004 }
1005
1006 int
1007 brw_bo_get_tiling(struct brw_bo *bo, uint32_t *tiling_mode,
1008 uint32_t *swizzle_mode)
1009 {
1010 *tiling_mode = bo->tiling_mode;
1011 *swizzle_mode = bo->swizzle_mode;
1012 return 0;
1013 }
1014
1015 struct brw_bo *
1016 brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr, int prime_fd,
1017 int size)
1018 {
1019 int ret;
1020 uint32_t handle;
1021 struct brw_bo *bo;
1022 struct drm_i915_gem_get_tiling get_tiling;
1023
1024 pthread_mutex_lock(&bufmgr->lock);
1025 ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
1026 if (ret) {
1027 DBG("create_from_prime: failed to obtain handle from fd: %s\n",
1028 strerror(errno));
1029 pthread_mutex_unlock(&bufmgr->lock);
1030 return NULL;
1031 }
1032
1033 /*
1034 * See if the kernel has already returned this buffer to us. Just as
1035 * for named buffers, we must not create two bo's pointing at the same
1036 * kernel object
1037 */
1038 bo = hash_find_bo(bufmgr->handle_table, handle);
1039 if (bo) {
1040 brw_bo_reference(bo);
1041 goto out;
1042 }
1043
1044 bo = calloc(1, sizeof(*bo));
1045 if (!bo)
1046 goto out;
1047
1048 p_atomic_set(&bo->refcount, 1);
1049
1050 /* Determine size of bo. The fd-to-handle ioctl really should
1051 * return the size, but it doesn't. If we have kernel 3.12 or
1052 * later, we can lseek on the prime fd to get the size. Older
1053 * kernels will just fail, in which case we fall back to the
1054 * provided (estimated or guessed) size. */
1055 ret = lseek(prime_fd, 0, SEEK_END);
1056 if (ret != -1)
1057 bo->size = ret;
1058 else
1059 bo->size = size;
1060
1061 bo->bufmgr = bufmgr;
1062
1063 bo->gem_handle = handle;
1064 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
1065
1066 bo->name = "prime";
1067 bo->reusable = false;
1068
1069 memclear(get_tiling);
1070 get_tiling.handle = bo->gem_handle;
1071 if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
1072 goto err;
1073
1074 bo->tiling_mode = get_tiling.tiling_mode;
1075 bo->swizzle_mode = get_tiling.swizzle_mode;
1076 /* XXX stride is unknown */
1077
1078 out:
1079 pthread_mutex_unlock(&bufmgr->lock);
1080 return bo;
1081
1082 err:
1083 bo_free(bo);
1084 pthread_mutex_unlock(&bufmgr->lock);
1085 return NULL;
1086 }
1087
1088 int
1089 brw_bo_gem_export_to_prime(struct brw_bo *bo, int *prime_fd)
1090 {
1091 struct brw_bufmgr *bufmgr = bo->bufmgr;
1092
1093 if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
1094 DRM_CLOEXEC, prime_fd) != 0)
1095 return -errno;
1096
1097 bo->reusable = false;
1098
1099 return 0;
1100 }
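/* Illustrative sketch: a dma-buf round trip between two buffer managers (or
 * two processes). "other_bufmgr" is an assumed second manager; importing
 * back into the same bufmgr would simply find the existing BO in the handle
 * table:
 *
 *    int fd = -1;
 *    if (brw_bo_gem_export_to_prime(bo, &fd) == 0) {
 *       struct brw_bo *imported =
 *          brw_bo_gem_create_from_prime(other_bufmgr, fd, bo->size);
 *       close(fd);
 *       ...
 *    }
 *
 * The explicit size argument only matters on pre-3.12 kernels where lseek()
 * on the dma-buf fd fails; newer kernels report the size themselves.
 */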
1101
1102 int
1103 brw_bo_flink(struct brw_bo *bo, uint32_t *name)
1104 {
1105 struct brw_bufmgr *bufmgr = bo->bufmgr;
1106
1107 if (!bo->global_name) {
1108 struct drm_gem_flink flink;
1109
1110 memclear(flink);
1111 flink.handle = bo->gem_handle;
1112 if (drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
1113 return -errno;
1114
1115 pthread_mutex_lock(&bufmgr->lock);
1116 if (!bo->global_name) {
1117 bo->global_name = flink.name;
1118 bo->reusable = false;
1119
1120 _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
1121 }
1122 pthread_mutex_unlock(&bufmgr->lock);
1123 }
1124
1125 *name = bo->global_name;
1126 return 0;
1127 }
1128
1129 /**
1130 * Enables unlimited caching of buffer objects for reuse.
1131 *
1132 * This is potentially very memory expensive, as the cache at each bucket
1133 * size is only bounded by how many buffers of that size we've managed to have
1134 * in flight at once.
1135 */
1136 void
1137 brw_bufmgr_enable_reuse(struct brw_bufmgr *bufmgr)
1138 {
1139 bufmgr->bo_reuse = true;
1140 }
1141
1142 static void
1143 add_bucket(struct brw_bufmgr *bufmgr, int size)
1144 {
1145 unsigned int i = bufmgr->num_buckets;
1146
1147 assert(i < ARRAY_SIZE(bufmgr->cache_bucket));
1148
1149 list_inithead(&bufmgr->cache_bucket[i].head);
1150 bufmgr->cache_bucket[i].size = size;
1151 bufmgr->num_buckets++;
1152 }
1153
1154 static void
1155 init_cache_buckets(struct brw_bufmgr *bufmgr)
1156 {
1157 uint64_t size, cache_max_size = 64 * 1024 * 1024;
1158
1159 /* OK, so power of two buckets was too wasteful of memory.
1160 * Give 3 other sizes between each power of two, to hopefully
1161 * cover things accurately enough. (The alternative is
1162 * probably to just go for exact matching of sizes, and assume
1163 * that for things like composited window resize the tiled
1164 * width/height alignment and rounding of sizes to pages will
1165 * get us useful cache hit rates anyway)
1166 */
1167 add_bucket(bufmgr, 4096);
1168 add_bucket(bufmgr, 4096 * 2);
1169 add_bucket(bufmgr, 4096 * 3);
1170
1171 /* Initialize the linked lists for BO reuse cache. */
1172 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
1173 add_bucket(bufmgr, size);
1174
1175 add_bucket(bufmgr, size + size * 1 / 4);
1176 add_bucket(bufmgr, size + size * 2 / 4);
1177 add_bucket(bufmgr, size + size * 3 / 4);
1178 }
1179 }
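/* For reference, the bucket sizes produced above are, in KiB:
 * 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, ...
 * i.e. each power of two from 16 KiB up to 64 MiB plus three evenly spaced
 * intermediate sizes, 55 buckets in total, which is why cache_bucket[]
 * above is sized 14 * 4 = 56.
 */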
1180
1181 uint32_t
1182 brw_create_hw_context(struct brw_bufmgr *bufmgr)
1183 {
1184 struct drm_i915_gem_context_create create;
1185 int ret;
1186
1187 memclear(create);
1188 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
1189 if (ret != 0) {
1190 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", strerror(errno));
1191 return 0;
1192 }
1193
1194 return create.ctx_id;
1195 }
1196
1197 void
1198 brw_destroy_hw_context(struct brw_bufmgr *bufmgr, uint32_t ctx_id)
1199 {
1200 struct drm_i915_gem_context_destroy d = {.ctx_id = ctx_id };
1201
1202 if (ctx_id != 0 &&
1203 drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
1204 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
1205 strerror(errno));
1206 }
1207 }
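/* Illustrative sketch: per-context creation with a fallback to the default
 * context when the kernel is too old to support it (how the caller records
 * the fallback is an assumption here):
 *
 *    uint32_t ctx_id = brw_create_hw_context(bufmgr);
 *    if (ctx_id == 0) {
 *       // no hardware context support; batches run in the default context
 *    }
 *    ...
 *    brw_destroy_hw_context(bufmgr, ctx_id);   // no-op for ctx_id == 0
 */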
1208
1209 int
1210 brw_reg_read(struct brw_bufmgr *bufmgr, uint32_t offset, uint64_t *result)
1211 {
1212 struct drm_i915_reg_read reg_read;
1213 int ret;
1214
1215 memclear(reg_read);
1216 reg_read.offset = offset;
1217
1218 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
1219
1220 *result = reg_read.val;
1221 return ret;
1222 }
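/* Illustrative sketch: reading a whitelisted register, e.g. the render ring
 * TIMESTAMP counter. The 0x2358 offset is given only as an assumed example;
 * callers would normally use a named macro for it:
 *
 *    uint64_t gpu_ts = 0;
 *    if (brw_reg_read(bufmgr, 0x2358, &gpu_ts) == 0) {
 *       // gpu_ts now holds the GPU timestamp counter value
 *    }
 */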
1223
1224 void *
1225 brw_bo_map__gtt(struct brw_bo *bo)
1226 {
1227 struct brw_bufmgr *bufmgr = bo->bufmgr;
1228
1229 if (bo->gtt_virtual)
1230 return bo->gtt_virtual;
1231
1232 pthread_mutex_lock(&bufmgr->lock);
1233 if (bo->gtt_virtual == NULL) {
1234 struct drm_i915_gem_mmap_gtt mmap_arg;
1235 void *ptr;
1236
1237 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1238 bo->gem_handle, bo->name, bo->map_count);
1239
1240 memclear(mmap_arg);
1241 mmap_arg.handle = bo->gem_handle;
1242
1243 /* Get the fake offset back... */
1244 ptr = MAP_FAILED;
1245 if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg) == 0) {
1246 /* and mmap it */
1247 ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
1248 MAP_SHARED, bufmgr->fd, mmap_arg.offset);
1249 }
1250 if (ptr == MAP_FAILED) {
1251 --bo->map_count;
1252 ptr = NULL;
1253 }
1254
1255 bo->gtt_virtual = ptr;
1256 }
1257 pthread_mutex_unlock(&bufmgr->lock);
1258
1259 return bo->gtt_virtual;
1260 }
1261
1262 void *
1263 brw_bo_map__cpu(struct brw_bo *bo)
1264 {
1265 struct brw_bufmgr *bufmgr = bo->bufmgr;
1266
1267 if (bo->mem_virtual)
1268 return bo->mem_virtual;
1269
1270 pthread_mutex_lock(&bufmgr->lock);
1271 if (!bo->mem_virtual) {
1272 struct drm_i915_gem_mmap mmap_arg;
1273
1274 DBG("bo_map: %d (%s), map_count=%d\n",
1275 bo->gem_handle, bo->name, bo->map_count);
1276
1277 memclear(mmap_arg);
1278 mmap_arg.handle = bo->gem_handle;
1279 mmap_arg.size = bo->size;
1280 if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
1281 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1282 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1283 } else {
1284 bo->map_count++;
1285 VG(VALGRIND_MALLOCLIKE_BLOCK
1286 (mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1287 bo->mem_virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
1288 }
1289 }
1290 pthread_mutex_unlock(&bufmgr->lock);
1291
1292 return bo->mem_virtual;
1293 }
1294
1295 void *
1296 brw_bo_map__wc(struct brw_bo *bo)
1297 {
1298 struct brw_bufmgr *bufmgr = bo->bufmgr;
1299
1300 if (bo->wc_virtual)
1301 return bo->wc_virtual;
1302
1303 pthread_mutex_lock(&bufmgr->lock);
1304 if (!bo->wc_virtual) {
1305 struct drm_i915_gem_mmap mmap_arg;
1306
1307 DBG("bo_map: %d (%s), map_count=%d\n",
1308 bo->gem_handle, bo->name, bo->map_count);
1309
1310 memclear(mmap_arg);
1311 mmap_arg.handle = bo->gem_handle;
1312 mmap_arg.size = bo->size;
1313 mmap_arg.flags = I915_MMAP_WC;
1314 if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
1315 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1316 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1317 } else {
1318 bo->map_count++;
1319 VG(VALGRIND_MALLOCLIKE_BLOCK
1320 (mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1321 bo->wc_virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
1322 }
1323 }
1324 pthread_mutex_unlock(&bufmgr->lock);
1325
1326 return bo->wc_virtual;
1327 }
1328
1329 /**
1330 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
1331 * and manage buffer objects.
1332 *
1333 * \param fd File descriptor of the opened DRM device.
1334 */
1335 struct brw_bufmgr *
1336 brw_bufmgr_init(struct gen_device_info *devinfo, int fd, int batch_size)
1337 {
1338 struct brw_bufmgr *bufmgr;
1339
1340 bufmgr = calloc(1, sizeof(*bufmgr));
1341 if (bufmgr == NULL)
1342 return NULL;
1343
1344 /* Handles to buffer objects belong to the device fd and are not
1345 * reference counted by the kernel. If the same fd is used by
1346 * multiple parties (threads sharing the same screen bufmgr, or
1347 * even worse the same device fd passed to multiple libraries)
1348 * ownership of those handles is shared by those independent parties.
1349 *
1350 * Don't do this! Ensure that each library/bufmgr has its own device
1351 * fd so that its namespace does not clash with another.
1352 */
1353 bufmgr->fd = fd;
1354
1355 if (pthread_mutex_init(&bufmgr->lock, NULL) != 0) {
1356 free(bufmgr);
1357 return NULL;
1358 }
1359
1360 bufmgr->has_llc = devinfo->has_llc;
1361
1362 init_cache_buckets(bufmgr);
1363
1364 bufmgr->name_table =
1365 _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
1366 bufmgr->handle_table =
1367 _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
1368
1369 return bufmgr;
1370 }
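/* Illustrative sketch: typical screen-setup usage of this file's entry
 * points (error handling trimmed; the 4096-byte "scratch" allocation is just
 * an example, and devinfo/fd/batch_size are assumed to come from screen
 * creation):
 *
 *    struct brw_bufmgr *bufmgr = brw_bufmgr_init(devinfo, fd, batch_size);
 *    if (bufmgr == NULL)
 *       return false;
 *    brw_bufmgr_enable_reuse(bufmgr);
 *
 *    struct brw_bo *bo = brw_bo_alloc(bufmgr, "scratch", 4096, 0);
 *    ...
 *    brw_bo_unreference(bo);
 *    brw_bufmgr_destroy(bufmgr);
 */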