i965/drm: Add stall warnings when mapping or waiting on BOs.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_bufmgr.c
1 /**************************************************************************
2 *
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007-2012 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
26 * of the Software.
27 *
28 *
29 **************************************************************************/
30 /*
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
35 */
36
37 #ifdef HAVE_CONFIG_H
38 #include "config.h"
39 #endif
40
41 #include <xf86drm.h>
42 #include <util/u_atomic.h>
43 #include <fcntl.h>
44 #include <stdio.h>
45 #include <stdlib.h>
46 #include <string.h>
47 #include <unistd.h>
48 #include <assert.h>
49 #include <pthread.h>
50 #include <sys/ioctl.h>
51 #include <sys/stat.h>
52 #include <sys/types.h>
53 #include <stdbool.h>
54
55 #include "errno.h"
56 #ifndef ETIME
57 #define ETIME ETIMEDOUT
58 #endif
59 #include "common/gen_debug.h"
60 #include "common/gen_device_info.h"
61 #include "libdrm_macros.h"
62 #include "main/macros.h"
63 #include "util/macros.h"
64 #include "util/hash_table.h"
65 #include "util/list.h"
66 #include "brw_bufmgr.h"
67 #include "brw_context.h"
68 #include "string.h"
69
70 #include "i915_drm.h"
71
72 #ifdef HAVE_VALGRIND
73 #include <valgrind.h>
74 #include <memcheck.h>
75 #define VG(x) x
76 #else
77 #define VG(x)
78 #endif
79
80 #define memclear(s) memset(&s, 0, sizeof(s))
81
82 #define FILE_DEBUG_FLAG DEBUG_BUFMGR
83
84 static inline int
85 atomic_add_unless(int *v, int add, int unless)
86 {
87 int c, old;
88 c = p_atomic_read(v);
89 while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
90 c = old;
91 return c == unless;
92 }
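
/*
 * Note on the helper above: atomic_add_unless() adds `add` to *v only while
 * *v != unless, and returns nonzero iff *v was already equal to `unless`
 * (in which case nothing was added). For example,
 * atomic_add_unless(&bo->refcount, -1, 1) drops a reference unless it is the
 * last one; when it returns true, the caller (brw_bo_unreference() below)
 * takes the slow, locked path to do the final teardown.
 */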
93
94 struct bo_cache_bucket {
95 struct list_head head;
96 unsigned long size;
97 };
98
99 struct brw_bufmgr {
100 int fd;
101
102 pthread_mutex_t lock;
103
104 /** Array of lists of cached gem objects of power-of-two sizes */
105 struct bo_cache_bucket cache_bucket[14 * 4];
106 int num_buckets;
107 time_t time;
108
109 struct hash_table *name_table;
110 struct hash_table *handle_table;
111
112 unsigned int has_llc:1;
113 unsigned int bo_reuse:1;
114 };
115
116 static int bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode,
117 uint32_t stride);
118
119 static void bo_free(struct brw_bo *bo);
120
121 static uint32_t
122 key_hash_uint(const void *key)
123 {
124 return _mesa_hash_data(key, 4);
125 }
126
127 static bool
128 key_uint_equal(const void *a, const void *b)
129 {
130 return *((unsigned *) a) == *((unsigned *) b);
131 }
132
133 static struct brw_bo *
134 hash_find_bo(struct hash_table *ht, unsigned int key)
135 {
136 struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
137 return entry ? (struct brw_bo *) entry->data : NULL;
138 }
139
140 static unsigned long
141 bo_tile_size(struct brw_bufmgr *bufmgr, unsigned long size,
142 uint32_t *tiling_mode)
143 {
144 if (*tiling_mode == I915_TILING_NONE)
145 return size;
146
147 /* 965+ just need multiples of page size for tiling */
148 return ALIGN(size, 4096);
149 }
150
151 /*
152 * Round a given pitch up to the minimum required for X tiling on a
153 * given chip. We use 512 as the minimum to allow for a later tiling
154 * change.
155 */
156 static unsigned long
157 bo_tile_pitch(struct brw_bufmgr *bufmgr,
158 unsigned long pitch, uint32_t *tiling_mode)
159 {
160 unsigned long tile_width;
161
162 /* If untiled, then just align it so that we can do rendering
163 * to it with the 3D engine.
164 */
165 if (*tiling_mode == I915_TILING_NONE)
166 return ALIGN(pitch, 64);
167
168 if (*tiling_mode == I915_TILING_X)
169 tile_width = 512;
170 else
171 tile_width = 128;
172
173 /* 965 is flexible */
174 return ALIGN(pitch, tile_width);
175 }
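
/*
 * Worked example (illustrative, hypothetical numbers): a 300-pixel-wide
 * RGBA8888 surface has a byte pitch of 300 * 4 = 1200. bo_tile_pitch()
 * returns ALIGN(1200, 64) = 1216 when untiled, ALIGN(1200, 512) = 1536 for
 * X tiling, and ALIGN(1200, 128) = 1280 for Y tiling.
 */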
176
177 static struct bo_cache_bucket *
178 bucket_for_size(struct brw_bufmgr *bufmgr, unsigned long size)
179 {
180 int i;
181
182 for (i = 0; i < bufmgr->num_buckets; i++) {
183 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
184 if (bucket->size >= size) {
185 return bucket;
186 }
187 }
188
189 return NULL;
190 }
191
192 inline void
193 brw_bo_reference(struct brw_bo *bo)
194 {
195 p_atomic_inc(&bo->refcount);
196 }
197
198 int
199 brw_bo_busy(struct brw_bo *bo)
200 {
201 struct brw_bufmgr *bufmgr = bo->bufmgr;
202 struct drm_i915_gem_busy busy;
203 int ret;
204
205 memclear(busy);
206 busy.handle = bo->gem_handle;
207
208 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
209 if (ret == 0) {
210 bo->idle = !busy.busy;
211 return busy.busy;
212 } else {
213 return false;
214 }
216 }
217
218 int
219 brw_bo_madvise(struct brw_bo *bo, int state)
220 {
221 struct drm_i915_gem_madvise madv;
222
223 memclear(madv);
224 madv.handle = bo->gem_handle;
225 madv.madv = state;
226 madv.retained = 1;
227 drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
228
229 return madv.retained;
230 }
231
232 /* drop the oldest entries that have been purged by the kernel */
233 static void
234 brw_bo_cache_purge_bucket(struct brw_bufmgr *bufmgr,
235 struct bo_cache_bucket *bucket)
236 {
237 list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
238 if (brw_bo_madvise(bo, I915_MADV_DONTNEED))
239 break;
240
241 list_del(&bo->head);
242 bo_free(bo);
243 }
244 }
245
246 static struct brw_bo *
247 bo_alloc_internal(struct brw_bufmgr *bufmgr,
248 const char *name,
249 unsigned long size,
250 unsigned long flags,
251 uint32_t tiling_mode,
252 unsigned long stride, unsigned int alignment)
253 {
254 struct brw_bo *bo;
255 unsigned int page_size = getpagesize();
256 int ret;
257 struct bo_cache_bucket *bucket;
258 bool alloc_from_cache;
259 unsigned long bo_size;
260 bool for_render = false;
261
262 if (flags & BO_ALLOC_FOR_RENDER)
263 for_render = true;
264
265 /* Round the allocated size up to a power of two number of pages. */
266 bucket = bucket_for_size(bufmgr, size);
267
268 /* If we don't have caching at this size, don't actually round the
269 * allocation up.
270 */
271 if (bucket == NULL) {
272 bo_size = size;
273 if (bo_size < page_size)
274 bo_size = page_size;
275 } else {
276 bo_size = bucket->size;
277 }
278
279 pthread_mutex_lock(&bufmgr->lock);
280 /* Get a buffer out of the cache if available */
281 retry:
282 alloc_from_cache = false;
283 if (bucket != NULL && !list_empty(&bucket->head)) {
284 if (for_render) {
285 /* Allocate new render-target BOs from the tail (MRU)
286 * of the list, as it will likely be hot in the GPU
287 * cache and in the aperture for us.
288 */
289 bo = LIST_ENTRY(struct brw_bo, bucket->head.prev, head);
290 list_del(&bo->head);
291 alloc_from_cache = true;
292 bo->align = alignment;
293 } else {
294 assert(alignment == 0);
295 /* For non-render-target BOs (where we're probably
296 * going to map it first thing in order to fill it
297 * with data), check if the last BO in the cache is
298 * unbusy, and only reuse in that case. Otherwise,
299 * allocating a new buffer is probably faster than
300 * waiting for the GPU to finish.
301 */
302 bo = LIST_ENTRY(struct brw_bo, bucket->head.next, head);
303 if (!brw_bo_busy(bo)) {
304 alloc_from_cache = true;
305 list_del(&bo->head);
306 }
307 }
308
309 if (alloc_from_cache) {
310 if (!brw_bo_madvise(bo, I915_MADV_WILLNEED)) {
311 bo_free(bo);
312 brw_bo_cache_purge_bucket(bufmgr, bucket);
313 goto retry;
314 }
315
316 if (bo_set_tiling_internal(bo, tiling_mode, stride)) {
317 bo_free(bo);
318 goto retry;
319 }
320 }
321 }
322
323 if (!alloc_from_cache) {
324 struct drm_i915_gem_create create;
325
326 bo = calloc(1, sizeof(*bo));
327 if (!bo)
328 goto err;
329
330 bo->size = bo_size;
331
332 memclear(create);
333 create.size = bo_size;
334
335 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
336 if (ret != 0) {
337 free(bo);
338 goto err;
339 }
340
341 bo->gem_handle = create.handle;
342 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
343
344 bo->bufmgr = bufmgr;
345 bo->align = alignment;
346
347 bo->tiling_mode = I915_TILING_NONE;
348 bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
349 bo->stride = 0;
350
351 if (bo_set_tiling_internal(bo, tiling_mode, stride))
352 goto err_free;
353 }
354
355 bo->name = name;
356 p_atomic_set(&bo->refcount, 1);
357 bo->reusable = true;
358
359 pthread_mutex_unlock(&bufmgr->lock);
360
361 DBG("bo_create: buf %d (%s) %ldb\n", bo->gem_handle, bo->name, size);
362
363 return bo;
364
365 err_free:
366 bo_free(bo);
367 err:
368 pthread_mutex_unlock(&bufmgr->lock);
369 return NULL;
370 }
371
372 struct brw_bo *
373 brw_bo_alloc(struct brw_bufmgr *bufmgr,
374 const char *name, unsigned long size, unsigned int alignment)
375 {
376 return bo_alloc_internal(bufmgr, name, size, 0, I915_TILING_NONE, 0, 0);
377 }
378
379 struct brw_bo *
380 brw_bo_alloc_tiled(struct brw_bufmgr *bufmgr, const char *name,
381 int x, int y, int cpp, uint32_t *tiling_mode,
382 unsigned long *pitch, unsigned long flags)
383 {
384 unsigned long size, stride;
385 uint32_t tiling;
386
387 do {
388 unsigned long aligned_y, height_alignment;
389
390 tiling = *tiling_mode;
391
392 /* If we're tiled, our allocations are in 8 or 32-row blocks,
393 * so failure to align our height means that we won't allocate
394 * enough pages.
395 *
396 * If we're untiled, we still have to align to 2 rows high
397 * because the data port accesses 2x2 blocks even if the
398 * bottom row isn't to be rendered, so failure to align means
399 * we could walk off the end of the GTT and fault. This is
400 * documented on 965, and may be the case on older chipsets
401 * too so we try to be careful.
402 */
403 aligned_y = y;
404 height_alignment = 2;
405
406 if (tiling == I915_TILING_X)
407 height_alignment = 8;
408 else if (tiling == I915_TILING_Y)
409 height_alignment = 32;
410 aligned_y = ALIGN(y, height_alignment);
411
412 stride = x * cpp;
413 stride = bo_tile_pitch(bufmgr, stride, tiling_mode);
414 size = stride * aligned_y;
415 size = bo_tile_size(bufmgr, size, tiling_mode);
416 } while (*tiling_mode != tiling);
417 *pitch = stride;
418
419 if (tiling == I915_TILING_NONE)
420 stride = 0;
421
422 return bo_alloc_internal(bufmgr, name, size, flags, tiling, stride, 0);
423 }
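
/*
 * Usage sketch (illustrative only, not compiled; the surface dimensions and
 * function name are made up). The caller passes in a preferred tiling mode
 * and gets back the tiling and pitch that were actually chosen.
 */
#if 0
static struct brw_bo *
example_alloc_tiled_rgba(struct brw_bufmgr *bufmgr, int width, int height)
{
   uint32_t tiling = I915_TILING_X;
   unsigned long pitch = 0;

   /* 4 bytes per pixel; BO_ALLOC_FOR_RENDER hints that the BO will be a
    * render target, so the cache hands back the most-recently-used buffer.
    */
   return brw_bo_alloc_tiled(bufmgr, "example tiled surface",
                             width, height, 4, &tiling, &pitch,
                             BO_ALLOC_FOR_RENDER);
}
#endif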
424
425 /**
426 * Returns a brw_bo wrapping the given buffer object handle.
427 *
428 * This can be used when one application needs to pass a buffer object
429 * to another.
430 */
431 struct brw_bo *
432 brw_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
433 const char *name, unsigned int handle)
434 {
435 struct brw_bo *bo;
436 int ret;
437 struct drm_gem_open open_arg;
438 struct drm_i915_gem_get_tiling get_tiling;
439
440 /* At the moment most applications only have a few named BOs.
441 * For instance, in a DRI client only the render buffers passed
442 * between X and the client are named. And since X returns the
443 * alternating names for the front/back buffer, a linear search
444 * provides a sufficiently fast match.
445 */
446 pthread_mutex_lock(&bufmgr->lock);
447 bo = hash_find_bo(bufmgr->name_table, handle);
448 if (bo) {
449 brw_bo_reference(bo);
450 goto out;
451 }
452
453 memclear(open_arg);
454 open_arg.name = handle;
455 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
456 if (ret != 0) {
457 DBG("Couldn't reference %s handle 0x%08x: %s\n",
458 name, handle, strerror(errno));
459 bo = NULL;
460 goto out;
461 }
462 /* Now see if someone has used a prime handle to get this
463 * object from the kernel before by looking through the list
464 * again for a matching gem_handle
465 */
466 bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
467 if (bo) {
468 brw_bo_reference(bo);
469 goto out;
470 }
471
472 bo = calloc(1, sizeof(*bo));
473 if (!bo)
474 goto out;
475
476 p_atomic_set(&bo->refcount, 1);
477
478 bo->size = open_arg.size;
479 bo->offset64 = 0;
480 bo->virtual = NULL;
481 bo->bufmgr = bufmgr;
482 bo->gem_handle = open_arg.handle;
483 bo->name = name;
484 bo->global_name = handle;
485 bo->reusable = false;
486
487 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
488 _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
489
490 memclear(get_tiling);
491 get_tiling.handle = bo->gem_handle;
492 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
493 if (ret != 0)
494 goto err_unref;
495
496 bo->tiling_mode = get_tiling.tiling_mode;
497 bo->swizzle_mode = get_tiling.swizzle_mode;
498 /* XXX stride is unknown */
499 DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);
500
501 out:
502 pthread_mutex_unlock(&bufmgr->lock);
503 return bo;
504
505 err_unref:
506 bo_free(bo);
507 pthread_mutex_unlock(&bufmgr->lock);
508 return NULL;
509 }
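
/*
 * Sharing sketch (illustrative only, not compiled): one side flinks a BO to
 * obtain a global name, the other side opens that name. The transport for
 * the name (X protocol, a socket, ...) is hypothetical and out of scope.
 */
#if 0
static uint32_t
example_export_name(struct brw_bo *bo)
{
   uint32_t name = 0;

   /* Makes the BO non-reusable and publishes it under a global name. */
   if (brw_bo_flink(bo, &name) != 0)
      return 0;
   return name;
}

static struct brw_bo *
example_import_name(struct brw_bufmgr *bufmgr, uint32_t name)
{
   /* Returns the existing brw_bo if this name (or its GEM handle) was seen
    * before, so both sides end up with a single wrapper per kernel object.
    */
   return brw_bo_gem_create_from_name(bufmgr, "shared", name);
}
#endif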
510
511 static void
512 bo_free(struct brw_bo *bo)
513 {
514 struct brw_bufmgr *bufmgr = bo->bufmgr;
515 struct drm_gem_close close;
516 struct hash_entry *entry;
517 int ret;
518
519 if (bo->mem_virtual) {
520 VG(VALGRIND_FREELIKE_BLOCK(bo->mem_virtual, 0));
521 drm_munmap(bo->mem_virtual, bo->size);
522 }
523 if (bo->wc_virtual) {
524 VG(VALGRIND_FREELIKE_BLOCK(bo->wc_virtual, 0));
525 drm_munmap(bo->wc_virtual, bo->size);
526 }
527 if (bo->gtt_virtual) {
528 drm_munmap(bo->gtt_virtual, bo->size);
529 }
530
531 if (bo->global_name) {
532 entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
533 _mesa_hash_table_remove(bufmgr->name_table, entry);
534 }
535 entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
536 _mesa_hash_table_remove(bufmgr->handle_table, entry);
537
538 /* Close this object */
539 memclear(close);
540 close.handle = bo->gem_handle;
541 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
542 if (ret != 0) {
543 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
544 bo->gem_handle, bo->name, strerror(errno));
545 }
546 free(bo);
547 }
548
549 static void
550 bo_mark_mmaps_incoherent(struct brw_bo *bo)
551 {
552 #if HAVE_VALGRIND
553 if (bo->mem_virtual)
554 VALGRIND_MAKE_MEM_NOACCESS(bo->mem_virtual, bo->size);
555
556 if (bo->wc_virtual)
557 VALGRIND_MAKE_MEM_NOACCESS(bo->wc_virtual, bo->size);
558
559 if (bo->gtt_virtual)
560 VALGRIND_MAKE_MEM_NOACCESS(bo->gtt_virtual, bo->size);
561 #endif
562 }
563
564 /** Frees all cached buffers significantly older than @time. */
565 static void
566 cleanup_bo_cache(struct brw_bufmgr *bufmgr, time_t time)
567 {
568 int i;
569
570 if (bufmgr->time == time)
571 return;
572
573 for (i = 0; i < bufmgr->num_buckets; i++) {
574 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
575
576 list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
577 if (time - bo->free_time <= 1)
578 break;
579
580 list_del(&bo->head);
581
582 bo_free(bo);
583 }
584 }
585
586 bufmgr->time = time;
587 }
588
589 static void
590 bo_unreference_final(struct brw_bo *bo, time_t time)
591 {
592 struct brw_bufmgr *bufmgr = bo->bufmgr;
593 struct bo_cache_bucket *bucket;
594
595 DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);
596
597 /* Clear any left-over mappings */
598 if (bo->map_count) {
599 DBG("bo freed with non-zero map-count %d\n", bo->map_count);
600 bo->map_count = 0;
601 bo_mark_mmaps_incoherent(bo);
602 }
603
604 bucket = bucket_for_size(bufmgr, bo->size);
605 /* Put the buffer into our internal cache for reuse if we can. */
606 if (bufmgr->bo_reuse && bo->reusable && bucket != NULL &&
607 brw_bo_madvise(bo, I915_MADV_DONTNEED)) {
608 bo->free_time = time;
609
610 bo->name = NULL;
611
612 list_addtail(&bo->head, &bucket->head);
613 } else {
614 bo_free(bo);
615 }
616 }
617
618 void
619 brw_bo_unreference(struct brw_bo *bo)
620 {
621 if (bo == NULL)
622 return;
623
624 assert(p_atomic_read(&bo->refcount) > 0);
625
626 if (atomic_add_unless(&bo->refcount, -1, 1)) {
627 struct brw_bufmgr *bufmgr = bo->bufmgr;
628 struct timespec time;
629
630 clock_gettime(CLOCK_MONOTONIC, &time);
631
632 pthread_mutex_lock(&bufmgr->lock);
633
634 if (p_atomic_dec_zero(&bo->refcount)) {
635 bo_unreference_final(bo, time.tv_sec);
636 cleanup_bo_cache(bufmgr, time.tv_sec);
637 }
638
639 pthread_mutex_unlock(&bufmgr->lock);
640 }
641 }
642
643 static void
644 set_domain(struct brw_context *brw, const char *action,
645 struct brw_bo *bo, uint32_t read_domains, uint32_t write_domain)
646 {
647 struct drm_i915_gem_set_domain sd = {
648 .handle = bo->gem_handle,
649 .read_domains = read_domains,
650 .write_domain = write_domain,
651 };
652
653 double elapsed = unlikely(brw && brw->perf_debug) ? -get_time() : 0.0;
654
655 if (drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0) {
656 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s.\n",
657 __FILE__, __LINE__, bo->gem_handle, read_domains, write_domain,
658 strerror(errno));
659 }
660
661 if (unlikely(brw && brw->perf_debug)) {
662 elapsed += get_time();
663 if (elapsed > 1e-5) /* 0.01ms */
664 perf_debug("%s a busy \"%s\" BO stalled and took %.03f ms.\n",
665 action, bo->name, elapsed * 1000);
666 }
667 }
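
/*
 * When brw->perf_debug is set (e.g. via INTEL_DEBUG=perf), set_domain()
 * above times the ioctl and reports stalls longer than 0.01 ms with a
 * message of the form (buffer name and timing below are hypothetical):
 *
 *   CPU mapping a busy "tex" BO stalled and took 1.500 ms.
 */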
668
669 int
670 brw_bo_map(struct brw_context *brw, struct brw_bo *bo, int write_enable)
671 {
672 struct brw_bufmgr *bufmgr = bo->bufmgr;
673 int ret;
674
675 pthread_mutex_lock(&bufmgr->lock);
676
677 if (!bo->mem_virtual) {
678 struct drm_i915_gem_mmap mmap_arg;
679
680 DBG("bo_map: %d (%s), map_count=%d\n",
681 bo->gem_handle, bo->name, bo->map_count);
682
683 memclear(mmap_arg);
684 mmap_arg.handle = bo->gem_handle;
685 mmap_arg.size = bo->size;
686 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
687 if (ret != 0) {
688 ret = -errno;
689 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
690 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
691 pthread_mutex_unlock(&bufmgr->lock);
692 return ret;
693 }
694 bo->map_count++;
695 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
696 bo->mem_virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
697 }
698 DBG("bo_map: %d (%s) -> %p\n", bo->gem_handle, bo->name, bo->mem_virtual);
699 bo->virtual = bo->mem_virtual;
700
701 set_domain(brw, "CPU mapping", bo, I915_GEM_DOMAIN_CPU,
702 write_enable ? I915_GEM_DOMAIN_CPU : 0);
703
704 bo_mark_mmaps_incoherent(bo);
705 VG(VALGRIND_MAKE_MEM_DEFINED(bo->mem_virtual, bo->size));
706 pthread_mutex_unlock(&bufmgr->lock);
707
708 return 0;
709 }
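
/*
 * Usage sketch (illustrative only, not compiled): map a BO for CPU writes,
 * fill it, and unmap. brw_bo_map() stores the CPU pointer in bo->virtual
 * and, via set_domain(), waits for (and warns about) any GPU use of the BO.
 */
#if 0
static int
example_fill_bo(struct brw_context *brw, struct brw_bo *bo, uint8_t value)
{
   int ret = brw_bo_map(brw, bo, /* write_enable */ 1);
   if (ret != 0)
      return ret;

   memset(bo->virtual, value, bo->size);
   return brw_bo_unmap(bo);
}
#endif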
710
711 static int
712 map_gtt(struct brw_bo *bo)
713 {
714 struct brw_bufmgr *bufmgr = bo->bufmgr;
715 int ret;
716
717 /* Get a mapping of the buffer if we haven't before. */
718 if (bo->gtt_virtual == NULL) {
719 struct drm_i915_gem_mmap_gtt mmap_arg;
720
721 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
722 bo->gem_handle, bo->name, bo->map_count);
723
724 memclear(mmap_arg);
725 mmap_arg.handle = bo->gem_handle;
726
727 /* Get the fake offset back... */
728 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
729 if (ret != 0) {
730 ret = -errno;
731 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
732 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
733 return ret;
734 }
735
736 /* and mmap it */
737 bo->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
738 MAP_SHARED, bufmgr->fd, mmap_arg.offset);
739 if (bo->gtt_virtual == MAP_FAILED) {
740 bo->gtt_virtual = NULL;
741 ret = -errno;
742 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
743 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
744 return ret;
745 }
746 }
747
748 bo->map_count++;
749 bo->virtual = bo->gtt_virtual;
750
751 DBG("bo_map_gtt: %d (%s) -> %p\n", bo->gem_handle, bo->name,
752 bo->gtt_virtual);
753
754 return 0;
755 }
756
757 int
758 brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo)
759 {
760 struct brw_bufmgr *bufmgr = bo->bufmgr;
761 int ret;
762
763 pthread_mutex_lock(&bufmgr->lock);
764
765 ret = map_gtt(bo);
766 if (ret) {
767 pthread_mutex_unlock(&bufmgr->lock);
768 return ret;
769 }
770
771 /* Now move it to the GTT domain so that the GPU and CPU
772 * caches are flushed and the GPU isn't actively using the
773 * buffer.
774 *
775 * The pagefault handler does this domain change for us when
776 * it has unbound the BO from the GTT, but it's up to us to
777 * tell it when we're about to use things if we had done
778 * rendering and it still happens to be bound to the GTT.
779 */
780 set_domain(brw, "GTT mapping", bo,
781 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
782
783 bo_mark_mmaps_incoherent(bo);
784 VG(VALGRIND_MAKE_MEM_DEFINED(bo->gtt_virtual, bo->size));
785 pthread_mutex_unlock(&bufmgr->lock);
786
787 return 0;
788 }
789
790 /**
791 * Performs a mapping of the buffer object like the normal GTT
792 * mapping, but avoids waiting for the GPU to be done reading from or
793 * rendering to the buffer.
794 *
795 * This is used in the implementation of GL_ARB_map_buffer_range: The
796 * user asks to create a buffer, then does a mapping, fills some
797 * space, runs a drawing command, then asks to map it again without
798 * synchronizing because it guarantees that it won't write over the
799 * data that the GPU is busy using (or, more specifically, that if it
800 * does write over the data, it acknowledges that rendering is
801 * undefined).
802 */
803
804 int
805 brw_bo_map_unsynchronized(struct brw_context *brw, struct brw_bo *bo)
806 {
807 struct brw_bufmgr *bufmgr = bo->bufmgr;
808 int ret;
809
810 /* If the CPU cache isn't coherent with the GTT, then use a
811 * regular synchronized mapping. The problem is that we don't
812 * track where the buffer was last used on the CPU side in
813 * terms of brw_bo_map vs brw_bo_map_gtt, so
814 * we would potentially corrupt the buffer even when the user
815 * does reasonable things.
816 */
817 if (!bufmgr->has_llc)
818 return brw_bo_map_gtt(brw, bo);
819
820 pthread_mutex_lock(&bufmgr->lock);
821
822 ret = map_gtt(bo);
823 if (ret == 0) {
824 bo_mark_mmaps_incoherent(bo);
825 VG(VALGRIND_MAKE_MEM_DEFINED(bo->gtt_virtual, bo->size));
826 }
827
828 pthread_mutex_unlock(&bufmgr->lock);
829
830 return ret;
831 }
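
/*
 * Pattern sketch (illustrative only, not compiled): the unsynchronized map
 * is meant for append-style streaming where the caller promises not to touch
 * bytes the GPU may still be reading, as in GL_ARB_map_buffer_range with
 * MAP_UNSYNCHRONIZED_BIT. `stream_offset` is a hypothetical write cursor.
 */
#if 0
static void *
example_stream_space(struct brw_context *brw, struct brw_bo *bo,
                     unsigned long *stream_offset, unsigned long bytes)
{
   if (*stream_offset + bytes > bo->size)
      return NULL;              /* caller must switch to a fresh BO instead */

   /* Does not stall even if the GPU is still using earlier bytes. */
   if (brw_bo_map_unsynchronized(brw, bo) != 0)
      return NULL;

   void *ptr = (char *) bo->virtual + *stream_offset;
   *stream_offset += bytes;
   return ptr;
}
#endif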
832
833 int
834 brw_bo_unmap(struct brw_bo *bo)
835 {
836 struct brw_bufmgr *bufmgr = bo->bufmgr;
837 int ret = 0;
838
839 if (bo == NULL)
840 return 0;
841
842 pthread_mutex_lock(&bufmgr->lock);
843
844 if (bo->map_count <= 0) {
845 DBG("attempted to unmap an unmapped bo\n");
846 pthread_mutex_unlock(&bufmgr->lock);
847 /* Preserve the old behaviour of just treating this as a
848 * no-op rather than reporting the error.
849 */
850 return 0;
851 }
852
853 if (--bo->map_count == 0) {
854 bo_mark_mmaps_incoherent(bo);
855 bo->virtual = NULL;
856 }
857 pthread_mutex_unlock(&bufmgr->lock);
858
859 return ret;
860 }
861
862 int
863 brw_bo_subdata(struct brw_bo *bo, unsigned long offset,
864 unsigned long size, const void *data)
865 {
866 struct brw_bufmgr *bufmgr = bo->bufmgr;
867 struct drm_i915_gem_pwrite pwrite;
868 int ret;
869
870 memclear(pwrite);
871 pwrite.handle = bo->gem_handle;
872 pwrite.offset = offset;
873 pwrite.size = size;
874 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
875 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
876 if (ret != 0) {
877 ret = -errno;
878 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
879 __FILE__, __LINE__, bo->gem_handle, (int) offset,
880 (int) size, strerror(errno));
881 }
882
883 return ret;
884 }
885
886 int
887 brw_bo_get_subdata(struct brw_bo *bo, unsigned long offset,
888 unsigned long size, void *data)
889 {
890 struct brw_bufmgr *bufmgr = bo->bufmgr;
891 struct drm_i915_gem_pread pread;
892 int ret;
893
894 memclear(pread);
895 pread.handle = bo->gem_handle;
896 pread.offset = offset;
897 pread.size = size;
898 pread.data_ptr = (uint64_t) (uintptr_t) data;
899 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
900 if (ret != 0) {
901 ret = -errno;
902 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
903 __FILE__, __LINE__, bo->gem_handle, (int) offset,
904 (int) size, strerror(errno));
905 }
906
907 return ret;
908 }
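
/*
 * Usage sketch (illustrative only, not compiled): subdata goes through the
 * pwrite/pread ioctls, so the caller never needs a CPU mapping; the kernel
 * performs the copy and any cache maintenance the data path requires.
 */
#if 0
static int
example_roundtrip(struct brw_bo *bo)
{
   const uint32_t magic = 0xdeadbeef;
   uint32_t readback = 0;
   int ret;

   ret = brw_bo_subdata(bo, 0, sizeof(magic), &magic);
   if (ret != 0)
      return ret;

   ret = brw_bo_get_subdata(bo, 0, sizeof(readback), &readback);
   if (ret != 0)
      return ret;

   return readback == magic ? 0 : -1;
}
#endif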
909
910 /** Waits for all GPU rendering with the object to have completed. */
911 void
912 brw_bo_wait_rendering(struct brw_context *brw, struct brw_bo *bo)
913 {
914 set_domain(brw, "waiting for",
915 bo, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
916 }
917
918 /**
919 * Waits on a BO for the given amount of time.
920 *
921 * @bo: buffer object to wait for
922 * @timeout_ns: amount of time to wait in nanoseconds.
923 * If value is less than 0, an infinite wait will occur.
924 *
925 * Returns 0 if the wait was successful, i.e. the last batch referencing the
926 * object has completed within the allotted time. Otherwise some negative return
927 * value describes the error. Of particular interest is -ETIME when the wait has
928 * failed to yield the desired result.
929 *
930 * Similar to brw_bo_wait_rendering, except that a timeout parameter allows
931 * the operation to give up after a certain amount of time. Another subtle
932 * difference is that the internal locking semantics differ (this variant does
933 * not hold the lock for the duration of the wait). This makes the wait subject
934 * to a larger userspace race window.
935 *
936 * The implementation shall wait until the object is no longer actively
937 * referenced within a batch buffer at the time of the call. The wait does
938 * not guarantee that the buffer will not be re-issued via another thread or a
939 * flinked handle. Userspace must make sure this race does not occur if such
940 * precision is important.
941 *
942 * Note that some kernels have broken the promise of an infinite wait for
943 * negative values; upgrade to the latest stable kernel if this is the case.
944 */
945 int
946 brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns)
947 {
948 struct brw_bufmgr *bufmgr = bo->bufmgr;
949 struct drm_i915_gem_wait wait;
950 int ret;
951
952 memclear(wait);
953 wait.bo_handle = bo->gem_handle;
954 wait.timeout_ns = timeout_ns;
955 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
956 if (ret == -1)
957 return -errno;
958
959 return ret;
960 }
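
/*
 * Usage sketch (illustrative only, not compiled): poll a BO with a bounded
 * wait and fall back to other work on -ETIME. The 1 ms budget is made up.
 */
#if 0
static bool
example_wait_briefly(struct brw_bo *bo)
{
   /* A negative timeout_ns would mean "wait forever". */
   int ret = brw_bo_wait(bo, 1000 * 1000 /* 1 ms */);

   if (ret == -ETIME)
      return false;             /* still busy, come back later */
   return ret == 0;
}
#endif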
961
962 void
963 brw_bufmgr_destroy(struct brw_bufmgr *bufmgr)
964 {
965 pthread_mutex_destroy(&bufmgr->lock);
966
967 /* Free any cached buffer objects we were going to reuse */
968 for (int i = 0; i < bufmgr->num_buckets; i++) {
969 struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
970
971 list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
972 list_del(&bo->head);
973
974 bo_free(bo);
975 }
976 }
977
978 _mesa_hash_table_destroy(bufmgr->name_table, NULL);
979 _mesa_hash_table_destroy(bufmgr->handle_table, NULL);
980
981 free(bufmgr);
982 }
983
984 static int
985 bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode,
986 uint32_t stride)
987 {
988 struct brw_bufmgr *bufmgr = bo->bufmgr;
989 struct drm_i915_gem_set_tiling set_tiling;
990 int ret;
991
992 if (bo->global_name == 0 &&
993 tiling_mode == bo->tiling_mode && stride == bo->stride)
994 return 0;
995
996 memset(&set_tiling, 0, sizeof(set_tiling));
997 do {
998 /* set_tiling is slightly broken and overwrites the
999 * input on the error path, so we have to open code
1000 * drmIoctl.
1001 */
1002 set_tiling.handle = bo->gem_handle;
1003 set_tiling.tiling_mode = tiling_mode;
1004 set_tiling.stride = stride;
1005
1006 ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
1007 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
1008 if (ret == -1)
1009 return -errno;
1010
1011 bo->tiling_mode = set_tiling.tiling_mode;
1012 bo->swizzle_mode = set_tiling.swizzle_mode;
1013 bo->stride = set_tiling.stride;
1014 return 0;
1015 }
1016
1017 int
1018 brw_bo_get_tiling(struct brw_bo *bo, uint32_t *tiling_mode,
1019 uint32_t *swizzle_mode)
1020 {
1021 *tiling_mode = bo->tiling_mode;
1022 *swizzle_mode = bo->swizzle_mode;
1023 return 0;
1024 }
1025
1026 struct brw_bo *
1027 brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr, int prime_fd,
1028 int size)
1029 {
1030 int ret;
1031 uint32_t handle;
1032 struct brw_bo *bo;
1033 struct drm_i915_gem_get_tiling get_tiling;
1034
1035 pthread_mutex_lock(&bufmgr->lock);
1036 ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
1037 if (ret) {
1038 DBG("create_from_prime: failed to obtain handle from fd: %s\n",
1039 strerror(errno));
1040 pthread_mutex_unlock(&bufmgr->lock);
1041 return NULL;
1042 }
1043
1044 /*
1045 * See if the kernel has already returned this buffer to us. Just as
1046 * for named buffers, we must not create two BOs pointing at the same
1047 * kernel object.
1048 */
1049 bo = hash_find_bo(bufmgr->handle_table, handle);
1050 if (bo) {
1051 brw_bo_reference(bo);
1052 goto out;
1053 }
1054
1055 bo = calloc(1, sizeof(*bo));
1056 if (!bo)
1057 goto out;
1058
1059 p_atomic_set(&bo->refcount, 1);
1060
1061 /* Determine the size of the BO. The fd-to-handle ioctl really should
1062 * return the size, but it doesn't. If we have kernel 3.12 or
1063 * later, we can lseek on the prime fd to get the size. Older
1064 * kernels will just fail, in which case we fall back to the
1065 * provided (estimated or guessed) size. */
1066 ret = lseek(prime_fd, 0, SEEK_END);
1067 if (ret != -1)
1068 bo->size = ret;
1069 else
1070 bo->size = size;
1071
1072 bo->bufmgr = bufmgr;
1073
1074 bo->gem_handle = handle;
1075 _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
1076
1077 bo->name = "prime";
1078 bo->reusable = false;
1079
1080 memclear(get_tiling);
1081 get_tiling.handle = bo->gem_handle;
1082 if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
1083 goto err;
1084
1085 bo->tiling_mode = get_tiling.tiling_mode;
1086 bo->swizzle_mode = get_tiling.swizzle_mode;
1087 /* XXX stride is unknown */
1088
1089 out:
1090 pthread_mutex_unlock(&bufmgr->lock);
1091 return bo;
1092
1093 err:
1094 bo_free(bo);
1095 pthread_mutex_unlock(&bufmgr->lock);
1096 return NULL;
1097 }
1098
1099 int
1100 brw_bo_gem_export_to_prime(struct brw_bo *bo, int *prime_fd)
1101 {
1102 struct brw_bufmgr *bufmgr = bo->bufmgr;
1103
1104 if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
1105 DRM_CLOEXEC, prime_fd) != 0)
1106 return -errno;
1107
1108 bo->reusable = false;
1109
1110 return 0;
1111 }
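
/*
 * Sharing sketch (illustrative only, not compiled): dma-buf export/import.
 * The exporting side hands the fd to another process or API; the importing
 * side wraps it, falling back to `size_guess` bytes on kernels where the
 * size cannot be read back via lseek(). The transport of the fd between the
 * two sides is hypothetical and omitted here.
 */
#if 0
static struct brw_bo *
example_prime_roundtrip(struct brw_bufmgr *bufmgr, struct brw_bo *bo,
                        int size_guess)
{
   int fd = -1;

   if (brw_bo_gem_export_to_prime(bo, &fd) != 0)
      return NULL;

   struct brw_bo *imported =
      brw_bo_gem_create_from_prime(bufmgr, fd, size_guess);
   close(fd);                   /* the import holds its own GEM handle */
   return imported;
}
#endif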
1112
1113 int
1114 brw_bo_flink(struct brw_bo *bo, uint32_t *name)
1115 {
1116 struct brw_bufmgr *bufmgr = bo->bufmgr;
1117
1118 if (!bo->global_name) {
1119 struct drm_gem_flink flink;
1120
1121 memclear(flink);
1122 flink.handle = bo->gem_handle;
1123 if (drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
1124 return -errno;
1125
1126 pthread_mutex_lock(&bufmgr->lock);
1127 if (!bo->global_name) {
1128 bo->global_name = flink.name;
1129 bo->reusable = false;
1130
1131 _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
1132 }
1133 pthread_mutex_unlock(&bufmgr->lock);
1134 }
1135
1136 *name = bo->global_name;
1137 return 0;
1138 }
1139
1140 /**
1141 * Enables unlimited caching of buffer objects for reuse.
1142 *
1143 * This is potentially very memory expensive, as the cache at each bucket
1144 * size is only bounded by how many buffers of that size we've managed to have
1145 * in flight at once.
1146 */
1147 void
1148 brw_bufmgr_enable_reuse(struct brw_bufmgr *bufmgr)
1149 {
1150 bufmgr->bo_reuse = true;
1151 }
1152
1153 static void
1154 add_bucket(struct brw_bufmgr *bufmgr, int size)
1155 {
1156 unsigned int i = bufmgr->num_buckets;
1157
1158 assert(i < ARRAY_SIZE(bufmgr->cache_bucket));
1159
1160 list_inithead(&bufmgr->cache_bucket[i].head);
1161 bufmgr->cache_bucket[i].size = size;
1162 bufmgr->num_buckets++;
1163 }
1164
1165 static void
1166 init_cache_buckets(struct brw_bufmgr *bufmgr)
1167 {
1168 unsigned long size, cache_max_size = 64 * 1024 * 1024;
1169
1170 /* OK, so power-of-two buckets were too wasteful of memory.
1171 * Give 3 other sizes between each power of two, to hopefully
1172 * cover things accurately enough. (The alternative is
1173 * probably to just go for exact matching of sizes, and assume
1174 * that for things like composited window resize the tiled
1175 * width/height alignment and rounding of sizes to pages will
1176 * get us useful cache hit rates anyway)
1177 */
1178 add_bucket(bufmgr, 4096);
1179 add_bucket(bufmgr, 4096 * 2);
1180 add_bucket(bufmgr, 4096 * 3);
1181
1182 /* Initialize the linked lists for BO reuse cache. */
1183 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
1184 add_bucket(bufmgr, size);
1185
1186 add_bucket(bufmgr, size + size * 1 / 4);
1187 add_bucket(bufmgr, size + size * 2 / 4);
1188 add_bucket(bufmgr, size + size * 3 / 4);
1189 }
1190 }
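
/*
 * For reference, the buckets created above are 1, 2 and 3 pages, and then,
 * for every power-of-two size p from 16 KB up to 64 MB, the sizes p, 1.25*p,
 * 1.5*p and 1.75*p (the last group extends slightly past 64 MB).
 * bucket_for_size() returns the first bucket at least as large as the
 * request, so requests of 16 KB or more are rounded up by less than 25%.
 */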
1191
1192 uint32_t
1193 brw_create_hw_context(struct brw_bufmgr *bufmgr)
1194 {
1195 struct drm_i915_gem_context_create create;
1196 int ret;
1197
1198 memclear(create);
1199 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
1200 if (ret != 0) {
1201 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", strerror(errno));
1202 return 0;
1203 }
1204
1205 return create.ctx_id;
1206 }
1207
1208 void
1209 brw_destroy_hw_context(struct brw_bufmgr *bufmgr, uint32_t ctx_id)
1210 {
1211 struct drm_i915_gem_context_destroy d = {.ctx_id = ctx_id };
1212
1213 if (ctx_id != 0 &&
1214 drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
1215 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
1216 strerror(errno));
1217 }
1218 }
1219
1220 int
1221 brw_reg_read(struct brw_bufmgr *bufmgr, uint32_t offset, uint64_t *result)
1222 {
1223 struct drm_i915_reg_read reg_read;
1224 int ret;
1225
1226 memclear(reg_read);
1227 reg_read.offset = offset;
1228
1229 ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
1230
1231 *result = reg_read.val;
1232 return ret;
1233 }
1234
1235 void *
1236 brw_bo_map__gtt(struct brw_bo *bo)
1237 {
1238 struct brw_bufmgr *bufmgr = bo->bufmgr;
1239
1240 if (bo->gtt_virtual)
1241 return bo->gtt_virtual;
1242
1243 pthread_mutex_lock(&bufmgr->lock);
1244 if (bo->gtt_virtual == NULL) {
1245 struct drm_i915_gem_mmap_gtt mmap_arg;
1246 void *ptr;
1247
1248 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1249 bo->gem_handle, bo->name, bo->map_count);
1250
1251 memclear(mmap_arg);
1252 mmap_arg.handle = bo->gem_handle;
1253
1254 /* Get the fake offset back... */
1255 ptr = MAP_FAILED;
1256 if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg) == 0) {
1257 /* and mmap it */
1258 ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
1259 MAP_SHARED, bufmgr->fd, mmap_arg.offset);
1260 }
1261 if (ptr == MAP_FAILED) {
1262 --bo->map_count;
1263 ptr = NULL;
1264 }
1265
1266 bo->gtt_virtual = ptr;
1267 }
1268 pthread_mutex_unlock(&bufmgr->lock);
1269
1270 return bo->gtt_virtual;
1271 }
1272
1273 void *
1274 brw_bo_map__cpu(struct brw_bo *bo)
1275 {
1276 struct brw_bufmgr *bufmgr = bo->bufmgr;
1277
1278 if (bo->mem_virtual)
1279 return bo->mem_virtual;
1280
1281 pthread_mutex_lock(&bufmgr->lock);
1282 if (!bo->mem_virtual) {
1283 struct drm_i915_gem_mmap mmap_arg;
1284
1285 DBG("bo_map: %d (%s), map_count=%d\n",
1286 bo->gem_handle, bo->name, bo->map_count);
1287
1288 memclear(mmap_arg);
1289 mmap_arg.handle = bo->gem_handle;
1290 mmap_arg.size = bo->size;
1291 if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
1292 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1293 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1294 } else {
1295 bo->map_count++;
1296 VG(VALGRIND_MALLOCLIKE_BLOCK
1297 (mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1298 bo->mem_virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
1299 }
1300 }
1301 pthread_mutex_unlock(&bufmgr->lock);
1302
1303 return bo->mem_virtual;
1304 }
1305
1306 void *
1307 brw_bo_map__wc(struct brw_bo *bo)
1308 {
1309 struct brw_bufmgr *bufmgr = bo->bufmgr;
1310
1311 if (bo->wc_virtual)
1312 return bo->wc_virtual;
1313
1314 pthread_mutex_lock(&bufmgr->lock);
1315 if (!bo->wc_virtual) {
1316 struct drm_i915_gem_mmap mmap_arg;
1317
1318 DBG("bo_map: %d (%s), map_count=%d\n",
1319 bo->gem_handle, bo->name, bo->map_count);
1320
1321 memclear(mmap_arg);
1322 mmap_arg.handle = bo->gem_handle;
1323 mmap_arg.size = bo->size;
1324 mmap_arg.flags = I915_MMAP_WC;
1325 if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
1326 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1327 __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
1328 } else {
1329 bo->map_count++;
1330 VG(VALGRIND_MALLOCLIKE_BLOCK
1331 (mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1332 bo->wc_virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
1333 }
1334 }
1335 pthread_mutex_unlock(&bufmgr->lock);
1336
1337 return bo->wc_virtual;
1338 }
1339
1340 /**
1341 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
1342 * and manage buffer objects.
1343 *
1344 * \param fd File descriptor of the opened DRM device.
1345 */
1346 struct brw_bufmgr *
1347 brw_bufmgr_init(struct gen_device_info *devinfo, int fd, int batch_size)
1348 {
1349 struct brw_bufmgr *bufmgr;
1350
1351 bufmgr = calloc(1, sizeof(*bufmgr));
1352 if (bufmgr == NULL)
1353 return NULL;
1354
1355 /* Handles to buffer objects belong to the device fd and are not
1356 * reference counted by the kernel. If the same fd is used by
1357 * multiple parties (threads sharing the same screen bufmgr, or
1358 * even worse the same device fd passed to multiple libraries)
1359 * ownership of those handles is shared by those independent parties.
1360 *
1361 * Don't do this! Ensure that each library/bufmgr has its own device
1362 * fd so that its namespace does not clash with another.
1363 */
1364 bufmgr->fd = fd;
1365
1366 if (pthread_mutex_init(&bufmgr->lock, NULL) != 0) {
1367 free(bufmgr);
1368 return NULL;
1369 }
1370
1371 bufmgr->has_llc = devinfo->has_llc;
1372
1373 init_cache_buckets(bufmgr);
1374
1375 bufmgr->name_table =
1376 _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
1377 bufmgr->handle_table =
1378 _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
1379
1380 return bufmgr;
1381 }
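
/*
 * Initialization sketch (illustrative only, not compiled): a screen-level
 * caller creates one bufmgr per device fd, opts in to BO reuse, and tears it
 * down again. `devinfo` and `fd` are assumed to come from screen setup, and
 * the batch_size and scratch allocation values are made up.
 */
#if 0
static int
example_bufmgr_setup(struct gen_device_info *devinfo, int fd)
{
   /* 16 KB batch_size: a made-up value for this sketch. */
   struct brw_bufmgr *bufmgr = brw_bufmgr_init(devinfo, fd, 16 * 1024);
   if (bufmgr == NULL)
      return -1;

   /* Without this, freed BOs are closed instead of cached for reuse. */
   brw_bufmgr_enable_reuse(bufmgr);

   struct brw_bo *scratch = brw_bo_alloc(bufmgr, "scratch", 4096, 0);
   brw_bo_unreference(scratch);

   brw_bufmgr_destroy(bufmgr);
   return 0;
}
#endif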