/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007-2012 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <xf86drm.h>
#include <util/u_atomic.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <stdbool.h>

#include "errno.h"
#ifndef ETIME
#define ETIME ETIMEDOUT
#endif
#include "common/gen_debug.h"
#include "common/gen_device_info.h"
#include "libdrm_macros.h"
#include "main/macros.h"
#include "util/macros.h"
#include "util/hash_table.h"
#include "util/list.h"
#include "brw_bufmgr.h"
#include "brw_context.h"
#include "string.h"

#include "i915_drm.h"

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif

#define memclear(s) memset(&s, 0, sizeof(s))

#define FILE_DEBUG_FLAG DEBUG_BUFMGR

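/*
 * Atomically add "add" to *v, unless *v is already equal to "unless".
 * Returns true if *v was "unless" (and therefore nothing was added).
 */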
static inline int
atomic_add_unless(int *v, int add, int unless)
{
   int c, old;
   c = p_atomic_read(v);
   while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
      c = old;
   return c == unless;
}

struct bo_cache_bucket {
   struct list_head head;
   uint64_t size;
};

struct brw_bufmgr {
   int fd;

   pthread_mutex_t lock;

   /** Array of lists of cached gem objects of power-of-two sizes */
   struct bo_cache_bucket cache_bucket[14 * 4];
   int num_buckets;
   time_t time;

   struct hash_table *name_table;
   struct hash_table *handle_table;

   bool has_llc:1;
   bool bo_reuse:1;
};

static int bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode,
                                  uint32_t stride);

static void bo_free(struct brw_bo *bo);

static uint32_t
key_hash_uint(const void *key)
{
   return _mesa_hash_data(key, 4);
}

static bool
key_uint_equal(const void *a, const void *b)
{
   return *((unsigned *) a) == *((unsigned *) b);
}

static struct brw_bo *
hash_find_bo(struct hash_table *ht, unsigned int key)
{
   struct hash_entry *entry = _mesa_hash_table_search(ht, &key);
   return entry ? (struct brw_bo *) entry->data : NULL;
}

static uint64_t
bo_tile_size(struct brw_bufmgr *bufmgr, uint64_t size, uint32_t tiling)
{
   if (tiling == I915_TILING_NONE)
      return size;

   /* 965+ just need multiples of page size for tiling */
   return ALIGN(size, 4096);
}

/*
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip.  We use 512 as the minimum to allow for a later tiling
 * change.
 */
static uint32_t
bo_tile_pitch(struct brw_bufmgr *bufmgr, uint32_t pitch, uint32_t tiling)
{
   unsigned long tile_width;

   /* If untiled, then just align it so that we can do rendering
    * to it with the 3D engine.
    */
   if (tiling == I915_TILING_NONE)
      return ALIGN(pitch, 64);

   if (tiling == I915_TILING_X)
      tile_width = 512;
   else
      tile_width = 128;

   /* 965 is flexible */
   return ALIGN(pitch, tile_width);
}

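/*
 * Find the smallest cache bucket that can hold an allocation of the given
 * size, or NULL if the size exceeds the largest bucket.
 */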
static struct bo_cache_bucket *
bucket_for_size(struct brw_bufmgr *bufmgr, uint64_t size)
{
   int i;

   for (i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];
      if (bucket->size >= size) {
         return bucket;
      }
   }

   return NULL;
}

inline void
brw_bo_reference(struct brw_bo *bo)
{
   p_atomic_inc(&bo->refcount);
}

int
brw_bo_busy(struct brw_bo *bo)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_busy busy;
   int ret;

   memclear(busy);
   busy.handle = bo->gem_handle;

   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret == 0) {
      bo->idle = !busy.busy;
      return busy.busy;
   }
   return false;
}

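/*
 * Tell the kernel whether the BO's backing pages may be discarded under
 * memory pressure (I915_MADV_DONTNEED) or are needed again
 * (I915_MADV_WILLNEED).  Returns whether the pages are still resident.
 */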
int
brw_bo_madvise(struct brw_bo *bo, int state)
{
   struct drm_i915_gem_madvise madv;

   memclear(madv);
   madv.handle = bo->gem_handle;
   madv.madv = state;
   madv.retained = 1;
   drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

   return madv.retained;
}

/* drop the oldest entries that have been purged by the kernel */
static void
brw_bo_cache_purge_bucket(struct brw_bufmgr *bufmgr,
                          struct bo_cache_bucket *bucket)
{
   list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
      if (brw_bo_madvise(bo, I915_MADV_DONTNEED))
         break;

      list_del(&bo->head);
      bo_free(bo);
   }
}

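/*
 * Common allocation path: try to reuse a cached BO from a suitably sized
 * bucket first, and fall back to a fresh GEM_CREATE if the cache cannot
 * satisfy the request.
 */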
static struct brw_bo *
bo_alloc_internal(struct brw_bufmgr *bufmgr,
                  const char *name,
                  uint64_t size,
                  unsigned flags,
                  uint32_t tiling_mode,
                  uint32_t stride, uint64_t alignment)
{
   struct brw_bo *bo;
   unsigned int page_size = getpagesize();
   int ret;
   struct bo_cache_bucket *bucket;
   bool alloc_from_cache;
   uint64_t bo_size;
   bool for_render = false;

   if (flags & BO_ALLOC_FOR_RENDER)
      for_render = true;

   /* Round the allocated size up to the nearest cache bucket size. */
   bucket = bucket_for_size(bufmgr, size);

   /* If we don't have caching at this size, don't actually round the
    * allocation up.
    */
   if (bucket == NULL) {
      bo_size = size;
      if (bo_size < page_size)
         bo_size = page_size;
   } else {
      bo_size = bucket->size;
   }

   pthread_mutex_lock(&bufmgr->lock);
   /* Get a buffer out of the cache if available */
retry:
   alloc_from_cache = false;
   if (bucket != NULL && !list_empty(&bucket->head)) {
      if (for_render) {
         /* Allocate new render-target BOs from the tail (MRU)
          * of the list, as it will likely be hot in the GPU
          * cache and in the aperture for us.
          */
         bo = LIST_ENTRY(struct brw_bo, bucket->head.prev, head);
         list_del(&bo->head);
         alloc_from_cache = true;
         bo->align = alignment;
      } else {
         assert(alignment == 0);
         /* For non-render-target BOs (where we're probably
          * going to map it first thing in order to fill it
          * with data), check if the last BO in the cache is
          * unbusy, and only reuse in that case.  Otherwise,
          * allocating a new buffer is probably faster than
          * waiting for the GPU to finish.
          */
         bo = LIST_ENTRY(struct brw_bo, bucket->head.next, head);
         if (!brw_bo_busy(bo)) {
            alloc_from_cache = true;
            list_del(&bo->head);
         }
      }

      if (alloc_from_cache) {
         if (!brw_bo_madvise(bo, I915_MADV_WILLNEED)) {
            bo_free(bo);
            brw_bo_cache_purge_bucket(bufmgr, bucket);
            goto retry;
         }

         if (bo_set_tiling_internal(bo, tiling_mode, stride)) {
            bo_free(bo);
            goto retry;
         }
      }
   }

   if (!alloc_from_cache) {
      struct drm_i915_gem_create create;

      bo = calloc(1, sizeof(*bo));
      if (!bo)
         goto err;

      bo->size = bo_size;
      bo->idle = true;

      memclear(create);
      create.size = bo_size;

      ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
      if (ret != 0) {
         free(bo);
         goto err;
      }

      bo->gem_handle = create.handle;
      _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);

      bo->bufmgr = bufmgr;
      bo->align = alignment;

      bo->tiling_mode = I915_TILING_NONE;
      bo->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
      bo->stride = 0;

      if (bo_set_tiling_internal(bo, tiling_mode, stride))
         goto err_free;
   }

   bo->name = name;
   p_atomic_set(&bo->refcount, 1);
   bo->reusable = true;
   bo->cache_coherent = bufmgr->has_llc;

   pthread_mutex_unlock(&bufmgr->lock);

   DBG("bo_create: buf %d (%s) %" PRIu64 "b\n", bo->gem_handle, bo->name, size);

   return bo;

err_free:
   bo_free(bo);
err:
   pthread_mutex_unlock(&bufmgr->lock);
   return NULL;
}

struct brw_bo *
brw_bo_alloc(struct brw_bufmgr *bufmgr,
             const char *name, uint64_t size, uint64_t alignment)
{
   return bo_alloc_internal(bufmgr, name, size, 0, I915_TILING_NONE, 0, 0);
}

struct brw_bo *
brw_bo_alloc_tiled(struct brw_bufmgr *bufmgr, const char *name,
                   uint64_t size, uint32_t tiling_mode, uint32_t pitch,
                   unsigned flags)
{
   return bo_alloc_internal(bufmgr, name, size, flags, tiling_mode, pitch, 0);
}

struct brw_bo *
brw_bo_alloc_tiled_2d(struct brw_bufmgr *bufmgr, const char *name,
                      int x, int y, int cpp, uint32_t tiling,
                      uint32_t *pitch, unsigned flags)
{
   uint64_t size;
   uint32_t stride;
   unsigned long aligned_y, height_alignment;

   /* If we're tiled, our allocations are in 8 or 32-row blocks,
    * so failure to align our height means that we won't allocate
    * enough pages.
    *
    * If we're untiled, we still have to align to 2 rows high
    * because the data port accesses 2x2 blocks even if the
    * bottom row isn't to be rendered, so failure to align means
    * we could walk off the end of the GTT and fault.  This is
    * documented on 965, and may be the case on older chipsets
    * too so we try to be careful.
    */
   aligned_y = y;
   height_alignment = 2;

   if (tiling == I915_TILING_X)
      height_alignment = 8;
   else if (tiling == I915_TILING_Y)
      height_alignment = 32;
   aligned_y = ALIGN(y, height_alignment);

   stride = x * cpp;
   stride = bo_tile_pitch(bufmgr, stride, tiling);
   size = stride * aligned_y;
   size = bo_tile_size(bufmgr, size, tiling);
   *pitch = stride;

   if (tiling == I915_TILING_NONE)
      stride = 0;

   return bo_alloc_internal(bufmgr, name, size, flags, tiling, stride, 0);
}

/**
 * Returns a brw_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
struct brw_bo *
brw_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
                            const char *name, unsigned int handle)
{
   struct brw_bo *bo;
   int ret;
   struct drm_gem_open open_arg;
   struct drm_i915_gem_get_tiling get_tiling;

   /* At the moment most applications only have a few named BOs.
    * For instance, in a DRI client only the render buffers passed
    * between X and the client are named.  And since X returns the
    * alternating names for the front/back buffer, the name table
    * stays small and the lookup here is cheap.
    */
   pthread_mutex_lock(&bufmgr->lock);
   bo = hash_find_bo(bufmgr->name_table, handle);
   if (bo) {
      brw_bo_reference(bo);
      goto out;
   }

   memclear(open_arg);
   open_arg.name = handle;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
   if (ret != 0) {
      DBG("Couldn't reference %s handle 0x%08x: %s\n",
          name, handle, strerror(errno));
      bo = NULL;
      goto out;
   }
   /* Now see if someone has used a prime handle to get this
    * object from the kernel before by looking through the list
    * again for a matching gem_handle
    */
   bo = hash_find_bo(bufmgr->handle_table, open_arg.handle);
   if (bo) {
      brw_bo_reference(bo);
      goto out;
   }

   bo = calloc(1, sizeof(*bo));
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   bo->size = open_arg.size;
   bo->offset64 = 0;
   bo->bufmgr = bufmgr;
   bo->gem_handle = open_arg.handle;
   bo->name = name;
   bo->global_name = handle;
   bo->reusable = false;

   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);
   _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);

   memclear(get_tiling);
   get_tiling.handle = bo->gem_handle;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
   if (ret != 0)
      goto err_unref;

   bo->tiling_mode = get_tiling.tiling_mode;
   bo->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */
   DBG("bo_create_from_handle: %d (%s)\n", handle, bo->name);

out:
   pthread_mutex_unlock(&bufmgr->lock);
   return bo;

err_unref:
   bo_free(bo);
   pthread_mutex_unlock(&bufmgr->lock);
   return NULL;
}

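/*
 * Release all resources belonging to a BO: unmap any CPU/WC/GTT mappings,
 * drop it from the handle and name tables, close the GEM handle, and free
 * the wrapper.
 */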
static void
bo_free(struct brw_bo *bo)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct drm_gem_close close;
   struct hash_entry *entry;
   int ret;

   if (bo->map_cpu) {
      VG(VALGRIND_FREELIKE_BLOCK(bo->map_cpu, 0));
      drm_munmap(bo->map_cpu, bo->size);
   }
   if (bo->map_wc) {
      VG(VALGRIND_FREELIKE_BLOCK(bo->map_wc, 0));
      drm_munmap(bo->map_wc, bo->size);
   }
   if (bo->map_gtt) {
      drm_munmap(bo->map_gtt, bo->size);
   }

   if (bo->global_name) {
      entry = _mesa_hash_table_search(bufmgr->name_table, &bo->global_name);
      _mesa_hash_table_remove(bufmgr->name_table, entry);
   }
   entry = _mesa_hash_table_search(bufmgr->handle_table, &bo->gem_handle);
   _mesa_hash_table_remove(bufmgr->handle_table, entry);

   /* Close this object */
   memclear(close);
   close.handle = bo->gem_handle;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_CLOSE, &close);
   if (ret != 0) {
      DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
          bo->gem_handle, bo->name, strerror(errno));
   }
   free(bo);
}

static void
bo_mark_mmaps_incoherent(struct brw_bo *bo)
{
#ifdef HAVE_VALGRIND
   if (bo->map_cpu)
      VALGRIND_MAKE_MEM_NOACCESS(bo->map_cpu, bo->size);

   if (bo->map_wc)
      VALGRIND_MAKE_MEM_NOACCESS(bo->map_wc, bo->size);

   if (bo->map_gtt)
      VALGRIND_MAKE_MEM_NOACCESS(bo->map_gtt, bo->size);
#endif
}

/** Frees all cached buffers significantly older than @time. */
static void
cleanup_bo_cache(struct brw_bufmgr *bufmgr, time_t time)
{
   int i;

   if (bufmgr->time == time)
      return;

   for (i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
         if (time - bo->free_time <= 1)
            break;

         list_del(&bo->head);

         bo_free(bo);
      }
   }

   bufmgr->time = time;
}

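/*
 * Called once the last reference is dropped (with the bufmgr lock held):
 * return the BO to its cache bucket for reuse if possible, otherwise free
 * it immediately.
 */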
static void
bo_unreference_final(struct brw_bo *bo, time_t time)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct bo_cache_bucket *bucket;

   DBG("bo_unreference final: %d (%s)\n", bo->gem_handle, bo->name);

   /* Clear any left-over mappings */
   if (bo->map_count) {
      DBG("bo freed with non-zero map-count %d\n", bo->map_count);
      bo->map_count = 0;
      bo_mark_mmaps_incoherent(bo);
   }

   bucket = bucket_for_size(bufmgr, bo->size);
   /* Put the buffer into our internal cache for reuse if we can. */
   if (bufmgr->bo_reuse && bo->reusable && bucket != NULL &&
       brw_bo_madvise(bo, I915_MADV_DONTNEED)) {
      bo->free_time = time;

      bo->name = NULL;
      bo->kflags = 0;

      list_addtail(&bo->head, &bucket->head);
   } else {
      bo_free(bo);
   }
}

void
brw_bo_unreference(struct brw_bo *bo)
{
   if (bo == NULL)
      return;

   assert(p_atomic_read(&bo->refcount) > 0);

   if (atomic_add_unless(&bo->refcount, -1, 1)) {
      struct brw_bufmgr *bufmgr = bo->bufmgr;
      struct timespec time;

      clock_gettime(CLOCK_MONOTONIC, &time);

      pthread_mutex_lock(&bufmgr->lock);

      if (p_atomic_dec_zero(&bo->refcount)) {
         bo_unreference_final(bo, time.tv_sec);
         cleanup_bo_cache(bufmgr, time.tv_sec);
      }

      pthread_mutex_unlock(&bufmgr->lock);
   }
}

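/*
 * Move the BO into the given GEM read/write domains, waiting for any GPU
 * rendering that conflicts with the requested access.  When perf_debug is
 * enabled, stalls longer than 0.01 ms are reported.
 */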
static void
set_domain(struct brw_context *brw, const char *action,
           struct brw_bo *bo, uint32_t read_domains, uint32_t write_domain)
{
   struct drm_i915_gem_set_domain sd = {
      .handle = bo->gem_handle,
      .read_domains = read_domains,
      .write_domain = write_domain,
   };

   double elapsed = unlikely(brw && brw->perf_debug) ? -get_time() : 0.0;

   if (drmIoctl(bo->bufmgr->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) != 0) {
      DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s.\n",
          __FILE__, __LINE__, bo->gem_handle, read_domains, write_domain,
          strerror(errno));
   }

   if (unlikely(brw && brw->perf_debug)) {
      elapsed += get_time();
      if (elapsed > 1e-5) /* 0.01ms */
         perf_debug("%s a busy \"%s\" BO stalled and took %.03f ms.\n",
                    action, bo->name, elapsed * 1000);
   }
}

static void *
brw_bo_map_cpu(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   pthread_mutex_lock(&bufmgr->lock);

   if (!bo->map_cpu) {
      struct drm_i915_gem_mmap mmap_arg;

      DBG("brw_bo_map_cpu: %d (%s), map_count=%d\n",
          bo->gem_handle, bo->name, bo->map_count);

      memclear(mmap_arg);
      mmap_arg.handle = bo->gem_handle;
      mmap_arg.size = bo->size;
      int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
      if (ret != 0) {
         ret = -errno;
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         pthread_mutex_unlock(&bufmgr->lock);
         return NULL;
      }
      bo->map_count++;
      VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
      bo->map_cpu = (void *) (uintptr_t) mmap_arg.addr_ptr;
   }
   DBG("brw_bo_map_cpu: %d (%s) -> %p\n", bo->gem_handle, bo->name,
       bo->map_cpu);

   if (!(flags & MAP_ASYNC) || !bufmgr->has_llc) {
      set_domain(brw, "CPU mapping", bo, I915_GEM_DOMAIN_CPU,
                 flags & MAP_WRITE ? I915_GEM_DOMAIN_CPU : 0);
   }

   bo_mark_mmaps_incoherent(bo);
   VG(VALGRIND_MAKE_MEM_DEFINED(bo->map_cpu, bo->size));
   pthread_mutex_unlock(&bufmgr->lock);

   return bo->map_cpu;
}

static void *
brw_bo_map_gtt(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   pthread_mutex_lock(&bufmgr->lock);

   /* Get a mapping of the buffer if we haven't before. */
   if (bo->map_gtt == NULL) {
      struct drm_i915_gem_mmap_gtt mmap_arg;

      DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
          bo->gem_handle, bo->name, bo->map_count);

      memclear(mmap_arg);
      mmap_arg.handle = bo->gem_handle;

      /* Get the fake offset back... */
      int ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
      if (ret != 0) {
         DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         pthread_mutex_unlock(&bufmgr->lock);
         return NULL;
      }

      /* and mmap it */
      bo->map_gtt = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
                             MAP_SHARED, bufmgr->fd, mmap_arg.offset);
      if (bo->map_gtt == MAP_FAILED) {
         bo->map_gtt = NULL;
         DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
             __FILE__, __LINE__, bo->gem_handle, bo->name, strerror(errno));
         pthread_mutex_unlock(&bufmgr->lock);
         return NULL;
      }
      bo->map_count++;
   }

   DBG("bo_map_gtt: %d (%s) -> %p\n", bo->gem_handle, bo->name,
       bo->map_gtt);

   if (!(flags & MAP_ASYNC) || !bufmgr->has_llc) {
      set_domain(brw, "GTT mapping", bo,
                 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
   }

   bo_mark_mmaps_incoherent(bo);
   VG(VALGRIND_MAKE_MEM_DEFINED(bo->map_gtt, bo->size));
   pthread_mutex_unlock(&bufmgr->lock);

   return bo->map_gtt;
}

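/*
 * Decide whether a (cached) CPU mapping is usable for the requested access:
 * always on cache-coherent (LLC) BOs; otherwise only for reads, and never
 * when the caller demands a persistent or coherent mapping.
 */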
static bool
can_map_cpu(struct brw_bo *bo, unsigned flags)
{
   if (bo->cache_coherent)
      return true;

   if (flags & MAP_PERSISTENT)
      return false;

   if (flags & MAP_COHERENT)
      return false;

   return !(flags & MAP_WRITE);
}

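/*
 * Map a BO for CPU access.  Tiled BOs normally go through the GTT so the
 * hardware handles detiling (unless MAP_RAW asks for the raw contents);
 * linear BOs use a CPU mapping when that is safe, and fall back to the GTT
 * otherwise.
 */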
void *
brw_bo_map(struct brw_context *brw, struct brw_bo *bo, unsigned flags)
{
   if (bo->tiling_mode != I915_TILING_NONE && !(flags & MAP_RAW))
      return brw_bo_map_gtt(brw, bo, flags);
   else if (can_map_cpu(bo, flags))
      return brw_bo_map_cpu(brw, bo, flags);
   else
      return brw_bo_map_gtt(brw, bo, flags);
}

int
brw_bo_unmap(struct brw_bo *bo)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   int ret = 0;

   pthread_mutex_lock(&bufmgr->lock);

   if (bo->map_count <= 0) {
      DBG("attempted to unmap an unmapped bo\n");
      pthread_mutex_unlock(&bufmgr->lock);
      /* Preserve the old behaviour of just treating this as a
       * no-op rather than reporting the error.
       */
      return 0;
   }

   if (--bo->map_count == 0) {
      bo_mark_mmaps_incoherent(bo);
   }
   pthread_mutex_unlock(&bufmgr->lock);

   return ret;
}

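/*
 * Upload (pwrite) and download (pread) helpers for copying data between
 * user memory and a BO without mapping it.  Both return 0 on success or
 * -errno on failure.
 */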
int
brw_bo_subdata(struct brw_bo *bo, uint64_t offset,
               uint64_t size, const void *data)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_pwrite pwrite;
   int ret;

   memclear(pwrite);
   pwrite.handle = bo->gem_handle;
   pwrite.offset = offset;
   pwrite.size = size;
   pwrite.data_ptr = (uint64_t) (uintptr_t) data;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
   if (ret != 0) {
      ret = -errno;
      DBG("%s:%d: Error writing data to buffer %d: "
          "(%"PRIu64" %"PRIu64") %s .\n",
          __FILE__, __LINE__, bo->gem_handle, offset, size, strerror(errno));
   }

   return ret;
}

int
brw_bo_get_subdata(struct brw_bo *bo, uint64_t offset,
                   uint64_t size, void *data)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_pread pread;
   int ret;

   memclear(pread);
   pread.handle = bo->gem_handle;
   pread.offset = offset;
   pread.size = size;
   pread.data_ptr = (uint64_t) (uintptr_t) data;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
   if (ret != 0) {
      ret = -errno;
      DBG("%s:%d: Error reading data from buffer %d: "
          "(%"PRIu64" %"PRIu64") %s .\n",
          __FILE__, __LINE__, bo->gem_handle, offset, size, strerror(errno));
   }

   return ret;
}

/** Waits for all GPU rendering with the object to have completed. */
void
brw_bo_wait_rendering(struct brw_context *brw, struct brw_bo *bo)
{
   set_domain(brw, "waiting for",
              bo, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
}

/**
 * Waits on a BO for the given amount of time.
 *
 * @bo: buffer object to wait for
 * @timeout_ns: amount of time to wait in nanoseconds.
 *   If value is less than 0, an infinite wait will occur.
 *
 * Returns 0 if the wait was successful, i.e. the last batch referencing the
 * object has completed within the allotted time.  Otherwise some negative
 * return value describes the error.  Of particular interest is -ETIME when
 * the wait has failed to yield the desired result.
 *
 * Similar to brw_bo_wait_rendering except a timeout parameter allows
 * the operation to give up after a certain amount of time.  Another subtle
 * difference is the internal locking semantics are different (this variant
 * does not hold the lock for the duration of the wait).  This makes the wait
 * subject to a larger userspace race window.
 *
 * The implementation shall wait until the object is no longer actively
 * referenced within a batch buffer at the time of the call.  The wait will
 * not guarantee that the buffer is re-issued via another thread, or a
 * flinked handle.  Userspace must make sure this race does not occur if
 * such precision is important.
 *
 * Note that some kernels have broken the promise of an infinite wait for
 * negative values; upgrade to the latest stable kernel if this is the case.
 */
int
brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_wait wait;
   int ret;

   memclear(wait);
   wait.bo_handle = bo->gem_handle;
   wait.timeout_ns = timeout_ns;
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   if (ret == -1)
      return -errno;

   return ret;
}

void
brw_bufmgr_destroy(struct brw_bufmgr *bufmgr)
{
   pthread_mutex_destroy(&bufmgr->lock);

   /* Free any cached buffer objects we were going to reuse */
   for (int i = 0; i < bufmgr->num_buckets; i++) {
      struct bo_cache_bucket *bucket = &bufmgr->cache_bucket[i];

      list_for_each_entry_safe(struct brw_bo, bo, &bucket->head, head) {
         list_del(&bo->head);

         bo_free(bo);
      }
   }

   _mesa_hash_table_destroy(bufmgr->name_table, NULL);
   _mesa_hash_table_destroy(bufmgr->handle_table, NULL);

   free(bufmgr);
}

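/*
 * Change the tiling mode/stride of a BO via the SET_TILING ioctl, retrying
 * on EINTR/EAGAIN.  A no-op if the BO is not flinked and already has the
 * requested tiling and stride.
 */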
static int
bo_set_tiling_internal(struct brw_bo *bo, uint32_t tiling_mode,
                       uint32_t stride)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;
   struct drm_i915_gem_set_tiling set_tiling;
   int ret;

   if (bo->global_name == 0 &&
       tiling_mode == bo->tiling_mode && stride == bo->stride)
      return 0;

   memset(&set_tiling, 0, sizeof(set_tiling));
   do {
      /* set_tiling is slightly broken and overwrites the
       * input on the error path, so we have to open-code
       * drmIoctl.
       */
      set_tiling.handle = bo->gem_handle;
      set_tiling.tiling_mode = tiling_mode;
      set_tiling.stride = stride;

      ret = ioctl(bufmgr->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
   if (ret == -1)
      return -errno;

   bo->tiling_mode = set_tiling.tiling_mode;
   bo->swizzle_mode = set_tiling.swizzle_mode;
   bo->stride = set_tiling.stride;
   return 0;
}

int
brw_bo_get_tiling(struct brw_bo *bo, uint32_t *tiling_mode,
                  uint32_t *swizzle_mode)
{
   *tiling_mode = bo->tiling_mode;
   *swizzle_mode = bo->swizzle_mode;
   return 0;
}

struct brw_bo *
brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr, int prime_fd)
{
   int ret;
   uint32_t handle;
   struct brw_bo *bo;
   struct drm_i915_gem_get_tiling get_tiling;

   pthread_mutex_lock(&bufmgr->lock);
   ret = drmPrimeFDToHandle(bufmgr->fd, prime_fd, &handle);
   if (ret) {
      DBG("create_from_prime: failed to obtain handle from fd: %s\n",
          strerror(errno));
      pthread_mutex_unlock(&bufmgr->lock);
      return NULL;
   }

   /*
    * See if the kernel has already returned this buffer to us.  Just as
    * for named buffers, we must not create two BOs pointing at the same
    * kernel object
    */
   bo = hash_find_bo(bufmgr->handle_table, handle);
   if (bo) {
      brw_bo_reference(bo);
      goto out;
   }

   bo = calloc(1, sizeof(*bo));
   if (!bo)
      goto out;

   p_atomic_set(&bo->refcount, 1);

   /* Determine size of bo.  The fd-to-handle ioctl really should
    * return the size, but it doesn't.  If we have kernel 3.12 or
    * later, we can lseek on the prime fd to get the size.  Older
    * kernels will just fail, in which case we are left with a
    * size of zero.
    */
   ret = lseek(prime_fd, 0, SEEK_END);
   if (ret != -1)
      bo->size = ret;

   bo->bufmgr = bufmgr;

   bo->gem_handle = handle;
   _mesa_hash_table_insert(bufmgr->handle_table, &bo->gem_handle, bo);

   bo->name = "prime";
   bo->reusable = false;

   memclear(get_tiling);
   get_tiling.handle = bo->gem_handle;
   if (drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling))
      goto err;

   bo->tiling_mode = get_tiling.tiling_mode;
   bo->swizzle_mode = get_tiling.swizzle_mode;
   /* XXX stride is unknown */

out:
   pthread_mutex_unlock(&bufmgr->lock);
   return bo;

err:
   bo_free(bo);
   pthread_mutex_unlock(&bufmgr->lock);
   return NULL;
}

int
brw_bo_gem_export_to_prime(struct brw_bo *bo, int *prime_fd)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   if (drmPrimeHandleToFD(bufmgr->fd, bo->gem_handle,
                          DRM_CLOEXEC, prime_fd) != 0)
      return -errno;

   bo->reusable = false;

   return 0;
}

int
brw_bo_flink(struct brw_bo *bo, uint32_t *name)
{
   struct brw_bufmgr *bufmgr = bo->bufmgr;

   if (!bo->global_name) {
      struct drm_gem_flink flink;

      memclear(flink);
      flink.handle = bo->gem_handle;
      if (drmIoctl(bufmgr->fd, DRM_IOCTL_GEM_FLINK, &flink))
         return -errno;

      pthread_mutex_lock(&bufmgr->lock);
      if (!bo->global_name) {
         bo->global_name = flink.name;
         bo->reusable = false;

         _mesa_hash_table_insert(bufmgr->name_table, &bo->global_name, bo);
      }
      pthread_mutex_unlock(&bufmgr->lock);
   }

   *name = bo->global_name;
   return 0;
}

/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
brw_bufmgr_enable_reuse(struct brw_bufmgr *bufmgr)
{
   bufmgr->bo_reuse = true;
}

static void
add_bucket(struct brw_bufmgr *bufmgr, int size)
{
   unsigned int i = bufmgr->num_buckets;

   assert(i < ARRAY_SIZE(bufmgr->cache_bucket));

   list_inithead(&bufmgr->cache_bucket[i].head);
   bufmgr->cache_bucket[i].size = size;
   bufmgr->num_buckets++;
}

static void
init_cache_buckets(struct brw_bufmgr *bufmgr)
{
   uint64_t size, cache_max_size = 64 * 1024 * 1024;

   /* OK, so power of two buckets was too wasteful of memory.
    * Give 3 other sizes between each power of two, to hopefully
    * cover things accurately enough.  (The alternative is
    * probably to just go for exact matching of sizes, and assume
    * that for things like composited window resize the tiled
    * width/height alignment and rounding of sizes to pages will
    * get us useful cache hit rates anyway)
    */
   add_bucket(bufmgr, 4096);
   add_bucket(bufmgr, 4096 * 2);
   add_bucket(bufmgr, 4096 * 3);

   /* Initialize the linked lists for BO reuse cache. */
   for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
      add_bucket(bufmgr, size);

      add_bucket(bufmgr, size + size * 1 / 4);
      add_bucket(bufmgr, size + size * 2 / 4);
      add_bucket(bufmgr, size + size * 3 / 4);
   }
}

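/*
 * Create a new hardware context on this device.  Returns the context ID,
 * or 0 on failure.
 */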
uint32_t
brw_create_hw_context(struct brw_bufmgr *bufmgr)
{
   struct drm_i915_gem_context_create create;
   int ret;

   memclear(create);
   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
   if (ret != 0) {
      DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", strerror(errno));
      return 0;
   }

   return create.ctx_id;
}

void
brw_destroy_hw_context(struct brw_bufmgr *bufmgr, uint32_t ctx_id)
{
   struct drm_i915_gem_context_destroy d = { .ctx_id = ctx_id };

   if (ctx_id != 0 &&
       drmIoctl(bufmgr->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &d) != 0) {
      fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
              strerror(errno));
   }
}

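/*
 * Read a 64-bit hardware register through the kernel's REG_READ interface.
 * Returns the ioctl result; *result is only meaningful when 0 is returned.
 */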
int
brw_reg_read(struct brw_bufmgr *bufmgr, uint32_t offset, uint64_t *result)
{
   struct drm_i915_reg_read reg_read;
   int ret;

   memclear(reg_read);
   reg_read.offset = offset;

   ret = drmIoctl(bufmgr->fd, DRM_IOCTL_I915_REG_READ, &reg_read);

   *result = reg_read.val;
   return ret;
}

/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate,
 * map, and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
struct brw_bufmgr *
brw_bufmgr_init(struct gen_device_info *devinfo, int fd, int batch_size)
{
   struct brw_bufmgr *bufmgr;

   bufmgr = calloc(1, sizeof(*bufmgr));
   if (bufmgr == NULL)
      return NULL;

   /* Handles to buffer objects belong to the device fd and are not
    * reference counted by the kernel.  If the same fd is used by
    * multiple parties (threads sharing the same screen bufmgr, or
    * even worse the same device fd passed to multiple libraries)
    * ownership of those handles is shared by those independent parties.
    *
    * Don't do this!  Ensure that each library/bufmgr has its own device
    * fd so that its namespace does not clash with another.
    */
   bufmgr->fd = fd;

   if (pthread_mutex_init(&bufmgr->lock, NULL) != 0) {
      free(bufmgr);
      return NULL;
   }

   bufmgr->has_llc = devinfo->has_llc;

   init_cache_buckets(bufmgr);

   bufmgr->name_table =
      _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);
   bufmgr->handle_table =
      _mesa_hash_table_create(NULL, key_hash_uint, key_uint_equal);

   return bufmgr;
}