1 /**************************************************************************
2 *
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007-2012 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
26 * of the Software.
27 *
28 *
29 **************************************************************************/
30 /*
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
35 */
36
37 #ifdef HAVE_CONFIG_H
38 #include "config.h"
39 #endif
40
41 #include <xf86drm.h>
42 #include <util/u_atomic.h>
43 #include <fcntl.h>
44 #include <stdio.h>
45 #include <stdlib.h>
46 #include <string.h>
47 #include <unistd.h>
48 #include <assert.h>
49 #include <pthread.h>
50 #include <sys/ioctl.h>
51 #include <sys/stat.h>
52 #include <sys/types.h>
53 #include <stdbool.h>
54
55 #include "errno.h"
56 #ifndef ETIME
57 #define ETIME ETIMEDOUT
58 #endif
59 #include "common/gen_debug.h"
60 #include "libdrm_macros.h"
61 #include "main/macros.h"
62 #include "util/macros.h"
63 #include "util/list.h"
64 #include "brw_bufmgr.h"
65 #include "intel_bufmgr_priv.h"
66 #include "intel_chipset.h"
67 #include "string.h"
68
69 #include "i915_drm.h"
70 #include "uthash.h"
71
72 #ifdef HAVE_VALGRIND
73 #include <valgrind.h>
74 #include <memcheck.h>
75 #define VG(x) x
76 #else
77 #define VG(x)
78 #endif
79
80 #define memclear(s) memset(&s, 0, sizeof(s))
81
82 #define FILE_DEBUG_FLAG DEBUG_BUFMGR
83
84 static inline int
85 atomic_add_unless(int *v, int add, int unless)
86 {
87 int c, old;
88 c = p_atomic_read(v);
89 while (c != unless && (old = p_atomic_cmpxchg(v, c, c + add)) != c)
90 c = old;
91 return c == unless;
92 }
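/*
 * Usage sketch: atomic_add_unless() adds @add to *v unless *v currently
 * equals @unless, and returns non-zero only when the add was skipped.
 * drm_bacon_bo_unreference() below relies on it as a "decrement unless
 * this is the last reference" test:
 *
 *     if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
 *             ... refcount was 1: take the bufmgr lock and perform the
 *                 final unreference there ...
 *     }
 */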
93
94 /**
95 * upper_32_bits - return bits 32-63 of a number
96 * @n: the number we're accessing
97 *
98 * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
99 * the "right shift count >= width of type" warning when that quantity is
100 * 32-bits.
101 */
102 #define upper_32_bits(n) ((__u32)(((n) >> 16) >> 16))
103
104 /**
105 * lower_32_bits - return bits 0-31 of a number
106 * @n: the number we're accessing
107 */
108 #define lower_32_bits(n) ((__u32)(n))
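/*
 * Example: a 64-bit presumed offset of 0x0000000123456000 splits into
 * upper_32_bits() == 0x00000001 and lower_32_bits() == 0x23456000; the
 * DBG() dumps below print 64-bit offsets as two 32-bit halves this way.
 */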
109
110 typedef struct _drm_bacon_bo_gem drm_bacon_bo_gem;
111
112 struct drm_bacon_gem_bo_bucket {
113 struct list_head head;
114 unsigned long size;
115 };
116
117 typedef struct _drm_bacon_bufmgr_gem {
118 drm_bacon_bufmgr bufmgr;
119
120 int refcount;
121
122 int fd;
123
124 int max_relocs;
125
126 pthread_mutex_t lock;
127
128 struct drm_i915_gem_exec_object2 *exec2_objects;
129 drm_bacon_bo **exec_bos;
130 int exec_size;
131 int exec_count;
132
133 /** Array of lists of cached gem objects of power-of-two sizes */
134 struct drm_bacon_gem_bo_bucket cache_bucket[14 * 4];
135 int num_buckets;
136 time_t time;
137
138 struct list_head managers;
139
140 drm_bacon_bo_gem *name_table;
141 drm_bacon_bo_gem *handle_table;
142
143 struct list_head vma_cache;
144 int vma_count, vma_open, vma_max;
145
146 uint64_t gtt_size;
147 int pci_device;
148 int gen;
149 unsigned int has_bsd : 1;
150 unsigned int has_blt : 1;
151 unsigned int has_llc : 1;
152 unsigned int has_wait_timeout : 1;
153 unsigned int bo_reuse : 1;
154 unsigned int no_exec : 1;
155 unsigned int has_vebox : 1;
156 unsigned int has_exec_async : 1;
157
158 struct {
159 void *ptr;
160 uint32_t handle;
161 } userptr_active;
162
163 } drm_bacon_bufmgr_gem;
164
165 typedef struct _drm_bacon_reloc_target_info {
166 drm_bacon_bo *bo;
167 } drm_bacon_reloc_target;
168
169 struct _drm_bacon_bo_gem {
170 drm_bacon_bo bo;
171
172 int refcount;
173 uint32_t gem_handle;
174 const char *name;
175
176 /**
177 * Kernel-assigned global name for this object
178 *
179 * List contains both flink named and prime fd'd objects
180 */
181 unsigned int global_name;
182
183 UT_hash_handle handle_hh;
184 UT_hash_handle name_hh;
185
186 /**
187 * Index of the buffer within the validation list while preparing a
188 * batchbuffer execution.
189 */
190 int validate_index;
191
192 /**
193 * Current tiling mode
194 */
195 uint32_t tiling_mode;
196 uint32_t swizzle_mode;
197 unsigned long stride;
198
199 unsigned long kflags;
200
201 time_t free_time;
202
203 /** Array passed to the DRM containing relocation information. */
204 struct drm_i915_gem_relocation_entry *relocs;
205 /**
206 * Array of info structs corresponding to relocs[i].target_handle etc
207 */
208 drm_bacon_reloc_target *reloc_target_info;
209 /** Number of entries in relocs */
210 int reloc_count;
211 /** Array of BOs that are referenced by this buffer and will be softpinned */
212 drm_bacon_bo **softpin_target;
213 /** Number softpinned BOs that are referenced by this buffer */
214 int softpin_target_count;
215 /** Maximum amount of softpinned BOs that are referenced by this buffer */
216 int softpin_target_size;
217
218 /** Mapped address for the buffer, saved across map/unmap cycles */
219 void *mem_virtual;
220 /** GTT virtual address for the buffer, saved across map/unmap cycles */
221 void *gtt_virtual;
222 /** WC CPU address for the buffer, saved across map/unmap cycles */
223 void *wc_virtual;
224 /**
225 * Virtual address of the buffer allocated by user, used for userptr
226 * objects only.
227 */
228 void *user_virtual;
229 int map_count;
230 struct list_head vma_list;
231
232 /** BO cache list */
233 struct list_head head;
234
235 /**
236 * Boolean of whether this BO and its children have been included in
237 * the current drm_bacon_bufmgr_check_aperture_space() total.
238 */
239 bool included_in_check_aperture;
240
241 /**
242 * Boolean of whether this buffer has been used as a relocation
243 * target and had its size accounted for, and thus can't have any
244 * further relocations added to it.
245 */
246 bool used_as_reloc_target;
247
248 /**
249 * Boolean of whether we have encountered an error whilst building the relocation tree.
250 */
251 bool has_error;
252
253 /**
254 * Boolean of whether this buffer can be re-used
255 */
256 bool reusable;
257
258 /**
259 * Boolean of whether the GPU is definitely not accessing the buffer.
260 *
261 * This is only valid when reusable, since non-reusable
262 * buffers are those that have been shared with other
263 * processes, so we don't know their state.
264 */
265 bool idle;
266
267 /**
268 * Boolean of whether this buffer was allocated with userptr
269 */
270 bool is_userptr;
271
272 /**
273 * Size in bytes of this buffer and its relocation descendants.
274 *
275 * Used to avoid costly tree walking in
276 * drm_bacon_bufmgr_check_aperture in the common case.
277 */
278 int reloc_tree_size;
279
280 /** Flags that we may need to do the SW_FINISH ioctl on unmap. */
281 bool mapped_cpu_write;
282 };
283
284 static unsigned int
285 drm_bacon_gem_estimate_batch_space(drm_bacon_bo ** bo_array, int count);
286
287 static unsigned int
288 drm_bacon_gem_compute_batch_space(drm_bacon_bo ** bo_array, int count);
289
290 static int
291 drm_bacon_gem_bo_set_tiling_internal(drm_bacon_bo *bo,
292 uint32_t tiling_mode,
293 uint32_t stride);
294
295 static void drm_bacon_gem_bo_unreference_locked_timed(drm_bacon_bo *bo,
296 time_t time);
297
298 static void drm_bacon_gem_bo_free(drm_bacon_bo *bo);
299
300 static inline drm_bacon_bo_gem *to_bo_gem(drm_bacon_bo *bo)
301 {
302 return (drm_bacon_bo_gem *)bo;
303 }
304
305 static unsigned long
306 drm_bacon_gem_bo_tile_size(drm_bacon_bufmgr_gem *bufmgr_gem, unsigned long size,
307 uint32_t *tiling_mode)
308 {
309 if (*tiling_mode == I915_TILING_NONE)
310 return size;
311
312 /* 965+ just need multiples of page size for tiling */
313 return ALIGN(size, 4096);
314 }
315
316 /*
317 * Round a given pitch up to the minimum required for X tiling on a
318 * given chip. We use 512 as the minimum to allow for a later tiling
319 * change.
320 */
321 static unsigned long
322 drm_bacon_gem_bo_tile_pitch(drm_bacon_bufmgr_gem *bufmgr_gem,
323 unsigned long pitch, uint32_t *tiling_mode)
324 {
325 unsigned long tile_width;
326
327 /* If untiled, then just align it so that we can do rendering
328 * to it with the 3D engine.
329 */
330 if (*tiling_mode == I915_TILING_NONE)
331 return ALIGN(pitch, 64);
332
333 if (*tiling_mode == I915_TILING_X)
334 tile_width = 512;
335 else
336 tile_width = 128;
337
338 /* 965 is flexible */
339 return ALIGN(pitch, tile_width);
340 }
341
342 static struct drm_bacon_gem_bo_bucket *
343 drm_bacon_gem_bo_bucket_for_size(drm_bacon_bufmgr_gem *bufmgr_gem,
344 unsigned long size)
345 {
346 int i;
347
348 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
349 struct drm_bacon_gem_bo_bucket *bucket =
350 &bufmgr_gem->cache_bucket[i];
351 if (bucket->size >= size) {
352 return bucket;
353 }
354 }
355
356 return NULL;
357 }
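/*
 * Example: the lookup above returns the first bucket whose size is at
 * least the requested size, so a 10 KiB request lands in whichever cached
 * bucket covers it (the bucket sizes themselves are filled in by the
 * bufmgr initialization code, which is outside this excerpt), while a
 * request larger than every bucket returns NULL and bypasses the cache.
 */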
358
359 static void
360 drm_bacon_gem_dump_validation_list(drm_bacon_bufmgr_gem *bufmgr_gem)
361 {
362 int i, j;
363
364 for (i = 0; i < bufmgr_gem->exec_count; i++) {
365 drm_bacon_bo *bo = bufmgr_gem->exec_bos[i];
366 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
367
368 if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
369 DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
370 bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
371 bo_gem->name);
372 continue;
373 }
374
375 for (j = 0; j < bo_gem->reloc_count; j++) {
376 drm_bacon_bo *target_bo = bo_gem->reloc_target_info[j].bo;
377 drm_bacon_bo_gem *target_gem =
378 (drm_bacon_bo_gem *) target_bo;
379
380 DBG("%2d: %d %s(%s)@0x%08x %08x -> "
381 "%d (%s)@0x%08x %08x + 0x%08x\n",
382 i,
383 bo_gem->gem_handle,
384 bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
385 bo_gem->name,
386 upper_32_bits(bo_gem->relocs[j].offset),
387 lower_32_bits(bo_gem->relocs[j].offset),
388 target_gem->gem_handle,
389 target_gem->name,
390 upper_32_bits(target_bo->offset64),
391 lower_32_bits(target_bo->offset64),
392 bo_gem->relocs[j].delta);
393 }
394
395 for (j = 0; j < bo_gem->softpin_target_count; j++) {
396 drm_bacon_bo *target_bo = bo_gem->softpin_target[j];
397 drm_bacon_bo_gem *target_gem =
398 (drm_bacon_bo_gem *) target_bo;
399 DBG("%2d: %d %s(%s) -> "
400 "%d *(%s)@0x%08x %08x\n",
401 i,
402 bo_gem->gem_handle,
403 bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
404 bo_gem->name,
405 target_gem->gem_handle,
406 target_gem->name,
407 upper_32_bits(target_bo->offset64),
408 lower_32_bits(target_bo->offset64));
409 }
410 }
411 }
412
413 inline void
414 drm_bacon_bo_reference(drm_bacon_bo *bo)
415 {
416 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
417
418 p_atomic_inc(&bo_gem->refcount);
419 }
420
421 static void
422 drm_bacon_add_validate_buffer2(drm_bacon_bo *bo)
423 {
424 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bo->bufmgr;
425 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *)bo;
426 int index;
427
428 if (bo_gem->validate_index != -1)
429 return;
430
431 /* Extend the array of validation entries as necessary. */
432 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
433 int new_size = bufmgr_gem->exec_size * 2;
434
435 if (new_size == 0)
436 new_size = 5;
437
438 bufmgr_gem->exec2_objects =
439 realloc(bufmgr_gem->exec2_objects,
440 sizeof(*bufmgr_gem->exec2_objects) * new_size);
441 bufmgr_gem->exec_bos =
442 realloc(bufmgr_gem->exec_bos,
443 sizeof(*bufmgr_gem->exec_bos) * new_size);
444 bufmgr_gem->exec_size = new_size;
445 }
446
447 index = bufmgr_gem->exec_count;
448 bo_gem->validate_index = index;
449 /* Fill in array entry */
450 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
451 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
452 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
453 bufmgr_gem->exec2_objects[index].alignment = bo->align;
454 bufmgr_gem->exec2_objects[index].offset = bo->offset64;
455 bufmgr_gem->exec2_objects[index].flags = bo_gem->kflags;
456 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
457 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
458 bufmgr_gem->exec_bos[index] = bo;
459 bufmgr_gem->exec_count++;
460 }
461
462 static void
463 drm_bacon_bo_gem_set_in_aperture_size(drm_bacon_bufmgr_gem *bufmgr_gem,
464 drm_bacon_bo_gem *bo_gem,
465 unsigned int alignment)
466 {
467 unsigned int size;
468
469 assert(!bo_gem->used_as_reloc_target);
470
471 /* The older chipsets are far less flexible in terms of tiling,
472 * and require tiled buffers to be size-aligned in the aperture.
473 * This means that in the worst possible case we will need a hole
474 * twice as large as the object in order for it to fit into the
475 * aperture. Optimal packing is for wimps.
476 */
477 size = bo_gem->bo.size;
478
479 bo_gem->reloc_tree_size = size + alignment;
480 }
481
482 static int
483 drm_bacon_setup_reloc_list(drm_bacon_bo *bo)
484 {
485 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
486 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
487 unsigned int max_relocs = bufmgr_gem->max_relocs;
488
489 if (bo->size / 4 < max_relocs)
490 max_relocs = bo->size / 4;
491
492 bo_gem->relocs = malloc(max_relocs *
493 sizeof(struct drm_i915_gem_relocation_entry));
494 bo_gem->reloc_target_info = malloc(max_relocs *
495 sizeof(drm_bacon_reloc_target));
496 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
497 bo_gem->has_error = true;
498
499 free (bo_gem->relocs);
500 bo_gem->relocs = NULL;
501
502 free (bo_gem->reloc_target_info);
503 bo_gem->reloc_target_info = NULL;
504
505 return 1;
506 }
507
508 return 0;
509 }
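/*
 * The bo->size / 4 cap above reflects that each relocation patches a
 * 4-byte dword inside the buffer (note the offset <= bo->size - 4 assert
 * in do_bo_emit_reloc() below), so a buffer never needs more relocation
 * slots than it has dwords. On allocation failure the BO is marked
 * has_error and the caller turns that into -ENOMEM.
 */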
510
511 int
512 drm_bacon_bo_busy(drm_bacon_bo *bo)
513 {
514 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
515 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
516 struct drm_i915_gem_busy busy;
517 int ret;
518
519 if (bo_gem->reusable && bo_gem->idle)
520 return false;
521
522 memclear(busy);
523 busy.handle = bo_gem->gem_handle;
524
525 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
526 if (ret == 0) {
527 bo_gem->idle = !busy.busy;
528 return busy.busy;
529 } else {
530 return false;
531 }
533 }
534
535 static int
536 drm_bacon_gem_bo_madvise_internal(drm_bacon_bufmgr_gem *bufmgr_gem,
537 drm_bacon_bo_gem *bo_gem, int state)
538 {
539 struct drm_i915_gem_madvise madv;
540
541 memclear(madv);
542 madv.handle = bo_gem->gem_handle;
543 madv.madv = state;
544 madv.retained = 1;
545 drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
546
547 return madv.retained;
548 }
549
550 int
551 drm_bacon_bo_madvise(drm_bacon_bo *bo, int madv)
552 {
553 return drm_bacon_gem_bo_madvise_internal
554 ((drm_bacon_bufmgr_gem *) bo->bufmgr,
555 (drm_bacon_bo_gem *) bo,
556 madv);
557 }
558
559 /* drop the oldest entries that have been purged by the kernel */
560 static void
561 drm_bacon_gem_bo_cache_purge_bucket(drm_bacon_bufmgr_gem *bufmgr_gem,
562 struct drm_bacon_gem_bo_bucket *bucket)
563 {
564 while (!list_empty(&bucket->head)) {
565 drm_bacon_bo_gem *bo_gem;
566
567 bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
568 bucket->head.next, head);
569 if (drm_bacon_gem_bo_madvise_internal
570 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
571 break;
572
573 list_del(&bo_gem->head);
574 drm_bacon_gem_bo_free(&bo_gem->bo);
575 }
576 }
577
578 static drm_bacon_bo *
579 drm_bacon_gem_bo_alloc_internal(drm_bacon_bufmgr *bufmgr,
580 const char *name,
581 unsigned long size,
582 unsigned long flags,
583 uint32_t tiling_mode,
584 unsigned long stride,
585 unsigned int alignment)
586 {
587 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
588 drm_bacon_bo_gem *bo_gem;
589 unsigned int page_size = getpagesize();
590 int ret;
591 struct drm_bacon_gem_bo_bucket *bucket;
592 bool alloc_from_cache;
593 unsigned long bo_size;
594 bool for_render = false;
595
596 if (flags & BO_ALLOC_FOR_RENDER)
597 for_render = true;
598
599 /* Round the allocated size up to a power of two number of pages. */
600 bucket = drm_bacon_gem_bo_bucket_for_size(bufmgr_gem, size);
601
602 /* If we don't have caching at this size, don't actually round the
603 * allocation up.
604 */
605 if (bucket == NULL) {
606 bo_size = size;
607 if (bo_size < page_size)
608 bo_size = page_size;
609 } else {
610 bo_size = bucket->size;
611 }
612
613 pthread_mutex_lock(&bufmgr_gem->lock);
614 /* Get a buffer out of the cache if available */
615 retry:
616 alloc_from_cache = false;
617 if (bucket != NULL && !list_empty(&bucket->head)) {
618 if (for_render) {
619 /* Allocate new render-target BOs from the tail (MRU)
620 * of the list, as it will likely be hot in the GPU
621 * cache and in the aperture for us.
622 */
623 bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
624 bucket->head.prev, head);
625 list_del(&bo_gem->head);
626 alloc_from_cache = true;
627 bo_gem->bo.align = alignment;
628 } else {
629 assert(alignment == 0);
630 /* For non-render-target BOs (where we're probably
631 * going to map it first thing in order to fill it
632 * with data), check if the last BO in the cache is
633 * unbusy, and only reuse in that case. Otherwise,
634 * allocating a new buffer is probably faster than
635 * waiting for the GPU to finish.
636 */
637 bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
638 bucket->head.next, head);
639 if (!drm_bacon_bo_busy(&bo_gem->bo)) {
640 alloc_from_cache = true;
641 list_del(&bo_gem->head);
642 }
643 }
644
645 if (alloc_from_cache) {
646 if (!drm_bacon_gem_bo_madvise_internal
647 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
648 drm_bacon_gem_bo_free(&bo_gem->bo);
649 drm_bacon_gem_bo_cache_purge_bucket(bufmgr_gem,
650 bucket);
651 goto retry;
652 }
653
654 if (drm_bacon_gem_bo_set_tiling_internal(&bo_gem->bo,
655 tiling_mode,
656 stride)) {
657 drm_bacon_gem_bo_free(&bo_gem->bo);
658 goto retry;
659 }
660 }
661 }
662
663 if (!alloc_from_cache) {
664 struct drm_i915_gem_create create;
665
666 bo_gem = calloc(1, sizeof(*bo_gem));
667 if (!bo_gem)
668 goto err;
669
670 /* drm_bacon_gem_bo_free calls list_del() for an uninitialized
671 * list (vma_list), so better set the list head here */
672 list_inithead(&bo_gem->vma_list);
673
674 bo_gem->bo.size = bo_size;
675
676 memclear(create);
677 create.size = bo_size;
678
679 ret = drmIoctl(bufmgr_gem->fd,
680 DRM_IOCTL_I915_GEM_CREATE,
681 &create);
682 if (ret != 0) {
683 free(bo_gem);
684 goto err;
685 }
686
687 bo_gem->gem_handle = create.handle;
688 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
689 gem_handle, sizeof(bo_gem->gem_handle),
690 bo_gem);
691
692 bo_gem->bo.handle = bo_gem->gem_handle;
693 bo_gem->bo.bufmgr = bufmgr;
694 bo_gem->bo.align = alignment;
695
696 bo_gem->tiling_mode = I915_TILING_NONE;
697 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
698 bo_gem->stride = 0;
699
700 if (drm_bacon_gem_bo_set_tiling_internal(&bo_gem->bo,
701 tiling_mode,
702 stride))
703 goto err_free;
704 }
705
706 bo_gem->name = name;
707 p_atomic_set(&bo_gem->refcount, 1);
708 bo_gem->validate_index = -1;
709 bo_gem->used_as_reloc_target = false;
710 bo_gem->has_error = false;
711 bo_gem->reusable = true;
712
713 drm_bacon_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
714 pthread_mutex_unlock(&bufmgr_gem->lock);
715
716 DBG("bo_create: buf %d (%s) %ldb\n",
717 bo_gem->gem_handle, bo_gem->name, size);
718
719 return &bo_gem->bo;
720
721 err_free:
722 drm_bacon_gem_bo_free(&bo_gem->bo);
723 err:
724 pthread_mutex_unlock(&bufmgr_gem->lock);
725 return NULL;
726 }
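/*
 * Allocation sketch (illustrative, error handling trimmed): a caller asks
 * for a buffer through one of the public wrappers below and later drops
 * its reference; with bo_reuse enabled the pages may go back into a
 * suitably sized cache bucket instead of being freed immediately:
 *
 *     drm_bacon_bo *bo = drm_bacon_bo_alloc(bufmgr, "scratch", 8192, 0);
 *     ... use bo ...
 *     drm_bacon_bo_unreference(bo);
 */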
727
728 drm_bacon_bo *
729 drm_bacon_bo_alloc_for_render(drm_bacon_bufmgr *bufmgr,
730 const char *name,
731 unsigned long size,
732 unsigned int alignment)
733 {
734 return drm_bacon_gem_bo_alloc_internal(bufmgr, name, size,
735 BO_ALLOC_FOR_RENDER,
736 I915_TILING_NONE, 0,
737 alignment);
738 }
739
740 drm_bacon_bo *
741 drm_bacon_bo_alloc(drm_bacon_bufmgr *bufmgr,
742 const char *name,
743 unsigned long size,
744 unsigned int alignment)
745 {
746 return drm_bacon_gem_bo_alloc_internal(bufmgr, name, size, 0,
747 I915_TILING_NONE, 0, 0);
748 }
749
750 drm_bacon_bo *
751 drm_bacon_bo_alloc_tiled(drm_bacon_bufmgr *bufmgr, const char *name,
752 int x, int y, int cpp, uint32_t *tiling_mode,
753 unsigned long *pitch, unsigned long flags)
754 {
755 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bufmgr;
756 unsigned long size, stride;
757 uint32_t tiling;
758
759 do {
760 unsigned long aligned_y, height_alignment;
761
762 tiling = *tiling_mode;
763
764 /* If we're tiled, our allocations are in 8 or 32-row blocks,
765 * so failure to align our height means that we won't allocate
766 * enough pages.
767 *
768 * If we're untiled, we still have to align to 2 rows high
769 * because the data port accesses 2x2 blocks even if the
770 * bottom row isn't to be rendered, so failure to align means
771 * we could walk off the end of the GTT and fault. This is
772 * documented on 965, and may be the case on older chipsets
773 * too so we try to be careful.
774 */
775 aligned_y = y;
776 height_alignment = 2;
777
778 if (tiling == I915_TILING_X)
779 height_alignment = 8;
780 else if (tiling == I915_TILING_Y)
781 height_alignment = 32;
782 aligned_y = ALIGN(y, height_alignment);
783
784 stride = x * cpp;
785 stride = drm_bacon_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
786 size = stride * aligned_y;
787 size = drm_bacon_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
788 } while (*tiling_mode != tiling);
789 *pitch = stride;
790
791 if (tiling == I915_TILING_NONE)
792 stride = 0;
793
794 return drm_bacon_gem_bo_alloc_internal(bufmgr, name, size, flags,
795 tiling, stride, 0);
796 }
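/*
 * Worked example for the sizing loop above (illustrative numbers): a
 * 1920x1080 RGBA surface (cpp = 4) requested as I915_TILING_X gives
 * stride = 1920 * 4 = 7680 bytes, already a multiple of the 512-byte
 * X-tile width; aligned_y = ALIGN(1080, 8) = 1080; so size becomes
 * 7680 * 1080 = 8294400 bytes (already page-aligned) and *pitch is
 * returned as 7680.
 */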
797
798 drm_bacon_bo *
799 drm_bacon_bo_alloc_userptr(drm_bacon_bufmgr *bufmgr,
800 const char *name,
801 void *addr,
802 uint32_t tiling_mode,
803 uint32_t stride,
804 unsigned long size,
805 unsigned long flags)
806 {
807 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
808 drm_bacon_bo_gem *bo_gem;
809 int ret;
810 struct drm_i915_gem_userptr userptr;
811
812 /* Tiling with userptr surfaces is not supported
813 * on all hardware so refuse it for time being.
814 */
815 if (tiling_mode != I915_TILING_NONE)
816 return NULL;
817
818 bo_gem = calloc(1, sizeof(*bo_gem));
819 if (!bo_gem)
820 return NULL;
821
822 p_atomic_set(&bo_gem->refcount, 1);
823 list_inithead(&bo_gem->vma_list);
824
825 bo_gem->bo.size = size;
826
827 memclear(userptr);
828 userptr.user_ptr = (__u64)((unsigned long)addr);
829 userptr.user_size = size;
830 userptr.flags = flags;
831
832 ret = drmIoctl(bufmgr_gem->fd,
833 DRM_IOCTL_I915_GEM_USERPTR,
834 &userptr);
835 if (ret != 0) {
836 DBG("bo_create_userptr: "
837 "ioctl failed with user ptr %p size 0x%lx, "
838 "user flags 0x%lx\n", addr, size, flags);
839 free(bo_gem);
840 return NULL;
841 }
842
843 pthread_mutex_lock(&bufmgr_gem->lock);
844
845 bo_gem->gem_handle = userptr.handle;
846 bo_gem->bo.handle = bo_gem->gem_handle;
847 bo_gem->bo.bufmgr = bufmgr;
848 bo_gem->is_userptr = true;
849 bo_gem->bo.virtual = addr;
850 /* Save the address provided by user */
851 bo_gem->user_virtual = addr;
852 bo_gem->tiling_mode = I915_TILING_NONE;
853 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
854 bo_gem->stride = 0;
855
856 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
857 gem_handle, sizeof(bo_gem->gem_handle),
858 bo_gem);
859
860 bo_gem->name = name;
861 bo_gem->validate_index = -1;
862 bo_gem->used_as_reloc_target = false;
863 bo_gem->has_error = false;
864 bo_gem->reusable = false;
865
866 drm_bacon_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
867 pthread_mutex_unlock(&bufmgr_gem->lock);
868
869 DBG("bo_create_userptr: "
870 "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
871 addr, bo_gem->gem_handle, bo_gem->name,
872 size, stride, tiling_mode);
873
874 return &bo_gem->bo;
875 }
876
877 bool
878 drm_bacon_has_userptr(drm_bacon_bufmgr *bufmgr)
879 {
880 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
881 int ret;
882 void *ptr;
883 long pgsz;
884 struct drm_i915_gem_userptr userptr;
885
886 pgsz = sysconf(_SC_PAGESIZE);
887 assert(pgsz > 0);
888
889 ret = posix_memalign(&ptr, pgsz, pgsz);
890 if (ret) {
891 DBG("Failed to get a page (%ld) for userptr detection!\n",
892 pgsz);
893 return false;
894 }
895
896 memclear(userptr);
897 userptr.user_ptr = (__u64)(unsigned long)ptr;
898 userptr.user_size = pgsz;
899
900 retry:
901 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
902 if (ret) {
903 if (errno == ENODEV && userptr.flags == 0) {
904 userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
905 goto retry;
906 }
907 free(ptr);
908 return false;
909 }
910
911 /* We don't release the userptr bo here as we want to keep the
912 * kernel mm tracking alive for our lifetime. The first time we
913 * create a userptr object the kernel has to install an mmu_notifier
914 * which is a heavyweight operation (e.g. it requires taking all
915 * mm_locks and stop_machine()).
916 */
917
918 bufmgr_gem->userptr_active.ptr = ptr;
919 bufmgr_gem->userptr_active.handle = userptr.handle;
920
921 return true;
922 }
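/*
 * Callers typically probe userptr support once at bufmgr setup and cache
 * the answer, since the probe deliberately leaves one userptr object (and
 * its mmu_notifier registration) alive for the bufmgr's lifetime;
 * drm_bacon_bufmgr_gem_destroy() below closes that handle and frees the
 * probe page.
 */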
923
924 /**
925 * Returns a drm_bacon_bo wrapping the given buffer object handle.
926 *
927 * This can be used when one application needs to pass a buffer object
928 * to another.
929 */
930 drm_bacon_bo *
931 drm_bacon_bo_gem_create_from_name(drm_bacon_bufmgr *bufmgr,
932 const char *name,
933 unsigned int handle)
934 {
935 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
936 drm_bacon_bo_gem *bo_gem;
937 int ret;
938 struct drm_gem_open open_arg;
939 struct drm_i915_gem_get_tiling get_tiling;
940
941 /* At the moment most applications only have a few named BOs.
942 * For instance, in a DRI client only the render buffers passed
943 * between X and the client are named, so checking the name table
944 * below before asking the kernel to open the flink name provides
945 * a sufficiently fast match.
946 */
947 pthread_mutex_lock(&bufmgr_gem->lock);
948 HASH_FIND(name_hh, bufmgr_gem->name_table,
949 &handle, sizeof(handle), bo_gem);
950 if (bo_gem) {
951 drm_bacon_bo_reference(&bo_gem->bo);
952 goto out;
953 }
954
955 memclear(open_arg);
956 open_arg.name = handle;
957 ret = drmIoctl(bufmgr_gem->fd,
958 DRM_IOCTL_GEM_OPEN,
959 &open_arg);
960 if (ret != 0) {
961 DBG("Couldn't reference %s handle 0x%08x: %s\n",
962 name, handle, strerror(errno));
963 bo_gem = NULL;
964 goto out;
965 }
966 /* Now see if someone has used a prime handle to get this
967 * object from the kernel before by looking through the list
968 * again for a matching gem_handle
969 */
970 HASH_FIND(handle_hh, bufmgr_gem->handle_table,
971 &open_arg.handle, sizeof(open_arg.handle), bo_gem);
972 if (bo_gem) {
973 drm_bacon_bo_reference(&bo_gem->bo);
974 goto out;
975 }
976
977 bo_gem = calloc(1, sizeof(*bo_gem));
978 if (!bo_gem)
979 goto out;
980
981 p_atomic_set(&bo_gem->refcount, 1);
982 list_inithead(&bo_gem->vma_list);
983
984 bo_gem->bo.size = open_arg.size;
985 bo_gem->bo.offset = 0;
986 bo_gem->bo.offset64 = 0;
987 bo_gem->bo.virtual = NULL;
988 bo_gem->bo.bufmgr = bufmgr;
989 bo_gem->name = name;
990 bo_gem->validate_index = -1;
991 bo_gem->gem_handle = open_arg.handle;
992 bo_gem->bo.handle = open_arg.handle;
993 bo_gem->global_name = handle;
994 bo_gem->reusable = false;
995
996 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
997 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
998 HASH_ADD(name_hh, bufmgr_gem->name_table,
999 global_name, sizeof(bo_gem->global_name), bo_gem);
1000
1001 memclear(get_tiling);
1002 get_tiling.handle = bo_gem->gem_handle;
1003 ret = drmIoctl(bufmgr_gem->fd,
1004 DRM_IOCTL_I915_GEM_GET_TILING,
1005 &get_tiling);
1006 if (ret != 0)
1007 goto err_unref;
1008
1009 bo_gem->tiling_mode = get_tiling.tiling_mode;
1010 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
1011 /* XXX stride is unknown */
1012 drm_bacon_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
1013 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
1014
1015 out:
1016 pthread_mutex_unlock(&bufmgr_gem->lock);
1017 return &bo_gem->bo;
1018
1019 err_unref:
1020 drm_bacon_gem_bo_free(&bo_gem->bo);
1021 pthread_mutex_unlock(&bufmgr_gem->lock);
1022 return NULL;
1023 }
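/*
 * Sharing sketch (illustrative): a DRI2-style client receives a global
 * flink name from the server and wraps it in a local BO; "global_name"
 * below stands for whatever handle the caller was given:
 *
 *     drm_bacon_bo *shared =
 *             drm_bacon_bo_gem_create_from_name(bufmgr, "back", global_name);
 *
 * The returned BO is marked non-reusable, so it bypasses the allocation
 * cache and is closed as soon as the last reference goes away.
 */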
1024
1025 static void
1026 drm_bacon_gem_bo_free(drm_bacon_bo *bo)
1027 {
1028 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
1029 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
1030 struct drm_gem_close close;
1031 int ret;
1032
1033 list_del(&bo_gem->vma_list);
1034 if (bo_gem->mem_virtual) {
1035 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
1036 drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1037 bufmgr_gem->vma_count--;
1038 }
1039 if (bo_gem->wc_virtual) {
1040 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->wc_virtual, 0));
1041 drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
1042 bufmgr_gem->vma_count--;
1043 }
1044 if (bo_gem->gtt_virtual) {
1045 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1046 bufmgr_gem->vma_count--;
1047 }
1048
1049 if (bo_gem->global_name)
1050 HASH_DELETE(name_hh, bufmgr_gem->name_table, bo_gem);
1051 HASH_DELETE(handle_hh, bufmgr_gem->handle_table, bo_gem);
1052
1053 /* Close this object */
1054 memclear(close);
1055 close.handle = bo_gem->gem_handle;
1056 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
1057 if (ret != 0) {
1058 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
1059 bo_gem->gem_handle, bo_gem->name, strerror(errno));
1060 }
1061 free(bo);
1062 }
1063
1064 static void
1065 drm_bacon_gem_bo_mark_mmaps_incoherent(drm_bacon_bo *bo)
1066 {
1067 #if HAVE_VALGRIND
1068 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
1069
1070 if (bo_gem->mem_virtual)
1071 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
1072
1073 if (bo_gem->wc_virtual)
1074 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->wc_virtual, bo->size);
1075
1076 if (bo_gem->gtt_virtual)
1077 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
1078 #endif
1079 }
1080
1081 /** Frees all cached buffers significantly older than @time. */
1082 static void
1083 drm_bacon_gem_cleanup_bo_cache(drm_bacon_bufmgr_gem *bufmgr_gem, time_t time)
1084 {
1085 int i;
1086
1087 if (bufmgr_gem->time == time)
1088 return;
1089
1090 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1091 struct drm_bacon_gem_bo_bucket *bucket =
1092 &bufmgr_gem->cache_bucket[i];
1093
1094 while (!list_empty(&bucket->head)) {
1095 drm_bacon_bo_gem *bo_gem;
1096
1097 bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
1098 bucket->head.next, head);
1099 if (time - bo_gem->free_time <= 1)
1100 break;
1101
1102 list_del(&bo_gem->head);
1103
1104 drm_bacon_gem_bo_free(&bo_gem->bo);
1105 }
1106 }
1107
1108 bufmgr_gem->time = time;
1109 }
1110
1111 static void drm_bacon_gem_bo_purge_vma_cache(drm_bacon_bufmgr_gem *bufmgr_gem)
1112 {
1113 int limit;
1114
1115 DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
1116 bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
1117
1118 if (bufmgr_gem->vma_max < 0)
1119 return;
1120
1121 /* We may need to evict a few entries in order to create new mmaps */
1122 limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
1123 if (limit < 0)
1124 limit = 0;
1125
1126 while (bufmgr_gem->vma_count > limit) {
1127 drm_bacon_bo_gem *bo_gem;
1128
1129 bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
1130 bufmgr_gem->vma_cache.next,
1131 vma_list);
1132 assert(bo_gem->map_count == 0);
1133 list_delinit(&bo_gem->vma_list);
1134
1135 if (bo_gem->mem_virtual) {
1136 drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1137 bo_gem->mem_virtual = NULL;
1138 bufmgr_gem->vma_count--;
1139 }
1140 if (bo_gem->wc_virtual) {
1141 drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
1142 bo_gem->wc_virtual = NULL;
1143 bufmgr_gem->vma_count--;
1144 }
1145 if (bo_gem->gtt_virtual) {
1146 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1147 bo_gem->gtt_virtual = NULL;
1148 bufmgr_gem->vma_count--;
1149 }
1150 }
1151 }
1152
1153 static void drm_bacon_gem_bo_close_vma(drm_bacon_bufmgr_gem *bufmgr_gem,
1154 drm_bacon_bo_gem *bo_gem)
1155 {
1156 bufmgr_gem->vma_open--;
1157 list_addtail(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
1158 if (bo_gem->mem_virtual)
1159 bufmgr_gem->vma_count++;
1160 if (bo_gem->wc_virtual)
1161 bufmgr_gem->vma_count++;
1162 if (bo_gem->gtt_virtual)
1163 bufmgr_gem->vma_count++;
1164 drm_bacon_gem_bo_purge_vma_cache(bufmgr_gem);
1165 }
1166
1167 static void drm_bacon_gem_bo_open_vma(drm_bacon_bufmgr_gem *bufmgr_gem,
1168 drm_bacon_bo_gem *bo_gem)
1169 {
1170 bufmgr_gem->vma_open++;
1171 list_del(&bo_gem->vma_list);
1172 if (bo_gem->mem_virtual)
1173 bufmgr_gem->vma_count--;
1174 if (bo_gem->wc_virtual)
1175 bufmgr_gem->vma_count--;
1176 if (bo_gem->gtt_virtual)
1177 bufmgr_gem->vma_count--;
1178 drm_bacon_gem_bo_purge_vma_cache(bufmgr_gem);
1179 }
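/*
 * The two helpers above keep the cached-mmap bookkeeping consistent:
 * vma_open counts BOs that callers currently have mapped, while vma_count
 * counts mmaps retained for BOs sitting on the vma_cache list. The purge
 * routine keeps vma_count below roughly vma_max - 2 * vma_open so that
 * creating new mappings does not run the process into its VMA limits.
 */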
1180
1181 static void
1182 drm_bacon_gem_bo_unreference_final(drm_bacon_bo *bo, time_t time)
1183 {
1184 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
1185 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
1186 struct drm_bacon_gem_bo_bucket *bucket;
1187 int i;
1188
1189 /* Unreference all the target buffers */
1190 for (i = 0; i < bo_gem->reloc_count; i++) {
1191 if (bo_gem->reloc_target_info[i].bo != bo) {
1192 drm_bacon_gem_bo_unreference_locked_timed(bo_gem->
1193 reloc_target_info[i].bo,
1194 time);
1195 }
1196 }
1197 for (i = 0; i < bo_gem->softpin_target_count; i++)
1198 drm_bacon_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
1199 time);
1200 bo_gem->kflags = 0;
1201 bo_gem->reloc_count = 0;
1202 bo_gem->used_as_reloc_target = false;
1203 bo_gem->softpin_target_count = 0;
1204
1205 DBG("bo_unreference final: %d (%s)\n",
1206 bo_gem->gem_handle, bo_gem->name);
1207
1208 /* release memory associated with this object */
1209 if (bo_gem->reloc_target_info) {
1210 free(bo_gem->reloc_target_info);
1211 bo_gem->reloc_target_info = NULL;
1212 }
1213 if (bo_gem->relocs) {
1214 free(bo_gem->relocs);
1215 bo_gem->relocs = NULL;
1216 }
1217 if (bo_gem->softpin_target) {
1218 free(bo_gem->softpin_target);
1219 bo_gem->softpin_target = NULL;
1220 bo_gem->softpin_target_size = 0;
1221 }
1222
1223 /* Clear any left-over mappings */
1224 if (bo_gem->map_count) {
1225 DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
1226 bo_gem->map_count = 0;
1227 drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
1228 drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
1229 }
1230
1231 bucket = drm_bacon_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
1232 /* Put the buffer into our internal cache for reuse if we can. */
1233 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
1234 drm_bacon_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
1235 I915_MADV_DONTNEED)) {
1236 bo_gem->free_time = time;
1237
1238 bo_gem->name = NULL;
1239 bo_gem->validate_index = -1;
1240
1241 list_addtail(&bo_gem->head, &bucket->head);
1242 } else {
1243 drm_bacon_gem_bo_free(bo);
1244 }
1245 }
1246
1247 static void drm_bacon_gem_bo_unreference_locked_timed(drm_bacon_bo *bo,
1248 time_t time)
1249 {
1250 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
1251
1252 assert(p_atomic_read(&bo_gem->refcount) > 0);
1253 if (p_atomic_dec_zero(&bo_gem->refcount))
1254 drm_bacon_gem_bo_unreference_final(bo, time);
1255 }
1256
1257 void
1258 drm_bacon_bo_unreference(drm_bacon_bo *bo)
1259 {
1260 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
1261
1262 if (bo == NULL)
1263 return;
1264
1265 assert(p_atomic_read(&bo_gem->refcount) > 0);
1266
1267 if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
1268 drm_bacon_bufmgr_gem *bufmgr_gem =
1269 (drm_bacon_bufmgr_gem *) bo->bufmgr;
1270 struct timespec time;
1271
1272 clock_gettime(CLOCK_MONOTONIC, &time);
1273
1274 pthread_mutex_lock(&bufmgr_gem->lock);
1275
1276 if (p_atomic_dec_zero(&bo_gem->refcount)) {
1277 drm_bacon_gem_bo_unreference_final(bo, time.tv_sec);
1278 drm_bacon_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1279 }
1280
1281 pthread_mutex_unlock(&bufmgr_gem->lock);
1282 }
1283 }
1284
1285 int
1286 drm_bacon_bo_map(drm_bacon_bo *bo, int write_enable)
1287 {
1288 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
1289 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
1290 struct drm_i915_gem_set_domain set_domain;
1291 int ret;
1292
1293 if (bo_gem->is_userptr) {
1294 /* Return the same user ptr */
1295 bo->virtual = bo_gem->user_virtual;
1296 return 0;
1297 }
1298
1299 pthread_mutex_lock(&bufmgr_gem->lock);
1300
1301 if (bo_gem->map_count++ == 0)
1302 drm_bacon_gem_bo_open_vma(bufmgr_gem, bo_gem);
1303
1304 if (!bo_gem->mem_virtual) {
1305 struct drm_i915_gem_mmap mmap_arg;
1306
1307 DBG("bo_map: %d (%s), map_count=%d\n",
1308 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1309
1310 memclear(mmap_arg);
1311 mmap_arg.handle = bo_gem->gem_handle;
1312 mmap_arg.size = bo->size;
1313 ret = drmIoctl(bufmgr_gem->fd,
1314 DRM_IOCTL_I915_GEM_MMAP,
1315 &mmap_arg);
1316 if (ret != 0) {
1317 ret = -errno;
1318 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1319 __FILE__, __LINE__, bo_gem->gem_handle,
1320 bo_gem->name, strerror(errno));
1321 if (--bo_gem->map_count == 0)
1322 drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
1323 pthread_mutex_unlock(&bufmgr_gem->lock);
1324 return ret;
1325 }
1326 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1327 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
1328 }
1329 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1330 bo_gem->mem_virtual);
1331 bo->virtual = bo_gem->mem_virtual;
1332
1333 memclear(set_domain);
1334 set_domain.handle = bo_gem->gem_handle;
1335 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
1336 if (write_enable)
1337 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
1338 else
1339 set_domain.write_domain = 0;
1340 ret = drmIoctl(bufmgr_gem->fd,
1341 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1342 &set_domain);
1343 if (ret != 0) {
1344 DBG("%s:%d: Error setting to CPU domain %d: %s\n",
1345 __FILE__, __LINE__, bo_gem->gem_handle,
1346 strerror(errno));
1347 }
1348
1349 if (write_enable)
1350 bo_gem->mapped_cpu_write = true;
1351
1352 drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
1353 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
1354 pthread_mutex_unlock(&bufmgr_gem->lock);
1355
1356 return 0;
1357 }
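/*
 * CPU map usage sketch (illustrative; "data" is a placeholder): map for
 * writing, fill the buffer, then unmap so the vma accounting above can
 * recycle the mapping:
 *
 *     if (drm_bacon_bo_map(bo, 1) == 0) {
 *             memcpy(bo->virtual, data, bo->size);
 *             drm_bacon_bo_unmap(bo);
 *     }
 *
 * For small one-off uploads, drm_bacon_bo_subdata() below avoids holding
 * a mapping at all.
 */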
1358
1359 static int
1360 map_gtt(drm_bacon_bo *bo)
1361 {
1362 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
1363 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
1364 int ret;
1365
1366 if (bo_gem->is_userptr)
1367 return -EINVAL;
1368
1369 if (bo_gem->map_count++ == 0)
1370 drm_bacon_gem_bo_open_vma(bufmgr_gem, bo_gem);
1371
1372 /* Get a mapping of the buffer if we haven't before. */
1373 if (bo_gem->gtt_virtual == NULL) {
1374 struct drm_i915_gem_mmap_gtt mmap_arg;
1375
1376 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1377 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1378
1379 memclear(mmap_arg);
1380 mmap_arg.handle = bo_gem->gem_handle;
1381
1382 /* Get the fake offset back... */
1383 ret = drmIoctl(bufmgr_gem->fd,
1384 DRM_IOCTL_I915_GEM_MMAP_GTT,
1385 &mmap_arg);
1386 if (ret != 0) {
1387 ret = -errno;
1388 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1389 __FILE__, __LINE__,
1390 bo_gem->gem_handle, bo_gem->name,
1391 strerror(errno));
1392 if (--bo_gem->map_count == 0)
1393 drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
1394 return ret;
1395 }
1396
1397 /* and mmap it */
1398 bo_gem->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
1399 MAP_SHARED, bufmgr_gem->fd,
1400 mmap_arg.offset);
1401 if (bo_gem->gtt_virtual == MAP_FAILED) {
1402 bo_gem->gtt_virtual = NULL;
1403 ret = -errno;
1404 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1405 __FILE__, __LINE__,
1406 bo_gem->gem_handle, bo_gem->name,
1407 strerror(errno));
1408 if (--bo_gem->map_count == 0)
1409 drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
1410 return ret;
1411 }
1412 }
1413
1414 bo->virtual = bo_gem->gtt_virtual;
1415
1416 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1417 bo_gem->gtt_virtual);
1418
1419 return 0;
1420 }
1421
1422 int
1423 drm_bacon_gem_bo_map_gtt(drm_bacon_bo *bo)
1424 {
1425 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
1426 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
1427 struct drm_i915_gem_set_domain set_domain;
1428 int ret;
1429
1430 pthread_mutex_lock(&bufmgr_gem->lock);
1431
1432 ret = map_gtt(bo);
1433 if (ret) {
1434 pthread_mutex_unlock(&bufmgr_gem->lock);
1435 return ret;
1436 }
1437
1438 /* Now move it to the GTT domain so that the GPU and CPU
1439 * caches are flushed and the GPU isn't actively using the
1440 * buffer.
1441 *
1442 * The pagefault handler does this domain change for us when
1443 * it has unbound the BO from the GTT, but it's up to us to
1444 * tell it when we're about to use things if we had done
1445 * rendering and it still happens to be bound to the GTT.
1446 */
1447 memclear(set_domain);
1448 set_domain.handle = bo_gem->gem_handle;
1449 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1450 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1451 ret = drmIoctl(bufmgr_gem->fd,
1452 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1453 &set_domain);
1454 if (ret != 0) {
1455 DBG("%s:%d: Error setting domain %d: %s\n",
1456 __FILE__, __LINE__, bo_gem->gem_handle,
1457 strerror(errno));
1458 }
1459
1460 drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
1461 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1462 pthread_mutex_unlock(&bufmgr_gem->lock);
1463
1464 return 0;
1465 }
1466
1467 /**
1468 * Performs a mapping of the buffer object like the normal GTT
1469 * mapping, but avoids waiting for the GPU to be done reading from or
1470 * rendering to the buffer.
1471 *
1472 * This is used in the implementation of GL_ARB_map_buffer_range: The
1473 * user asks to create a buffer, then does a mapping, fills some
1474 * space, runs a drawing command, then asks to map it again without
1475 * synchronizing because it guarantees that it won't write over the
1476 * data that the GPU is busy using (or, more specifically, that if it
1477 * does write over the data, it acknowledges that rendering is
1478 * undefined).
1479 */
1480
1481 int
1482 drm_bacon_gem_bo_map_unsynchronized(drm_bacon_bo *bo)
1483 {
1484 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
1485 #ifdef HAVE_VALGRIND
1486 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
1487 #endif
1488 int ret;
1489
1490 /* If the CPU cache isn't coherent with the GTT, then use a
1491 * regular synchronized mapping. The problem is that we don't
1492 * track where the buffer was last used on the CPU side in
1493 * terms of drm_bacon_bo_map vs drm_bacon_gem_bo_map_gtt, so
1494 * we would potentially corrupt the buffer even when the user
1495 * does reasonable things.
1496 */
1497 if (!bufmgr_gem->has_llc)
1498 return drm_bacon_gem_bo_map_gtt(bo);
1499
1500 pthread_mutex_lock(&bufmgr_gem->lock);
1501
1502 ret = map_gtt(bo);
1503 if (ret == 0) {
1504 drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
1505 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1506 }
1507
1508 pthread_mutex_unlock(&bufmgr_gem->lock);
1509
1510 return ret;
1511 }
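/*
 * Mapping-path summary (as implemented above): drm_bacon_bo_map() gives a
 * cached CPU mmap and moves the BO to the CPU domain;
 * drm_bacon_gem_bo_map_gtt() maps through the GTT and moves the BO to the
 * GTT domain; drm_bacon_gem_bo_map_unsynchronized() skips the set-domain
 * call entirely, but only on LLC platforms where the CPU and GTT views
 * are coherent; on non-LLC hardware it falls back to the synchronized
 * GTT map.
 */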
1512
1513 int
1514 drm_bacon_bo_unmap(drm_bacon_bo *bo)
1515 {
1516 drm_bacon_bufmgr_gem *bufmgr_gem;
1517 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
1518 int ret = 0;
1519
1520 if (bo == NULL)
1521 return 0;
1522
1523 if (bo_gem->is_userptr)
1524 return 0;
1525
1526 bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
1527
1528 pthread_mutex_lock(&bufmgr_gem->lock);
1529
1530 if (bo_gem->map_count <= 0) {
1531 DBG("attempted to unmap an unmapped bo\n");
1532 pthread_mutex_unlock(&bufmgr_gem->lock);
1533 /* Preserve the old behaviour of just treating this as a
1534 * no-op rather than reporting the error.
1535 */
1536 return 0;
1537 }
1538
1539 if (bo_gem->mapped_cpu_write) {
1540 struct drm_i915_gem_sw_finish sw_finish;
1541
1542 /* Cause a flush to happen if the buffer's pinned for
1543 * scanout, so the results show up in a timely manner.
1544 * Unlike GTT set domains, this only does work if the
1545 * buffer should be scanout-related.
1546 */
1547 memclear(sw_finish);
1548 sw_finish.handle = bo_gem->gem_handle;
1549 ret = drmIoctl(bufmgr_gem->fd,
1550 DRM_IOCTL_I915_GEM_SW_FINISH,
1551 &sw_finish);
1552 ret = ret == -1 ? -errno : 0;
1553
1554 bo_gem->mapped_cpu_write = false;
1555 }
1556
1557 /* We need to unmap after every invocation, as we cannot keep an
1558 * open vma for every bo; that would exhaust the system limits
1559 * and cause later failures.
1560 */
1561 if (--bo_gem->map_count == 0) {
1562 drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
1563 drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
1564 bo->virtual = NULL;
1565 }
1566 pthread_mutex_unlock(&bufmgr_gem->lock);
1567
1568 return ret;
1569 }
1570
1571 int
1572 drm_bacon_bo_subdata(drm_bacon_bo *bo, unsigned long offset,
1573 unsigned long size, const void *data)
1574 {
1575 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
1576 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
1577 struct drm_i915_gem_pwrite pwrite;
1578 int ret;
1579
1580 if (bo_gem->is_userptr)
1581 return -EINVAL;
1582
1583 memclear(pwrite);
1584 pwrite.handle = bo_gem->gem_handle;
1585 pwrite.offset = offset;
1586 pwrite.size = size;
1587 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1588 ret = drmIoctl(bufmgr_gem->fd,
1589 DRM_IOCTL_I915_GEM_PWRITE,
1590 &pwrite);
1591 if (ret != 0) {
1592 ret = -errno;
1593 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1594 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1595 (int)size, strerror(errno));
1596 }
1597
1598 return ret;
1599 }
1600
1601 int
1602 drm_bacon_bo_get_subdata(drm_bacon_bo *bo, unsigned long offset,
1603 unsigned long size, void *data)
1604 {
1605 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
1606 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
1607 struct drm_i915_gem_pread pread;
1608 int ret;
1609
1610 if (bo_gem->is_userptr)
1611 return -EINVAL;
1612
1613 memclear(pread);
1614 pread.handle = bo_gem->gem_handle;
1615 pread.offset = offset;
1616 pread.size = size;
1617 pread.data_ptr = (uint64_t) (uintptr_t) data;
1618 ret = drmIoctl(bufmgr_gem->fd,
1619 DRM_IOCTL_I915_GEM_PREAD,
1620 &pread);
1621 if (ret != 0) {
1622 ret = -errno;
1623 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1624 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1625 (int)size, strerror(errno));
1626 }
1627
1628 return ret;
1629 }
1630
1631 /** Waits for all GPU rendering with the object to have completed. */
1632 void
1633 drm_bacon_bo_wait_rendering(drm_bacon_bo *bo)
1634 {
1635 drm_bacon_gem_bo_start_gtt_access(bo, 1);
1636 }
1637
1638 /**
1639 * Waits on a BO for the given amount of time.
1640 *
1641 * @bo: buffer object to wait for
1642 * @timeout_ns: amount of time to wait in nanoseconds.
1643 * If value is less than 0, an infinite wait will occur.
1644 *
1645 * Returns 0 if the wait was successful, i.e. the last batch referencing the
1646 * object has completed within the allotted time. Otherwise some negative return
1647 * value describes the error. Of particular interest is -ETIME when the wait has
1648 * failed to yield the desired result.
1649 *
1650 * Similar to drm_bacon_bo_wait_rendering() except that a timeout parameter
1651 * allows the operation to give up after a certain amount of time. Another
1652 * subtle difference is in the locking semantics: this variant does not hold
1653 * the lock for the duration of the wait, which makes the wait subject to a
1654 * larger userspace race window.
1655 *
1656 * The implementation shall wait until the object is no longer actively
1657 * referenced within a batch buffer at the time of the call. The wait does
1658 * not guard against the buffer being re-issued via another thread or a
1659 * flinked handle; userspace must make sure this race does not occur if such
1660 * precision is important.
1661 *
1662 * Note that some kernels have broken the promise of an infinite wait for
1663 * negative values; upgrade to the latest stable kernel if this is the case.
1664 */
1665 int
1666 drm_bacon_gem_bo_wait(drm_bacon_bo *bo, int64_t timeout_ns)
1667 {
1668 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
1669 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
1670 struct drm_i915_gem_wait wait;
1671 int ret;
1672
1673 if (!bufmgr_gem->has_wait_timeout) {
1674 DBG("%s:%d: Timed wait is not supported. Falling back to "
1675 "infinite wait\n", __FILE__, __LINE__);
1676 if (timeout_ns) {
1677 drm_bacon_bo_wait_rendering(bo);
1678 return 0;
1679 } else {
1680 return drm_bacon_bo_busy(bo) ? -ETIME : 0;
1681 }
1682 }
1683
1684 memclear(wait);
1685 wait.bo_handle = bo_gem->gem_handle;
1686 wait.timeout_ns = timeout_ns;
1687 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
1688 if (ret == -1)
1689 return -errno;
1690
1691 return ret;
1692 }
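/*
 * Usage sketch (illustrative): wait up to one second for outstanding GPU
 * work on a BO, treating -ETIME as "still busy":
 *
 *     int ret = drm_bacon_gem_bo_wait(bo, 1000000000ll);
 *     if (ret == -ETIME)
 *             ... the last batch referencing bo has not completed yet ...
 */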
1693
1694 /**
1695 * Sets the object to the GTT read and possibly write domain, used by the X
1696 * 2D driver in the absence of kernel support to do drm_bacon_gem_bo_map_gtt().
1697 *
1698 * In combination with drm_bacon_gem_bo_pin() and manual fence management, we
1699 * can do tiled pixmaps this way.
1700 */
1701 void
1702 drm_bacon_gem_bo_start_gtt_access(drm_bacon_bo *bo, int write_enable)
1703 {
1704 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
1705 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
1706 struct drm_i915_gem_set_domain set_domain;
1707 int ret;
1708
1709 memclear(set_domain);
1710 set_domain.handle = bo_gem->gem_handle;
1711 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1712 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1713 ret = drmIoctl(bufmgr_gem->fd,
1714 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1715 &set_domain);
1716 if (ret != 0) {
1717 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1718 __FILE__, __LINE__, bo_gem->gem_handle,
1719 set_domain.read_domains, set_domain.write_domain,
1720 strerror(errno));
1721 }
1722 }
1723
1724 static void
1725 drm_bacon_bufmgr_gem_destroy(drm_bacon_bufmgr *bufmgr)
1726 {
1727 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
1728 struct drm_gem_close close_bo;
1729 int i, ret;
1730
1731 free(bufmgr_gem->exec2_objects);
1732 free(bufmgr_gem->exec_bos);
1733
1734 pthread_mutex_destroy(&bufmgr_gem->lock);
1735
1736 /* Free any cached buffer objects we were going to reuse */
1737 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1738 struct drm_bacon_gem_bo_bucket *bucket =
1739 &bufmgr_gem->cache_bucket[i];
1740 drm_bacon_bo_gem *bo_gem;
1741
1742 while (!list_empty(&bucket->head)) {
1743 bo_gem = LIST_ENTRY(drm_bacon_bo_gem,
1744 bucket->head.next, head);
1745 list_del(&bo_gem->head);
1746
1747 drm_bacon_gem_bo_free(&bo_gem->bo);
1748 }
1749 }
1750
1751 /* Release userptr bo kept hanging around for optimisation. */
1752 if (bufmgr_gem->userptr_active.ptr) {
1753 memclear(close_bo);
1754 close_bo.handle = bufmgr_gem->userptr_active.handle;
1755 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
1756 free(bufmgr_gem->userptr_active.ptr);
1757 if (ret)
1758 fprintf(stderr,
1759 "Failed to release test userptr object! (%d) "
1760 "i915 kernel driver may not be sane!\n", errno);
1761 }
1762
1763 free(bufmgr);
1764 }
1765
1766 /**
1767 * Records a relocation to target_bo in the reloc buffer's relocation list;
1768 * the target is added to the validation list when the batch is processed.
1769 *
1770 * The relocation entry at the given offset must already contain the
1771 * precomputed relocation value, because the kernel will optimize out
1772 * the relocation entry write when the buffer hasn't moved from the
1773 * last known offset in target_bo.
1774 */
1775 static int
1776 do_bo_emit_reloc(drm_bacon_bo *bo, uint32_t offset,
1777 drm_bacon_bo *target_bo, uint32_t target_offset,
1778 uint32_t read_domains, uint32_t write_domain)
1779 {
1780 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
1781 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
1782 drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *) target_bo;
1783
1784 if (bo_gem->has_error)
1785 return -ENOMEM;
1786
1787 if (target_bo_gem->has_error) {
1788 bo_gem->has_error = true;
1789 return -ENOMEM;
1790 }
1791
1792 /* Create a new relocation list if needed */
1793 if (bo_gem->relocs == NULL && drm_bacon_setup_reloc_list(bo))
1794 return -ENOMEM;
1795
1796 /* Check overflow */
1797 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
1798
1799 /* Check args */
1800 assert(offset <= bo->size - 4);
1801 assert((write_domain & (write_domain - 1)) == 0);
1802
1803 /* Make sure that we're not adding a reloc to something whose size has
1804 * already been accounted for.
1805 */
1806 assert(!bo_gem->used_as_reloc_target);
1807 if (target_bo_gem != bo_gem) {
1808 target_bo_gem->used_as_reloc_target = true;
1809 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1810 }
1811
1812 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
1813 if (target_bo != bo)
1814 drm_bacon_bo_reference(target_bo);
1815
1816 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1817 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1818 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1819 target_bo_gem->gem_handle;
1820 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1821 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1822 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
1823 bo_gem->reloc_count++;
1824
1825 return 0;
1826 }
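/*
 * Relocation usage sketch (illustrative; batch_bo, batch_offset and
 * target_bo are placeholders for whatever the caller is assembling):
 * batch code writes the presumed address of the target into the batch
 * and then records the relocation so the kernel can patch it if the
 * target moves. The two zeros are the target_offset (delta) and the
 * write domain:
 *
 *     drm_bacon_bo_emit_reloc(batch_bo, batch_offset,
 *                             target_bo, 0,
 *                             I915_GEM_DOMAIN_RENDER, 0);
 *
 * Softpinned targets (EXEC_OBJECT_PINNED) take the
 * drm_bacon_gem_bo_add_softpin_target() path below instead.
 */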
1827
1828 static int
1829 drm_bacon_gem_bo_add_softpin_target(drm_bacon_bo *bo, drm_bacon_bo *target_bo)
1830 {
1831 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
1832 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
1833 drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *) target_bo;
1834
1835 if (bo_gem->has_error)
1836 return -ENOMEM;
1837
1838 if (target_bo_gem->has_error) {
1839 bo_gem->has_error = true;
1840 return -ENOMEM;
1841 }
1842
1843 if (!(target_bo_gem->kflags & EXEC_OBJECT_PINNED))
1844 return -EINVAL;
1845 if (target_bo_gem == bo_gem)
1846 return -EINVAL;
1847
1848 if (bo_gem->softpin_target_count == bo_gem->softpin_target_size) {
1849 int new_size = bo_gem->softpin_target_size * 2;
1850 if (new_size == 0)
1851 new_size = bufmgr_gem->max_relocs;
1852
1853 drm_bacon_bo **targets =
1854 realloc(bo_gem->softpin_target, new_size * sizeof(drm_bacon_bo *));
1855 if (!targets)
1856 return -ENOMEM;
1857 bo_gem->softpin_target = targets;
1858 bo_gem->softpin_target_size = new_size;
1859 }
1860 bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo;
1861 drm_bacon_bo_reference(target_bo);
1862 bo_gem->softpin_target_count++;
1863
1864 return 0;
1865 }
1866
1867 int
1868 drm_bacon_bo_emit_reloc(drm_bacon_bo *bo, uint32_t offset,
1869 drm_bacon_bo *target_bo, uint32_t target_offset,
1870 uint32_t read_domains, uint32_t write_domain)
1871 {
1872 drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *)target_bo;
1873
1874 if (target_bo_gem->kflags & EXEC_OBJECT_PINNED)
1875 return drm_bacon_gem_bo_add_softpin_target(bo, target_bo);
1876 else
1877 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
1878 read_domains, write_domain);
1879 }
1880
1881 int
1882 drm_bacon_gem_bo_get_reloc_count(drm_bacon_bo *bo)
1883 {
1884 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
1885
1886 return bo_gem->reloc_count;
1887 }
1888
1889 /**
1890 * Removes existing relocation entries in the BO after "start".
1891 *
1892 * This allows a user to avoid a two-step process for state setup with
1893 * counting up all the buffer objects and doing a
1894 * drm_bacon_bufmgr_check_aperture_space() before emitting any of the
1895 * relocations for the state setup. Instead, save the state of the
1896  * batchbuffer including drm_bacon_gem_bo_get_reloc_count(), emit all the
1897 * state, and then check if it still fits in the aperture.
1898 *
1899 * Any further drm_bacon_bufmgr_check_aperture_space() queries
1900 * involving this buffer in the tree are undefined after this call.
1901 *
1902 * This also removes all softpinned targets being referenced by the BO.
1903 */
1904 void
1905 drm_bacon_gem_bo_clear_relocs(drm_bacon_bo *bo, int start)
1906 {
1907 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
1908 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
1909 int i;
1910 struct timespec time;
1911
1912 clock_gettime(CLOCK_MONOTONIC, &time);
1913
1914 assert(bo_gem->reloc_count >= start);
1915
1916 /* Unreference the cleared target buffers */
1917 pthread_mutex_lock(&bufmgr_gem->lock);
1918
1919 for (i = start; i < bo_gem->reloc_count; i++) {
1920 drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *) bo_gem->reloc_target_info[i].bo;
1921 if (&target_bo_gem->bo != bo) {
1922 drm_bacon_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
1923 time.tv_sec);
1924 }
1925 }
1926 bo_gem->reloc_count = start;
1927
1928 for (i = 0; i < bo_gem->softpin_target_count; i++) {
1929 drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *) bo_gem->softpin_target[i];
1930 drm_bacon_gem_bo_unreference_locked_timed(&target_bo_gem->bo, time.tv_sec);
1931 }
1932 bo_gem->softpin_target_count = 0;
1933
1934 pthread_mutex_unlock(&bufmgr_gem->lock);
1935
1936 }
1937
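/* Illustrative usage sketch (editor's addition) of the workflow described in
* the comment above: record the relocation count, emit state speculatively,
* then back the batch out and flush if it no longer fits in the aperture.
* emit_state() and flush_batch() are hypothetical caller-side helpers.
*
*      int saved = drm_bacon_gem_bo_get_reloc_count(batch_bo);
*      emit_state(batch_bo);
*      if (drm_bacon_bufmgr_check_aperture_space(&batch_bo, 1) != 0) {
*              drm_bacon_gem_bo_clear_relocs(batch_bo, saved);
*              flush_batch();
*      }
*/
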
1938 static void
1939 drm_bacon_gem_bo_process_reloc2(drm_bacon_bo *bo)
1940 {
1941 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *)bo;
1942 int i;
1943
1944 if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL)
1945 return;
1946
1947 for (i = 0; i < bo_gem->reloc_count; i++) {
1948 drm_bacon_bo *target_bo = bo_gem->reloc_target_info[i].bo;
1949
1950 if (target_bo == bo)
1951 continue;
1952
1953 drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
1954
1955 /* Continue walking the tree depth-first. */
1956 drm_bacon_gem_bo_process_reloc2(target_bo);
1957
1958 /* Add the target to the validate list */
1959 drm_bacon_add_validate_buffer2(target_bo);
1960 }
1961
1962 for (i = 0; i < bo_gem->softpin_target_count; i++) {
1963 drm_bacon_bo *target_bo = bo_gem->softpin_target[i];
1964
1965 if (target_bo == bo)
1966 continue;
1967
1968 drm_bacon_gem_bo_mark_mmaps_incoherent(bo);
1969 drm_bacon_gem_bo_process_reloc2(target_bo);
1970 drm_bacon_add_validate_buffer2(target_bo);
1971 }
1972 }
1973
1974 static void
1975 drm_bacon_update_buffer_offsets2(drm_bacon_bufmgr_gem *bufmgr_gem)
1976 {
1977 int i;
1978
1979 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1980 drm_bacon_bo *bo = bufmgr_gem->exec_bos[i];
1981 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *)bo;
1982
1983 /* Update the buffer offset */
1984 if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
1985 		/* If we see a softpinned object here, it means that the kernel
1986 		 * has relocated our object, which indicates a programming error.
1987 */
1988 assert(!(bo_gem->kflags & EXEC_OBJECT_PINNED));
1989 DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
1990 bo_gem->gem_handle, bo_gem->name,
1991 upper_32_bits(bo->offset64),
1992 lower_32_bits(bo->offset64),
1993 upper_32_bits(bufmgr_gem->exec2_objects[i].offset),
1994 lower_32_bits(bufmgr_gem->exec2_objects[i].offset));
1995 bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
1996 bo->offset = bufmgr_gem->exec2_objects[i].offset;
1997 }
1998 }
1999 }
2000
2001 static int
2002 do_exec2(drm_bacon_bo *bo, int used, drm_bacon_context *ctx,
2003 int in_fence, int *out_fence,
2004 unsigned int flags)
2005 {
2006 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bo->bufmgr;
2007 struct drm_i915_gem_execbuffer2 execbuf;
2008 int ret = 0;
2009 int i;
2010
2011 if (to_bo_gem(bo)->has_error)
2012 return -ENOMEM;
2013
2014 switch (flags & 0x7) {
2015 default:
2016 return -EINVAL;
2017 case I915_EXEC_BLT:
2018 if (!bufmgr_gem->has_blt)
2019 return -EINVAL;
2020 break;
2021 case I915_EXEC_BSD:
2022 if (!bufmgr_gem->has_bsd)
2023 return -EINVAL;
2024 break;
2025 case I915_EXEC_VEBOX:
2026 if (!bufmgr_gem->has_vebox)
2027 return -EINVAL;
2028 break;
2029 case I915_EXEC_RENDER:
2030 case I915_EXEC_DEFAULT:
2031 break;
2032 }
2033
2034 pthread_mutex_lock(&bufmgr_gem->lock);
2035 /* Update indices and set up the validate list. */
2036 drm_bacon_gem_bo_process_reloc2(bo);
2037
2038 /* Add the batch buffer to the validation list. There are no relocations
2039 * pointing to it.
2040 */
2041 drm_bacon_add_validate_buffer2(bo);
2042
2043 memclear(execbuf);
2044 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
2045 execbuf.buffer_count = bufmgr_gem->exec_count;
2046 execbuf.batch_start_offset = 0;
2047 execbuf.batch_len = used;
2048 execbuf.cliprects_ptr = 0;
2049 execbuf.num_cliprects = 0;
2050 execbuf.DR1 = 0;
2051 execbuf.DR4 = 0;
2052 execbuf.flags = flags;
2053 if (ctx == NULL)
2054 i915_execbuffer2_set_context_id(execbuf, 0);
2055 else
2056 i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
2057 execbuf.rsvd2 = 0;
2058 if (in_fence != -1) {
2059 execbuf.rsvd2 = in_fence;
2060 execbuf.flags |= I915_EXEC_FENCE_IN;
2061 }
2062 if (out_fence != NULL) {
2063 *out_fence = -1;
2064 execbuf.flags |= I915_EXEC_FENCE_OUT;
2065 }
2066
2067 if (bufmgr_gem->no_exec)
2068 goto skip_execution;
2069
2070 ret = drmIoctl(bufmgr_gem->fd,
2071 DRM_IOCTL_I915_GEM_EXECBUFFER2_WR,
2072 &execbuf);
2073 if (ret != 0) {
2074 ret = -errno;
2075 if (ret == -ENOSPC) {
2076 DBG("Execbuffer fails to pin. "
2077 "Estimate: %u. Actual: %u. Available: %u\n",
2078 drm_bacon_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2079 bufmgr_gem->exec_count),
2080 drm_bacon_gem_compute_batch_space(bufmgr_gem->exec_bos,
2081 bufmgr_gem->exec_count),
2082 (unsigned int) bufmgr_gem->gtt_size);
2083 }
2084 }
2085 drm_bacon_update_buffer_offsets2(bufmgr_gem);
2086
2087 if (ret == 0 && out_fence != NULL)
2088 *out_fence = execbuf.rsvd2 >> 32;
2089
2090 skip_execution:
2091 if (INTEL_DEBUG & DEBUG_BUFMGR)
2092 drm_bacon_gem_dump_validation_list(bufmgr_gem);
2093
2094 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2095 drm_bacon_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
2096
2097 bo_gem->idle = false;
2098
2099 /* Disconnect the buffer from the validate list */
2100 bo_gem->validate_index = -1;
2101 bufmgr_gem->exec_bos[i] = NULL;
2102 }
2103 bufmgr_gem->exec_count = 0;
2104 pthread_mutex_unlock(&bufmgr_gem->lock);
2105
2106 return ret;
2107 }
2108
2109 int
2110 drm_bacon_bo_exec(drm_bacon_bo *bo, int used)
2111 {
2112 return do_exec2(bo, used, NULL, -1, NULL, I915_EXEC_RENDER);
2113 }
2114
2115 int
2116 drm_bacon_bo_mrb_exec(drm_bacon_bo *bo, int used, unsigned int flags)
2117 {
2118 return do_exec2(bo, used, NULL, -1, NULL, flags);
2119 }
2120
2121 int
2122 drm_bacon_gem_bo_context_exec(drm_bacon_bo *bo, drm_bacon_context *ctx,
2123 int used, unsigned int flags)
2124 {
2125 return do_exec2(bo, used, ctx, -1, NULL, flags);
2126 }
2127
2128 int
2129 drm_bacon_gem_bo_fence_exec(drm_bacon_bo *bo,
2130 drm_bacon_context *ctx,
2131 int used,
2132 int in_fence,
2133 int *out_fence,
2134 unsigned int flags)
2135 {
2136 return do_exec2(bo, used, ctx, in_fence, out_fence, flags);
2137 }
2138
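/* Illustrative usage sketch (editor's addition): submitting a batch on the
* render ring while importing a sync-file fence and collecting an output
* fence.  "used" is the number of batch bytes written; in_fd is a
* hypothetical caller-owned sync-file descriptor.
*
*      int out_fd = -1;
*      int err = drm_bacon_gem_bo_fence_exec(batch_bo, ctx, used,
*                                            in_fd, &out_fd,
*                                            I915_EXEC_RENDER);
*      if (err == 0 && out_fd != -1)
*              close(out_fd);      (after waiting on or exporting it)
*/
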
2139 static int
2140 drm_bacon_gem_bo_set_tiling_internal(drm_bacon_bo *bo,
2141 uint32_t tiling_mode,
2142 uint32_t stride)
2143 {
2144 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
2145 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
2146 struct drm_i915_gem_set_tiling set_tiling;
2147 int ret;
2148
2149 if (bo_gem->global_name == 0 &&
2150 tiling_mode == bo_gem->tiling_mode &&
2151 stride == bo_gem->stride)
2152 return 0;
2153
2154 memset(&set_tiling, 0, sizeof(set_tiling));
2155 do {
2156 /* set_tiling is slightly broken and overwrites the
2157 * input on the error path, so we have to open code
2158 		 * drmIoctl.
2159 */
2160 set_tiling.handle = bo_gem->gem_handle;
2161 set_tiling.tiling_mode = tiling_mode;
2162 set_tiling.stride = stride;
2163
2164 ret = ioctl(bufmgr_gem->fd,
2165 DRM_IOCTL_I915_GEM_SET_TILING,
2166 &set_tiling);
2167 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
2168 if (ret == -1)
2169 return -errno;
2170
2171 bo_gem->tiling_mode = set_tiling.tiling_mode;
2172 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
2173 bo_gem->stride = set_tiling.stride;
2174 return 0;
2175 }
2176
2177 int
2178 drm_bacon_bo_set_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
2179 uint32_t stride)
2180 {
2181 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
2182 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
2183 int ret;
2184
2185 /* Tiling with userptr surfaces is not supported
2186 	 * on all hardware, so refuse it for the time being.
2187 */
2188 if (bo_gem->is_userptr)
2189 return -EINVAL;
2190
2191 /* Linear buffers have no stride. By ensuring that we only ever use
2192 * stride 0 with linear buffers, we simplify our code.
2193 */
2194 if (*tiling_mode == I915_TILING_NONE)
2195 stride = 0;
2196
2197 ret = drm_bacon_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
2198 if (ret == 0)
2199 drm_bacon_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
2200
2201 *tiling_mode = bo_gem->tiling_mode;
2202 return ret;
2203 }
2204
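/* Illustrative usage sketch (editor's addition): tiling_mode is an in/out
* parameter, so the caller must re-read it after the call because the kernel
* may downgrade the requested mode.  The pitch value and the
* fall_back_to_linear() helper are hypothetical.
*
*      uint32_t tiling = I915_TILING_X;
*      int err = drm_bacon_bo_set_tiling(bo, &tiling, 4096);
*      if (err == 0 && tiling != I915_TILING_X)
*              fall_back_to_linear(bo);
*/
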
2205 int
2206 drm_bacon_bo_get_tiling(drm_bacon_bo *bo, uint32_t * tiling_mode,
2207 uint32_t *swizzle_mode)
2208 {
2209 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
2210
2211 *tiling_mode = bo_gem->tiling_mode;
2212 *swizzle_mode = bo_gem->swizzle_mode;
2213 return 0;
2214 }
2215
2216 int
2217 drm_bacon_bo_set_softpin_offset(drm_bacon_bo *bo, uint64_t offset)
2218 {
2219 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
2220
2221 bo->offset64 = offset;
2222 bo->offset = offset;
2223 bo_gem->kflags |= EXEC_OBJECT_PINNED;
2224
2225 return 0;
2226 }
2227
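/* Illustrative usage sketch (editor's addition): pinning a BO at a fixed GTT
* address chosen by a caller-managed VMA allocator (the address below is
* hypothetical).  Once pinned, execbuf will not relocate the object, and
* emit_reloc() calls against it become softpin-target bookkeeping only.
*
*      drm_bacon_bo_set_softpin_offset(bo, 0x100000000ull);
*/
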
2228 drm_bacon_bo *
2229 drm_bacon_bo_gem_create_from_prime(drm_bacon_bufmgr *bufmgr, int prime_fd, int size)
2230 {
2231 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
2232 int ret;
2233 uint32_t handle;
2234 drm_bacon_bo_gem *bo_gem;
2235 struct drm_i915_gem_get_tiling get_tiling;
2236
2237 pthread_mutex_lock(&bufmgr_gem->lock);
2238 ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
2239 if (ret) {
2240 DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
2241 pthread_mutex_unlock(&bufmgr_gem->lock);
2242 return NULL;
2243 }
2244
2245 /*
2246 * See if the kernel has already returned this buffer to us. Just as
2247 	 * for named buffers, we must not create two BOs pointing at the same
2248 	 * kernel object.
2249 */
2250 HASH_FIND(handle_hh, bufmgr_gem->handle_table,
2251 &handle, sizeof(handle), bo_gem);
2252 if (bo_gem) {
2253 drm_bacon_bo_reference(&bo_gem->bo);
2254 goto out;
2255 }
2256
2257 bo_gem = calloc(1, sizeof(*bo_gem));
2258 if (!bo_gem)
2259 goto out;
2260
2261 p_atomic_set(&bo_gem->refcount, 1);
2262 list_inithead(&bo_gem->vma_list);
2263
2264 /* Determine size of bo. The fd-to-handle ioctl really should
2265 * return the size, but it doesn't. If we have kernel 3.12 or
2266 * later, we can lseek on the prime fd to get the size. Older
2267 * kernels will just fail, in which case we fall back to the
2268 	 * provided (estimated or guessed) size. */
2269 ret = lseek(prime_fd, 0, SEEK_END);
2270 if (ret != -1)
2271 bo_gem->bo.size = ret;
2272 else
2273 bo_gem->bo.size = size;
2274
2275 bo_gem->bo.handle = handle;
2276 bo_gem->bo.bufmgr = bufmgr;
2277
2278 bo_gem->gem_handle = handle;
2279 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
2280 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
2281
2282 bo_gem->name = "prime";
2283 bo_gem->validate_index = -1;
2284 bo_gem->used_as_reloc_target = false;
2285 bo_gem->has_error = false;
2286 bo_gem->reusable = false;
2287
2288 memclear(get_tiling);
2289 get_tiling.handle = bo_gem->gem_handle;
2290 if (drmIoctl(bufmgr_gem->fd,
2291 DRM_IOCTL_I915_GEM_GET_TILING,
2292 &get_tiling))
2293 goto err;
2294
2295 bo_gem->tiling_mode = get_tiling.tiling_mode;
2296 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
2297 /* XXX stride is unknown */
2298 drm_bacon_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
2299
2300 out:
2301 pthread_mutex_unlock(&bufmgr_gem->lock);
2302 return &bo_gem->bo;
2303
2304 err:
2305 drm_bacon_gem_bo_free(&bo_gem->bo);
2306 pthread_mutex_unlock(&bufmgr_gem->lock);
2307 return NULL;
2308 }
2309
2310 int
2311 drm_bacon_bo_gem_export_to_prime(drm_bacon_bo *bo, int *prime_fd)
2312 {
2313 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
2314 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
2315
2316 if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
2317 DRM_CLOEXEC, prime_fd) != 0)
2318 return -errno;
2319
2320 bo_gem->reusable = false;
2321
2322 return 0;
2323 }
2324
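/* Illustrative usage sketch (editor's addition): sharing a BO with another
* process or driver via PRIME.  The exporter hands out a dma-buf fd, the
* importer wraps it back into a BO; error handling is omitted.
*
*      int fd;
*      drm_bacon_bo_gem_export_to_prime(bo, &fd);
*      ... pass fd to the importing side ...
*      drm_bacon_bo *imported =
*              drm_bacon_bo_gem_create_from_prime(bufmgr, fd, bo->size);
*/
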
2325 int
2326 drm_bacon_bo_flink(drm_bacon_bo *bo, uint32_t *name)
2327 {
2328 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
2329 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
2330
2331 if (!bo_gem->global_name) {
2332 struct drm_gem_flink flink;
2333
2334 memclear(flink);
2335 flink.handle = bo_gem->gem_handle;
2336 if (drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink))
2337 return -errno;
2338
2339 pthread_mutex_lock(&bufmgr_gem->lock);
2340 if (!bo_gem->global_name) {
2341 bo_gem->global_name = flink.name;
2342 bo_gem->reusable = false;
2343
2344 HASH_ADD(name_hh, bufmgr_gem->name_table,
2345 global_name, sizeof(bo_gem->global_name),
2346 bo_gem);
2347 }
2348 pthread_mutex_unlock(&bufmgr_gem->lock);
2349 }
2350
2351 *name = bo_gem->global_name;
2352 return 0;
2353 }
2354
2355 /**
2356 * Enables unlimited caching of buffer objects for reuse.
2357 *
2358 * This is potentially very memory expensive, as the cache at each bucket
2359 * size is only bounded by how many buffers of that size we've managed to have
2360 * in flight at once.
2361 */
2362 void
2363 drm_bacon_bufmgr_gem_enable_reuse(drm_bacon_bufmgr *bufmgr)
2364 {
2365 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
2366
2367 bufmgr_gem->bo_reuse = true;
2368 }
2369
2370 /**
2371 * Disables implicit synchronisation before executing the bo
2372 *
2373 * This will cause rendering corruption unless you correctly manage explicit
2374 * fences for all rendering involving this buffer - including use by others.
2375 * Disabling the implicit serialisation is only required if that serialisation
2376 * is too coarse (for example, you have split the buffer into many
2377 * non-overlapping regions and are sharing the whole buffer between concurrent
2378 * independent command streams).
2379 *
2380 * Note the kernel must advertise support via I915_PARAM_HAS_EXEC_ASYNC,
2381  * which can be checked using drm_bacon_bufmgr_gem_can_disable_implicit_sync,
2382 * or subsequent execbufs involving the bo will generate EINVAL.
2383 */
2384 void
2385 drm_bacon_gem_bo_disable_implicit_sync(drm_bacon_bo *bo)
2386 {
2387 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
2388
2389 bo_gem->kflags |= EXEC_OBJECT_ASYNC;
2390 }
2391
2392 /**
2393 * Enables implicit synchronisation before executing the bo
2394 *
2395 * This is the default behaviour of the kernel, to wait upon prior writes
2396 * completing on the object before rendering with it, or to wait for prior
2397 * reads to complete before writing into the object.
2398 * drm_bacon_gem_bo_disable_implicit_sync() can stop this behaviour, telling
2399 * the kernel never to insert a stall before using the object. Then this
2400 * function can be used to restore the implicit sync before subsequent
2401 * rendering.
2402 */
2403 void
2404 drm_bacon_gem_bo_enable_implicit_sync(drm_bacon_bo *bo)
2405 {
2406 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
2407
2408 bo_gem->kflags &= ~EXEC_OBJECT_ASYNC;
2409 }
2410
2411 /**
2412 * Query whether the kernel supports disabling of its implicit synchronisation
2413 * before execbuf. See drm_bacon_gem_bo_disable_implicit_sync()
2414 */
2415 int
2416 drm_bacon_bufmgr_gem_can_disable_implicit_sync(drm_bacon_bufmgr *bufmgr)
2417 {
2418 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bufmgr;
2419
2420 return bufmgr_gem->has_exec_async;
2421 }
2422
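/* Illustrative usage sketch (editor's addition): only opt out of the implicit
* fencing described above when the kernel advertises EXEC_ASYNC support.
*
*      if (drm_bacon_bufmgr_gem_can_disable_implicit_sync(bufmgr))
*              drm_bacon_gem_bo_disable_implicit_sync(bo);
*/
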
2423 /**
2424 * Return the additional aperture space required by the tree of buffer objects
2425 * rooted at bo.
2426 */
2427 static int
2428 drm_bacon_gem_bo_get_aperture_space(drm_bacon_bo *bo)
2429 {
2430 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
2431 int i;
2432 int total = 0;
2433
2434 if (bo == NULL || bo_gem->included_in_check_aperture)
2435 return 0;
2436
2437 total += bo->size;
2438 bo_gem->included_in_check_aperture = true;
2439
2440 for (i = 0; i < bo_gem->reloc_count; i++)
2441 total +=
2442 drm_bacon_gem_bo_get_aperture_space(bo_gem->
2443 reloc_target_info[i].bo);
2444
2445 return total;
2446 }
2447
2448 /**
2449 * Clear the flag set by drm_bacon_gem_bo_get_aperture_space() so we're ready
2450 * for the next drm_bacon_bufmgr_check_aperture_space() call.
2451 */
2452 static void
2453 drm_bacon_gem_bo_clear_aperture_space_flag(drm_bacon_bo *bo)
2454 {
2455 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
2456 int i;
2457
2458 if (bo == NULL || !bo_gem->included_in_check_aperture)
2459 return;
2460
2461 bo_gem->included_in_check_aperture = false;
2462
2463 for (i = 0; i < bo_gem->reloc_count; i++)
2464 drm_bacon_gem_bo_clear_aperture_space_flag(bo_gem->
2465 reloc_target_info[i].bo);
2466 }
2467
2468 /**
2469 * Return a conservative estimate for the amount of aperture required
2470 * for a collection of buffers. This may double-count some buffers.
2471 */
2472 static unsigned int
2473 drm_bacon_gem_estimate_batch_space(drm_bacon_bo **bo_array, int count)
2474 {
2475 int i;
2476 unsigned int total = 0;
2477
2478 for (i = 0; i < count; i++) {
2479 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo_array[i];
2480 if (bo_gem != NULL)
2481 total += bo_gem->reloc_tree_size;
2482 }
2483 return total;
2484 }
2485
2486 /**
2487 * Return the amount of aperture needed for a collection of buffers.
2488 * This avoids double counting any buffers, at the cost of looking
2489 * at every buffer in the set.
2490 */
2491 static unsigned int
2492 drm_bacon_gem_compute_batch_space(drm_bacon_bo **bo_array, int count)
2493 {
2494 int i;
2495 unsigned int total = 0;
2496
2497 for (i = 0; i < count; i++) {
2498 total += drm_bacon_gem_bo_get_aperture_space(bo_array[i]);
2499 /* For the first buffer object in the array, we get an
2500 * accurate count back for its reloc_tree size (since nothing
2501 * had been flagged as being counted yet). We can save that
2502 * value out as a more conservative reloc_tree_size that
2503 * avoids double-counting target buffers. Since the first
2504 * buffer happens to usually be the batch buffer in our
2505 * callers, this can pull us back from doing the tree
2506 * walk on every new batch emit.
2507 */
2508 if (i == 0) {
2509 drm_bacon_bo_gem *bo_gem =
2510 (drm_bacon_bo_gem *) bo_array[i];
2511 bo_gem->reloc_tree_size = total;
2512 }
2513 }
2514
2515 for (i = 0; i < count; i++)
2516 drm_bacon_gem_bo_clear_aperture_space_flag(bo_array[i]);
2517 return total;
2518 }
2519
2520 /**
2521  * Return -ENOSPC if the batchbuffer should be flushed before attempting to
2522 * emit rendering referencing the buffers pointed to by bo_array.
2523 *
2524 * This is required because if we try to emit a batchbuffer with relocations
2525 * to a tree of buffers that won't simultaneously fit in the aperture,
2526 * the rendering will return an error at a point where the software is not
2527 * prepared to recover from it.
2528 *
2529 * However, we also want to emit the batchbuffer significantly before we reach
2530 * the limit, as a series of batchbuffers each of which references buffers
2531 * covering almost all of the aperture means that at each emit we end up
2532 * waiting to evict a buffer from the last rendering, and we get synchronous
2533 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
2534 * get better parallelism.
2535 */
2536 int
2537 drm_bacon_bufmgr_check_aperture_space(drm_bacon_bo **bo_array, int count)
2538 {
2539 drm_bacon_bufmgr_gem *bufmgr_gem =
2540 (drm_bacon_bufmgr_gem *) bo_array[0]->bufmgr;
2541 unsigned int total = 0;
2542 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
2543
2544 total = drm_bacon_gem_estimate_batch_space(bo_array, count);
2545
2546 if (total > threshold)
2547 total = drm_bacon_gem_compute_batch_space(bo_array, count);
2548
2549 if (total > threshold) {
2550 DBG("check_space: overflowed available aperture, "
2551 "%dkb vs %dkb\n",
2552 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
2553 return -ENOSPC;
2554 } else {
2555 DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
2556 (int)bufmgr_gem->gtt_size / 1024);
2557 return 0;
2558 }
2559 }
2560
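/* Illustrative usage sketch (editor's addition): the cheap estimate above is
* only refined into the exact tree walk once it crosses the 3/4-of-GTT
* threshold, so this check is inexpensive enough to run on every state emit.
* flush_batch() is a hypothetical caller-side helper.
*
*      if (drm_bacon_bufmgr_check_aperture_space(bos, n_bos) == -ENOSPC)
*              flush_batch();
*/
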
2561 /*
2562 * Disable buffer reuse for objects which are shared with the kernel
2563 * as scanout buffers
2564 */
2565 int
2566 drm_bacon_bo_disable_reuse(drm_bacon_bo *bo)
2567 {
2568 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
2569
2570 bo_gem->reusable = false;
2571 return 0;
2572 }
2573
2574 int
2575 drm_bacon_bo_is_reusable(drm_bacon_bo *bo)
2576 {
2577 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
2578
2579 return bo_gem->reusable;
2580 }
2581
2582 static int
2583 _drm_bacon_gem_bo_references(drm_bacon_bo *bo, drm_bacon_bo *target_bo)
2584 {
2585 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
2586 int i;
2587
2588 for (i = 0; i < bo_gem->reloc_count; i++) {
2589 if (bo_gem->reloc_target_info[i].bo == target_bo)
2590 return 1;
2591 if (bo == bo_gem->reloc_target_info[i].bo)
2592 continue;
2593 if (_drm_bacon_gem_bo_references(bo_gem->reloc_target_info[i].bo,
2594 target_bo))
2595 return 1;
2596 }
2597
2598 	for (i = 0; i < bo_gem->softpin_target_count; i++) {
2599 if (bo_gem->softpin_target[i] == target_bo)
2600 return 1;
2601 if (_drm_bacon_gem_bo_references(bo_gem->softpin_target[i], target_bo))
2602 return 1;
2603 }
2604
2605 return 0;
2606 }
2607
2608 /** Return true if target_bo is referenced by bo's relocation tree. */
2609 int
2610 drm_bacon_bo_references(drm_bacon_bo *bo, drm_bacon_bo *target_bo)
2611 {
2612 drm_bacon_bo_gem *target_bo_gem = (drm_bacon_bo_gem *) target_bo;
2613
2614 if (bo == NULL || target_bo == NULL)
2615 return 0;
2616 if (target_bo_gem->used_as_reloc_target)
2617 return _drm_bacon_gem_bo_references(bo, target_bo);
2618 return 0;
2619 }
2620
2621 static void
2622 add_bucket(drm_bacon_bufmgr_gem *bufmgr_gem, int size)
2623 {
2624 unsigned int i = bufmgr_gem->num_buckets;
2625
2626 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
2627
2628 list_inithead(&bufmgr_gem->cache_bucket[i].head);
2629 bufmgr_gem->cache_bucket[i].size = size;
2630 bufmgr_gem->num_buckets++;
2631 }
2632
2633 static void
2634 init_cache_buckets(drm_bacon_bufmgr_gem *bufmgr_gem)
2635 {
2636 unsigned long size, cache_max_size = 64 * 1024 * 1024;
2637
2638 	/* OK, so power-of-two buckets were too wasteful of memory.
2639 * Give 3 other sizes between each power of two, to hopefully
2640 * cover things accurately enough. (The alternative is
2641 * probably to just go for exact matching of sizes, and assume
2642 * that for things like composited window resize the tiled
2643 * width/height alignment and rounding of sizes to pages will
2644 * get us useful cache hit rates anyway)
2645 */
2646 add_bucket(bufmgr_gem, 4096);
2647 add_bucket(bufmgr_gem, 4096 * 2);
2648 add_bucket(bufmgr_gem, 4096 * 3);
2649
2650 /* Initialize the linked lists for BO reuse cache. */
2651 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
2652 add_bucket(bufmgr_gem, size);
2653
2654 add_bucket(bufmgr_gem, size + size * 1 / 4);
2655 add_bucket(bufmgr_gem, size + size * 2 / 4);
2656 add_bucket(bufmgr_gem, size + size * 3 / 4);
2657 }
2658 }
2659
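/* For reference (editor's addition), the scheme above produces bucket sizes
* of 4, 8 and 12 KiB, then 16/20/24/28 KiB, 32/40/48/56 KiB, 64/80/96/112 KiB
* and so on: each power of two from 16 KiB upward plus three evenly spaced
* intermediate sizes, with the doubling loop running up to the 64 MiB
* cache_max_size.
*/
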
2660 void
2661 drm_bacon_bufmgr_gem_set_vma_cache_size(drm_bacon_bufmgr *bufmgr, int limit)
2662 {
2663 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bufmgr;
2664
2665 bufmgr_gem->vma_max = limit;
2666
2667 drm_bacon_gem_bo_purge_vma_cache(bufmgr_gem);
2668 }
2669
2670 static int
2671 parse_devid_override(const char *devid_override)
2672 {
2673 static const struct {
2674 const char *name;
2675 int pci_id;
2676 } name_map[] = {
2677 { "brw", PCI_CHIP_I965_GM },
2678 { "g4x", PCI_CHIP_GM45_GM },
2679 { "ilk", PCI_CHIP_ILD_G },
2680 { "snb", PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS },
2681 { "ivb", PCI_CHIP_IVYBRIDGE_S_GT2 },
2682 { "hsw", PCI_CHIP_HASWELL_CRW_E_GT3 },
2683 { "byt", PCI_CHIP_VALLEYVIEW_3 },
2684 { "bdw", 0x1620 | BDW_ULX },
2685 { "skl", PCI_CHIP_SKYLAKE_DT_GT2 },
2686 { "kbl", PCI_CHIP_KABYLAKE_DT_GT2 },
2687 };
2688 unsigned int i;
2689
2690 for (i = 0; i < ARRAY_SIZE(name_map); i++) {
2691 if (!strcmp(name_map[i].name, devid_override))
2692 return name_map[i].pci_id;
2693 }
2694
2695 return strtod(devid_override, NULL);
2696 }
2697
2698 /**
2699 * Get the PCI ID for the device. This can be overridden by setting the
2700 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
2701 */
2702 static int
2703 get_pci_device_id(drm_bacon_bufmgr_gem *bufmgr_gem)
2704 {
2705 char *devid_override;
2706 int devid = 0;
2707 int ret;
2708 drm_i915_getparam_t gp;
2709
2710 if (geteuid() == getuid()) {
2711 devid_override = getenv("INTEL_DEVID_OVERRIDE");
2712 if (devid_override) {
2713 bufmgr_gem->no_exec = true;
2714 return parse_devid_override(devid_override);
2715 }
2716 }
2717
2718 memclear(gp);
2719 gp.param = I915_PARAM_CHIPSET_ID;
2720 gp.value = &devid;
2721 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
2722 if (ret) {
2723 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
2724 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
2725 }
2726 return devid;
2727 }
2728
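/* Illustrative usage (editor's addition): the override is handy for driver
* bring-up or offline shader compilation.  Note that it also sets no_exec,
* so batches are built but never actually submitted.  The application name
* is hypothetical; a raw PCI ID is also accepted by the fallback parser.
*
*      $ INTEL_DEVID_OVERRIDE=skl ./my_gl_app
*/
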
2729 int
2730 drm_bacon_bufmgr_gem_get_devid(drm_bacon_bufmgr *bufmgr)
2731 {
2732 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bufmgr;
2733
2734 return bufmgr_gem->pci_device;
2735 }
2736
2737 drm_bacon_context *
2738 drm_bacon_gem_context_create(drm_bacon_bufmgr *bufmgr)
2739 {
2740 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bufmgr;
2741 struct drm_i915_gem_context_create create;
2742 drm_bacon_context *context = NULL;
2743 int ret;
2744
2745 context = calloc(1, sizeof(*context));
2746 if (!context)
2747 return NULL;
2748
2749 memclear(create);
2750 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
2751 if (ret != 0) {
2752 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
2753 strerror(errno));
2754 free(context);
2755 return NULL;
2756 }
2757
2758 context->ctx_id = create.ctx_id;
2759 context->bufmgr = bufmgr;
2760
2761 return context;
2762 }
2763
2764 int
2765 drm_bacon_gem_context_get_id(drm_bacon_context *ctx, uint32_t *ctx_id)
2766 {
2767 if (ctx == NULL)
2768 return -EINVAL;
2769
2770 *ctx_id = ctx->ctx_id;
2771
2772 return 0;
2773 }
2774
2775 void
2776 drm_bacon_gem_context_destroy(drm_bacon_context *ctx)
2777 {
2778 drm_bacon_bufmgr_gem *bufmgr_gem;
2779 struct drm_i915_gem_context_destroy destroy;
2780 int ret;
2781
2782 if (ctx == NULL)
2783 return;
2784
2785 memclear(destroy);
2786
2787 bufmgr_gem = (drm_bacon_bufmgr_gem *)ctx->bufmgr;
2788 destroy.ctx_id = ctx->ctx_id;
2789 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
2790 &destroy);
2791 if (ret != 0)
2792 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
2793 strerror(errno));
2794
2795 free(ctx);
2796 }
2797
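/* Illustrative usage sketch (editor's addition): a hardware context keeps GPU
* state isolated between clients.  Error handling is omitted; batch_bo and
* "used" are hypothetical caller-side values.
*
*      drm_bacon_context *ctx = drm_bacon_gem_context_create(bufmgr);
*      drm_bacon_gem_bo_context_exec(batch_bo, ctx, used, I915_EXEC_RENDER);
*      drm_bacon_gem_context_destroy(ctx);
*/
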
2798 int
2799 drm_bacon_get_reset_stats(drm_bacon_context *ctx,
2800 uint32_t *reset_count,
2801 uint32_t *active,
2802 uint32_t *pending)
2803 {
2804 drm_bacon_bufmgr_gem *bufmgr_gem;
2805 struct drm_i915_reset_stats stats;
2806 int ret;
2807
2808 if (ctx == NULL)
2809 return -EINVAL;
2810
2811 memclear(stats);
2812
2813 bufmgr_gem = (drm_bacon_bufmgr_gem *)ctx->bufmgr;
2814 stats.ctx_id = ctx->ctx_id;
2815 ret = drmIoctl(bufmgr_gem->fd,
2816 DRM_IOCTL_I915_GET_RESET_STATS,
2817 &stats);
2818 if (ret == 0) {
2819 if (reset_count != NULL)
2820 *reset_count = stats.reset_count;
2821
2822 if (active != NULL)
2823 *active = stats.batch_active;
2824
2825 if (pending != NULL)
2826 *pending = stats.batch_pending;
2827 }
2828
2829 return ret;
2830 }
2831
2832 int
2833 drm_bacon_reg_read(drm_bacon_bufmgr *bufmgr,
2834 uint32_t offset,
2835 uint64_t *result)
2836 {
2837 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bufmgr;
2838 struct drm_i915_reg_read reg_read;
2839 int ret;
2840
2841 memclear(reg_read);
2842 reg_read.offset = offset;
2843
2844 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
2845
2846 *result = reg_read.val;
2847 return ret;
2848 }
2849
2850 static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
2851 static struct list_head bufmgr_list = { &bufmgr_list, &bufmgr_list };
2852
2853 static drm_bacon_bufmgr_gem *
2854 drm_bacon_bufmgr_gem_find(int fd)
2855 {
2856 list_for_each_entry(drm_bacon_bufmgr_gem,
2857 bufmgr_gem, &bufmgr_list, managers) {
2858 if (bufmgr_gem->fd == fd) {
2859 p_atomic_inc(&bufmgr_gem->refcount);
2860 return bufmgr_gem;
2861 }
2862 }
2863
2864 return NULL;
2865 }
2866
2867 void
2868 drm_bacon_bufmgr_destroy(drm_bacon_bufmgr *bufmgr)
2869 {
2870 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *)bufmgr;
2871
2872 if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
2873 pthread_mutex_lock(&bufmgr_list_mutex);
2874
2875 if (p_atomic_dec_zero(&bufmgr_gem->refcount)) {
2876 list_del(&bufmgr_gem->managers);
2877 drm_bacon_bufmgr_gem_destroy(bufmgr);
2878 }
2879
2880 pthread_mutex_unlock(&bufmgr_list_mutex);
2881 }
2882 }
2883
2884 void *drm_bacon_gem_bo_map__gtt(drm_bacon_bo *bo)
2885 {
2886 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
2887 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
2888
2889 if (bo_gem->gtt_virtual)
2890 return bo_gem->gtt_virtual;
2891
2892 if (bo_gem->is_userptr)
2893 return NULL;
2894
2895 pthread_mutex_lock(&bufmgr_gem->lock);
2896 if (bo_gem->gtt_virtual == NULL) {
2897 struct drm_i915_gem_mmap_gtt mmap_arg;
2898 void *ptr;
2899
2900 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
2901 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
2902
2903 if (bo_gem->map_count++ == 0)
2904 drm_bacon_gem_bo_open_vma(bufmgr_gem, bo_gem);
2905
2906 memclear(mmap_arg);
2907 mmap_arg.handle = bo_gem->gem_handle;
2908
2909 /* Get the fake offset back... */
2910 ptr = MAP_FAILED;
2911 if (drmIoctl(bufmgr_gem->fd,
2912 DRM_IOCTL_I915_GEM_MMAP_GTT,
2913 &mmap_arg) == 0) {
2914 /* and mmap it */
2915 ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
2916 MAP_SHARED, bufmgr_gem->fd,
2917 mmap_arg.offset);
2918 }
2919 if (ptr == MAP_FAILED) {
2920 if (--bo_gem->map_count == 0)
2921 drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
2922 ptr = NULL;
2923 }
2924
2925 bo_gem->gtt_virtual = ptr;
2926 }
2927 pthread_mutex_unlock(&bufmgr_gem->lock);
2928
2929 return bo_gem->gtt_virtual;
2930 }
2931
2932 void *drm_bacon_gem_bo_map__cpu(drm_bacon_bo *bo)
2933 {
2934 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
2935 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
2936
2937 if (bo_gem->mem_virtual)
2938 return bo_gem->mem_virtual;
2939
2940 if (bo_gem->is_userptr) {
2941 /* Return the same user ptr */
2942 return bo_gem->user_virtual;
2943 }
2944
2945 pthread_mutex_lock(&bufmgr_gem->lock);
2946 if (!bo_gem->mem_virtual) {
2947 struct drm_i915_gem_mmap mmap_arg;
2948
2949 if (bo_gem->map_count++ == 0)
2950 drm_bacon_gem_bo_open_vma(bufmgr_gem, bo_gem);
2951
2952 DBG("bo_map: %d (%s), map_count=%d\n",
2953 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
2954
2955 memclear(mmap_arg);
2956 mmap_arg.handle = bo_gem->gem_handle;
2957 mmap_arg.size = bo->size;
2958 if (drmIoctl(bufmgr_gem->fd,
2959 DRM_IOCTL_I915_GEM_MMAP,
2960 &mmap_arg)) {
2961 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
2962 __FILE__, __LINE__, bo_gem->gem_handle,
2963 bo_gem->name, strerror(errno));
2964 if (--bo_gem->map_count == 0)
2965 drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
2966 } else {
2967 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
2968 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
2969 }
2970 }
2971 pthread_mutex_unlock(&bufmgr_gem->lock);
2972
2973 return bo_gem->mem_virtual;
2974 }
2975
2976 void *drm_bacon_gem_bo_map__wc(drm_bacon_bo *bo)
2977 {
2978 drm_bacon_bufmgr_gem *bufmgr_gem = (drm_bacon_bufmgr_gem *) bo->bufmgr;
2979 drm_bacon_bo_gem *bo_gem = (drm_bacon_bo_gem *) bo;
2980
2981 if (bo_gem->wc_virtual)
2982 return bo_gem->wc_virtual;
2983
2984 if (bo_gem->is_userptr)
2985 return NULL;
2986
2987 pthread_mutex_lock(&bufmgr_gem->lock);
2988 if (!bo_gem->wc_virtual) {
2989 struct drm_i915_gem_mmap mmap_arg;
2990
2991 if (bo_gem->map_count++ == 0)
2992 drm_bacon_gem_bo_open_vma(bufmgr_gem, bo_gem);
2993
2994 DBG("bo_map: %d (%s), map_count=%d\n",
2995 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
2996
2997 memclear(mmap_arg);
2998 mmap_arg.handle = bo_gem->gem_handle;
2999 mmap_arg.size = bo->size;
3000 mmap_arg.flags = I915_MMAP_WC;
3001 if (drmIoctl(bufmgr_gem->fd,
3002 DRM_IOCTL_I915_GEM_MMAP,
3003 &mmap_arg)) {
3004 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
3005 __FILE__, __LINE__, bo_gem->gem_handle,
3006 bo_gem->name, strerror(errno));
3007 if (--bo_gem->map_count == 0)
3008 drm_bacon_gem_bo_close_vma(bufmgr_gem, bo_gem);
3009 } else {
3010 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
3011 bo_gem->wc_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
3012 }
3013 }
3014 pthread_mutex_unlock(&bufmgr_gem->lock);
3015
3016 return bo_gem->wc_virtual;
3017 }
3018
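/* Summary of the map variants above (editor's addition): __cpu returns a
* cached CPU mapping (fast and coherent on LLC platforms), __wc a
* write-combined mapping suited to streaming writes, and __gtt a mapping
* through the aperture that detiles on access.  A minimal sketch, assuming
* "data" and "size" come from the caller:
*
*      void *ptr = drm_bacon_gem_bo_map__wc(bo);
*      if (ptr)
*              memcpy(ptr, data, size);
*/
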
3019 /**
3020 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
3021  * and manage buffer objects.
3022 *
3023 * \param fd File descriptor of the opened DRM device.
3024 */
3025 drm_bacon_bufmgr *
3026 drm_bacon_bufmgr_gem_init(int fd, int batch_size)
3027 {
3028 drm_bacon_bufmgr_gem *bufmgr_gem;
3029 struct drm_i915_gem_get_aperture aperture;
3030 drm_i915_getparam_t gp;
3031 int ret, tmp;
3032
3033 pthread_mutex_lock(&bufmgr_list_mutex);
3034
3035 bufmgr_gem = drm_bacon_bufmgr_gem_find(fd);
3036 if (bufmgr_gem)
3037 goto exit;
3038
3039 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
3040 if (bufmgr_gem == NULL)
3041 goto exit;
3042
3043 bufmgr_gem->fd = fd;
3044 p_atomic_set(&bufmgr_gem->refcount, 1);
3045
3046 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
3047 free(bufmgr_gem);
3048 bufmgr_gem = NULL;
3049 goto exit;
3050 }
3051
3052 memclear(aperture);
3053 ret = drmIoctl(bufmgr_gem->fd,
3054 DRM_IOCTL_I915_GEM_GET_APERTURE,
3055 &aperture);
3056
3057 if (ret == 0)
3058 bufmgr_gem->gtt_size = aperture.aper_available_size;
3059 else {
3060 		fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
3061 strerror(errno));
3062 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
3063 fprintf(stderr, "Assuming %dkB available aperture size.\n"
3064 "May lead to reduced performance or incorrect "
3065 "rendering.\n",
3066 (int)bufmgr_gem->gtt_size / 1024);
3067 }
3068
3069 bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
3070
3071 if (IS_GEN4(bufmgr_gem->pci_device))
3072 bufmgr_gem->gen = 4;
3073 else if (IS_GEN5(bufmgr_gem->pci_device))
3074 bufmgr_gem->gen = 5;
3075 else if (IS_GEN6(bufmgr_gem->pci_device))
3076 bufmgr_gem->gen = 6;
3077 else if (IS_GEN7(bufmgr_gem->pci_device))
3078 bufmgr_gem->gen = 7;
3079 else if (IS_GEN8(bufmgr_gem->pci_device))
3080 bufmgr_gem->gen = 8;
3081 else if (IS_GEN9(bufmgr_gem->pci_device))
3082 bufmgr_gem->gen = 9;
3083 else {
3084 free(bufmgr_gem);
3085 bufmgr_gem = NULL;
3086 goto exit;
3087 }
3088
3089 memclear(gp);
3090 gp.value = &tmp;
3091
3092 gp.param = I915_PARAM_HAS_BSD;
3093 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3094 bufmgr_gem->has_bsd = ret == 0;
3095
3096 gp.param = I915_PARAM_HAS_BLT;
3097 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3098 bufmgr_gem->has_blt = ret == 0;
3099
3100 gp.param = I915_PARAM_HAS_EXEC_ASYNC;
3101 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3102 bufmgr_gem->has_exec_async = ret == 0;
3103
3104 gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
3105 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3106 bufmgr_gem->has_wait_timeout = ret == 0;
3107
3108 gp.param = I915_PARAM_HAS_LLC;
3109 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3110 if (ret != 0) {
3111 		/* Kernel does not support the HAS_LLC query; fall back to GPU
3112 		 * generation detection and assume that we have LLC on GEN6/7.
3113 */
3114 bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) |
3115 IS_GEN7(bufmgr_gem->pci_device));
3116 } else
3117 bufmgr_gem->has_llc = *gp.value;
3118
3119 gp.param = I915_PARAM_HAS_VEBOX;
3120 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3121 	bufmgr_gem->has_vebox = (ret == 0) && (*gp.value > 0);
3122
3123 	/* Let's go with one relocation for every 2 dwords (but round down a bit
3124 * since a power of two will mean an extra page allocation for the reloc
3125 * buffer).
3126 *
3127 * Every 4 was too few for the blender benchmark.
3128 */
3129 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
3130
3131 init_cache_buckets(bufmgr_gem);
3132
3133 list_inithead(&bufmgr_gem->vma_cache);
3134 bufmgr_gem->vma_max = -1; /* unlimited by default */
3135
3136 list_add(&bufmgr_gem->managers, &bufmgr_list);
3137
3138 exit:
3139 pthread_mutex_unlock(&bufmgr_list_mutex);
3140
3141 return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
3142 }
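/* Illustrative bring-up sketch (editor's addition): typical initialization by
* a driver screen.  BATCH_SZ is a hypothetical batch size (e.g. 32768 bytes),
* and enabling reuse turns on the BO cache described earlier.
*
*      drm_bacon_bufmgr *bufmgr = drm_bacon_bufmgr_gem_init(fd, BATCH_SZ);
*      if (bufmgr == NULL)
*              return false;
*      drm_bacon_bufmgr_gem_enable_reuse(bufmgr);
*/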