466dc1ec32322ef125560f077fa8eca896e9257a
[mesa.git] / src / mesa / drivers / dri / i915 / intel_bufmgr_ttm.c
1 /**************************************************************************
2 *
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
26 * of the Software.
27 *
28 *
29 **************************************************************************/
30 /*
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
35 */
36
37 #include <xf86drm.h>
38 #include <stdlib.h>
39 #include <unistd.h>
40 #include "glthread.h"
41 #include "errno.h"
42 #include "mtypes.h"
43 #include "dri_bufmgr.h"
44 #include "string.h"
45 #include "imports.h"
46
47 #include "i915_drm.h"
48
49 #include "intel_bufmgr_ttm.h"
50
#define BUFMGR_DEBUG 0

/* Maximum relocation entries per relocation buffer; must agree with the
 * sizing of RELOC_BUF_SIZE below. */
#define MAX_RELOCS_PER_LIST 511

/* A single relocation request, as gathered by dri_ttm_emit_reloc(). */
struct intel_reloc_info
{
    GLuint type;   /* relocation type, e.g. I915_RELOC_TYPE_0 */
    GLuint reloc;  /* byte offset of the relocation within the batch buffer */
    GLuint delta; /* not needed? */
    GLuint index;  /* validate-list index of the target buffer */
    drm_handle_t handle; /* drm handle of the buffer the reloc is emitted from */
};

/* One entry on the validate list: a buffer plus the accumulated flags/mask
 * it is to be validated with, and the kernel op-arg used at submit time. */
struct intel_bo_node
{
    drmMMListHead head;
    drmBO *buf;
    struct drm_i915_op_arg bo_arg;  /* filled in by intel_setup_validate_list() */
    unsigned long arg0;  /* accumulated validate flags */
    unsigned long arg1;  /* accumulated validate mask */
    void (*destroy)(void *);  /* called on priv when the list is freed */
    void *priv;               /* typically the wrapping dri_bo */
};

/* A relocation list for a single relocation type, backed by a mapped drmBO.
 * relocs[0] packs (type << 16 | count); relocs[1] is the handle of the next
 * relocation buffer (0 = none). */
struct intel_bo_reloc_list
{
    drmMMListHead head;
    drmBO buf;
    uint32_t *relocs;  /* CPU mapping of buf */
};

/* Per-buffer relocation state: the embedded type_list plus any additional
 * per-type lists chained off type_list.head. */
struct intel_bo_reloc_node
{
    drmMMListHead head;
    drm_handle_t handle;  /* buffer the relocations are emitted from */
    uint32_t nr_reloc_types;
    struct intel_bo_reloc_list type_list;
};

/* A node pool: "list" holds in-use nodes, "free" holds recycled ones. */
struct intel_bo_list {
    unsigned numTarget;   /* desired size of the free pool */
    unsigned numCurrent;  /* nodes currently allocated (in-use + free) */
    unsigned numOnList;   /* nodes on the in-use list */
    drmMMListHead list;
    drmMMListHead free;
    void (*destroy)(void *node);  /* node destructor */
};
97
typedef struct _dri_bufmgr_ttm {
    dri_bufmgr bufmgr;  /* base struct; code casts dri_bufmgr* to this type */

    int fd;  /* DRM device file descriptor */
    _glthread_Mutex mutex;
    unsigned int fence_type;        /* driver fence type, no-flush case */
    unsigned int fence_type_flush;  /* driver fence type when flushing */

    /** ttm relocation list */
    struct intel_bo_list list;        /* buffers to validate at submit */
    struct intel_bo_list reloc_list;  /* per-buffer relocation nodes */

} dri_bufmgr_ttm;

typedef struct _dri_bo_ttm {
    dri_bo bo;  /* base struct; code casts dri_bo* to this type */

    int refcount; /* Protected by bufmgr->mutex */
    drmBO drm_bo;
    const char *name;  /* debug label, borrowed from the caller */
    /**
     * Note whether we are the owner of the buffer, to determine if we must
     * drmBODestroy or drmBOUnreference to unreference the buffer.
     */
    GLboolean owner;
} dri_bo_ttm;

typedef struct _dri_fence_ttm
{
    dri_fence fence;  /* base struct; code casts dri_fence* to this type */

    int refcount; /* Protected by bufmgr->mutex */
    const char *name;  /* debug label, borrowed from the caller */
    drmFence drm_fence;
} dri_fence_ttm;
133
134
135 static int intel_adjust_list_nodes(struct intel_bo_list *list)
136 {
137 struct intel_bo_node *node;
138 drmMMListHead *l;
139 int ret = 0;
140
141 while(list->numCurrent < list->numTarget) {
142 node = (struct intel_bo_node *) drmMalloc(sizeof(*node));
143 if (!node) {
144 ret = -ENOMEM;
145 break;
146 }
147 list->numCurrent++;
148 DRMLISTADD(&node->head, &list->free);
149 }
150
151 while(list->numCurrent > list->numTarget) {
152 l = list->free.next;
153 if (l == &list->free)
154 break;
155 DRMLISTDEL(l);
156 node = DRMLISTENTRY(struct intel_bo_node, l, head);
157 list->destroy(node);
158 list->numCurrent--;
159 }
160 return ret;
161 }
162
#if 0
/* Currently compiled out: destroys every node on both the in-use and free
 * lists, zeroing the pool entirely (unlike intel_bo_reset_list(), which
 * recycles the in-use nodes). */
void intel_bo_free_list(struct intel_bo_list *list)
{
    struct intel_bo_node *node;
    drmMMListHead *l;

    /* Drain the in-use list, re-reading list.next after each unlink. */
    l = list->list.next;
    while(l != &list->list) {
        DRMLISTDEL(l);
        node = DRMLISTENTRY(struct intel_bo_node, l, head);
        list->destroy(node);
        l = list->list.next;
        list->numCurrent--;
        list->numOnList--;
    }

    /* Drain the free pool the same way. */
    l = list->free.next;
    while(l != &list->free) {
        DRMLISTDEL(l);
        node = DRMLISTENTRY(struct intel_bo_node, l, head);
        list->destroy(node);
        l = list->free.next;
        list->numCurrent--;
    }
}
#endif
189
190 static int intel_bo_reset_list(struct intel_bo_list *list)
191 {
192 drmMMListHead *l;
193 int ret;
194
195 ret = intel_adjust_list_nodes(list);
196 if (ret)
197 return ret;
198
199 l = list->list.next;
200 while (l != &list->list) {
201 DRMLISTDEL(l);
202 DRMLISTADD(l, &list->free);
203 list->numOnList--;
204 l = list->list.next;
205 }
206 return intel_adjust_list_nodes(list);
207 }
208
/* Default node destructor: pool nodes are plain heap allocations, so a
 * simple free() suffices. */
static void generic_destroy(void *node)
{
    free(node);
}
213
214 static int intel_create_bo_list(int numTarget, struct intel_bo_list *list, void (*destroy)(void *))
215 {
216 DRMINITLISTHEAD(&list->list);
217 DRMINITLISTHEAD(&list->free);
218 list->numTarget = numTarget;
219 list->numCurrent = 0;
220 list->numOnList = 0;
221 if (list->destroy)
222 list->destroy = destroy;
223 else
224 list->destroy = generic_destroy;
225 return intel_adjust_list_nodes(list);
226 }
227
228
/* Build the kernel's chained validate-request list from the buffers on
 * "list".
 *
 * Each node's embedded drm_i915_op_arg is filled with a drm_bo_validate
 * request and linked to the next entry through the 64-bit "next" field.
 * If the buffer has relocations, the handle of its relocation buffer is
 * placed in reloc_handle.
 *
 * Returns the head of the chain, or NULL when the list is empty.
 */
static struct drm_i915_op_arg *
intel_setup_validate_list(int fd, struct intel_bo_list *list, struct intel_bo_list *reloc_list)
{
    struct intel_bo_node *node;
    struct intel_bo_reloc_node *rl_node;
    drmMMListHead *l, *rl;
    struct drm_i915_op_arg *arg, *first;
    struct drm_bo_op_req *req;
    uint64_t *prevNext = NULL;

    first = NULL;

    for (l = list->list.next; l != &list->list; l = l->next) {
        node = DRMLISTENTRY(struct intel_bo_node, l, head);

        arg = &node->bo_arg;
        req = &arg->d.req;

        if (!first)
            first = arg;

        /* Patch the previous arg to point at this one, forming the chain
         * the kernel walks. */
        if (prevNext)
            *prevNext = (unsigned long) arg;

        memset(arg, 0, sizeof(*arg));
        prevNext = &arg->next;
        req->bo_req.handle = node->buf->handle;
        req->op = drm_bo_validate;
        req->bo_req.flags = node->arg0;  /* accumulated validate flags */
        req->bo_req.hint = 0;
        req->bo_req.mask = node->arg1;   /* accumulated validate mask */
        req->bo_req.fence_class = 0; /* Backwards compat. */
        arg->reloc_handle = 0;

        /* Linear search for this buffer's relocation node, if any. */
        for (rl = reloc_list->list.next; rl != &reloc_list->list; rl = rl->next) {
            rl_node = DRMLISTENTRY(struct intel_bo_reloc_node, rl, head);

            if (rl_node->handle == node->buf->handle) {
                arg->reloc_handle = rl_node->type_list.buf.handle;
            }
        }
    }

    if (!first)
        return 0;

    return first;
}
277
278 static void intel_free_validate_list(int fd, struct intel_bo_list *list)
279 {
280 struct intel_bo_node *node;
281 drmMMListHead *l;
282
283 for (l = list->list.next; l != &list->list; l = l->next) {
284 node = DRMLISTENTRY(struct intel_bo_node, l, head);
285
286 if (node->destroy)
287 (*node->destroy)(node->priv);
288
289 }
290 }
291
/* Tear down all relocation nodes after a submit: unmap and destroy each
 * node's relocation buffer object and free the node itself.
 *
 * A saved-next pointer ("tmp") allows unlinking nodes while iterating.
 */
static void intel_free_reloc_list(int fd, struct intel_bo_list *reloc_list)
{
    struct intel_bo_reloc_node *reloc_node;
    drmMMListHead *rl, *tmp;

    for (rl = reloc_list->list.next, tmp = rl->next; rl != &reloc_list->list; rl = tmp, tmp = rl->next) {
        reloc_node = DRMLISTENTRY(struct intel_bo_reloc_node, rl, head);

        DRMLISTDEL(rl);

        if (reloc_node->nr_reloc_types > 1) {

            /* TODO: additional per-type lists (beyond the embedded one)
             * are not freed here yet -- they leak. */
        }

        /* Only the embedded type_list buffer is released. */
        drmBOUnmap(fd, &reloc_node->type_list.buf);
        drmBODestroy(fd, &reloc_node->type_list.buf);
        free(reloc_node);
    }
}
312
/* Add "buf" to the validate list, or merge flags/mask into its existing
 * entry.
 *
 * *itemLoc receives the buffer's index on the list (used as the
 * relocation target index).  Returns 1 if a new entry was created (the
 * caller then takes a dri_bo reference), 0 if an existing entry was
 * merged, -ENOMEM on allocation failure, or -EINVAL when the requested
 * flags conflict with an earlier request for the same buffer.
 */
static int intel_add_validate_buffer(struct intel_bo_list *list, dri_bo *buf, unsigned flags,
				     unsigned mask, int *itemLoc, void (*destroy_cb)(void *))
{
    struct intel_bo_node *node, *cur;
    drmMMListHead *l;
    int count = 0;
    int ret = 0;
    drmBO *buf_bo = &((dri_bo_ttm *)buf)->drm_bo;
    cur = NULL;

    /* Linear scan for an existing entry; "count" ends up as its index. */
    for (l = list->list.next; l != &list->list; l = l->next) {
        node = DRMLISTENTRY(struct intel_bo_node, l, head);
        if (node->buf->handle == buf_bo->handle) {
            cur = node;
            break;
        }
        count++;
    }

    if (!cur) {
        /* First time this buffer is seen: create a fresh entry. */
        cur = drmMalloc(sizeof(*cur));
        if (!cur) {
            return -ENOMEM;
        }
        cur->buf = buf_bo;
        cur->priv = buf;
        cur->arg0 = flags;
        cur->arg1 = mask;
        cur->destroy = destroy_cb;
        ret = 1;  /* tell the caller a new entry was created */

        DRMLISTADDTAIL(&cur->head, &list->list);

    } else {
        /* Merge with the existing entry: the memory-type bits requested by
         * both must still intersect... */
        unsigned memMask = (cur->arg1 | mask) & DRM_BO_MASK_MEM;
        unsigned memFlags = cur->arg0 & flags & memMask;

        if (!memFlags) {
            return -EINVAL;
        }
        /* ...and any non-memory flag constrained by both masks must agree
         * between the old and new requests. */
        if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
            return -EINVAL;
        }
        cur->arg1 |= mask;
        cur->arg0 = memFlags | ((cur->arg0 | flags) &
                                cur->arg1 & ~DRM_BO_MASK_MEM);
    }
    *itemLoc = count;
    return ret;
}
363
364 #define RELOC0_STRIDE 4
365 #define RELOC0_HEADER 4
366 #define RELOC_BUF_SIZE ((RELOC0_HEADER + MAX_RELOCS_PER_LIST * RELOC0_STRIDE) * sizeof(uint32_t))
367
368 static int intel_create_new_reloc_type_list(int fd, struct intel_bo_reloc_list *cur_type)
369 {
370 int ret;
371
372 /* should allocate a drmBO here */
373 ret = drmBOCreate(fd, 0, RELOC_BUF_SIZE, 0,
374 NULL, drm_bo_type_dc,
375 DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_CACHED,
376 0, &cur_type->buf);
377 if (ret)
378 return ret;
379
380 ret = drmBOMap(fd, &cur_type->buf, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0, (void **)&cur_type->relocs);
381 if (ret)
382 return ret;
383 return 0;
384 }
385
386
387 static int intel_add_validate_reloc(int fd, struct intel_bo_list *reloc_list, struct intel_reloc_info *reloc_info)
388 {
389 struct intel_bo_reloc_node *rl_node, *cur;
390 drmMMListHead *rl, *l;
391 int ret = 0;
392 uint32_t *reloc_start;
393 int num_relocs;
394 struct intel_bo_reloc_list *cur_type;
395
396 cur = NULL;
397
398 for (rl = reloc_list->list.next; rl != &reloc_list->list; rl = rl->next) {
399 rl_node = DRMLISTENTRY(struct intel_bo_reloc_node, rl, head);
400 if (rl_node->handle == reloc_info->handle) {
401 cur = rl_node;
402 break;
403 }
404 }
405
406 if (!cur) {
407
408 cur = malloc(sizeof(*cur));
409 if (!cur)
410 return -ENOMEM;
411
412 cur->nr_reloc_types = 1;
413 cur->handle = reloc_info->handle;
414 cur_type = &cur->type_list;
415
416 DRMINITLISTHEAD(&cur->type_list.head);
417 ret = intel_create_new_reloc_type_list(fd, cur_type);
418 if (ret) {
419 return -1;
420 }
421 DRMLISTADDTAIL(&cur->head, &reloc_list->list);
422
423 cur_type->relocs[0] = 0 | (reloc_info->type << 16);
424 cur_type->relocs[1] = 0; // next reloc buffer handle is 0
425
426 } else {
427 int found = 0;
428 if ((cur->type_list.relocs[0] >> 16) == reloc_info->type) {
429 cur_type = &cur->type_list;
430 found = 1;
431 } else {
432 for (l = cur->type_list.head.next; l != &cur->type_list.head; l = l->next) {
433 cur_type = DRMLISTENTRY(struct intel_bo_reloc_list, l, head);
434 if (((cur_type->relocs[0] >> 16) & 0xffff) == reloc_info->type)
435 found = 1;
436 break;
437 }
438 }
439
440 /* didn't find the relocation type */
441 if (!found) {
442 cur_type = malloc(sizeof(*cur_type));
443 if (!cur_type) {
444 return -ENOMEM;
445 }
446
447 ret = intel_create_new_reloc_type_list(fd, cur_type);
448 DRMLISTADDTAIL(&cur_type->head, &cur->type_list.head);
449
450 cur_type->relocs[0] = (reloc_info->type << 16);
451 cur_type->relocs[1] = 0;
452
453 // cur->relocs[cur->nr_reloc_lists-1][1] = 0;// TODO ADD HANDLE HERE
454
455 cur->nr_reloc_types++;
456 }
457 }
458
459 reloc_start = cur_type->relocs;
460
461 num_relocs = (reloc_start[0] & 0xffff);
462
463 reloc_start[num_relocs*RELOC0_STRIDE + RELOC0_HEADER] = reloc_info->reloc;
464 reloc_start[num_relocs*RELOC0_STRIDE + RELOC0_HEADER+1] = reloc_info->delta;
465 reloc_start[num_relocs*RELOC0_STRIDE + RELOC0_HEADER+2] = reloc_info->index;
466 reloc_start[0]++;
467 if (((reloc_start[0] & 0xffff)) > (MAX_RELOCS_PER_LIST)) {
468 return -ENOMEM;
469 }
470 return 0;
471 }
472
473
#if 0
/* Compiled out: would report whether "fence" has signaled for "type".
 * NOTE(review): this references bufmgr_ttm (not in scope) and a
 * fence->mutex/fence->fence layout that doesn't match this file's types --
 * it would not build as-is.  Kept only for reference. */
int
driFenceSignaled(DriFenceObject * fence, unsigned type)
{
    int signaled;
    int ret;

    if (fence == NULL)
        return GL_TRUE;

    _glthread_LOCK_MUTEX(fence->mutex);
    ret = drmFenceSignaled(bufmgr_ttm->fd, &fence->fence, type, &signaled);
    _glthread_UNLOCK_MUTEX(fence->mutex);
    BM_CKFATAL(ret);
    return signaled;
}
#endif
491
492 static dri_bo *
493 dri_ttm_alloc(dri_bufmgr *bufmgr, const char *name,
494 unsigned long size, unsigned int alignment,
495 unsigned int location_mask)
496 {
497 dri_bufmgr_ttm *ttm_bufmgr;
498 dri_bo_ttm *ttm_buf;
499 unsigned int pageSize = getpagesize();
500 int ret;
501 unsigned int flags, hint;
502
503 ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr;
504
505 ttm_buf = malloc(sizeof(*ttm_buf));
506 if (!ttm_buf)
507 return NULL;
508
509 /* The mask argument doesn't do anything for us that we want other than
510 * determine which pool (TTM or local) the buffer is allocated into, so just
511 * pass all of the allocation class flags.
512 */
513 flags = location_mask | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
514 DRM_BO_FLAG_EXE;
515 /* No hints we want to use. */
516 hint = 0;
517
518 ret = drmBOCreate(ttm_bufmgr->fd, 0, size, alignment / pageSize,
519 NULL, drm_bo_type_dc,
520 flags, hint, &ttm_buf->drm_bo);
521 if (ret != 0) {
522 free(ttm_buf);
523 return NULL;
524 }
525 ttm_buf->bo.size = ttm_buf->drm_bo.size;
526 ttm_buf->bo.offset = ttm_buf->drm_bo.offset;
527 ttm_buf->bo.virtual = NULL;
528 ttm_buf->bo.bufmgr = bufmgr;
529 ttm_buf->name = name;
530 ttm_buf->refcount = 1;
531 ttm_buf->owner = GL_TRUE;
532
533 #if BUFMGR_DEBUG
534 fprintf(stderr, "bo_create: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
535 #endif
536
537 return &ttm_buf->bo;
538 }
539
540 /* Our TTM backend doesn't allow creation of static buffers, as that requires
541 * privelege for the non-fake case, and the lock in the fake case where we were
542 * working around the X Server not creating buffers and passing handles to us.
543 */
544 static dri_bo *
545 dri_ttm_alloc_static(dri_bufmgr *bufmgr, const char *name,
546 unsigned long offset, unsigned long size, void *virtual,
547 unsigned int location_mask)
548 {
549 return NULL;
550 }
551
552 /** Returns a dri_bo wrapping the given buffer object handle.
553 *
554 * This can be used when one application needs to pass a buffer object
555 * to another.
556 */
557 dri_bo *
558 intel_ttm_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name,
559 unsigned int handle)
560 {
561 dri_bufmgr_ttm *ttm_bufmgr;
562 dri_bo_ttm *ttm_buf;
563 int ret;
564
565 ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr;
566
567 ttm_buf = malloc(sizeof(*ttm_buf));
568 if (!ttm_buf)
569 return NULL;
570
571 ret = drmBOReference(ttm_bufmgr->fd, handle, &ttm_buf->drm_bo);
572 if (ret != 0) {
573 free(ttm_buf);
574 return NULL;
575 }
576 ttm_buf->bo.size = ttm_buf->drm_bo.size;
577 ttm_buf->bo.offset = ttm_buf->drm_bo.offset;
578 ttm_buf->bo.virtual = NULL;
579 ttm_buf->bo.bufmgr = bufmgr;
580 ttm_buf->name = name;
581 ttm_buf->refcount = 1;
582 ttm_buf->owner = GL_FALSE;
583
584 #if BUFMGR_DEBUG
585 fprintf(stderr, "bo_create_from_handle: %p %08x (%s)\n", &ttm_buf->bo, handle,
586 ttm_buf->name);
587 #endif
588
589 return &ttm_buf->bo;
590 }
591
592 static void
593 dri_ttm_bo_reference(dri_bo *buf)
594 {
595 dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
596 dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
597
598 _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
599 ttm_buf->refcount++;
600 _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
601 }
602
603 static void
604 dri_ttm_bo_unreference(dri_bo *buf)
605 {
606 dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
607 dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
608
609 if (!buf)
610 return;
611
612 _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
613 if (--ttm_buf->refcount == 0) {
614 int ret;
615
616 /* XXX Having to use drmBODestroy as the opposite of drmBOCreate instead
617 * of simply unreferencing is madness, and leads to behaviors we may not
618 * want (making the buffer unsharable).
619 */
620 if (ttm_buf->owner)
621 ret = drmBODestroy(bufmgr_ttm->fd, &ttm_buf->drm_bo);
622 else
623 ret = drmBOUnReference(bufmgr_ttm->fd, &ttm_buf->drm_bo);
624 if (ret != 0) {
625 fprintf(stderr, "drmBOUnReference failed (%s): %s\n", ttm_buf->name,
626 strerror(-ret));
627 }
628 #if BUFMGR_DEBUG
629 fprintf(stderr, "bo_unreference final: %p (%s)\n",
630 &ttm_buf->bo, ttm_buf->name);
631 #endif
632 _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
633 free(buf);
634 return;
635 }
636 _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
637 }
638
639 static int
640 dri_ttm_bo_map(dri_bo *buf, GLboolean write_enable)
641 {
642 dri_bufmgr_ttm *bufmgr_ttm;
643 dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
644 unsigned int flags;
645
646 bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
647
648 flags = DRM_BO_FLAG_READ;
649 if (write_enable)
650 flags |= DRM_BO_FLAG_WRITE;
651
652 assert(buf->virtual == NULL);
653
654 #if BUFMGR_DEBUG
655 fprintf(stderr, "bo_map: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
656 #endif
657
658 return drmBOMap(bufmgr_ttm->fd, &ttm_buf->drm_bo, flags, 0, &buf->virtual);
659 }
660
661 static int
662 dri_ttm_bo_unmap(dri_bo *buf)
663 {
664 dri_bufmgr_ttm *bufmgr_ttm;
665 dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
666
667 if (buf == NULL)
668 return 0;
669
670 bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
671
672 assert(buf->virtual != NULL);
673
674 buf->virtual = NULL;
675
676 #if BUFMGR_DEBUG
677 fprintf(stderr, "bo_unmap: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
678 #endif
679
680 return drmBOUnmap(bufmgr_ttm->fd, &ttm_buf->drm_bo);
681 }
682
/* Validate (place) "buf" in memory with the access given by "flags".
 *
 * The mask passed to the kernel is derived from flags: all memory-domain
 * bits plus whichever read/write/exe bits were requested.  buf->offset is
 * refreshed from the kernel's placement afterwards.  Returns the
 * drmBOValidate result (0 on success).
 */
static int
dri_ttm_validate(dri_bo *buf, unsigned int flags)
{
    dri_bufmgr_ttm *bufmgr_ttm;
    dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
    unsigned int mask;
    int err;

    /* XXX: Sanity-check whether we've already validated this one under
     * different flags. See drmAddValidateItem().
     */

    bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;

    /* Calculate the appropriate mask to pass to the DRM. There appears to be
     * be a direct relationship to flags, so it's unnecessary to have it passed
     * in as an argument.
     */
    mask = DRM_BO_MASK_MEM;
    mask |= flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE);

    err = drmBOValidate(bufmgr_ttm->fd, &ttm_buf->drm_bo, 0, flags, mask, 0);

    if (err == 0) {
        /* XXX: add to fence list for sanity checking */
    } else {
        fprintf(stderr, "failed to validate buffer (%s): %s\n",
                ttm_buf->name, strerror(-err));
    }

    /* Note: offset is updated even on failure, mirroring the kernel state. */
    buf->offset = ttm_buf->drm_bo.offset;

#if BUFMGR_DEBUG
    fprintf(stderr, "bo_validate: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
#endif

    return err;
}
721
/* Returns a dri_fence wrapping the kernel fence described by the given
 * drm_fence_arg_t.
 *
 * This can be used to wrap the fence handed back by the kernel after a
 * command submission.
 */
727 dri_fence *
728 intel_ttm_fence_create_from_arg(dri_bufmgr *bufmgr, const char *name,
729 drm_fence_arg_t *arg)
730 {
731 dri_bufmgr_ttm *ttm_bufmgr;
732 dri_fence_ttm *ttm_fence;
733
734 ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr;
735
736 ttm_fence = malloc(sizeof(*ttm_fence));
737 if (!ttm_fence)
738 return NULL;
739
740 ttm_fence->drm_fence.handle = arg->handle;
741 ttm_fence->drm_fence.fence_class = arg->fence_class;
742 ttm_fence->drm_fence.type = arg->type;
743 ttm_fence->drm_fence.flags = arg->flags;
744 ttm_fence->drm_fence.signaled = 0;
745 ttm_fence->drm_fence.sequence = arg->sequence;
746
747 ttm_fence->fence.bufmgr = bufmgr;
748 ttm_fence->name = name;
749 ttm_fence->refcount = 1;
750
751 #if BUFMGR_DEBUG
752 fprintf(stderr, "fence_create_from_handle: %p (%s)\n", &ttm_fence->fence,
753 ttm_fence->name);
754 #endif
755
756 return &ttm_fence->fence;
757 }
758
759 static dri_fence *
760 dri_ttm_fence_validated(dri_bufmgr *bufmgr, const char *name,
761 GLboolean flushed)
762 {
763 return NULL;
764 }
765
766 static void
767 dri_ttm_fence_reference(dri_fence *fence)
768 {
769 dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
770 dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
771
772 _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
773 ++fence_ttm->refcount;
774 _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
775 #if BUFMGR_DEBUG
776 fprintf(stderr, "fence_reference: %p (%s)\n", &fence_ttm->fence,
777 fence_ttm->name);
778 #endif
779 }
780
781 static void
782 dri_ttm_fence_unreference(dri_fence *fence)
783 {
784 dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
785 dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
786
787 if (!fence)
788 return;
789
790 #if BUFMGR_DEBUG
791 fprintf(stderr, "fence_unreference: %p (%s)\n", &fence_ttm->fence,
792 fence_ttm->name);
793 #endif
794 _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
795 if (--fence_ttm->refcount == 0) {
796 int ret;
797
798 /* XXX Having to use drmFenceDestroy as the opposite of drmFenceBuffers
799 * instead of simply unreferencing is madness, and leads to behaviors we
800 * may not want (making the fence unsharable). This behavior by the DRM
801 * ioctls should be fixed, and drmFenceDestroy eliminated.
802 */
803 ret = drmFenceDestroy(bufmgr_ttm->fd, &fence_ttm->drm_fence);
804 if (ret != 0) {
805 fprintf(stderr, "drmFenceDestroy failed (%s): %s\n",
806 fence_ttm->name, strerror(-ret));
807 }
808
809 _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
810 free(fence);
811 return;
812 }
813 _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
814 }
815
816 static void
817 dri_ttm_fence_wait(dri_fence *fence)
818 {
819 dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
820 dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
821 int ret;
822
823 _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
824 ret = drmFenceWait(bufmgr_ttm->fd, 0, &fence_ttm->drm_fence, 0);
825 _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
826 if (ret != 0) {
827 _mesa_printf("%s:%d: Error %d waiting for fence %s.\n",
828 __FILE__, __LINE__, ret, fence_ttm->name);
829 abort();
830 }
831
832 #if BUFMGR_DEBUG
833 fprintf(stderr, "fence_wait: %p (%s)\n", &fence_ttm->fence,
834 fence_ttm->name);
835 #endif
836 }
837
838 static void
839 dri_bufmgr_ttm_destroy(dri_bufmgr *bufmgr)
840 {
841 dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bufmgr;
842
843 _glthread_DESTROY_MUTEX(bufmgr_ttm->mutex);
844 free(bufmgr);
845 }
846
847
848 static void intel_dribo_destroy_callback(void *priv)
849 {
850 dri_bo *dribo = priv;
851
852 if (dribo) {
853 dri_bo_unreference(dribo);
854 }
855 }
856
/* Record a relocation in "batch_buf" at byte "offset", targeting
 * "relocatee" with the given access flags plus additive "delta".
 *
 * The target buffer is added to (or merged into) the validate list; its
 * list index becomes the relocation's target index.  A freshly created
 * entry takes a dri_bo reference, released after submit via
 * intel_dribo_destroy_callback().
 */
static void
dri_ttm_emit_reloc(dri_bo *batch_buf, GLuint flags, GLuint delta, GLuint offset,
		   dri_bo *relocatee)
{
    dri_bo_ttm *ttm_buf = (dri_bo_ttm *)batch_buf;
    dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;
    int newItem;
    struct intel_reloc_info reloc;
    int mask;
    int ret;

    /* Same mask derivation as dri_ttm_validate(). */
    mask = DRM_BO_MASK_MEM;
    mask |= flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE);

    ret = intel_add_validate_buffer(&bufmgr_ttm->list, relocatee, flags, mask, &newItem, intel_dribo_destroy_callback);
    if (ret < 0)
	return;

    /* ret == 1 means a new validate-list entry: hold a reference until
     * the post-submit callbacks run. */
    if (ret == 1) {
	dri_bo_reference(relocatee);
    }

    reloc.type = I915_RELOC_TYPE_0;
    reloc.reloc = offset;
    reloc.delta = delta;
    reloc.index = newItem;
    reloc.handle = ttm_buf->drm_bo.handle;

    /* NOTE(review): the return value is ignored; a relocation that fails
     * to record is silently dropped. */
    intel_add_validate_reloc(bufmgr_ttm->fd, &bufmgr_ttm->reloc_list, &reloc);
    return;
}
888
889
890 static void *
891 dri_ttm_process_reloc(dri_bo *batch_buf)
892 {
893 dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;
894 void *ptr;
895 int itemLoc;
896
897 dri_bo_unmap(batch_buf);
898
899 intel_add_validate_buffer(&bufmgr_ttm->list, batch_buf, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE,
900 DRM_BO_MASK_MEM | DRM_BO_FLAG_EXE, &itemLoc, NULL);
901
902 ptr = intel_setup_validate_list(bufmgr_ttm->fd, &bufmgr_ttm->list, &bufmgr_ttm->reloc_list);
903
904 return ptr;
905 }
906
/* Post-submit cleanup: run the validate-list destroy callbacks (dropping
 * the dri_bo references taken in dri_ttm_emit_reloc), free the relocation
 * buffers, and recycle the validate-list nodes.  last_fence is unused. */
static void
dri_ttm_post_submit(dri_bo *batch_buf, dri_fence **last_fence)
{
    dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;

    intel_free_validate_list(bufmgr_ttm->fd, &bufmgr_ttm->list);
    intel_free_reloc_list(bufmgr_ttm->fd, &bufmgr_ttm->reloc_list);

    intel_bo_reset_list(&bufmgr_ttm->list);
}
917
918 /**
919 * Initializes the TTM buffer manager, which uses the kernel to allocate, map,
920 * and manage map buffer objections.
921 *
922 * \param fd File descriptor of the opened DRM device.
923 * \param fence_type Driver-specific fence type used for fences with no flush.
924 * \param fence_type_flush Driver-specific fence type used for fences with a
925 * flush.
926 */
927 dri_bufmgr *
928 intel_bufmgr_ttm_init(int fd, unsigned int fence_type,
929 unsigned int fence_type_flush)
930 {
931 dri_bufmgr_ttm *bufmgr_ttm;
932
933 bufmgr_ttm = malloc(sizeof(*bufmgr_ttm));
934 bufmgr_ttm->fd = fd;
935 bufmgr_ttm->fence_type = fence_type;
936 bufmgr_ttm->fence_type_flush = fence_type_flush;
937 _glthread_INIT_MUTEX(bufmgr_ttm->mutex);
938
939 intel_create_bo_list(10, &bufmgr_ttm->list, NULL);
940 intel_create_bo_list(1, &bufmgr_ttm->reloc_list, NULL);
941
942 bufmgr_ttm->bufmgr.bo_alloc = dri_ttm_alloc;
943 bufmgr_ttm->bufmgr.bo_alloc_static = dri_ttm_alloc_static;
944 bufmgr_ttm->bufmgr.bo_reference = dri_ttm_bo_reference;
945 bufmgr_ttm->bufmgr.bo_unreference = dri_ttm_bo_unreference;
946 bufmgr_ttm->bufmgr.bo_map = dri_ttm_bo_map;
947 bufmgr_ttm->bufmgr.bo_unmap = dri_ttm_bo_unmap;
948 bufmgr_ttm->bufmgr.bo_validate = dri_ttm_validate;
949 bufmgr_ttm->bufmgr.fence_validated = dri_ttm_fence_validated;
950 bufmgr_ttm->bufmgr.fence_reference = dri_ttm_fence_reference;
951 bufmgr_ttm->bufmgr.fence_unreference = dri_ttm_fence_unreference;
952 bufmgr_ttm->bufmgr.fence_wait = dri_ttm_fence_wait;
953 bufmgr_ttm->bufmgr.destroy = dri_bufmgr_ttm_destroy;
954 bufmgr_ttm->bufmgr.emit_reloc = dri_ttm_emit_reloc;
955 bufmgr_ttm->bufmgr.process_relocs = dri_ttm_process_reloc;
956 bufmgr_ttm->bufmgr.post_submit = dri_ttm_post_submit;
957 return &bufmgr_ttm->bufmgr;
958 }
959