[intel] Remove the relocation buffer lists and just cache one per buffer.
[mesa.git] / src / mesa / drivers / dri / intel / intel_bufmgr_ttm.c
/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

#include <xf86drm.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include "glthread.h"
#include "errno.h"
#include "mtypes.h"
#include "dri_bufmgr.h"
#include "string.h"
#include "imports.h"

#include "i915_drm.h"

#include "intel_bufmgr_ttm.h"

#define DBG(...) do {                                   \
    if (bufmgr_ttm->bufmgr.debug)                       \
        _mesa_printf(__VA_ARGS__);                      \
} while (0)

/* Buffer validation list */
struct intel_bo_list {
    unsigned numCurrent;
    drmMMListHead list;
};

typedef struct _dri_bufmgr_ttm {
    dri_bufmgr bufmgr;

    int fd;
    unsigned int fence_type;
    unsigned int fence_type_flush;

    uint32_t max_relocs;
    struct intel_bo_list list; /* list of buffers to be validated */
} dri_bufmgr_ttm;

typedef struct _dri_bo_ttm {
    dri_bo bo;

    int refcount;
    drmBO drm_bo;
    const char *name;

    /** DRM buffer object containing relocation list */
    drmBO *reloc_buf;
    uint32_t *relocs;
} dri_bo_ttm;

typedef struct _dri_fence_ttm {
    dri_fence fence;

    int refcount;
    const char *name;
    drmFence drm_fence;
} dri_fence_ttm;

/* Validation list node */
struct intel_bo_node {
    drmMMListHead head;
    dri_bo *bo;
    struct drm_i915_op_arg bo_arg;
    uint64_t flags;
    uint64_t mask;
};

static void
intel_init_validate_list(struct intel_bo_list *list)
{
    DRMINITLISTHEAD(&list->list);
    list->numCurrent = 0;
}

/**
 * Empties the validation list and clears the relocations.
 */
static void
intel_free_validate_list(dri_bufmgr_ttm *bufmgr_ttm)
{
    struct intel_bo_list *list = &bufmgr_ttm->list;
    drmMMListHead *l;

    for (l = list->list.next; l != &list->list; l = list->list.next) {
        struct intel_bo_node *node =
            DRMLISTENTRY(struct intel_bo_node, l, head);
        dri_bo_ttm *bo_ttm = (dri_bo_ttm *)node->bo;

        DRMLISTDEL(l);

        /* Reset the relocation count in the header dword of the buffer's
         * cached relocation list so the list can be reused.
         */
        if (bo_ttm->relocs != NULL)
            bo_ttm->relocs[0] &= ~0xffff;

        dri_bo_unreference(node->bo);

        drmFree(node);
        list->numCurrent--;
    }
}

static void dri_ttm_dump_validation_list(dri_bufmgr_ttm *bufmgr_ttm)
{
    struct intel_bo_list *list = &bufmgr_ttm->list;
    drmMMListHead *l;
    int i = 0;

    for (l = list->list.next; l != &list->list; l = l->next) {
        int j;
        struct intel_bo_node *node =
            DRMLISTENTRY(struct intel_bo_node, l, head);
        dri_bo_ttm *bo_ttm = (dri_bo_ttm *)node->bo;

        if (bo_ttm->relocs != NULL) {
            for (j = 0; j < (bo_ttm->relocs[0] & 0xffff); j++) {
                uint32_t *reloc_entry = bo_ttm->relocs + I915_RELOC_HEADER +
                    j * I915_RELOC0_STRIDE;

                DBG("%2d: %s@0x%08x -> %d + 0x%08x\n",
                    i, bo_ttm->name,
                    reloc_entry[0], reloc_entry[2], reloc_entry[1]);
            }
        } else {
            DBG("%2d: %s\n", i, bo_ttm->name);
        }
        i++;
    }
}

static struct drm_i915_op_arg *
intel_setup_validate_list(dri_bufmgr_ttm *bufmgr_ttm, GLuint *count_p)
{
    struct intel_bo_list *list = &bufmgr_ttm->list;
    drmMMListHead *l;
    struct drm_i915_op_arg *first;
    uint64_t *prevNext = NULL;
    GLuint count = 0;

    first = NULL;

    for (l = list->list.next; l != &list->list; l = l->next) {
        struct intel_bo_node *node =
            DRMLISTENTRY(struct intel_bo_node, l, head);
        dri_bo_ttm *ttm_buf = (dri_bo_ttm *)node->bo;
        struct drm_i915_op_arg *arg = &node->bo_arg;
        struct drm_bo_op_req *req = &arg->d.req;

        if (!first)
            first = arg;

        if (prevNext)
            *prevNext = (unsigned long) arg;

        memset(arg, 0, sizeof(*arg));
        prevNext = &arg->next;
        req->bo_req.handle = ttm_buf->drm_bo.handle;
        req->op = drm_bo_validate;
        req->bo_req.flags = node->flags;
        req->bo_req.hint = 0;
#ifdef DRM_BO_HINT_PRESUMED_OFFSET
        req->bo_req.hint |= DRM_BO_HINT_PRESUMED_OFFSET;
        req->bo_req.presumed_offset = node->bo->offset;
#endif
        req->bo_req.mask = node->mask;
        req->bo_req.fence_class = 0; /* Backwards compat. */

        if (ttm_buf->reloc_buf != NULL)
            arg->reloc_handle = ttm_buf->reloc_buf->handle;
        else
            arg->reloc_handle = 0;

        count++;
    }

    if (!first)
        return NULL;

    dri_ttm_dump_validation_list(bufmgr_ttm);
    *count_p = count;
    return first;
}

/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * remaining flags.
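 *
 * For example (illustrative flag values, not taken from any particular
 * caller): a buffer first added with DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ
 * and later added with (DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_LOCAL) |
 * DRM_BO_FLAG_WRITE ends up with memory flags DRM_BO_FLAG_MEM_TT (the
 * intersection) and access flags DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE
 * (the union).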
 */
static struct intel_bo_node *
intel_add_validate_buffer(dri_bufmgr_ttm *bufmgr_ttm,
                          dri_bo *buf,
                          uint64_t flags, uint64_t mask,
                          int *itemLoc)
{
    struct intel_bo_list *list = &bufmgr_ttm->list;
    struct intel_bo_node *cur;
    dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
    drmMMListHead *l;
    int count = 0;

    cur = NULL;

    /* Find the buffer in the validation list if it's already there. */
    for (l = list->list.next; l != &list->list; l = l->next) {
        struct intel_bo_node *node =
            DRMLISTENTRY(struct intel_bo_node, l, head);

        if (((dri_bo_ttm *)node->bo)->drm_bo.handle == ttm_buf->drm_bo.handle) {
            cur = node;
            break;
        }
        count++;
    }

    if (!cur) {
        cur = drmMalloc(sizeof(*cur));
        if (!cur) {
            return NULL;
        }
        cur->bo = buf;
        dri_bo_reference(buf);
        cur->flags = flags;
        cur->mask = mask;

        DRMLISTADDTAIL(&cur->head, &list->list);
        list->numCurrent++;
    } else {
        uint64_t memMask = (cur->mask | mask) & DRM_BO_MASK_MEM;
        uint64_t memFlags = cur->flags & flags & memMask;

        if (!memFlags) {
            fprintf(stderr,
                    "%s: No shared memory types between "
                    "0x%16llx and 0x%16llx\n",
                    __FUNCTION__, (unsigned long long)cur->flags,
                    (unsigned long long)flags);
            return NULL;
        }
        if (mask & cur->mask & ~DRM_BO_MASK_MEM & (cur->flags ^ flags)) {
            fprintf(stderr,
                    "%s: Incompatible flags between 0x%16llx and 0x%16llx "
                    "(0x%16llx, 0x%16llx masks)\n",
                    __FUNCTION__, (unsigned long long)cur->flags,
                    (unsigned long long)flags,
                    (unsigned long long)cur->mask,
                    (unsigned long long)mask);
            return NULL;
        }
        cur->mask |= mask;
        cur->flags = memFlags | ((cur->flags | flags) &
                                 cur->mask & ~DRM_BO_MASK_MEM);
    }
    *itemLoc = count;

    return cur;
}


#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
                           sizeof(uint32_t))
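
/* A worked size check (illustrative, assuming the 4-dword header and 4-dword
 * per-entry stride implied by the initialization in intel_setup_reloc_list()
 * below): a 4096-byte batch gives max_relocs = 4096 / 4 / 4 = 256, so
 * RELOC_BUF_SIZE(256) = (4 + 256 * 4) * 4 = 4112 bytes for the relocation BO.
 */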

static int
intel_setup_reloc_list(dri_bo *bo)
{
    dri_bo_ttm *bo_ttm = (dri_bo_ttm *)bo;
    dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bo->bufmgr;
    int ret;

    /* If the relocation buffer already exists, it was either just created or
     * its relocation count was reset at the last intel_free_validate_list(),
     * so it is ready for reuse.
     */
    if (bo_ttm->reloc_buf != NULL)
        return 0;

    bo_ttm->reloc_buf = malloc(sizeof(*bo_ttm->reloc_buf));
    if (bo_ttm->reloc_buf == NULL)
        return -ENOMEM;

    ret = drmBOCreate(bufmgr_ttm->fd,
                      RELOC_BUF_SIZE(bufmgr_ttm->max_relocs), 0,
                      NULL,
                      DRM_BO_FLAG_MEM_LOCAL |
                      DRM_BO_FLAG_READ |
                      DRM_BO_FLAG_WRITE |
                      DRM_BO_FLAG_MAPPABLE |
                      DRM_BO_FLAG_CACHED,
                      0, bo_ttm->reloc_buf);
    if (ret) {
        fprintf(stderr, "Failed to create relocation BO: %s\n",
                strerror(-ret));
        free(bo_ttm->reloc_buf);
        bo_ttm->reloc_buf = NULL;
        return ret;
    }

    ret = drmBOMap(bufmgr_ttm->fd, bo_ttm->reloc_buf,
                   DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
                   0, (void **)&bo_ttm->relocs);
    if (ret) {
        fprintf(stderr, "Failed to map relocation BO: %s\n",
                strerror(-ret));
        drmBOUnreference(bufmgr_ttm->fd, bo_ttm->reloc_buf);
        free(bo_ttm->reloc_buf);
        bo_ttm->reloc_buf = NULL;
        return ret;
    }

    /* Initialize the relocation list with the header:
     * DWORD 0: relocation type, relocation count
     * DWORD 1: handle to next relocation list (currently none)
     * DWORD 2: unused
     * DWORD 3: unused
     */
    bo_ttm->relocs[0] = I915_RELOC_TYPE_0 << 16;
    bo_ttm->relocs[1] = 0;
    bo_ttm->relocs[2] = 0;
    bo_ttm->relocs[3] = 0;

    return 0;
}

#if 0
int
driFenceSignaled(DriFenceObject * fence, unsigned type)
{
    int signaled;
    int ret;

    if (fence == NULL)
        return GL_TRUE;

    ret = drmFenceSignaled(bufmgr_ttm->fd, &fence->fence, type, &signaled);
    BM_CKFATAL(ret);
    return signaled;
}
#endif

static dri_bo *
dri_ttm_alloc(dri_bufmgr *bufmgr, const char *name,
              unsigned long size, unsigned int alignment,
              uint64_t location_mask)
{
    dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bufmgr;
    dri_bo_ttm *ttm_buf;
    unsigned int pageSize = getpagesize();
    int ret;
    unsigned int flags, hint;

    ttm_buf = malloc(sizeof(*ttm_buf));
    if (!ttm_buf)
        return NULL;

    /* The mask argument doesn't do anything for us that we want other than
     * determine which pool (TTM or local) the buffer is allocated into, so
     * just pass all of the allocation class flags.
     */
    flags = location_mask | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
        DRM_BO_FLAG_EXE;
    /* No hints we want to use. */
    hint = 0;

    ret = drmBOCreate(bufmgr_ttm->fd, size, alignment / pageSize,
                      NULL, flags, hint, &ttm_buf->drm_bo);
    if (ret != 0) {
        free(ttm_buf);
        return NULL;
    }
    ttm_buf->bo.size = ttm_buf->drm_bo.size;
    ttm_buf->bo.offset = ttm_buf->drm_bo.offset;
    ttm_buf->bo.virtual = NULL;
    ttm_buf->bo.bufmgr = bufmgr;
    ttm_buf->name = name;
    ttm_buf->refcount = 1;
    ttm_buf->reloc_buf = NULL;
    ttm_buf->relocs = NULL;

    DBG("bo_create: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);

    return &ttm_buf->bo;
}

/* Our TTM backend doesn't allow creation of static buffers, as that requires
 * privilege for the non-fake case, and the lock in the fake case where we were
 * working around the X Server not creating buffers and passing handles to us.
 */
static dri_bo *
dri_ttm_alloc_static(dri_bufmgr *bufmgr, const char *name,
                     unsigned long offset, unsigned long size, void *virtual,
                     uint64_t location_mask)
{
    return NULL;
}

/**
 * Returns a dri_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
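 *
 * A minimal usage sketch (the names and the way the handle is exchanged are
 * illustrative; typically the handle arrives from another process over some
 * out-of-band channel such as the X server):
 *
 *     dri_bo *shared_bo =
 *         intel_ttm_bo_create_from_handle(bufmgr, "shared", recv_handle);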
 */
dri_bo *
intel_ttm_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name,
                                unsigned int handle)
{
    dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bufmgr;
    dri_bo_ttm *ttm_buf;
    int ret;

    ttm_buf = malloc(sizeof(*ttm_buf));
    if (!ttm_buf)
        return NULL;

    ret = drmBOReference(bufmgr_ttm->fd, handle, &ttm_buf->drm_bo);
    if (ret != 0) {
        fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
                name, handle, strerror(-ret));
        free(ttm_buf);
        return NULL;
    }
    ttm_buf->bo.size = ttm_buf->drm_bo.size;
    ttm_buf->bo.offset = ttm_buf->drm_bo.offset;
    ttm_buf->bo.virtual = NULL;
    ttm_buf->bo.bufmgr = bufmgr;
    ttm_buf->name = name;
    ttm_buf->refcount = 1;
    /* The unreference path checks reloc_buf, so these must not be left
     * uninitialized.
     */
    ttm_buf->reloc_buf = NULL;
    ttm_buf->relocs = NULL;

    DBG("bo_create_from_handle: %p %08x (%s)\n",
        &ttm_buf->bo, handle, ttm_buf->name);

    return &ttm_buf->bo;
}

static void
dri_ttm_bo_reference(dri_bo *buf)
{
    dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;

    ttm_buf->refcount++;
}

static void
dri_ttm_bo_unreference(dri_bo *buf)
{
    dri_bufmgr_ttm *bufmgr_ttm;
    dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;

    if (!buf)
        return;

    bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;

    if (--ttm_buf->refcount == 0) {
        int ret;

        if (ttm_buf->reloc_buf) {
            drmBOUnmap(bufmgr_ttm->fd, ttm_buf->reloc_buf);
            drmBOUnreference(bufmgr_ttm->fd, ttm_buf->reloc_buf);
            free(ttm_buf->reloc_buf);
        }

        ret = drmBOUnreference(bufmgr_ttm->fd, &ttm_buf->drm_bo);
        if (ret != 0) {
            fprintf(stderr, "drmBOUnreference failed (%s): %s\n",
                    ttm_buf->name, strerror(-ret));
        }
        DBG("bo_unreference final: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);

        free(buf);
        return;
    }
}

static int
dri_ttm_bo_map(dri_bo *buf, GLboolean write_enable)
{
    dri_bufmgr_ttm *bufmgr_ttm;
    dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
    unsigned int flags;

    bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;

    flags = DRM_BO_FLAG_READ;
    if (write_enable)
        flags |= DRM_BO_FLAG_WRITE;

    assert(buf->virtual == NULL);

    DBG("bo_map: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);

    return drmBOMap(bufmgr_ttm->fd, &ttm_buf->drm_bo, flags, 0, &buf->virtual);
}

static int
dri_ttm_bo_unmap(dri_bo *buf)
{
    dri_bufmgr_ttm *bufmgr_ttm;
    dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;

    if (buf == NULL)
        return 0;

    bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;

    assert(buf->virtual != NULL);

    buf->virtual = NULL;

    DBG("bo_unmap: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);

    return drmBOUnmap(bufmgr_ttm->fd, &ttm_buf->drm_bo);
}

/**
 * Returns a dri_fence wrapping the fence described by the given
 * drm_fence_arg_t, as returned by the kernel.
 *
 * This can be used when one application needs to pass a fence to another.
 */
dri_fence *
intel_ttm_fence_create_from_arg(dri_bufmgr *bufmgr, const char *name,
                                drm_fence_arg_t *arg)
{
    dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bufmgr;
    dri_fence_ttm *ttm_fence;

    ttm_fence = malloc(sizeof(*ttm_fence));
    if (!ttm_fence)
        return NULL;

    ttm_fence->drm_fence.handle = arg->handle;
    ttm_fence->drm_fence.fence_class = arg->fence_class;
    ttm_fence->drm_fence.type = arg->type;
    ttm_fence->drm_fence.flags = arg->flags;
    ttm_fence->drm_fence.signaled = 0;
    ttm_fence->drm_fence.sequence = arg->sequence;

    ttm_fence->fence.bufmgr = bufmgr;
    ttm_fence->name = name;
    ttm_fence->refcount = 1;

    DBG("fence_create_from_handle: %p (%s)\n",
        &ttm_fence->fence, ttm_fence->name);

    return &ttm_fence->fence;
}

static void
dri_ttm_fence_reference(dri_fence *fence)
{
    dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
    dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;

    ++fence_ttm->refcount;
    DBG("fence_reference: %p (%s)\n", &fence_ttm->fence, fence_ttm->name);
}

static void
dri_ttm_fence_unreference(dri_fence *fence)
{
    dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
    dri_bufmgr_ttm *bufmgr_ttm;

    if (!fence)
        return;

    bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;

    DBG("fence_unreference: %p (%s)\n", &fence_ttm->fence, fence_ttm->name);

    if (--fence_ttm->refcount == 0) {
        int ret;

        ret = drmFenceUnreference(bufmgr_ttm->fd, &fence_ttm->drm_fence);
        if (ret != 0) {
            fprintf(stderr, "drmFenceUnreference failed (%s): %s\n",
                    fence_ttm->name, strerror(-ret));
        }

        free(fence);
        return;
    }
}

static void
dri_ttm_fence_wait(dri_fence *fence)
{
    dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
    dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
    int ret;

    ret = drmFenceWait(bufmgr_ttm->fd, DRM_FENCE_FLAG_WAIT_LAZY,
                       &fence_ttm->drm_fence, 0);
    if (ret != 0) {
        _mesa_printf("%s:%d: Error %d waiting for fence %s.\n",
                     __FILE__, __LINE__, ret, fence_ttm->name);
        abort();
    }

    DBG("fence_wait: %p (%s)\n", &fence_ttm->fence, fence_ttm->name);
}

static void
dri_bufmgr_ttm_destroy(dri_bufmgr *bufmgr)
{
    dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bufmgr;

    intel_free_validate_list(bufmgr_ttm);

    free(bufmgr);
}

/**
 * Adds the target buffer to the validation list and adds the relocation
 * to the reloc_buffer's relocation list.
 *
 * The relocation entry at the given offset must already contain the
 * precomputed relocation value, because the kernel will optimize out
 * the relocation entry write when the buffer hasn't moved from the
 * last known offset in target_buf.
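 *
 * A caller would therefore write the presumed value into the batch first and
 * then record the relocation, roughly as follows (a sketch of the expected
 * calling convention with hypothetical caller-side names, assuming the
 * dri_bufmgr-level dri_emit_reloc() wrapper dispatches here):
 *
 *     batch[dword] = target->offset + delta;
 *     dri_emit_reloc(batch_bo,
 *                    DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
 *                    delta, dword * 4, target);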
 */
static void
dri_ttm_emit_reloc(dri_bo *reloc_buf, uint64_t flags, GLuint delta,
                   GLuint offset, dri_bo *target_buf)
{
    dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)reloc_buf->bufmgr;
    dri_bo_ttm *reloc_buf_ttm = (dri_bo_ttm *)reloc_buf;
    int index;
    uint64_t mask;
    int num_relocs;
    uint32_t *this_reloc;

    mask = DRM_BO_MASK_MEM;
    mask |= flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE);

    intel_add_validate_buffer(bufmgr_ttm, target_buf, flags, mask, &index);

    intel_setup_reloc_list(reloc_buf);

    num_relocs = (reloc_buf_ttm->relocs[0] & 0xffff);

    /* Check overflow */
    assert((reloc_buf_ttm->relocs[0] & 0xffff) < bufmgr_ttm->max_relocs);

    this_reloc = reloc_buf_ttm->relocs + I915_RELOC_HEADER +
        num_relocs * I915_RELOC0_STRIDE;

    this_reloc[0] = offset;
    this_reloc[1] = delta;
    this_reloc[2] = index;
    this_reloc[3] = 0;

    reloc_buf_ttm->relocs[0]++; /* Increment relocation count */
    /* Check wraparound */
    assert((reloc_buf_ttm->relocs[0] & 0xffff) != 0);
}

static void *
dri_ttm_process_reloc(dri_bo *batch_buf, GLuint *count)
{
    dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;
    void *ptr;
    int index;

    /* Add the batch buffer to the validation list.  There are no relocations
     * pointing to it.
     */
    intel_add_validate_buffer(bufmgr_ttm, batch_buf,
                              DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE,
                              DRM_BO_MASK_MEM | DRM_BO_FLAG_EXE,
                              &index);

    ptr = intel_setup_validate_list(bufmgr_ttm, count);

    return ptr;
}

static void
intel_update_buffer_offsets(dri_bufmgr_ttm *bufmgr_ttm)
{
    struct intel_bo_list *list = &bufmgr_ttm->list;
    struct intel_bo_node *node;
    drmMMListHead *l;
    struct drm_i915_op_arg *arg;
    struct drm_bo_arg_rep *rep;

    for (l = list->list.next; l != &list->list; l = l->next) {
        node = DRMLISTENTRY(struct intel_bo_node, l, head);
        arg = &node->bo_arg;
        rep = &arg->d.rep;
        node->bo->offset = rep->bo_info.offset;
    }
}

static void
dri_ttm_post_submit(dri_bo *batch_buf, dri_fence **last_fence)
{
    dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;

    intel_update_buffer_offsets(bufmgr_ttm);

    if (bufmgr_ttm->bufmgr.debug)
        dri_ttm_dump_validation_list(bufmgr_ttm);

    intel_free_validate_list(bufmgr_ttm);
}

/**
 * Initializes the TTM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 * \param fence_type Driver-specific fence type used for fences with no flush.
 * \param fence_type_flush Driver-specific fence type used for fences with a
 *        flush.
 * \param batch_size Size of the batch buffer, used to size the per-buffer
 *        relocation lists.
 */
dri_bufmgr *
intel_bufmgr_ttm_init(int fd, unsigned int fence_type,
                      unsigned int fence_type_flush, int batch_size)
{
    dri_bufmgr_ttm *bufmgr_ttm;

    bufmgr_ttm = malloc(sizeof(*bufmgr_ttm));
    if (!bufmgr_ttm)
        return NULL;

    bufmgr_ttm->fd = fd;
    bufmgr_ttm->fence_type = fence_type;
    bufmgr_ttm->fence_type_flush = fence_type_flush;
    bufmgr_ttm->bufmgr.debug = GL_FALSE;

    /* Let's go with one relocation per every four dwords - purely heuristic. */
    bufmgr_ttm->max_relocs = batch_size / sizeof(uint32_t) / 4;

    intel_init_validate_list(&bufmgr_ttm->list);

    bufmgr_ttm->bufmgr.bo_alloc = dri_ttm_alloc;
    bufmgr_ttm->bufmgr.bo_alloc_static = dri_ttm_alloc_static;
    bufmgr_ttm->bufmgr.bo_reference = dri_ttm_bo_reference;
    bufmgr_ttm->bufmgr.bo_unreference = dri_ttm_bo_unreference;
    bufmgr_ttm->bufmgr.bo_map = dri_ttm_bo_map;
    bufmgr_ttm->bufmgr.bo_unmap = dri_ttm_bo_unmap;
    bufmgr_ttm->bufmgr.fence_reference = dri_ttm_fence_reference;
    bufmgr_ttm->bufmgr.fence_unreference = dri_ttm_fence_unreference;
    bufmgr_ttm->bufmgr.fence_wait = dri_ttm_fence_wait;
    bufmgr_ttm->bufmgr.destroy = dri_bufmgr_ttm_destroy;
    bufmgr_ttm->bufmgr.emit_reloc = dri_ttm_emit_reloc;
    bufmgr_ttm->bufmgr.process_relocs = dri_ttm_process_reloc;
    bufmgr_ttm->bufmgr.post_submit = dri_ttm_post_submit;

    return &bufmgr_ttm->bufmgr;
}
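
/* A minimal usage sketch of this buffer manager, kept out of the build like
 * the dead code above.  The fence types, sizes, and the way the DRM fd is
 * obtained are illustrative assumptions, not values this file defines; the
 * dri_bo_* and dri_bufmgr_* wrappers come from dri_bufmgr.h.
 */
#if 0
static void
example_usage(int fd)
{
    dri_bufmgr *bufmgr;
    dri_bo *bo;

    /* 16kB batch; fence type 0 for both flush and non-flush fences. */
    bufmgr = intel_bufmgr_ttm_init(fd, 0, 0, 16 * 1024);

    /* Allocate a page-aligned 4kB buffer in the TT pool and fill it. */
    bo = dri_bo_alloc(bufmgr, "example", 4096, 4096, DRM_BO_FLAG_MEM_TT);
    dri_bo_map(bo, GL_TRUE);
    memset(bo->virtual, 0, 4096);
    dri_bo_unmap(bo);

    dri_bo_unreference(bo);
    dri_bufmgr_destroy(bufmgr);
}
#endif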