/* src/mesa/drivers/dri/radeon/radeon_bo_legacy.c */
/*
 * Copyright © 2008 Nicolai Haehnle
 * Copyright © 2008 Dave Airlie
 * Copyright © 2008 Jérôme Glisse
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *   Aapo Tahkola <aet@rasterburn.org>
 *   Nicolai Haehnle <prefect_@gmx.net>
 *   Dave Airlie
 *   Jérôme Glisse <glisse@freedesktop.org>
 */
#include <assert.h>
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include "xf86drm.h"
#include "texmem.h"
#include "main/simple_list.h"

#include "drm.h"
#include "radeon_drm.h"
#include "radeon_common.h"
#include "radeon_bocs_wrapper.h"
#include "radeon_macros.h"

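/*
 * Legacy (non-KMS) buffer object manager.
 *
 * The old DRM interface has no kernel memory manager, so buffer objects are
 * emulated in user space: GTT buffers are carved out of the GART region with
 * the DRM_RADEON_ALLOC/DRM_RADEON_FREE ioctls, while VRAM buffers live in a
 * malloc()ed shadow copy that is blitted into a driTexHeap slot when the
 * buffer is validated.  GPU completion is tracked with a scratch-register
 * "age" value (see legacy_get_current_age()).
 */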
/* no seriously texmem.c is this screwed up */
struct bo_legacy_texture_object {
    driTextureObject base;
    struct bo_legacy *parent;
};

struct bo_legacy {
    struct radeon_bo base;
    int map_count;
    uint32_t pending;
    int is_pending;
    int static_bo;
    uint32_t offset;
    struct bo_legacy_texture_object *tobj;
    int validated;
    int dirty;
    void *ptr;
    struct bo_legacy *next, *prev;
    struct bo_legacy *pnext, *pprev;
#ifdef RADEON_DEBUG_BO
    char szBufUsage[16];
#endif /* RADEON_DEBUG_BO */
};

struct bo_manager_legacy {
    struct radeon_bo_manager base;
    unsigned nhandle;
    unsigned nfree_handles;
    unsigned cfree_handles;
    uint32_t current_age;
    struct bo_legacy bos;
    struct bo_legacy pending_bos;
    uint32_t fb_location;
    uint32_t texture_offset;
    unsigned dma_alloc_size;
    uint32_t dma_buf_count;
    unsigned cpendings;
    driTextureObject texture_swapped;
    driTexHeap *texture_heap;
    struct radeon_screen *screen;
    unsigned *free_handles;
};

static void bo_legacy_tobj_destroy(void *data, driTextureObject *t)
{
    struct bo_legacy_texture_object *tobj = (struct bo_legacy_texture_object *)t;

    if (tobj->parent) {
        tobj->parent->tobj = NULL;
        tobj->parent->validated = 0;
    }
}

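/* Handle management: handles are plain integers.  New handles come from a
 * monotonically increasing counter (nhandle); freed handles are recycled
 * through the free_handles array so the counter is not exhausted.  Handle 0
 * is never handed out and means "no handle".
 */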
static inline void clean_handles(struct bo_manager_legacy *bom)
{
    while (bom->cfree_handles > 0 &&
           !bom->free_handles[bom->cfree_handles - 1])
        bom->cfree_handles--;
}

static int legacy_new_handle(struct bo_manager_legacy *bom, uint32_t *handle)
{
    uint32_t tmp;

    *handle = 0;
    if (bom->nhandle == 0xFFFFFFFF) {
        return -EINVAL;
    }
    if (bom->cfree_handles > 0) {
        tmp = bom->free_handles[--bom->cfree_handles];
        clean_handles(bom);
    } else {
        bom->cfree_handles = 0;
        tmp = bom->nhandle++;
    }
    assert(tmp);
    *handle = tmp;
    return 0;
}

static int legacy_free_handle(struct bo_manager_legacy *bom, uint32_t handle)
{
    uint32_t *handles;

    if (!handle) {
        return 0;
    }
    if (handle == (bom->nhandle - 1)) {
        int i;

        bom->nhandle--;
        for (i = bom->cfree_handles - 1; i >= 0; i--) {
            if (bom->free_handles[i] == (bom->nhandle - 1)) {
                bom->nhandle--;
                bom->free_handles[i] = 0;
            }
        }
        clean_handles(bom);
        return 0;
    }
    if (bom->cfree_handles < bom->nfree_handles) {
        bom->free_handles[bom->cfree_handles++] = handle;
        return 0;
    }
    bom->nfree_handles += 0x100;
    handles = (uint32_t*)realloc(bom->free_handles,
                                 bom->nfree_handles * sizeof(uint32_t));
    if (handles == NULL) {
        bom->nfree_handles -= 0x100;
        return -ENOMEM;
    }
    bom->free_handles = handles;
    bom->free_handles[bom->cfree_handles++] = handle;
    return 0;
}

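/* Refresh boml->current_age, the last command-stream "age" the hardware has
 * completed.  R300-class chips report it through the LAST_CLEAR getparam
 * ioctl; older chips read scratch register 3 directly through the mapped
 * register aperture.
 */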
static void legacy_get_current_age(struct bo_manager_legacy *boml)
{
    drm_radeon_getparam_t gp;
    unsigned char *RADEONMMIO = NULL;
    int r;

    if (IS_R300_CLASS(boml->screen)) {
        gp.param = RADEON_PARAM_LAST_CLEAR;
        gp.value = (int *)&boml->current_age;
        r = drmCommandWriteRead(boml->base.fd, DRM_RADEON_GETPARAM,
                                &gp, sizeof(gp));
        if (r) {
            fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, r);
            exit(1);
        }
    } else {
        RADEONMMIO = boml->screen->mmio.map;
        boml->current_age = boml->screen->scratch[3];
        boml->current_age = INREG(RADEON_GUI_SCRATCH_REG3);
    }
}

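/* If the buffer's fence age has been reached, unlink it from the pending
 * list and drop the references taken by radeon_bo_legacy_pending().
 * Returns 1 while the buffer is still busy, 0 once it has retired.
 */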
static int legacy_is_pending(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo_legacy->is_pending <= 0) {
        bo_legacy->is_pending = 0;
        return 0;
    }
    if (boml->current_age >= bo_legacy->pending) {
        if (boml->pending_bos.pprev == bo_legacy) {
            boml->pending_bos.pprev = bo_legacy->pprev;
        }
        bo_legacy->pprev->pnext = bo_legacy->pnext;
        if (bo_legacy->pnext) {
            bo_legacy->pnext->pprev = bo_legacy->pprev;
        }
        assert(bo_legacy->is_pending <= bo->cref);
        while (bo_legacy->is_pending--) {
            bo = radeon_bo_unref(bo);
            if (!bo)
                break;
        }
        if (bo)
            bo_legacy->is_pending = 0;
        boml->cpendings--;
        return 0;
    }
    return 1;
}

static int legacy_wait_pending(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (!bo_legacy->is_pending) {
        return 0;
    }
    /* FIXME: lockup and userspace busy looping that's all the folks */
    legacy_get_current_age(boml);
    while (legacy_is_pending(bo)) {
        usleep(10);
        legacy_get_current_age(boml);
    }
    return 0;
}

static void legacy_track_pending(struct bo_manager_legacy *boml, int debug)
{
    struct bo_legacy *bo_legacy;
    struct bo_legacy *next;

    legacy_get_current_age(boml);
    bo_legacy = boml->pending_bos.pnext;
    while (bo_legacy) {
        if (debug)
            fprintf(stderr, "pending %p %d %d %d\n", bo_legacy, bo_legacy->base.size,
                    boml->current_age, bo_legacy->pending);
        next = bo_legacy->pnext;
        legacy_is_pending(&(bo_legacy->base));
        bo_legacy = next;
    }
}

static int legacy_wait_any_pending(struct bo_manager_legacy *boml)
{
    struct bo_legacy *bo_legacy;

    legacy_get_current_age(boml);
    bo_legacy = boml->pending_bos.pnext;
    if (!bo_legacy)
        return -1;
    legacy_wait_pending(&bo_legacy->base);
    return 0;
}

static void legacy_kick_all_buffers(struct bo_manager_legacy *boml)
{
    struct bo_legacy *legacy;

    legacy = boml->bos.next;
    while (legacy != &boml->bos) {
        if (legacy->tobj) {
            if (legacy->validated) {
                driDestroyTextureObject(&legacy->tobj->base);
                legacy->tobj = NULL;
                legacy->validated = 0;
            }
        }
        legacy = legacy->next;
    }
}

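/* Allocate and link a new bo_legacy.  The size is rounded up to the CPU page
 * size and the buffer is inserted at the head of the manager's bos list.
 * The actual storage (GART slot or malloc()ed shadow) is set up later by the
 * caller.
 */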
static struct bo_legacy *bo_allocate(struct bo_manager_legacy *boml,
                                     uint32_t size,
                                     uint32_t alignment,
                                     uint32_t domains,
#ifdef RADEON_DEBUG_BO
                                     uint32_t flags,
                                     char * szBufUsage)
#else
                                     uint32_t flags)
#endif /* RADEON_DEBUG_BO */
{
    struct bo_legacy *bo_legacy;
    static int pgsize;

    if (pgsize == 0)
        pgsize = getpagesize() - 1;

    size = (size + pgsize) & ~pgsize;

    bo_legacy = (struct bo_legacy*)calloc(1, sizeof(struct bo_legacy));
    if (bo_legacy == NULL) {
        return NULL;
    }
    bo_legacy->base.bom = (struct radeon_bo_manager*)boml;
    bo_legacy->base.handle = 0;
    bo_legacy->base.size = size;
    bo_legacy->base.alignment = alignment;
    bo_legacy->base.domains = domains;
    bo_legacy->base.flags = flags;
    bo_legacy->base.ptr = NULL;
    bo_legacy->map_count = 0;
    bo_legacy->next = NULL;
    bo_legacy->prev = NULL;
    bo_legacy->pnext = NULL;
    bo_legacy->pprev = NULL;
    bo_legacy->next = boml->bos.next;
    bo_legacy->prev = &boml->bos;
    boml->bos.next = bo_legacy;
    if (bo_legacy->next) {
        bo_legacy->next->prev = bo_legacy;
    }

#ifdef RADEON_DEBUG_BO
    /* bounded copy so a long usage string cannot overflow szBufUsage[16] */
    snprintf(bo_legacy->szBufUsage, sizeof(bo_legacy->szBufUsage),
             "%s", szBufUsage);
#endif /* RADEON_DEBUG_BO */

    return bo_legacy;
}

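/* Back a GTT buffer with a slot in the GART texture region via the old
 * DRM_RADEON_ALLOC ioctl.  On success ptr points into the mapped GART
 * aperture and offset is the GPU-visible address.
 */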
static int bo_dma_alloc(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    drm_radeon_mem_alloc_t alloc;
    unsigned size;
    int base_offset;
    int r;

    /* align size on 4Kb */
    size = (((4 * 1024) - 1) + bo->size) & ~((4 * 1024) - 1);
    alloc.region = RADEON_MEM_REGION_GART;
    alloc.alignment = bo_legacy->base.alignment;
    alloc.size = size;
    alloc.region_offset = &base_offset;
    r = drmCommandWriteRead(bo->bom->fd,
                            DRM_RADEON_ALLOC,
                            &alloc,
                            sizeof(alloc));
    if (r) {
        /* ptr is set to NULL if dma allocation failed */
        bo_legacy->ptr = NULL;
        return r;
    }
    bo_legacy->ptr = boml->screen->gartTextures.map + base_offset;
    bo_legacy->offset = boml->screen->gart_texture_offset + base_offset;
    bo->size = size;
    boml->dma_alloc_size += size;
    boml->dma_buf_count++;
    return 0;
}

static int bo_dma_free(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    drm_radeon_mem_free_t memfree;
    int r;

    if (bo_legacy->ptr == NULL) {
        /* ptr is set to NULL if dma allocation failed */
        return 0;
    }
    legacy_get_current_age(boml);
    memfree.region = RADEON_MEM_REGION_GART;
    memfree.region_offset = bo_legacy->offset;
    memfree.region_offset -= boml->screen->gart_texture_offset;
    r = drmCommandWrite(boml->base.fd,
                        DRM_RADEON_FREE,
                        &memfree,
                        sizeof(memfree));
    if (r) {
        fprintf(stderr, "Failed to free bo[%p] at %08x\n",
                &bo_legacy->base, memfree.region_offset);
        fprintf(stderr, "ret = %s\n", strerror(-r));
        return r;
    }
    boml->dma_alloc_size -= bo_legacy->base.size;
    boml->dma_buf_count--;
    return 0;
}

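/* Unlink a buffer from the manager's list and release its storage: GTT
 * buffers give their GART slot back, VRAM buffers drop their texture-heap
 * object and free the malloc()ed shadow copy.  Static buffers keep their
 * fixed storage and are simply unlinked and freed.
 */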
static void bo_free(struct bo_legacy *bo_legacy)
{
    struct bo_manager_legacy *boml;

    if (bo_legacy == NULL) {
        return;
    }
    boml = (struct bo_manager_legacy *)bo_legacy->base.bom;
    bo_legacy->prev->next = bo_legacy->next;
    if (bo_legacy->next) {
        bo_legacy->next->prev = bo_legacy->prev;
    }
    if (!bo_legacy->static_bo) {
        legacy_free_handle(boml, bo_legacy->base.handle);
        if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
            /* dma buffers */
            bo_dma_free(&bo_legacy->base);
        } else {
            driDestroyTextureObject(&bo_legacy->tobj->base);
            bo_legacy->tobj = NULL;
            /* free backing store */
            free(bo_legacy->ptr);
        }
    }
    memset(bo_legacy, 0, sizeof(struct bo_legacy));
    free(bo_legacy);
}

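/* Open/allocate a buffer object.  A non-zero handle looks up an already
 * registered buffer (e.g. the static framebuffer BOs, whose handle is their
 * fixed offset); handle 0 allocates a new one.  GTT buffers get a GART slot
 * immediately, retiring pending buffers if the region is full; VRAM buffers
 * only get a malloc()ed shadow here and reach video memory later in
 * radeon_bo_legacy_validate().
 */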
static struct radeon_bo *bo_open(struct radeon_bo_manager *bom,
                                 uint32_t handle,
                                 uint32_t size,
                                 uint32_t alignment,
                                 uint32_t domains,
#ifdef RADEON_DEBUG_BO
                                 uint32_t flags,
                                 char * szBufUsage)
#else
                                 uint32_t flags)
#endif /* RADEON_DEBUG_BO */
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    struct bo_legacy *bo_legacy;
    int r;

    if (handle) {
        bo_legacy = boml->bos.next;
        while (bo_legacy) {
            if (bo_legacy->base.handle == handle) {
                radeon_bo_ref(&(bo_legacy->base));
                return (struct radeon_bo*)bo_legacy;
            }
            bo_legacy = bo_legacy->next;
        }
        return NULL;
    }
#ifdef RADEON_DEBUG_BO
    bo_legacy = bo_allocate(boml, size, alignment, domains, flags, szBufUsage);
#else
    bo_legacy = bo_allocate(boml, size, alignment, domains, flags);
#endif /* RADEON_DEBUG_BO */
    if (bo_legacy == NULL) {
        return NULL;
    }
    bo_legacy->static_bo = 0;
    r = legacy_new_handle(boml, &bo_legacy->base.handle);
    if (r) {
        bo_free(bo_legacy);
        return NULL;
    }
    if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
retry:
        legacy_track_pending(boml, 0);
        /* dma buffers */
        r = bo_dma_alloc(&(bo_legacy->base));
        if (r) {
            if (legacy_wait_any_pending(boml) == -1) {
                bo_free(bo_legacy);
                return NULL;
            }
            goto retry;
        }
    } else {
        bo_legacy->ptr = malloc(bo_legacy->base.size);
        if (bo_legacy->ptr == NULL) {
            bo_free(bo_legacy);
            return NULL;
        }
    }
    radeon_bo_ref(&(bo_legacy->base));

    return (struct radeon_bo*)bo_legacy;
}

static void bo_ref(struct radeon_bo *bo)
{
}

static struct radeon_bo *bo_unref(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo->cref <= 0) {
        bo_legacy->prev->next = bo_legacy->next;
        if (bo_legacy->next) {
            bo_legacy->next->prev = bo_legacy->prev;
        }
        if (!bo_legacy->is_pending) {
            bo_free(bo_legacy);
        }
        return NULL;
    }
    return bo;
}

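/* Map a buffer for CPU access.  Legacy buffers are always CPU reachable
 * (GART mapping or malloc()ed shadow), so mapping just waits for any pending
 * GPU use and exposes the stored pointer; the buffer is marked dirty so the
 * next validation re-uploads it.
 */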
static int bo_map(struct radeon_bo *bo, int write)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    legacy_wait_pending(bo);
    bo_legacy->validated = 0;
    bo_legacy->dirty = 1;
    bo_legacy->map_count++;
    bo->ptr = bo_legacy->ptr;
    /* Read the first pixel in the frame buffer.  This should
     * be a noop, right?  In fact without this conform fails as reading
     * from the framebuffer sometimes produces old results -- the
     * on-card read cache gets mixed up and doesn't notice that the
     * framebuffer has been updated.
     *
     * Note that we should probably be reading some otherwise unused
     * region of VRAM, otherwise we might get incorrect results when
     * reading pixels from the top left of the screen.
     *
     * I found this problem on an R420 with glean's texCube test.
     * Note that the R200 span code also *writes* the first pixel in the
     * framebuffer, but I've found this to be unnecessary.
     *  -- Nicolai Hähnle, June 2008
     */
    if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
        int p;
        volatile int *buf = (int*)boml->screen->driScreen->pFB;
        p = *buf;
    }

    return 0;
}

static int bo_unmap(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (--bo_legacy->map_count > 0) {
        return 0;
    }

    bo->ptr = NULL;

    return 0;
}

static struct radeon_bo_funcs bo_legacy_funcs = {
    bo_open,
    bo_ref,
    bo_unref,
    bo_map,
    bo_unmap
};

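/* Make a VRAM buffer resident: grab a block from the driTexHeap if it does
 * not have one yet, then, if the shadow copy is dirty, upload it with the
 * DRM_RADEON_TEXTURE blit ioctl (treated as a 32bpp texture, 1024 pixels per
 * row for buffers larger than 4KB).
 */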
static int bo_vram_validate(struct radeon_bo *bo,
                            uint32_t *soffset,
                            uint32_t *eoffset)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    int r;
    int retry_count = 0, pending_retry = 0;

    if (!bo_legacy->tobj) {
        bo_legacy->tobj = CALLOC(sizeof(struct bo_legacy_texture_object));
        if (!bo_legacy->tobj) {
            return -ENOMEM;
        }
        bo_legacy->tobj->parent = bo_legacy;
        make_empty_list(&bo_legacy->tobj->base);
        bo_legacy->tobj->base.totalSize = bo->size;
retry:
        r = driAllocateTexture(&boml->texture_heap, 1,
                               &bo_legacy->tobj->base);
        if (r) {
            pending_retry = 0;
            while (boml->cpendings && pending_retry++ < 10000) {
                legacy_track_pending(boml, 0);
                retry_count++;
                if (retry_count > 2) {
                    free(bo_legacy->tobj);
                    bo_legacy->tobj = NULL;
                    fprintf(stderr, "Ouch! vram_validate failed %d\n", r);
                    return -1;
                }
                goto retry;
            }
        }
        bo_legacy->offset = boml->texture_offset +
                            bo_legacy->tobj->base.memBlock->ofs;
        bo_legacy->dirty = 1;
    }

    assert(bo_legacy->tobj->base.memBlock);

    if (bo_legacy->tobj)
        driUpdateTextureLRU(&bo_legacy->tobj->base);

    if (bo_legacy->dirty || bo_legacy->tobj->base.dirty_images[0]) {
        /* Copy to VRAM using a blit.
         * All memory is 4K aligned.  We're using 1024 pixels wide blits.
         */
        drm_radeon_texture_t tex;
        drm_radeon_tex_image_t tmp;
        int ret;

        tex.offset = bo_legacy->offset;
        tex.image = &tmp;
        assert(!(tex.offset & 1023));

        tmp.x = 0;
        tmp.y = 0;
        if (bo->size < 4096) {
            tmp.width = (bo->size + 3) / 4;
            tmp.height = 1;
        } else {
            tmp.width = 1024;
            tmp.height = (bo->size + 4095) / 4096;
        }
        tmp.data = bo_legacy->ptr;
        tex.format = RADEON_TXFORMAT_ARGB8888;
        tex.width = tmp.width;
        tex.height = tmp.height;
        tex.pitch = MAX2(tmp.width / 16, 1);
        do {
            ret = drmCommandWriteRead(bo->bom->fd,
                                      DRM_RADEON_TEXTURE,
                                      &tex,
                                      sizeof(drm_radeon_texture_t));
            if (ret) {
                if (RADEON_DEBUG & DEBUG_IOCTL)
                    fprintf(stderr, "DRM_RADEON_TEXTURE: again!\n");
                usleep(1);
            }
        } while (ret == -EAGAIN);
        bo_legacy->dirty = 0;
        bo_legacy->tobj->base.dirty_images[0] = 0;
    }
    return 0;
}

/*
 * radeon_bo_legacy_validate -
 * returns:
 *    0 - all good
 *    -EINVAL - mapped buffer can't be validated
 *    -EAGAIN - restart validation we've kicked all the buffers out
 */
int radeon_bo_legacy_validate(struct radeon_bo *bo,
                              uint32_t *soffset,
                              uint32_t *eoffset)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    int r;
    int retries = 0;

    if (bo_legacy->map_count) {
#ifdef RADEON_DEBUG_BO
        fprintf(stderr, "bo(%p, %d, %s) is mapped (%d), can't validate it.\n",
                bo, bo->size, bo_legacy->szBufUsage, bo_legacy->map_count);
#else
        fprintf(stderr, "bo(%p, %d) is mapped (%d), can't validate it.\n",
                bo, bo->size, bo_legacy->map_count);
#endif /* RADEON_DEBUG_BO */
        return -EINVAL;
    }
    if (bo_legacy->static_bo || bo_legacy->validated) {
        *soffset = bo_legacy->offset;
        *eoffset = bo_legacy->offset + bo->size;

        return 0;
    }
    if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
        r = bo_vram_validate(bo, soffset, eoffset);
        if (r) {
            legacy_track_pending(boml, 0);
            legacy_kick_all_buffers(boml);
            retries++;
            if (retries == 2) {
                fprintf(stderr, "legacy bo: failed to get relocations into aperture\n");
                assert(0);
                exit(-1);
            }
            return -EAGAIN;
        }
    }
    *soffset = bo_legacy->offset;
    *eoffset = bo_legacy->offset + bo->size;
    bo_legacy->validated = 1;

    return 0;
}

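/* Fence a buffer against the command stream: remember the age value at which
 * it may be reused and put it on the pending list.  A reference is taken for
 * every pending occurrence and released again in legacy_is_pending() once
 * the hardware has passed that age.
 */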
void radeon_bo_legacy_pending(struct radeon_bo *bo, uint32_t pending)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    bo_legacy->pending = pending;
    bo_legacy->is_pending++;
    /* add to pending list */
    radeon_bo_ref(bo);
    if (bo_legacy->is_pending > 1) {
        return;
    }
    bo_legacy->pprev = boml->pending_bos.pprev;
    bo_legacy->pnext = NULL;
    bo_legacy->pprev->pnext = bo_legacy;
    boml->pending_bos.pprev = bo_legacy;
    boml->cpendings++;
}

void radeon_bo_manager_legacy_dtor(struct radeon_bo_manager *bom)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    struct bo_legacy *bo_legacy;

    if (bom == NULL) {
        return;
    }
    bo_legacy = boml->bos.next;
    while (bo_legacy) {
        struct bo_legacy *next;

        next = bo_legacy->next;
        bo_free(bo_legacy);
        bo_legacy = next;
    }
    driDestroyTextureHeap(boml->texture_heap);
    free(boml->free_handles);
    free(boml);
}

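/* Wrap a fixed region of the framebuffer (front/back/depth) in a static
 * buffer object.  Static buffers never move: their GPU offset is the
 * framebuffer location plus the screen-supplied offset, and their handle is
 * that offset so later bo_open() calls can find them by handle.
 */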
static struct bo_legacy *radeon_legacy_bo_alloc_static(struct bo_manager_legacy *bom,
                                                       int size,
#ifdef RADEON_DEBUG_BO
                                                       uint32_t offset,
                                                       char * szBufUsage)
#else
                                                       uint32_t offset)
#endif /* RADEON_DEBUG_BO */
{
    struct bo_legacy *bo;

#ifdef RADEON_DEBUG_BO
    bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0, szBufUsage);
#else
    bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
#endif /* RADEON_DEBUG_BO */
    if (bo == NULL)
        return NULL;
    bo->static_bo = 1;
    bo->offset = offset + bom->fb_location;
    bo->base.handle = bo->offset;
    bo->ptr = bom->screen->driScreen->pFB + offset;
    if (bo->base.handle > bom->nhandle) {
        bom->nhandle = bo->base.handle + 1;
    }
    radeon_bo_ref(&(bo->base));
    return bo;
}

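/* Build the legacy manager for a screen: create the driTexHeap that backs
 * VRAM validation, seed the handle allocator, and register static buffer
 * objects for the front, back and depth buffers (sized generously at
 * 4096x4096x4 bytes), honouring the SAREA tiling flags.
 */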
struct radeon_bo_manager *radeon_bo_manager_legacy_ctor(struct radeon_screen *scrn)
{
    struct bo_manager_legacy *bom;
    struct bo_legacy *bo;
    unsigned size;

    bom = (struct bo_manager_legacy*)
          calloc(1, sizeof(struct bo_manager_legacy));
    if (bom == NULL) {
        return NULL;
    }

    make_empty_list(&bom->texture_swapped);

    bom->texture_heap = driCreateTextureHeap(0,
                                             bom,
                                             scrn->texSize[0],
                                             12,
                                             RADEON_NR_TEX_REGIONS,
                                             (drmTextureRegionPtr)scrn->sarea->tex_list[0],
                                             &scrn->sarea->tex_age[0],
                                             &bom->texture_swapped,
                                             sizeof(struct bo_legacy_texture_object),
                                             &bo_legacy_tobj_destroy);
    bom->texture_offset = scrn->texOffset[0];

    bom->base.funcs = &bo_legacy_funcs;
    bom->base.fd = scrn->driScreen->fd;
    bom->bos.next = NULL;
    bom->bos.prev = NULL;
    bom->pending_bos.pprev = &bom->pending_bos;
    bom->pending_bos.pnext = NULL;
    bom->screen = scrn;
    bom->fb_location = scrn->fbLocation;
    bom->nhandle = 1;
    bom->cfree_handles = 0;
    bom->nfree_handles = 0x400;
    bom->free_handles = (uint32_t*)malloc(bom->nfree_handles * sizeof(uint32_t));
    if (bom->free_handles == NULL) {
        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
        return NULL;
    }

    /* biggest framebuffer size */
    size = 4096*4096*4;

    /* allocate front */
#ifdef RADEON_DEBUG_BO
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->frontOffset, "FRONT BUF");
#else
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->frontOffset);
#endif /* RADEON_DEBUG_BO */
    if (!bo) {
        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
        return NULL;
    }
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
    }

    /* allocate back */
#ifdef RADEON_DEBUG_BO
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->backOffset, "BACK BUF");
#else
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->backOffset);
#endif /* RADEON_DEBUG_BO */
    if (!bo) {
        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
        return NULL;
    }
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
    }

    /* allocate depth */
#ifdef RADEON_DEBUG_BO
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->depthOffset, "Z BUF");
#else
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->depthOffset);
#endif /* RADEON_DEBUG_BO */
    if (!bo) {
        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
        return NULL;
    }
    bo->base.flags = 0;
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags |= RADEON_BO_FLAGS_MACRO_TILE;
        bo->base.flags |= RADEON_BO_FLAGS_MICRO_TILE;
    }
    return (struct radeon_bo_manager*)bom;
}

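/* Exported helpers: age the texture heap, report how much VRAM aperture a
 * buffer will need when validated (zero for GTT and static buffers, which
 * are already resident), and tell whether a buffer is one of the static
 * framebuffer BOs.
 */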
void radeon_bo_legacy_texture_age(struct radeon_bo_manager *bom)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    DRI_AGE_TEXTURES(boml->texture_heap);
}

unsigned radeon_bo_legacy_relocs_size(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo_legacy->static_bo || (bo->domains & RADEON_GEM_DOMAIN_GTT)) {
        return 0;
    }
    return bo->size;
}

int radeon_legacy_bo_is_static(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    return bo_legacy->static_bo;
}