Merge branch 'mesa_7_5_branch'
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_bo_legacy.c
1 /*
2 * Copyright © 2008 Nicolai Haehnle
3 * Copyright © 2008 Dave Airlie
4 * Copyright © 2008 Jérôme Glisse
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
25 * of the Software.
26 */
27 /*
28 * Authors:
29 * Aapo Tahkola <aet@rasterburn.org>
30 * Nicolai Haehnle <prefect_@gmx.net>
31 * Dave Airlie
32 * Jérôme Glisse <glisse@freedesktop.org>
33 */
34 #include <stdio.h>
35 #include <stddef.h>
36 #include <stdint.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <unistd.h>
41 #include <sys/mman.h>
42 #include <sys/ioctl.h>
43 #include "xf86drm.h"
44 #include "texmem.h"
45 #include "main/simple_list.h"
46
47 #include "drm.h"
48 #include "radeon_drm.h"
49 #include "radeon_common.h"
50 #include "radeon_bocs_wrapper.h"
51 #include "radeon_macros.h"
52
53 /* no seriously texmem.c is this screwed up */
/* Pairs a DRI texture-heap object with the legacy BO it backs, so the
 * texmem destroy callback can find its way back to the BO. */
struct bo_legacy_texture_object {
    driTextureObject    base;
    struct bo_legacy    *parent;    /* BO whose VRAM placement this object tracks */
};
58
/* Legacy buffer object. `base` must stay first: every entry point casts
 * struct radeon_bo* to struct bo_legacy*. */
struct bo_legacy {
    struct radeon_bo    base;
    int                 map_count;      /* nesting depth of bo_map()/bo_unmap() */
    uint32_t            pending;        /* GPU age this BO's last use fences on */
    int                 is_pending;     /* pending-list refs (one per radeon_bo_legacy_pending) */
    int                 static_bo;      /* 1 for fixed front/back/depth buffers */
    uint32_t            offset;         /* card-relative address of the storage */
    struct bo_legacy_texture_object *tobj; /* VRAM heap block; NULL until first validate */
    int                 validated;      /* offset usable for CS relocation */
    int                 dirty;          /* CPU copy newer than VRAM copy */
    void                *ptr;           /* CPU view: malloc'ed, GART map, or FB map */
    struct bo_legacy    *next, *prev;   /* links on bo_manager_legacy.bos */
    struct bo_legacy    *pnext, *pprev; /* links on bo_manager_legacy.pending_bos */
};
73
/* Legacy BO manager: handle allocator, global BO list, fence/pending
 * list and the DRI texture heap used to place BOs in VRAM.
 * `base` must stay first: the public API casts radeon_bo_manager*. */
struct bo_manager_legacy {
    struct radeon_bo_manager base;
    unsigned            nhandle;        /* next never-issued handle value */
    unsigned            nfree_handles;  /* capacity of free_handles[] */
    unsigned            cfree_handles;  /* live entries in free_handles[] */
    uint32_t            current_age;    /* last GPU age read from hardware */
    struct bo_legacy    bos;            /* head of the NULL-terminated BO list */
    struct bo_legacy    pending_bos;    /* head of the fenced/pending list */
    uint32_t            fb_location;    /* card address of the framebuffer */
    uint32_t            texture_offset; /* card address of the texture heap */
    unsigned            dma_alloc_size; /* bytes currently allocated from GART */
    uint32_t            dma_buf_count;  /* number of live GART BOs */
    unsigned            cpendings;      /* number of BOs on the pending list */
    driTextureObject    texture_swapped;
    driTexHeap          *texture_heap;
    struct radeon_screen *screen;
    unsigned            *free_handles;  /* stack of recycled handles */
};
92
93 static void bo_legacy_tobj_destroy(void *data, driTextureObject *t)
94 {
95 struct bo_legacy_texture_object *tobj = (struct bo_legacy_texture_object *)t;
96
97 if (tobj->parent) {
98 tobj->parent->tobj = NULL;
99 tobj->parent->validated = 0;
100 }
101 }
102
103 static void inline clean_handles(struct bo_manager_legacy *bom)
104 {
105 while (bom->cfree_handles > 0 &&
106 !bom->free_handles[bom->cfree_handles - 1])
107 bom->cfree_handles--;
108
109 }
110 static int legacy_new_handle(struct bo_manager_legacy *bom, uint32_t *handle)
111 {
112 uint32_t tmp;
113
114 *handle = 0;
115 if (bom->nhandle == 0xFFFFFFFF) {
116 return -EINVAL;
117 }
118 if (bom->cfree_handles > 0) {
119 tmp = bom->free_handles[--bom->cfree_handles];
120 clean_handles(bom);
121 } else {
122 bom->cfree_handles = 0;
123 tmp = bom->nhandle++;
124 }
125 assert(tmp);
126 *handle = tmp;
127 return 0;
128 }
129
130 static int legacy_free_handle(struct bo_manager_legacy *bom, uint32_t handle)
131 {
132 uint32_t *handles;
133
134 if (!handle) {
135 return 0;
136 }
137 if (handle == (bom->nhandle - 1)) {
138 int i;
139
140 bom->nhandle--;
141 for (i = bom->cfree_handles - 1; i >= 0; i--) {
142 if (bom->free_handles[i] == (bom->nhandle - 1)) {
143 bom->nhandle--;
144 bom->free_handles[i] = 0;
145 }
146 }
147 clean_handles(bom);
148 return 0;
149 }
150 if (bom->cfree_handles < bom->nfree_handles) {
151 bom->free_handles[bom->cfree_handles++] = handle;
152 return 0;
153 }
154 bom->nfree_handles += 0x100;
155 handles = (uint32_t*)realloc(bom->free_handles, bom->nfree_handles * 4);
156 if (handles == NULL) {
157 bom->nfree_handles -= 0x100;
158 return -ENOMEM;
159 }
160 bom->free_handles = handles;
161 bom->free_handles[bom->cfree_handles++] = handle;
162 return 0;
163 }
164
/* Refresh boml->current_age with the GPU's last-completed age.
 * R300-class chips report it through the RADEON_PARAM_LAST_CLEAR
 * getparam ioctl; older chips expose it in scratch register 3 read
 * through the mapped MMIO aperture. Exits the process on ioctl failure.
 */
static void legacy_get_current_age(struct bo_manager_legacy *boml)
{
    drm_radeon_getparam_t gp;
    unsigned char *RADEONMMIO = NULL;   /* name required by the INREG() macro */
    int r;

    if (IS_R300_CLASS(boml->screen)) {
        gp.param = RADEON_PARAM_LAST_CLEAR;
        gp.value = (int *)&boml->current_age;
        r = drmCommandWriteRead(boml->base.fd, DRM_RADEON_GETPARAM,
                                &gp, sizeof(gp));
        if (r) {
            fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, r);
            exit(1);
        }
    } else {
        RADEONMMIO = boml->screen->mmio.map;
        /* NOTE(review): current_age is written twice -- first from the
         * SAREA scratch copy, then overwritten by the direct register
         * read. The MMIO read appears authoritative; confirm whether
         * the scratch read is needed at all. */
        boml->current_age = boml->screen->scratch[3];
        boml->current_age = INREG(RADEON_GUI_SCRATCH_REG3);
    }
}
186
/* Check whether a BO's fence age has passed and, if so, retire it:
 * unlink it from the pending list and drop the references the pending
 * list held (one per radeon_bo_legacy_pending() call).
 * Returns 1 while the BO is still pending, 0 once it has retired.
 * NOTE: retiring may free the BO via the unrefs, so callers must not
 * touch it after a 0 return unless they hold their own reference.
 */
static int legacy_is_pending(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo_legacy->is_pending <= 0) {
        bo_legacy->is_pending = 0;
        return 0;
    }
    if (boml->current_age >= bo_legacy->pending) {
        /* Unlink from the pending list; pprev is always valid (the
         * list is headed by pending_bos), pnext is NULL at the tail. */
        if (boml->pending_bos.pprev == bo_legacy) {
            boml->pending_bos.pprev = bo_legacy->pprev;
        }
        bo_legacy->pprev->pnext = bo_legacy->pnext;
        if (bo_legacy->pnext) {
            bo_legacy->pnext->pprev = bo_legacy->pprev;
        }
        assert(bo_legacy->is_pending <= bo->cref);
        /* Drop one reference per pending submission; stop early if the
         * last unref destroyed the BO. */
        while (bo_legacy->is_pending--) {
            bo = radeon_bo_unref(bo);
            if (!bo)
                break;
        }
        if (bo)
            bo_legacy->is_pending = 0;
        boml->cpendings--;
        return 0;
    }
    return 1;
}
217
218 static int legacy_wait_pending(struct radeon_bo *bo)
219 {
220 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
221 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
222
223 if (!bo_legacy->is_pending) {
224 return 0;
225 }
226 /* FIXME: lockup and userspace busy looping that's all the folks */
227 legacy_get_current_age(boml);
228 while (legacy_is_pending(bo)) {
229 usleep(10);
230 legacy_get_current_age(boml);
231 }
232 return 0;
233 }
234
235 static void legacy_track_pending(struct bo_manager_legacy *boml, int debug)
236 {
237 struct bo_legacy *bo_legacy;
238 struct bo_legacy *next;
239
240 legacy_get_current_age(boml);
241 bo_legacy = boml->pending_bos.pnext;
242 while (bo_legacy) {
243 if (debug)
244 fprintf(stderr,"pending %p %d %d %d\n", bo_legacy, bo_legacy->base.size,
245 boml->current_age, bo_legacy->pending);
246 next = bo_legacy->pnext;
247 if (legacy_is_pending(&(bo_legacy->base))) {
248 }
249 bo_legacy = next;
250 }
251 }
252
253 static int legacy_wait_any_pending(struct bo_manager_legacy *boml)
254 {
255 struct bo_legacy *bo_legacy;
256
257 legacy_get_current_age(boml);
258 bo_legacy = boml->pending_bos.pnext;
259 if (!bo_legacy)
260 return -1;
261 legacy_wait_pending(&bo_legacy->base);
262 return 0;
263 }
264
265 static void legacy_kick_all_buffers(struct bo_manager_legacy *boml)
266 {
267 struct bo_legacy *legacy;
268
269 legacy = boml->bos.next;
270 while (legacy != &boml->bos) {
271 if (legacy->tobj) {
272 if (legacy->validated) {
273 driDestroyTextureObject(&legacy->tobj->base);
274 legacy->tobj = 0;
275 legacy->validated = 0;
276 }
277 }
278 legacy = legacy->next;
279 }
280 }
281
282 static struct bo_legacy *bo_allocate(struct bo_manager_legacy *boml,
283 uint32_t size,
284 uint32_t alignment,
285 uint32_t domains,
286 uint32_t flags)
287 {
288 struct bo_legacy *bo_legacy;
289 static int pgsize;
290
291 if (pgsize == 0)
292 pgsize = getpagesize() - 1;
293
294 size = (size + pgsize) & ~pgsize;
295
296 bo_legacy = (struct bo_legacy*)calloc(1, sizeof(struct bo_legacy));
297 if (bo_legacy == NULL) {
298 return NULL;
299 }
300 bo_legacy->base.bom = (struct radeon_bo_manager*)boml;
301 bo_legacy->base.handle = 0;
302 bo_legacy->base.size = size;
303 bo_legacy->base.alignment = alignment;
304 bo_legacy->base.domains = domains;
305 bo_legacy->base.flags = flags;
306 bo_legacy->base.ptr = NULL;
307 bo_legacy->map_count = 0;
308 bo_legacy->next = NULL;
309 bo_legacy->prev = NULL;
310 bo_legacy->pnext = NULL;
311 bo_legacy->pprev = NULL;
312 bo_legacy->next = boml->bos.next;
313 bo_legacy->prev = &boml->bos;
314 boml->bos.next = bo_legacy;
315 if (bo_legacy->next) {
316 bo_legacy->next->prev = bo_legacy;
317 }
318 return bo_legacy;
319 }
320
/* Back a GTT-domain BO with a chunk of the GART texture region via the
 * DRM_RADEON_ALLOC ioctl. On success fills ptr (CPU view inside the
 * gartTextures mapping) and offset (card address), rounds bo->size up
 * to 4KiB and updates the manager's DMA accounting. On failure leaves
 * ptr NULL and returns the ioctl error.
 */
static int bo_dma_alloc(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    drm_radeon_mem_alloc_t alloc;
    unsigned size;
    int base_offset;
    int r;

    /* align size on 4Kb */
    size = (((4 * 1024) - 1) + bo->size) & ~((4 * 1024) - 1);
    alloc.region = RADEON_MEM_REGION_GART;
    alloc.alignment = bo_legacy->base.alignment;
    alloc.size = size;
    alloc.region_offset = &base_offset;    /* out-param: offset within the region */
    r = drmCommandWriteRead(bo->bom->fd,
                            DRM_RADEON_ALLOC,
                            &alloc,
                            sizeof(alloc));
    if (r) {
        /* ptr is set to NULL if dma allocation failed */
        bo_legacy->ptr = NULL;
        return r;
    }
    bo_legacy->ptr = boml->screen->gartTextures.map + base_offset;
    bo_legacy->offset = boml->screen->gart_texture_offset + base_offset;
    bo->size = size;
    boml->dma_alloc_size += size;
    boml->dma_buf_count++;
    return 0;
}
352
/* Release a GTT-domain BO's GART chunk via the DRM_RADEON_FREE ioctl.
 * A NULL ptr means the DMA allocation never succeeded, so there is
 * nothing to free. Returns 0 on success or the ioctl error.
 */
static int bo_dma_free(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    drm_radeon_mem_free_t memfree;
    int r;

    if (bo_legacy->ptr == NULL) {
        /* ptr is set to NULL if dma allocation failed */
        return 0;
    }
    legacy_get_current_age(boml);
    memfree.region = RADEON_MEM_REGION_GART;
    /* convert the card address back to a region-relative offset */
    memfree.region_offset = bo_legacy->offset;
    memfree.region_offset -= boml->screen->gart_texture_offset;
    r = drmCommandWrite(boml->base.fd,
                        DRM_RADEON_FREE,
                        &memfree,
                        sizeof(memfree));
    if (r) {
        fprintf(stderr, "Failed to free bo[%p] at %08x\n",
                &bo_legacy->base, memfree.region_offset);
        fprintf(stderr, "ret = %s\n", strerror(-r));
        return r;
    }
    /* base.size was rounded up by bo_dma_alloc, matching the += there */
    boml->dma_alloc_size -= bo_legacy->base.size;
    boml->dma_buf_count--;
    return 0;
}
382
383 static void bo_free(struct bo_legacy *bo_legacy)
384 {
385 struct bo_manager_legacy *boml;
386
387 if (bo_legacy == NULL) {
388 return;
389 }
390 boml = (struct bo_manager_legacy *)bo_legacy->base.bom;
391 bo_legacy->prev->next = bo_legacy->next;
392 if (bo_legacy->next) {
393 bo_legacy->next->prev = bo_legacy->prev;
394 }
395 if (!bo_legacy->static_bo) {
396 legacy_free_handle(boml, bo_legacy->base.handle);
397 if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
398 /* dma buffers */
399 bo_dma_free(&bo_legacy->base);
400 } else {
401 driDestroyTextureObject(&bo_legacy->tobj->base);
402 bo_legacy->tobj = NULL;
403 /* free backing store */
404 free(bo_legacy->ptr);
405 }
406 }
407 memset(bo_legacy, 0 , sizeof(struct bo_legacy));
408 free(bo_legacy);
409 }
410
411 static struct radeon_bo *bo_open(struct radeon_bo_manager *bom,
412 uint32_t handle,
413 uint32_t size,
414 uint32_t alignment,
415 uint32_t domains,
416 uint32_t flags)
417 {
418 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
419 struct bo_legacy *bo_legacy;
420 int r;
421
422 if (handle) {
423 bo_legacy = boml->bos.next;
424 while (bo_legacy) {
425 if (bo_legacy->base.handle == handle) {
426 radeon_bo_ref(&(bo_legacy->base));
427 return (struct radeon_bo*)bo_legacy;
428 }
429 bo_legacy = bo_legacy->next;
430 }
431 return NULL;
432 }
433
434 bo_legacy = bo_allocate(boml, size, alignment, domains, flags);
435 bo_legacy->static_bo = 0;
436 r = legacy_new_handle(boml, &bo_legacy->base.handle);
437 if (r) {
438 bo_free(bo_legacy);
439 return NULL;
440 }
441 if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
442 retry:
443 legacy_track_pending(boml, 0);
444 /* dma buffers */
445
446 r = bo_dma_alloc(&(bo_legacy->base));
447 if (r) {
448 if (legacy_wait_any_pending(boml) == -1) {
449 bo_free(bo_legacy);
450 return NULL;
451 }
452 goto retry;
453 return NULL;
454 }
455 } else {
456 bo_legacy->ptr = malloc(bo_legacy->base.size);
457 if (bo_legacy->ptr == NULL) {
458 bo_free(bo_legacy);
459 return NULL;
460 }
461 }
462 radeon_bo_ref(&(bo_legacy->base));
463 return (struct radeon_bo*)bo_legacy;
464 }
465
/* radeon_bo_funcs.ref hook: intentionally empty -- reference counting
 * is presumably handled by the radeon_bo_ref() wrapper (confirm in
 * radeon_bocs_wrapper.h); the legacy manager needs no per-BO work. */
static void bo_ref(struct radeon_bo *bo)
{
}
469
/* radeon_bo_funcs.unref hook, called after cref was decremented.
 * When the count drops to zero the BO is unlinked from the manager's
 * list and, unless it still sits on the pending list, freed.
 * Returns NULL once the BO is gone, bo otherwise.
 * NOTE(review): a still-pending BO is unlinked here but freed later by
 * bo_free(), which unlinks again using the BO's stale prev/next
 * pointers -- looks fragile; verify the pending teardown path.
 */
static struct radeon_bo *bo_unref(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo->cref <= 0) {
        bo_legacy->prev->next = bo_legacy->next;
        if (bo_legacy->next) {
            bo_legacy->next->prev = bo_legacy->prev;
        }
        if (!bo_legacy->is_pending) {
            bo_free(bo_legacy);
        }
        return NULL;
    }
    return bo;
}
486
/* radeon_bo_funcs.map: make a BO's contents CPU-accessible.
 * Waits for any pending GPU use, marks the BO dirty/unvalidated,
 * bumps the nesting map_count and exposes the CPU pointer via
 * bo->ptr. Always returns 0; `write` is currently unused.
 */
static int bo_map(struct radeon_bo *bo, int write)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    legacy_wait_pending(bo);
    bo_legacy->validated = 0;
    bo_legacy->dirty = 1;
    bo_legacy->map_count++;
    bo->ptr = bo_legacy->ptr;
    /* Read the first pixel in the frame buffer. This should
     * be a noop, right? In fact without this conform fails as reading
     * from the framebuffer sometimes produces old results -- the
     * on-card read cache gets mixed up and doesn't notice that the
     * framebuffer has been updated.
     *
     * Note that we should probably be reading some otherwise unused
     * region of VRAM, otherwise we might get incorrect results when
     * reading pixels from the top left of the screen.
     *
     * I found this problem on an R420 with glean's texCube test.
     * Note that the R200 span code also *writes* the first pixel in the
     * framebuffer, but I've found this to be unnecessary.
     * -- Nicolai Hähnle, June 2008
     */
    if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
        int p;
        /* volatile forces the read; p is deliberately left unused */
        volatile int *buf = (int*)boml->screen->driScreen->pFB;
        p = *buf;
    }
    return 0;
}
519
520 static int bo_unmap(struct radeon_bo *bo)
521 {
522 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
523
524 if (--bo_legacy->map_count > 0) {
525 return 0;
526 }
527 bo->ptr = NULL;
528 return 0;
529 }
530
/* vtable installed as base.funcs in the ctor; the generic radeon_bo_*
 * entry points dispatch through it (open, ref, unref, map, unmap). */
static struct radeon_bo_funcs bo_legacy_funcs = {
    bo_open,
    bo_ref,
    bo_unref,
    bo_map,
    bo_unmap
};
538
/* Ensure a VRAM BO has a texture-heap block and that VRAM holds its
 * current contents. On first validation a heap block is allocated
 * (retrying while other BOs are pending and may retire to free
 * space); when the BO is dirty its CPU copy is uploaded with the
 * DRM_RADEON_TEXTURE blit ioctl.
 * Returns 0 on success, -1 if no heap block could be obtained.
 * soffset/eoffset are currently unused here (filled in by the caller).
 */
static int bo_vram_validate(struct radeon_bo *bo,
                            uint32_t *soffset,
                            uint32_t *eoffset)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    int r;
    int retry_count = 0, pending_retry = 0;

    if (!bo_legacy->tobj) {
        bo_legacy->tobj = CALLOC(sizeof(struct bo_legacy_texture_object));
        bo_legacy->tobj->parent = bo_legacy;
        make_empty_list(&bo_legacy->tobj->base);
        bo_legacy->tobj->base.totalSize = bo->size;
    retry:
        r = driAllocateTexture(&boml->texture_heap, 1,
                               &bo_legacy->tobj->base);
        if (r) {
            /* Heap full: retire pending BOs and retry up to three
             * times before giving up.
             * NOTE(review): if cpendings is already 0 the while body
             * never runs and control falls through with r != 0 and
             * memBlock still NULL -- only the assert below would catch
             * it; confirm this path cannot occur in practice. */
            pending_retry = 0;
            while(boml->cpendings && pending_retry++ < 10000) {
                legacy_track_pending(boml, 0);
                retry_count++;
                if (retry_count > 2) {
                    free(bo_legacy->tobj);
                    bo_legacy->tobj = NULL;
                    fprintf(stderr, "Ouch! vram_validate failed %d\n", r);
                    return -1;
                }
                goto retry;
            }
        }
        bo_legacy->offset = boml->texture_offset +
                            bo_legacy->tobj->base.memBlock->ofs;
        bo_legacy->dirty = 1;
    }

    assert(bo_legacy->tobj->base.memBlock);

    if (bo_legacy->tobj)
        driUpdateTextureLRU(&bo_legacy->tobj->base);

    if (bo_legacy->dirty || bo_legacy->tobj->base.dirty_images[0]) {
        /* Copy to VRAM using a blit.
         * All memory is 4K aligned. We're using 1024 pixels wide blits.
         */
        drm_radeon_texture_t tex;
        drm_radeon_tex_image_t tmp;
        int ret;

        tex.offset = bo_legacy->offset;
        tex.image = &tmp;
        assert(!(tex.offset & 1023));

        tmp.x = 0;
        tmp.y = 0;
        /* small BOs upload as one row of 32-bit pixels; larger ones as
         * a 1024-pixel-wide (4KiB-per-row) image */
        if (bo->size < 4096) {
            tmp.width = (bo->size + 3) / 4;
            tmp.height = 1;
        } else {
            tmp.width = 1024;
            tmp.height = (bo->size + 4095) / 4096;
        }
        tmp.data = bo_legacy->ptr;
        tex.format = RADEON_TXFORMAT_ARGB8888;
        tex.width = tmp.width;
        tex.height = tmp.height;
        tex.pitch = MAX2(tmp.width / 16, 1);
        /* the kernel may ask us to resubmit the upload */
        do {
            ret = drmCommandWriteRead(bo->bom->fd,
                                      DRM_RADEON_TEXTURE,
                                      &tex,
                                      sizeof(drm_radeon_texture_t));
            if (ret) {
                if (RADEON_DEBUG & DEBUG_IOCTL)
                    fprintf(stderr, "DRM_RADEON_TEXTURE: again!\n");
                usleep(1);
            }
        } while (ret == -EAGAIN);
        bo_legacy->dirty = 0;
        bo_legacy->tobj->base.dirty_images[0] = 0;
    }
    return 0;
}
622
623 /*
624 * radeon_bo_legacy_validate -
625 * returns:
626 * 0 - all good
627 * -EINVAL - mapped buffer can't be validated
628 * -EAGAIN - restart validation we've kicked all the buffers out
629 */
630 int radeon_bo_legacy_validate(struct radeon_bo *bo,
631 uint32_t *soffset,
632 uint32_t *eoffset)
633 {
634 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
635 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
636 int r;
637 int retries = 0;
638
639 if (bo_legacy->map_count) {
640 fprintf(stderr, "bo(%p, %d) is mapped (%d) can't valide it.\n",
641 bo, bo->size, bo_legacy->map_count);
642 return -EINVAL;
643 }
644 if (bo_legacy->static_bo || bo_legacy->validated) {
645 *soffset = bo_legacy->offset;
646 *eoffset = bo_legacy->offset + bo->size;
647 return 0;
648 }
649 if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
650
651 r = bo_vram_validate(bo, soffset, eoffset);
652 if (r) {
653 legacy_track_pending(boml, 0);
654 legacy_kick_all_buffers(boml);
655 retries++;
656 if (retries == 2) {
657 fprintf(stderr,"legacy bo: failed to get relocations into aperture\n");
658 assert(0);
659 exit(-1);
660 }
661 return -EAGAIN;
662 }
663 }
664 *soffset = bo_legacy->offset;
665 *eoffset = bo_legacy->offset + bo->size;
666 bo_legacy->validated = 1;
667 return 0;
668 }
669
670 void radeon_bo_legacy_pending(struct radeon_bo *bo, uint32_t pending)
671 {
672 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
673 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
674
675 bo_legacy->pending = pending;
676 bo_legacy->is_pending++;
677 /* add to pending list */
678 radeon_bo_ref(bo);
679 if (bo_legacy->is_pending > 1) {
680 return;
681 }
682 bo_legacy->pprev = boml->pending_bos.pprev;
683 bo_legacy->pnext = NULL;
684 bo_legacy->pprev->pnext = bo_legacy;
685 boml->pending_bos.pprev = bo_legacy;
686 boml->cpendings++;
687 }
688
689 void radeon_bo_manager_legacy_dtor(struct radeon_bo_manager *bom)
690 {
691 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
692 struct bo_legacy *bo_legacy;
693
694 if (bom == NULL) {
695 return;
696 }
697 bo_legacy = boml->bos.next;
698 while (bo_legacy) {
699 struct bo_legacy *next;
700
701 next = bo_legacy->next;
702 bo_free(bo_legacy);
703 bo_legacy = next;
704 }
705 driDestroyTextureHeap(boml->texture_heap);
706 free(boml->free_handles);
707 free(boml);
708 }
709
710 static struct bo_legacy *radeon_legacy_bo_alloc_static(struct bo_manager_legacy *bom,
711 int size, uint32_t offset)
712 {
713 struct bo_legacy *bo;
714
715 bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
716 if (bo == NULL)
717 return NULL;
718 bo->static_bo = 1;
719 bo->offset = offset + bom->fb_location;
720 bo->base.handle = bo->offset;
721 bo->ptr = bom->screen->driScreen->pFB + offset;
722 if (bo->base.handle > bom->nhandle) {
723 bom->nhandle = bo->base.handle + 1;
724 }
725 radeon_bo_ref(&(bo->base));
726 return bo;
727 }
728
/* Create the legacy BO manager for a screen: sets up the handle
 * allocator, the DRI texture heap used for VRAM placement, and the
 * static BOs wrapping the front, back and depth buffers.
 * Returns NULL on allocation failure (partially built state is torn
 * down through the dtor).
 */
struct radeon_bo_manager *radeon_bo_manager_legacy_ctor(struct radeon_screen *scrn)
{
    struct bo_manager_legacy *bom;
    struct bo_legacy *bo;
    unsigned size;

    bom = (struct bo_manager_legacy*)
          calloc(1, sizeof(struct bo_manager_legacy));
    if (bom == NULL) {
        return NULL;
    }

    make_empty_list(&bom->texture_swapped);

    /* Texture heap shared with other clients through the SAREA.
     * NOTE(review): the driCreateTextureHeap() result is not checked
     * for NULL -- confirm it cannot fail here. */
    bom->texture_heap = driCreateTextureHeap(0,
                                             bom,
                                             scrn->texSize[0],
                                             12,
                                             RADEON_NR_TEX_REGIONS,
                                             (drmTextureRegionPtr)scrn->sarea->tex_list[0],
                                             &scrn->sarea->tex_age[0],
                                             &bom->texture_swapped,
                                             sizeof(struct bo_legacy_texture_object),
                                             &bo_legacy_tobj_destroy);
    bom->texture_offset = scrn->texOffset[0];

    bom->base.funcs = &bo_legacy_funcs;
    bom->base.fd = scrn->driScreen->fd;
    /* BO list is NULL-terminated; pending list is headed by pending_bos */
    bom->bos.next = NULL;
    bom->bos.prev = NULL;
    bom->pending_bos.pprev = &bom->pending_bos;
    bom->pending_bos.pnext = NULL;
    bom->screen = scrn;
    bom->fb_location = scrn->fbLocation;
    bom->nhandle = 1;    /* handle 0 is reserved as "invalid" */
    bom->cfree_handles = 0;
    bom->nfree_handles = 0x400;
    bom->free_handles = (uint32_t*)malloc(bom->nfree_handles * 4);
    if (bom->free_handles == NULL) {
        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
        return NULL;
    }

    /* biggest framebuffer size */
    size = 4096*4096*4;

    /* allocate front */
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->frontOffset);
    if (!bo) {
        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
        return NULL;
    }
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
    }

    /* allocate back */
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->backOffset);
    if (!bo) {
        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
        return NULL;
    }
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
    }

    /* allocate depth */
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->depthOffset);
    if (!bo) {
        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
        return NULL;
    }
    /* depth buffer gets both macro and micro tiling when enabled */
    bo->base.flags = 0;
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags |= RADEON_BO_FLAGS_MACRO_TILE;
        bo->base.flags |= RADEON_BO_FLAGS_MICRO_TILE;
    }
    return (struct radeon_bo_manager*)bom;
}
808
/* Run the DRI texture-heap aging macro over this manager's heap
 * (presumably expiring entries other clients have since reused --
 * see DRI_AGE_TEXTURES in texmem). */
void radeon_bo_legacy_texture_age(struct radeon_bo_manager *bom)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    DRI_AGE_TEXTURES(boml->texture_heap);
}
814
815 unsigned radeon_bo_legacy_relocs_size(struct radeon_bo *bo)
816 {
817 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
818
819 if (bo_legacy->static_bo || (bo->domains & RADEON_GEM_DOMAIN_GTT)) {
820 return 0;
821 }
822 return bo->size;
823 }
824
825 int radeon_legacy_bo_is_static(struct radeon_bo *bo)
826 {
827 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
828 return bo_legacy->static_bo;
829 }
830