/*
 * Copyright © 2008 Nicolai Haehnle
 * Copyright © 2008 Dave Airlie
 * Copyright © 2008 Jérôme Glisse
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *   Aapo Tahkola <aet@rasterburn.org>
 *   Nicolai Haehnle <prefect_@gmx.net>
 *   Dave Airlie
 *   Jérôme Glisse <glisse@freedesktop.org>
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include "xf86drm.h"
#include "texmem.h"
#include "main/simple_list.h"

#include "drm.h"
#include "radeon_drm.h"
#include "radeon_common.h"
#include "radeon_bocs_wrapper.h"
#include "radeon_macros.h"

/* no seriously texmem.c is this screwed up */
struct bo_legacy_texture_object {
    driTextureObject base;
    struct bo_legacy *parent;
};

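/* Wrapper around the generic radeon_bo.  Tracks how often the bo is mapped,
 * the GPU age it is pending on, its fixed card offset (used by static bos
 * such as front/back/depth), the texmem texture object backing a VRAM
 * placement, and its links in the global and pending bo lists.
 */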
struct bo_legacy {
    struct radeon_bo base;
    int map_count;
    uint32_t pending;
    int is_pending;
    int static_bo;
    uint32_t offset;
    struct bo_legacy_texture_object *tobj;
    int validated;
    int dirty;
    void *ptr;
    struct bo_legacy *next, *prev;
    struct bo_legacy *pnext, *pprev;
};

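/* Legacy (non-KMS) buffer manager.  Holds the fake handle allocator, the
 * list of all bos, the list of bos still pending on the GPU, the DRI texture
 * heap used for VRAM placements, and cached screen/framebuffer parameters.
 */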
struct bo_manager_legacy {
    struct radeon_bo_manager base;
    unsigned nhandle;
    unsigned nfree_handles;
    unsigned cfree_handles;
    uint32_t current_age;
    struct bo_legacy bos;
    struct bo_legacy pending_bos;
    uint32_t fb_location;
    uint32_t texture_offset;
    unsigned dma_alloc_size;
    uint32_t dma_buf_count;
    unsigned cpendings;
    driTextureObject texture_swapped;
    driTexHeap *texture_heap;
    struct radeon_screen *screen;
    unsigned *free_handles;
};

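/* texmem destroy callback: when the texture heap destroys (e.g. kicks out)
 * the texture object backing a bo, the bo forgets it and is marked as
 * needing re-validation before its next use.
 */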
static void bo_legacy_tobj_destroy(void *data, driTextureObject *t)
{
    struct bo_legacy_texture_object *tobj = (struct bo_legacy_texture_object *)t;

    if (tobj->parent) {
        tobj->parent->tobj = NULL;
        tobj->parent->validated = 0;
    }
}

static inline void clean_handles(struct bo_manager_legacy *bom)
{
    while (bom->cfree_handles > 0 &&
           !bom->free_handles[bom->cfree_handles - 1])
        bom->cfree_handles--;
}
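
/* Fake handle allocator: handles are never seen by the kernel, they only
 * have to be unique within this manager.  Freed handles are recycled from
 * the free_handles array before new ones are taken from nhandle.
 */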
static int legacy_new_handle(struct bo_manager_legacy *bom, uint32_t *handle)
{
    uint32_t tmp;

    *handle = 0;
    if (bom->nhandle == 0xFFFFFFFF) {
        return -EINVAL;
    }
    if (bom->cfree_handles > 0) {
        tmp = bom->free_handles[--bom->cfree_handles];
        clean_handles(bom);
    } else {
        bom->cfree_handles = 0;
        tmp = bom->nhandle++;
    }
    assert(tmp);
    *handle = tmp;
    return 0;
}

static int legacy_free_handle(struct bo_manager_legacy *bom, uint32_t handle)
{
    uint32_t *handles;

    if (!handle) {
        return 0;
    }
    if (handle == (bom->nhandle - 1)) {
        int i;

        bom->nhandle--;
        for (i = bom->cfree_handles - 1; i >= 0; i--) {
            if (bom->free_handles[i] == (bom->nhandle - 1)) {
                bom->nhandle--;
                bom->free_handles[i] = 0;
            }
        }
        clean_handles(bom);
        return 0;
    }
    if (bom->cfree_handles < bom->nfree_handles) {
        bom->free_handles[bom->cfree_handles++] = handle;
        return 0;
    }
    bom->nfree_handles += 0x100;
    handles = (uint32_t*)realloc(bom->free_handles, bom->nfree_handles * 4);
    if (handles == NULL) {
        bom->nfree_handles -= 0x100;
        return -ENOMEM;
    }
    bom->free_handles = handles;
    bom->free_handles[bom->cfree_handles++] = handle;
    return 0;
}

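/* Refresh boml->current_age with the last age value the GPU has written:
 * via the GETPARAM ioctl (RADEON_PARAM_LAST_CLEAR) on R300-class hardware,
 * or by reading the scratch register through the mapped MMIO aperture
 * otherwise.
 */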
static void legacy_get_current_age(struct bo_manager_legacy *boml)
{
    drm_radeon_getparam_t gp;
    unsigned char *RADEONMMIO = NULL;
    int r;

    if (IS_R300_CLASS(boml->screen)) {
        gp.param = RADEON_PARAM_LAST_CLEAR;
        gp.value = (int *)&boml->current_age;
        r = drmCommandWriteRead(boml->base.fd, DRM_RADEON_GETPARAM,
                                &gp, sizeof(gp));
        if (r) {
            fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, r);
            exit(1);
        }
    } else {
        RADEONMMIO = boml->screen->mmio.map;
        boml->current_age = boml->screen->scratch[3];
        boml->current_age = INREG(RADEON_GUI_SCRATCH_REG3);
    }
}

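/* Returns 1 if the bo is still in flight on the GPU.  Once its pending age
 * has been reached, the bo is unlinked from the pending list and the
 * references taken by radeon_bo_legacy_pending() are dropped.
 */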
static int legacy_is_pending(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo_legacy->is_pending <= 0) {
        bo_legacy->is_pending = 0;
        return 0;
    }
    if (boml->current_age >= bo_legacy->pending) {
        if (boml->pending_bos.pprev == bo_legacy) {
            boml->pending_bos.pprev = bo_legacy->pprev;
        }
        bo_legacy->pprev->pnext = bo_legacy->pnext;
        if (bo_legacy->pnext) {
            bo_legacy->pnext->pprev = bo_legacy->pprev;
        }
        assert(bo_legacy->is_pending <= bo->cref);
        while (bo_legacy->is_pending--) {
            bo = radeon_bo_unref(bo);
            if (!bo)
                break;
        }
        if (bo)
            bo_legacy->is_pending = 0;
        boml->cpendings--;
        return 0;
    }
    return 1;
}

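/* Busy-wait, polling the GPU age every 10 microseconds, until the bo is no
 * longer pending.  See the FIXME below: a GPU lockup turns this into an
 * endless userspace loop.
 */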
static int legacy_wait_pending(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (!bo_legacy->is_pending) {
        return 0;
    }
    /* FIXME: lockup and userspace busy looping that's all the folks */
    legacy_get_current_age(boml);
    while (legacy_is_pending(bo)) {
        usleep(10);
        legacy_get_current_age(boml);
    }
    return 0;
}

static void legacy_track_pending(struct bo_manager_legacy *boml, int debug)
{
    struct bo_legacy *bo_legacy;
    struct bo_legacy *next;

    legacy_get_current_age(boml);
    bo_legacy = boml->pending_bos.pnext;
    while (bo_legacy) {
        if (debug)
            fprintf(stderr, "pending %p %d %d %d\n", bo_legacy, bo_legacy->base.size,
                    boml->current_age, bo_legacy->pending);
        next = bo_legacy->pnext;
        if (legacy_is_pending(&(bo_legacy->base))) {
        }
        bo_legacy = next;
    }
}

static int legacy_wait_any_pending(struct bo_manager_legacy *boml)
{
    struct bo_legacy *bo_legacy;

    legacy_get_current_age(boml);
    bo_legacy = boml->pending_bos.pnext;
    if (!bo_legacy)
        return -1;
    legacy_wait_pending(&bo_legacy->base);
    return 0;
}

static void legacy_kick_all_buffers(struct bo_manager_legacy *boml)
{
    struct bo_legacy *legacy;

    /* the bo list is NULL terminated, not circular, so walk until NULL */
    legacy = boml->bos.next;
    while (legacy) {
        if (legacy->tobj) {
            if (legacy->validated) {
                driDestroyTextureObject(&legacy->tobj->base);
                legacy->tobj = NULL;
                legacy->validated = 0;
            }
        }
        legacy = legacy->next;
    }
}

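/* Allocate and link a new bo_legacy bookkeeping structure.  The size is
 * rounded up to a whole number of pages; no backing storage is attached
 * here, callers add it afterwards (GART dma, malloc'ed system memory, or a
 * fixed framebuffer offset for static bos).
 */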
static struct bo_legacy *bo_allocate(struct bo_manager_legacy *boml,
                                     uint32_t size,
                                     uint32_t alignment,
                                     uint32_t domains,
                                     uint32_t flags)
{
    struct bo_legacy *bo_legacy;
    static int pgsize;

    if (pgsize == 0)
        pgsize = getpagesize() - 1;

    size = (size + pgsize) & ~pgsize;

    bo_legacy = (struct bo_legacy*)calloc(1, sizeof(struct bo_legacy));
    if (bo_legacy == NULL) {
        return NULL;
    }
    bo_legacy->base.bom = (struct radeon_bo_manager*)boml;
    bo_legacy->base.handle = 0;
    bo_legacy->base.size = size;
    bo_legacy->base.alignment = alignment;
    bo_legacy->base.domains = domains;
    bo_legacy->base.flags = flags;
    bo_legacy->base.ptr = NULL;
    bo_legacy->map_count = 0;
    bo_legacy->next = NULL;
    bo_legacy->prev = NULL;
    bo_legacy->pnext = NULL;
    bo_legacy->pprev = NULL;
    bo_legacy->next = boml->bos.next;
    bo_legacy->prev = &boml->bos;
    boml->bos.next = bo_legacy;
    if (bo_legacy->next) {
        bo_legacy->next->prev = bo_legacy;
    }
    return bo_legacy;
}

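/* Back a GTT bo with GART memory from the DRM_RADEON_ALLOC region allocator.
 * On success the bo points into the mapped GART texture aperture and its
 * size is bumped to the 4KB-aligned allocation size.
 */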
static int bo_dma_alloc(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    drm_radeon_mem_alloc_t alloc;
    unsigned size;
    int base_offset;
    int r;

    /* align size on 4Kb */
    size = (((4 * 1024) - 1) + bo->size) & ~((4 * 1024) - 1);
    alloc.region = RADEON_MEM_REGION_GART;
    alloc.alignment = bo_legacy->base.alignment;
    alloc.size = size;
    alloc.region_offset = &base_offset;
    r = drmCommandWriteRead(bo->bom->fd,
                            DRM_RADEON_ALLOC,
                            &alloc,
                            sizeof(alloc));
    if (r) {
        /* ptr is set to NULL if dma allocation failed */
        bo_legacy->ptr = NULL;
        return r;
    }
    bo_legacy->ptr = boml->screen->gartTextures.map + base_offset;
    bo_legacy->offset = boml->screen->gart_texture_offset + base_offset;
    bo->size = size;
    boml->dma_alloc_size += size;
    boml->dma_buf_count++;
    return 0;
}

static int bo_dma_free(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    drm_radeon_mem_free_t memfree;
    int r;

    if (bo_legacy->ptr == NULL) {
        /* ptr is set to NULL if dma allocation failed */
        return 0;
    }
    legacy_get_current_age(boml);
    memfree.region = RADEON_MEM_REGION_GART;
    memfree.region_offset = bo_legacy->offset;
    memfree.region_offset -= boml->screen->gart_texture_offset;
    r = drmCommandWrite(boml->base.fd,
                        DRM_RADEON_FREE,
                        &memfree,
                        sizeof(memfree));
    if (r) {
        fprintf(stderr, "Failed to free bo[%p] at %08x\n",
                &bo_legacy->base, memfree.region_offset);
        fprintf(stderr, "ret = %s\n", strerror(-r));
        return r;
    }
    boml->dma_alloc_size -= bo_legacy->base.size;
    boml->dma_buf_count--;
    return 0;
}

static void bo_free(struct bo_legacy *bo_legacy)
{
    struct bo_manager_legacy *boml;

    if (bo_legacy == NULL) {
        return;
    }
    boml = (struct bo_manager_legacy *)bo_legacy->base.bom;
    bo_legacy->prev->next = bo_legacy->next;
    if (bo_legacy->next) {
        bo_legacy->next->prev = bo_legacy->prev;
    }
    if (!bo_legacy->static_bo) {
        legacy_free_handle(boml, bo_legacy->base.handle);
        if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
            /* dma buffers */
            bo_dma_free(&bo_legacy->base);
        } else {
            driDestroyTextureObject(&bo_legacy->tobj->base);
            bo_legacy->tobj = NULL;
            /* free backing store */
            free(bo_legacy->ptr);
        }
    }
    memset(bo_legacy, 0, sizeof(struct bo_legacy));
    free(bo_legacy);
}

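/* radeon_bo_funcs.bo_open: with a non-zero handle, look up and reference an
 * existing bo; with handle == 0, create a new one.  GTT bos are backed by
 * GART dma memory (retrying after waiting on pending bos if the GART is
 * full), everything else by malloc'ed system memory that is blitted into
 * VRAM at validation time.
 */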
static struct radeon_bo *bo_open(struct radeon_bo_manager *bom,
                                 uint32_t handle,
                                 uint32_t size,
                                 uint32_t alignment,
                                 uint32_t domains,
                                 uint32_t flags)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    struct bo_legacy *bo_legacy;
    int r;

    if (handle) {
        bo_legacy = boml->bos.next;
        while (bo_legacy) {
            if (bo_legacy->base.handle == handle) {
                radeon_bo_ref(&(bo_legacy->base));
                return (struct radeon_bo*)bo_legacy;
            }
            bo_legacy = bo_legacy->next;
        }
        return NULL;
    }

    bo_legacy = bo_allocate(boml, size, alignment, domains, flags);
    if (bo_legacy == NULL) {
        return NULL;
    }
    bo_legacy->static_bo = 0;
    r = legacy_new_handle(boml, &bo_legacy->base.handle);
    if (r) {
        bo_free(bo_legacy);
        return NULL;
    }
    if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
    retry:
        legacy_track_pending(boml, 0);
        /* dma buffers */
        r = bo_dma_alloc(&(bo_legacy->base));
        if (r) {
            if (legacy_wait_any_pending(boml) == -1) {
                bo_free(bo_legacy);
                return NULL;
            }
            goto retry;
        }
    } else {
        bo_legacy->ptr = malloc(bo_legacy->base.size);
        if (bo_legacy->ptr == NULL) {
            bo_free(bo_legacy);
            return NULL;
        }
    }
    radeon_bo_ref(&(bo_legacy->base));
    return (struct radeon_bo*)bo_legacy;
}

static void bo_ref(struct radeon_bo *bo)
{
}

static struct radeon_bo *bo_unref(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo->cref <= 0) {
        bo_legacy->prev->next = bo_legacy->next;
        if (bo_legacy->next) {
            bo_legacy->next->prev = bo_legacy->prev;
        }
        if (!bo_legacy->is_pending) {
            bo_free(bo_legacy);
        }
        return NULL;
    }
    return bo;
}

static int bo_map(struct radeon_bo *bo, int write)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    legacy_wait_pending(bo);
    bo_legacy->validated = 0;
    bo_legacy->dirty = 1;
    bo_legacy->map_count++;
    bo->ptr = bo_legacy->ptr;
    /* Read the first pixel in the frame buffer.  This should
     * be a noop, right?  In fact without this conform fails as reading
     * from the framebuffer sometimes produces old results -- the
     * on-card read cache gets mixed up and doesn't notice that the
     * framebuffer has been updated.
     *
     * Note that we should probably be reading some otherwise unused
     * region of VRAM, otherwise we might get incorrect results when
     * reading pixels from the top left of the screen.
     *
     * I found this problem on an R420 with glean's texCube test.
     * Note that the R200 span code also *writes* the first pixel in the
     * framebuffer, but I've found this to be unnecessary.
     *  -- Nicolai Hähnle, June 2008
     */
    if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
        int p;
        volatile int *buf = (int*)boml->screen->driScreen->pFB;
        p = *buf;
    }
    return 0;
}

static int bo_unmap(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (--bo_legacy->map_count > 0) {
        return 0;
    }
    bo->ptr = NULL;
    return 0;
}

static int bo_is_static(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    return bo_legacy->static_bo;
}

static struct radeon_bo_funcs bo_legacy_funcs = {
    bo_open,
    bo_ref,
    bo_unref,
    bo_map,
    bo_unmap,
    NULL,
    bo_is_static,
};

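/* Place a system-memory bo into VRAM: carve space out of the DRI texture
 * heap (retiring pending buffers if the first attempt fails) and, when the
 * contents are dirty, upload them with a DRM_RADEON_TEXTURE blit done as
 * 1024-pixel-wide ARGB8888 rows.
 */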
static int bo_vram_validate(struct radeon_bo *bo,
                            uint32_t *soffset,
                            uint32_t *eoffset)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    int r;
    int retry_count = 0, pending_retry = 0;

    if (!bo_legacy->tobj) {
        bo_legacy->tobj = CALLOC(sizeof(struct bo_legacy_texture_object));
        bo_legacy->tobj->parent = bo_legacy;
        make_empty_list(&bo_legacy->tobj->base);
        bo_legacy->tobj->base.totalSize = bo->size;
    retry:
        r = driAllocateTexture(&boml->texture_heap, 1,
                               &bo_legacy->tobj->base);
        if (r) {
            pending_retry = 0;
            while (boml->cpendings && pending_retry++ < 10000) {
                legacy_track_pending(boml, 0);
                retry_count++;
                if (retry_count > 2) {
                    free(bo_legacy->tobj);
                    bo_legacy->tobj = NULL;
                    fprintf(stderr, "Ouch! vram_validate failed %d\n", r);
                    return -1;
                }
                goto retry;
            }
        }
        bo_legacy->offset = boml->texture_offset +
                            bo_legacy->tobj->base.memBlock->ofs;
        bo_legacy->dirty = 1;
    }

    assert(bo_legacy->tobj->base.memBlock);

    if (bo_legacy->tobj)
        driUpdateTextureLRU(&bo_legacy->tobj->base);

    if (bo_legacy->dirty || bo_legacy->tobj->base.dirty_images[0]) {
        /* Copy to VRAM using a blit.
         * All memory is 4K aligned.  We're using 1024 pixels wide blits.
         */
        drm_radeon_texture_t tex;
        drm_radeon_tex_image_t tmp;
        int ret;

        tex.offset = bo_legacy->offset;
        tex.image = &tmp;
        assert(!(tex.offset & 1023));

        tmp.x = 0;
        tmp.y = 0;
        if (bo->size < 4096) {
            tmp.width = (bo->size + 3) / 4;
            tmp.height = 1;
        } else {
            tmp.width = 1024;
            tmp.height = (bo->size + 4095) / 4096;
        }
        tmp.data = bo_legacy->ptr;
        tex.format = RADEON_TXFORMAT_ARGB8888;
        tex.width = tmp.width;
        tex.height = tmp.height;
        tex.pitch = MAX2(tmp.width / 16, 1);
        do {
            ret = drmCommandWriteRead(bo->bom->fd,
                                      DRM_RADEON_TEXTURE,
                                      &tex,
                                      sizeof(drm_radeon_texture_t));
            if (ret) {
                if (RADEON_DEBUG & DEBUG_IOCTL)
                    fprintf(stderr, "DRM_RADEON_TEXTURE: again!\n");
                usleep(1);
            }
        } while (ret == -EAGAIN);
        bo_legacy->dirty = 0;
        bo_legacy->tobj->base.dirty_images[0] = 0;
    }
    return 0;
}

/*
 * radeon_bo_legacy_validate -
 * returns:
 *  0 - all good
 * -EINVAL - mapped buffer can't be validated
 * -EAGAIN - restart validation we've kicked all the buffers out
 */
int radeon_bo_legacy_validate(struct radeon_bo *bo,
                              uint32_t *soffset,
                              uint32_t *eoffset)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    int r;
    int retries = 0;

    if (bo_legacy->map_count) {
        fprintf(stderr, "bo(%p, %d) is mapped (%d) can't validate it.\n",
                bo, bo->size, bo_legacy->map_count);
        return -EINVAL;
    }
    if (bo_legacy->static_bo || bo_legacy->validated) {
        *soffset = bo_legacy->offset;
        *eoffset = bo_legacy->offset + bo->size;
        return 0;
    }
    if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
        r = bo_vram_validate(bo, soffset, eoffset);
        if (r) {
            legacy_track_pending(boml, 0);
            legacy_kick_all_buffers(boml);
            retries++;
            if (retries == 2) {
                fprintf(stderr, "legacy bo: failed to get relocations into aperture\n");
                assert(0);
                exit(-1);
            }
            return -EAGAIN;
        }
    }
    *soffset = bo_legacy->offset;
    *eoffset = bo_legacy->offset + bo->size;
    bo_legacy->validated = 1;
    return 0;
}

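/* Mark a bo as referenced by a command submission that completes at GPU age
 * "pending".  Each call takes a bo reference that legacy_is_pending()
 * releases once that age has passed; the bo is put on the pending list the
 * first time only.
 */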
void radeon_bo_legacy_pending(struct radeon_bo *bo, uint32_t pending)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    bo_legacy->pending = pending;
    bo_legacy->is_pending++;
    /* add to pending list */
    radeon_bo_ref(bo);
    if (bo_legacy->is_pending > 1) {
        return;
    }
    bo_legacy->pprev = boml->pending_bos.pprev;
    bo_legacy->pnext = NULL;
    bo_legacy->pprev->pnext = bo_legacy;
    boml->pending_bos.pprev = bo_legacy;
    boml->cpendings++;
}

void radeon_bo_manager_legacy_dtor(struct radeon_bo_manager *bom)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    struct bo_legacy *bo_legacy;

    if (bom == NULL) {
        return;
    }
    bo_legacy = boml->bos.next;
    while (bo_legacy) {
        struct bo_legacy *next;

        next = bo_legacy->next;
        bo_free(bo_legacy);
        bo_legacy = next;
    }
    driDestroyTextureHeap(boml->texture_heap);
    free(boml->free_handles);
    free(boml);
}

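/* Create a static bo describing a fixed region of the framebuffer (front,
 * back or depth buffer).  Its offset doubles as its handle, and its backing
 * storage is the mapped framebuffer itself, so bo_free() never releases it.
 */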
static struct bo_legacy *radeon_legacy_bo_alloc_static(struct bo_manager_legacy *bom,
                                                       int size, uint32_t offset)
{
    struct bo_legacy *bo;

    bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
    if (bo == NULL)
        return NULL;
    bo->static_bo = 1;
    bo->offset = offset + bom->fb_location;
    bo->base.handle = bo->offset;
    bo->ptr = bom->screen->driScreen->pFB + offset;
    if (bo->base.handle > bom->nhandle) {
        bom->nhandle = bo->base.handle + 1;
    }
    radeon_bo_ref(&(bo->base));
    return bo;
}

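/* Build the legacy buffer manager for a screen: create the DRI texture heap
 * used for VRAM placements, set up the handle allocator and bo lists, and
 * register static bos for the front, back and depth buffers, each sized for
 * the largest supported framebuffer (4096x4096 at 4 bytes per pixel).
 */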
struct radeon_bo_manager *radeon_bo_manager_legacy_ctor(struct radeon_screen *scrn)
{
    struct bo_manager_legacy *bom;
    struct bo_legacy *bo;
    unsigned size;

    bom = (struct bo_manager_legacy*)
          calloc(1, sizeof(struct bo_manager_legacy));
    if (bom == NULL) {
        return NULL;
    }

    make_empty_list(&bom->texture_swapped);

    bom->texture_heap = driCreateTextureHeap(0,
                                             bom,
                                             scrn->texSize[0],
                                             12,
                                             RADEON_NR_TEX_REGIONS,
                                             (drmTextureRegionPtr)scrn->sarea->tex_list[0],
                                             &scrn->sarea->tex_age[0],
                                             &bom->texture_swapped,
                                             sizeof(struct bo_legacy_texture_object),
                                             &bo_legacy_tobj_destroy);
    bom->texture_offset = scrn->texOffset[0];

    bom->base.funcs = &bo_legacy_funcs;
    bom->base.fd = scrn->driScreen->fd;
    bom->bos.next = NULL;
    bom->bos.prev = NULL;
    bom->pending_bos.pprev = &bom->pending_bos;
    bom->pending_bos.pnext = NULL;
    bom->screen = scrn;
    bom->fb_location = scrn->fbLocation;
    bom->nhandle = 1;
    bom->cfree_handles = 0;
    bom->nfree_handles = 0x400;
    bom->free_handles = (uint32_t*)malloc(bom->nfree_handles * 4);
    if (bom->free_handles == NULL) {
        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
        return NULL;
    }

    /* biggest framebuffer size */
    size = 4096*4096*4;

    /* allocate front */
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->frontOffset);
    if (!bo) {
        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
        return NULL;
    }
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
    }

    /* allocate back */
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->backOffset);
    if (!bo) {
        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
        return NULL;
    }
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
    }

    /* allocate depth */
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->depthOffset);
    if (!bo) {
        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
        return NULL;
    }
    bo->base.flags = 0;
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags |= RADEON_BO_FLAGS_MACRO_TILE;
        bo->base.flags |= RADEON_BO_FLAGS_MICRO_TILE;
    }
    return (struct radeon_bo_manager*)bom;
}

void radeon_bo_legacy_texture_age(struct radeon_bo_manager *bom)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    DRI_AGE_TEXTURES(boml->texture_heap);
}

unsigned radeon_bo_legacy_relocs_size(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo_legacy->static_bo || (bo->domains & RADEON_GEM_DOMAIN_GTT)) {
        return 0;
    }
    return bo->size;
}