radeon: initialise swapped objects pointer
[mesa.git] / src/mesa/drivers/dri/radeon/radeon_bo_legacy.c
/*
 * Copyright © 2008 Nicolai Haehnle
 * Copyright © 2008 Dave Airlie
 * Copyright © 2008 Jérôme Glisse
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Aapo Tahkola <aet@rasterburn.org>
 *      Nicolai Haehnle <prefect_@gmx.net>
 *      Dave Airlie
 *      Jérôme Glisse <glisse@freedesktop.org>
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h> /* for the assert() calls below */
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include "xf86drm.h"
#include "texmem.h"
#include "main/simple_list.h"

#include "drm.h"
#include "radeon_drm.h"
#include "radeon_common.h"
#include "radeon_bocs_wrapper.h"

/* no seriously texmem.c is this screwed up */
struct bo_legacy_texture_object {
    driTextureObject base;
    struct bo_legacy *parent;
};

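/*
 * A legacy buffer object: either a static bo pointing straight into the
 * mapped framebuffer, a GART DMA buffer, or a malloc()ed backing store
 * that is blitted into the VRAM texture heap at validation time.
 * next/prev link it into the manager's global bo list, pnext/pprev into
 * the pending (fenced) list.
 */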
struct bo_legacy {
    struct radeon_bo base;
    int map_count;
    uint32_t pending;
    int is_pending;
    int static_bo;
    int got_dri_texture_obj;
    uint32_t offset;
    struct bo_legacy_texture_object *tobj;
    int validated;
    int dirty;
    void *ptr;
    struct bo_legacy *next, *prev;
    struct bo_legacy *pnext, *pprev;
};

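/*
 * Per-screen manager state: a simple handle allocator (nhandle counts up,
 * freed handles are recycled through free_handles), list heads for all bos
 * and for pending bos, the DRI texture heap used for VRAM placement, and
 * bookkeeping for GART DMA allocations.
 */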
struct bo_manager_legacy {
    struct radeon_bo_manager base;
    unsigned nhandle;
    unsigned nfree_handles;
    unsigned cfree_handles;
    uint32_t current_age;
    struct bo_legacy bos;
    struct bo_legacy pending_bos;
    uint32_t fb_location;
    uint32_t texture_offset;
    unsigned dma_alloc_size;
    uint32_t dma_buf_count;
    unsigned cpendings;
    driTextureObject texture_swapped;
    driTexHeap *texture_heap;
    struct radeon_screen *screen;
    unsigned *free_handles;
};

static void bo_legacy_tobj_destroy(void *data, driTextureObject *t)
{
    struct bo_legacy_texture_object *tobj = (struct bo_legacy_texture_object *)t;

    if (tobj->parent) {
        tobj->parent->got_dri_texture_obj = 0;
        tobj->parent->validated = 0;
    }
}

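/*
 * Handle allocation: handles are small integers handed out from nhandle;
 * freed handles go into the free_handles array and are reused first.
 * clean_handles() drops trailing zero entries left behind by
 * legacy_free_handle() when the highest handle is released.
 */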
static inline void clean_handles(struct bo_manager_legacy *bom)
{
    while (bom->cfree_handles > 0 &&
           !bom->free_handles[bom->cfree_handles - 1])
        bom->cfree_handles--;
}

static int legacy_new_handle(struct bo_manager_legacy *bom, uint32_t *handle)
{
    uint32_t tmp;

    *handle = 0;
    if (bom->nhandle == 0xFFFFFFFF) {
        return -EINVAL;
    }
    if (bom->cfree_handles > 0) {
        tmp = bom->free_handles[--bom->cfree_handles];
        clean_handles(bom);
    } else {
        bom->cfree_handles = 0;
        tmp = bom->nhandle++;
    }
    assert(tmp);
    *handle = tmp;
    return 0;
}

static int legacy_free_handle(struct bo_manager_legacy *bom, uint32_t handle)
{
    uint32_t *handles;

    if (!handle) {
        return 0;
    }
    if (handle == (bom->nhandle - 1)) {
        int i;

        bom->nhandle--;
        for (i = bom->cfree_handles - 1; i >= 0; i--) {
            if (bom->free_handles[i] == (bom->nhandle - 1)) {
                bom->nhandle--;
                bom->free_handles[i] = 0;
            }
        }
        clean_handles(bom);
        return 0;
    }
    if (bom->cfree_handles < bom->nfree_handles) {
        bom->free_handles[bom->cfree_handles++] = handle;
        return 0;
    }
    bom->nfree_handles += 0x100;
    handles = (uint32_t*)realloc(bom->free_handles, bom->nfree_handles * 4);
    if (handles == NULL) {
        bom->nfree_handles -= 0x100;
        return -ENOMEM;
    }
    bom->free_handles = handles;
    bom->free_handles[bom->cfree_handles++] = handle;
    return 0;
}

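/*
 * Fetch the GPU's current "age" counter, which pending fence values are
 * compared against: via the RADEON_PARAM_LAST_CLEAR getparam ioctl on
 * R300-class chips, or straight from screen->scratch[3] on older parts.
 */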
static void legacy_get_current_age(struct bo_manager_legacy *boml)
{
    drm_radeon_getparam_t gp;
    int r;

    if (IS_R300_CLASS(boml->screen)) {
        gp.param = RADEON_PARAM_LAST_CLEAR;
        gp.value = (int *)&boml->current_age;
        r = drmCommandWriteRead(boml->base.fd, DRM_RADEON_GETPARAM,
                                &gp, sizeof(gp));
        if (r) {
            fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, r);
            exit(1);
        }
    } else
        boml->current_age = boml->screen->scratch[3];
}

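/*
 * Check whether a pending bo has been retired: once current_age has passed
 * the bo's fence value it is unlinked from the pending list and the
 * references taken in radeon_bo_legacy_pending() are dropped. Returns 1
 * while the bo is still busy, 0 otherwise.
 */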
static int legacy_is_pending(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo_legacy->is_pending <= 0) {
        bo_legacy->is_pending = 0;
        return 0;
    }
    if (boml->current_age >= bo_legacy->pending) {
        if (boml->pending_bos.pprev == bo_legacy) {
            boml->pending_bos.pprev = bo_legacy->pprev;
        }
        bo_legacy->pprev->pnext = bo_legacy->pnext;
        if (bo_legacy->pnext) {
            bo_legacy->pnext->pprev = bo_legacy->pprev;
        }
        assert(bo_legacy->is_pending <= bo->cref);
        while (bo_legacy->is_pending--) {
            bo = radeon_bo_unref(bo);
            if (!bo)
                break;
        }
        if (bo)
            bo_legacy->is_pending = 0;
        boml->cpendings--;
        return 0;
    }
    return 1;
}

static int legacy_wait_pending(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (!bo_legacy->is_pending) {
        return 0;
    }
    /* FIXME: lockup and userspace busy looping that's all the folks */
    legacy_get_current_age(boml);
    while (legacy_is_pending(bo)) {
        usleep(10);
        legacy_get_current_age(boml);
    }
    return 0;
}

static void legacy_track_pending(struct bo_manager_legacy *boml, int debug)
{
    struct bo_legacy *bo_legacy;
    struct bo_legacy *next;

    legacy_get_current_age(boml);
    bo_legacy = boml->pending_bos.pnext;
    while (bo_legacy) {
        if (debug)
            fprintf(stderr, "pending %p %d %d %d\n", bo_legacy, bo_legacy->base.size,
                    boml->current_age, bo_legacy->pending);
        next = bo_legacy->pnext;
        /* called for its side effect: retires the bo if its fence has passed */
        legacy_is_pending(&(bo_legacy->base));
        bo_legacy = next;
    }
}

static int legacy_wait_any_pending(struct bo_manager_legacy *boml)
{
    struct bo_legacy *bo_legacy;

    legacy_get_current_age(boml);
    bo_legacy = boml->pending_bos.pnext;
    if (!bo_legacy)
        return -1;
    legacy_wait_pending(&bo_legacy->base);
    return 0;
}

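/*
 * Allocate and initialise a bo_legacy shell (no backing storage yet),
 * rounding the size up to a whole number of pages and linking the new
 * object at the head of the manager's bo list.
 */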
static struct bo_legacy *bo_allocate(struct bo_manager_legacy *boml,
                                     uint32_t size,
                                     uint32_t alignment,
                                     uint32_t domains,
                                     uint32_t flags)
{
    struct bo_legacy *bo_legacy;
    static int pgsize;

    if (pgsize == 0)
        pgsize = getpagesize() - 1;

    size = (size + pgsize) & ~pgsize;

    bo_legacy = (struct bo_legacy*)calloc(1, sizeof(struct bo_legacy));
    if (bo_legacy == NULL) {
        return NULL;
    }
    bo_legacy->base.bom = (struct radeon_bo_manager*)boml;
    bo_legacy->base.handle = 0;
    bo_legacy->base.size = size;
    bo_legacy->base.alignment = alignment;
    bo_legacy->base.domains = domains;
    bo_legacy->base.flags = flags;
    bo_legacy->base.ptr = NULL;
    bo_legacy->map_count = 0;
    bo_legacy->next = NULL;
    bo_legacy->prev = NULL;
    bo_legacy->got_dri_texture_obj = 0;
    bo_legacy->pnext = NULL;
    bo_legacy->pprev = NULL;
    bo_legacy->next = boml->bos.next;
    bo_legacy->prev = &boml->bos;
    boml->bos.next = bo_legacy;
    if (bo_legacy->next) {
        bo_legacy->next->prev = bo_legacy;
    }
    return bo_legacy;
}

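/*
 * Back a GTT bo with GART memory obtained through the DRM_RADEON_ALLOC
 * ioctl; the returned region offset is turned into both a CPU pointer
 * (via the mapped GART texture area) and a card offset.
 */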
static int bo_dma_alloc(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    drm_radeon_mem_alloc_t alloc;
    unsigned size;
    int base_offset;
    int r;

    /* align size on 4Kb */
    size = (((4 * 1024) - 1) + bo->size) & ~((4 * 1024) - 1);
    alloc.region = RADEON_MEM_REGION_GART;
    alloc.alignment = bo_legacy->base.alignment;
    alloc.size = size;
    alloc.region_offset = &base_offset;
    r = drmCommandWriteRead(bo->bom->fd,
                            DRM_RADEON_ALLOC,
                            &alloc,
                            sizeof(alloc));
    if (r) {
        /* ptr is set to NULL if dma allocation failed */
        bo_legacy->ptr = NULL;
        return r;
    }
    bo_legacy->ptr = boml->screen->gartTextures.map + base_offset;
    bo_legacy->offset = boml->screen->gart_texture_offset + base_offset;
    bo->size = size;
    boml->dma_alloc_size += size;
    boml->dma_buf_count++;
    return 0;
}

static int bo_dma_free(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    drm_radeon_mem_free_t memfree;
    int r;

    if (bo_legacy->ptr == NULL) {
        /* ptr is set to NULL if dma allocation failed */
        return 0;
    }
    legacy_get_current_age(boml);
    memfree.region = RADEON_MEM_REGION_GART;
    memfree.region_offset = bo_legacy->offset;
    memfree.region_offset -= boml->screen->gart_texture_offset;
    r = drmCommandWrite(boml->base.fd,
                        DRM_RADEON_FREE,
                        &memfree,
                        sizeof(memfree));
    if (r) {
        fprintf(stderr, "Failed to free bo[%p] at %08x\n",
                &bo_legacy->base, memfree.region_offset);
        fprintf(stderr, "ret = %s\n", strerror(-r));
        return r;
    }
    boml->dma_alloc_size -= bo_legacy->base.size;
    boml->dma_buf_count--;
    return 0;
}

static void bo_free(struct bo_legacy *bo_legacy)
{
    struct bo_manager_legacy *boml;

    if (bo_legacy == NULL) {
        return;
    }
    boml = (struct bo_manager_legacy *)bo_legacy->base.bom;
    bo_legacy->prev->next = bo_legacy->next;
    if (bo_legacy->next) {
        bo_legacy->next->prev = bo_legacy->prev;
    }
    if (!bo_legacy->static_bo) {
        legacy_free_handle(boml, bo_legacy->base.handle);
        if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
            /* dma buffers */
            bo_dma_free(&bo_legacy->base);
        } else {
            driDestroyTextureObject(&bo_legacy->tobj->base);
            bo_legacy->tobj = NULL;
            /* free backing store */
            free(bo_legacy->ptr);
        }
    }
    memset(bo_legacy, 0, sizeof(struct bo_legacy));
    free(bo_legacy);
}

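/*
 * radeon_bo_funcs.bo_open: with a non-zero handle this only looks up an
 * existing bo and takes a reference; otherwise it allocates a new one,
 * backed either by GART DMA memory (GTT domain, retried after waiting on
 * pending bos when the GART pool is full) or by a plain malloc()ed buffer
 * that is uploaded to VRAM at validation time.
 */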
static struct radeon_bo *bo_open(struct radeon_bo_manager *bom,
                                 uint32_t handle,
                                 uint32_t size,
                                 uint32_t alignment,
                                 uint32_t domains,
                                 uint32_t flags)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    struct bo_legacy *bo_legacy;
    int r;

    if (handle) {
        bo_legacy = boml->bos.next;
        while (bo_legacy) {
            if (bo_legacy->base.handle == handle) {
                radeon_bo_ref(&(bo_legacy->base));
                return (struct radeon_bo*)bo_legacy;
            }
            bo_legacy = bo_legacy->next;
        }
        return NULL;
    }

    bo_legacy = bo_allocate(boml, size, alignment, domains, flags);
    if (bo_legacy == NULL) {
        return NULL;
    }
    bo_legacy->static_bo = 0;
    r = legacy_new_handle(boml, &bo_legacy->base.handle);
    if (r) {
        bo_free(bo_legacy);
        return NULL;
    }
    if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
    retry:
        legacy_track_pending(boml, 0);
        /* dma buffers */
        r = bo_dma_alloc(&(bo_legacy->base));
        if (r) {
            if (legacy_wait_any_pending(boml) == -1) {
                bo_free(bo_legacy);
                return NULL;
            }
            goto retry;
        }
    } else {
        bo_legacy->ptr = malloc(bo_legacy->base.size);
        if (bo_legacy->ptr == NULL) {
            bo_free(bo_legacy);
            return NULL;
        }
    }
    radeon_bo_ref(&(bo_legacy->base));
    return (struct radeon_bo*)bo_legacy;
}

static void bo_ref(struct radeon_bo *bo)
{
}

static struct radeon_bo *bo_unref(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo->cref <= 0) {
        bo_legacy->prev->next = bo_legacy->next;
        if (bo_legacy->next) {
            bo_legacy->next->prev = bo_legacy->prev;
        }
        if (!bo_legacy->is_pending) {
            bo_free(bo_legacy);
        }
        return NULL;
    }
    return bo;
}

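/*
 * Map a bo for CPU access: waits for any pending fence, marks the bo
 * dirty/unvalidated, and exposes the backing pointer. For non-GTT (VRAM)
 * bos a dummy framebuffer read is issued to flush the card's read cache,
 * as the comment below explains.
 */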
static int bo_map(struct radeon_bo *bo, int write)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    legacy_wait_pending(bo);
    bo_legacy->validated = 0;
    bo_legacy->dirty = 1;
    bo_legacy->map_count++;
    bo->ptr = bo_legacy->ptr;
    /* Read the first pixel in the frame buffer. This should
     * be a noop, right? In fact without this conform fails as reading
     * from the framebuffer sometimes produces old results -- the
     * on-card read cache gets mixed up and doesn't notice that the
     * framebuffer has been updated.
     *
     * Note that we should probably be reading some otherwise unused
     * region of VRAM, otherwise we might get incorrect results when
     * reading pixels from the top left of the screen.
     *
     * I found this problem on an R420 with glean's texCube test.
     * Note that the R200 span code also *writes* the first pixel in the
     * framebuffer, but I've found this to be unnecessary.
     *  -- Nicolai Hähnle, June 2008
     */
    if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
        int p;
        volatile int *buf = (int*)boml->screen->driScreen->pFB;
        p = *buf;
    }
    return 0;
}

static int bo_unmap(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (--bo_legacy->map_count > 0) {
        return 0;
    }
    bo->ptr = NULL;
    return 0;
}

static struct radeon_bo_funcs bo_legacy_funcs = {
    bo_open,
    bo_ref,
    bo_unref,
    bo_map,
    bo_unmap
};

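/*
 * Place a malloc()-backed bo in the VRAM texture heap, allocating a DRI
 * texture object on first use (heap-allocation failure prints an error and
 * then deliberately writes through a NULL pointer), and upload dirty
 * contents with a DRM_RADEON_TEXTURE blit in ARGB8888, 1024 pixels wide
 * for buffers of 4KiB or more.
 */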
static int bo_vram_validate(struct radeon_bo *bo,
                            uint32_t *soffset,
                            uint32_t *eoffset)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    int r;

    if (!bo_legacy->got_dri_texture_obj) {
        bo_legacy->tobj = CALLOC(sizeof(struct bo_legacy_texture_object));
        bo_legacy->tobj->parent = bo_legacy;
        make_empty_list(&bo_legacy->tobj->base);
        bo_legacy->tobj->base.totalSize = bo->size;
        r = driAllocateTexture(&boml->texture_heap, 1,
                               &bo_legacy->tobj->base);
        if (r) {
            uint8_t *segfault = NULL;
            fprintf(stderr, "Ouch! vram_validate failed %d\n", r);
            *segfault = 1;
            return -1;
        }
        bo_legacy->offset = boml->texture_offset +
                            bo_legacy->tobj->base.memBlock->ofs;
        bo_legacy->got_dri_texture_obj = 1;
        bo_legacy->dirty = 1;
    }

    if (bo_legacy->got_dri_texture_obj)
        driUpdateTextureLRU(&bo_legacy->tobj->base);

    if (bo_legacy->dirty || bo_legacy->tobj->base.dirty_images[0]) {
        /* Copy to VRAM using a blit.
         * All memory is 4K aligned. We're using 1024 pixels wide blits.
         */
        drm_radeon_texture_t tex;
        drm_radeon_tex_image_t tmp;
        int ret;

        tex.offset = bo_legacy->offset;
        tex.image = &tmp;
        assert(!(tex.offset & 1023));

        tmp.x = 0;
        tmp.y = 0;
        if (bo->size < 4096) {
            tmp.width = (bo->size + 3) / 4;
            tmp.height = 1;
        } else {
            tmp.width = 1024;
            tmp.height = (bo->size + 4095) / 4096;
        }
        tmp.data = bo_legacy->ptr;
        tex.format = RADEON_TXFORMAT_ARGB8888;
        tex.width = tmp.width;
        tex.height = tmp.height;
        tex.pitch = MAX2(tmp.width / 16, 1);
        do {
            ret = drmCommandWriteRead(bo->bom->fd,
                                      DRM_RADEON_TEXTURE,
                                      &tex,
                                      sizeof(drm_radeon_texture_t));
            if (ret) {
                if (RADEON_DEBUG & DEBUG_IOCTL)
                    fprintf(stderr, "DRM_RADEON_TEXTURE: again!\n");
                usleep(1);
            }
        } while (ret == -EAGAIN);
        bo_legacy->dirty = 0;
        bo_legacy->tobj->base.dirty_images[0] = 0;
    }
    return 0;
}

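/*
 * Resolve a bo to a card-address range for command submission. Static and
 * already-validated bos just report their cached offset; VRAM bos are
 * placed (and uploaded) through bo_vram_validate() first. A mapped bo
 * cannot be validated.
 */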
int radeon_bo_legacy_validate(struct radeon_bo *bo,
                              uint32_t *soffset,
                              uint32_t *eoffset)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    int r;

    if (bo_legacy->map_count) {
        fprintf(stderr, "bo(%p, %d) is mapped (%d) can't validate it.\n",
                bo, bo->size, bo_legacy->map_count);
        return -EINVAL;
    }
    if (bo_legacy->static_bo || bo_legacy->validated) {
        *soffset = bo_legacy->offset;
        *eoffset = bo_legacy->offset + bo->size;
        return 0;
    }
    if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
        r = bo_vram_validate(bo, soffset, eoffset);
        if (r) {
            return r;
        }
    }
    *soffset = bo_legacy->offset;
    *eoffset = bo_legacy->offset + bo->size;
    bo_legacy->validated = 1;
    return 0;
}

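/*
 * Mark a bo as pending on the given fence age: takes an extra reference,
 * and on the first pending mark links the bo onto the manager's pending
 * list so legacy_is_pending()/legacy_wait_pending() can retire it later.
 */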
void radeon_bo_legacy_pending(struct radeon_bo *bo, uint32_t pending)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    bo_legacy->pending = pending;
    bo_legacy->is_pending++;
    /* add to pending list */
    radeon_bo_ref(bo);
    if (bo_legacy->is_pending > 1) {
        return;
    }
    bo_legacy->pprev = boml->pending_bos.pprev;
    bo_legacy->pnext = NULL;
    bo_legacy->pprev->pnext = bo_legacy;
    boml->pending_bos.pprev = bo_legacy;
    boml->cpendings++;
}

void radeon_bo_manager_legacy_dtor(struct radeon_bo_manager *bom)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    struct bo_legacy *bo_legacy;

    if (bom == NULL) {
        return;
    }
    bo_legacy = boml->bos.next;
    while (bo_legacy) {
        struct bo_legacy *next;

        next = bo_legacy->next;
        bo_free(bo_legacy);
        bo_legacy = next;
    }
    driDestroyTextureHeap(boml->texture_heap);
    free(boml->free_handles);
    free(boml);
}

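/*
 * Create a static bo for one of the fixed on-card buffers (front, back,
 * depth): its pointer aliases the mapped framebuffer and its handle is
 * simply its card offset, so it never goes through the handle allocator
 * and short-circuits the validation path.
 */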
static struct bo_legacy *radeon_legacy_bo_alloc_static(struct bo_manager_legacy *bom,
                                                       int size, uint32_t offset)
{
    struct bo_legacy *bo;

    bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
    if (bo == NULL)
        return NULL;
    bo->static_bo = 1;
    bo->offset = offset + bom->fb_location;
    bo->base.handle = bo->offset;
    bo->ptr = bom->screen->driScreen->pFB + offset;
    if (bo->base.handle > bom->nhandle) {
        bom->nhandle = bo->base.handle + 1;
    }
    radeon_bo_ref(&(bo->base));
    return bo;
}

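/*
 * Build the legacy bo manager for a screen: initialise the swapped-out
 * texture list head (the "swapped objects pointer" the commit title refers
 * to), create the DRI texture heap used for VRAM placement, set up the
 * handle allocator and pending list, and pre-create static bos for the
 * front, back and depth buffers.
 */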
struct radeon_bo_manager *radeon_bo_manager_legacy_ctor(struct radeon_screen *scrn)
{
    struct bo_manager_legacy *bom;
    struct bo_legacy *bo;
    unsigned size;

    bom = (struct bo_manager_legacy*)
          calloc(1, sizeof(struct bo_manager_legacy));
    if (bom == NULL) {
        return NULL;
    }

    make_empty_list(&bom->texture_swapped);

    bom->texture_heap = driCreateTextureHeap(0,
                                             bom,
                                             scrn->texSize[0],
                                             12,
                                             RADEON_NR_TEX_REGIONS,
                                             (drmTextureRegionPtr)scrn->sarea->tex_list[0],
                                             &scrn->sarea->tex_age[0],
                                             &bom->texture_swapped,
                                             sizeof(struct bo_legacy_texture_object),
                                             &bo_legacy_tobj_destroy);
    bom->texture_offset = scrn->texOffset[0];

    bom->base.funcs = &bo_legacy_funcs;
    bom->base.fd = scrn->driScreen->fd;
    bom->bos.next = NULL;
    bom->bos.prev = NULL;
    bom->pending_bos.pprev = &bom->pending_bos;
    bom->pending_bos.pnext = NULL;
    bom->screen = scrn;
    bom->fb_location = scrn->fbLocation;
    bom->nhandle = 1;
    bom->cfree_handles = 0;
    bom->nfree_handles = 0x400;
    bom->free_handles = (uint32_t*)malloc(bom->nfree_handles * 4);
    if (bom->free_handles == NULL) {
        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
        return NULL;
    }

    /* biggest framebuffer size */
    size = 4096*4096*4;

    /* allocate front */
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->frontOffset);
    if (!bo) {
        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
        return NULL;
    }
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
    }

    /* allocate back */
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->backOffset);
    if (!bo) {
        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
        return NULL;
    }
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
    }

    /* allocate depth */
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->depthOffset);
    if (!bo) {
        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
        return NULL;
    }
    bo->base.flags = 0;
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags |= RADEON_BO_FLAGS_MACRO_TILE;
        bo->base.flags |= RADEON_BO_FLAGS_MICRO_TILE;
    }
    return (struct radeon_bo_manager*)bom;
}

void radeon_bo_legacy_texture_age(struct radeon_bo_manager *bom)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    DRI_AGE_TEXTURES(boml->texture_heap);
}

unsigned radeon_bo_legacy_relocs_size(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo_legacy->static_bo || (bo->domains & RADEON_GEM_DOMAIN_GTT)) {
        return 0;
    }
    return bo->size;
}

int radeon_legacy_bo_is_static(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    return bo_legacy->static_bo;
}