radeon: fixup destroy texture object exit path and update LRU
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_bo_legacy.c
1 /*
2 * Copyright © 2008 Nicolai Haehnle
3 * Copyright © 2008 Dave Airlie
4 * Copyright © 2008 Jérôme Glisse
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
25 * of the Software.
26 */
27 /*
28 * Authors:
29 * Aapo Tahkola <aet@rasterburn.org>
30 * Nicolai Haehnle <prefect_@gmx.net>
31 * Dave Airlie
32 * Jérôme Glisse <glisse@freedesktop.org>
33 */
#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "xf86drm.h"
#include "texmem.h"
#include "main/simple_list.h"

#include "drm.h"
#include "radeon_drm.h"
#include "radeon_common.h"
#include "radeon_bocs_wrapper.h"
51
52
/* One legacy (non-GEM) buffer object.  Wraps the generic radeon_bo with
 * the bookkeeping needed by the old DRM memory path. */
struct bo_legacy {
    struct radeon_bo base;            /* generic BO; must stay first, code casts both ways */
    int map_count;                    /* nested bo_map()/bo_unmap() depth */
    uint32_t pending;                 /* fence age this BO waits on */
    int is_pending;                   /* outstanding pending references (see radeon_bo_legacy_pending) */
    int validated;                    /* non-zero once offset is valid for the GPU */
    int static_bo;                    /* front/back/depth buffer at a fixed location */
    int got_dri_texture_obj;          /* dri_texture_obj holds a live heap allocation */
    int dirty;                        /* CPU backing store newer than the VRAM copy */
    uint32_t offset;                  /* card-relative offset once placed */
    driTextureObject dri_texture_obj; /* VRAM texture-heap allocation record */
    void *ptr;                        /* CPU-visible backing store / GART mapping */
    struct bo_legacy *next, *prev;    /* links in bo_manager_legacy.bos */
    struct bo_legacy *pnext, *pprev;  /* links in bo_manager_legacy.pending_bos */
};
68
/* Legacy BO manager state for one screen: handle allocator, BO lists,
 * fence-age tracking and the VRAM texture heap. */
struct bo_manager_legacy {
    struct radeon_bo_manager base;    /* generic manager; must stay first */
    unsigned nhandle;                 /* next fresh handle to issue */
    unsigned nfree_handles;           /* capacity of free_handles[] */
    unsigned cfree_handles;           /* number of recycled handles stacked */
    uint32_t current_age;             /* last fence age read from hardware */
    struct bo_legacy bos;             /* sentinel head of all BOs (next/prev links) */
    struct bo_legacy pending_bos;     /* sentinel of GPU-pending BOs (pnext/pprev links) */
    uint32_t fb_location;             /* card address of the framebuffer aperture */
    uint32_t texture_offset;          /* card address of the VRAM texture region */
    unsigned dma_alloc_size;          /* bytes currently allocated from GART */
    uint32_t dma_buf_count;           /* number of live GART DMA buffers */
    unsigned cpendings;               /* number of BOs on pending_bos */
    driTextureObject texture_swapped; /* heap's list of swapped-out textures */
    driTexHeap *texture_heap;         /* VRAM texture heap (see ctor) */
    struct radeon_screen *screen;
    unsigned *free_handles;           /* stack of recycled handles */
};
87
/* Recover the enclosing structure from a pointer to one of its members.
 * GCC statement-expression + __typeof, mirroring the Linux kernel macro. */
#define container_of(ptr, type, member) ({ \
    const __typeof( ((type *)0)->member ) *__mptr = (ptr);	\
    (type *)( (char *)__mptr - offsetof(type,member) );})
91
92 static void bo_legacy_tobj_destroy(void *data, driTextureObject *t)
93 {
94 struct bo_legacy *bo_legacy = container_of(t, struct bo_legacy, dri_texture_obj);
95
96
97 bo_legacy->got_dri_texture_obj = 0;
98 bo_legacy->validated = 0;
99 }
100
101 static void inline clean_handles(struct bo_manager_legacy *bom)
102 {
103 while (bom->cfree_handles > 0 &&
104 !bom->free_handles[bom->cfree_handles - 1])
105 bom->cfree_handles--;
106
107 }
108 static int legacy_new_handle(struct bo_manager_legacy *bom, uint32_t *handle)
109 {
110 uint32_t tmp;
111
112 *handle = 0;
113 if (bom->nhandle == 0xFFFFFFFF) {
114 return -EINVAL;
115 }
116 if (bom->cfree_handles > 0) {
117 tmp = bom->free_handles[--bom->cfree_handles];
118 clean_handles(bom);
119 } else {
120 bom->cfree_handles = 0;
121 tmp = bom->nhandle++;
122 }
123 assert(tmp);
124 *handle = tmp;
125 return 0;
126 }
127
128 static int legacy_free_handle(struct bo_manager_legacy *bom, uint32_t handle)
129 {
130 uint32_t *handles;
131
132 if (!handle) {
133 return 0;
134 }
135 if (handle == (bom->nhandle - 1)) {
136 int i;
137
138 bom->nhandle--;
139 for (i = bom->cfree_handles - 1; i >= 0; i--) {
140 if (bom->free_handles[i] == (bom->nhandle - 1)) {
141 bom->nhandle--;
142 bom->free_handles[i] = 0;
143 }
144 }
145 clean_handles(bom);
146 return 0;
147 }
148 if (bom->cfree_handles < bom->nfree_handles) {
149 bom->free_handles[bom->cfree_handles++] = handle;
150 return 0;
151 }
152 bom->nfree_handles += 0x100;
153 handles = (uint32_t*)realloc(bom->free_handles, bom->nfree_handles * 4);
154 if (handles == NULL) {
155 bom->nfree_handles -= 0x100;
156 return -ENOMEM;
157 }
158 bom->free_handles = handles;
159 bom->free_handles[bom->cfree_handles++] = handle;
160 return 0;
161 }
162
/* Refresh boml->current_age: via the LAST_CLEAR getparam ioctl on
 * R300-class chips, or straight from SAREA scratch register 3 on older
 * parts.  A failing ioctl is fatal (exit(1)) — there is no sensible
 * recovery without an age source. */
static void legacy_get_current_age(struct bo_manager_legacy *boml)
{
    drm_radeon_getparam_t gp;
    int r;

    if (IS_R300_CLASS(boml->screen)) {
        gp.param = RADEON_PARAM_LAST_CLEAR;
        gp.value = (int *)&boml->current_age;
        r = drmCommandWriteRead(boml->base.fd, DRM_RADEON_GETPARAM,
                                &gp, sizeof(gp));
        if (r) {
            fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, r);
            exit(1);
        }
    } else
        boml->current_age = boml->screen->scratch[3];
}
180
/* Check whether a BO is still in flight on the GPU.
 *
 * Returns 1 while boml->current_age has not reached the BO's fence.
 * Returns 0 otherwise, retiring the BO as a side effect: it is unlinked
 * from the pending list and every reference taken by
 * radeon_bo_legacy_pending() is dropped.  NOTE: the unref loop may free
 * the BO — callers must not touch it after a 0 return. */
static int legacy_is_pending(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo_legacy->is_pending <= 0) {
        bo_legacy->is_pending = 0;
        return 0;
    }
    if (boml->current_age >= bo_legacy->pending) {
        /* Unlink from the pending list; fix the tail pointer first. */
        if (boml->pending_bos.pprev == bo_legacy) {
            boml->pending_bos.pprev = bo_legacy->pprev;
        }
        bo_legacy->pprev->pnext = bo_legacy->pnext;
        if (bo_legacy->pnext) {
            bo_legacy->pnext->pprev = bo_legacy->pprev;
        }
        assert(bo_legacy->is_pending <= bo->cref);
        /* Drop one reference per pending marker; stop early if the BO
         * is destroyed by the last unref. */
        while (bo_legacy->is_pending--) {
            bo = radeon_bo_unref(bo);
            if (!bo)
                break;
        }
        if (bo)
            bo_legacy->is_pending = 0;
        boml->cpendings--;
        return 0;
    }
    return 1;
}
211
212 static int legacy_wait_pending(struct radeon_bo *bo)
213 {
214 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
215 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
216
217 if (!bo_legacy->is_pending) {
218 return 0;
219 }
220 /* FIXME: lockup and userspace busy looping that's all the folks */
221 legacy_get_current_age(boml);
222 while (legacy_is_pending(bo)) {
223 usleep(10);
224 legacy_get_current_age(boml);
225 }
226 return 0;
227 }
228
229 static void legacy_track_pending(struct bo_manager_legacy *boml, int debug)
230 {
231 struct bo_legacy *bo_legacy;
232 struct bo_legacy *next;
233
234 legacy_get_current_age(boml);
235 bo_legacy = boml->pending_bos.pnext;
236 while (bo_legacy) {
237 if (debug)
238 fprintf(stderr,"pending %p %d %d %d\n", bo_legacy, bo_legacy->base.size,
239 boml->current_age, bo_legacy->pending);
240 next = bo_legacy->pnext;
241 if (legacy_is_pending(&(bo_legacy->base))) {
242 }
243 bo_legacy = next;
244 }
245 }
246
247 static int legacy_wait_any_pending(struct bo_manager_legacy *boml)
248 {
249 struct bo_legacy *bo_legacy;
250 struct bo_legacy *next;
251
252 legacy_get_current_age(boml);
253 bo_legacy = boml->pending_bos.pnext;
254 if (!bo_legacy)
255 return -1;
256 legacy_wait_pending(&bo_legacy->base);
257 return 0;
258 }
259
260 static struct bo_legacy *bo_allocate(struct bo_manager_legacy *boml,
261 uint32_t size,
262 uint32_t alignment,
263 uint32_t domains,
264 uint32_t flags)
265 {
266 struct bo_legacy *bo_legacy;
267 static int pgsize;
268
269 if (pgsize == 0)
270 pgsize = getpagesize() - 1;
271
272 size = (size + pgsize) & ~pgsize;
273
274 bo_legacy = (struct bo_legacy*)calloc(1, sizeof(struct bo_legacy));
275 if (bo_legacy == NULL) {
276 return NULL;
277 }
278 bo_legacy->base.bom = (struct radeon_bo_manager*)boml;
279 bo_legacy->base.handle = 0;
280 bo_legacy->base.size = size;
281 bo_legacy->base.alignment = alignment;
282 bo_legacy->base.domains = domains;
283 bo_legacy->base.flags = flags;
284 bo_legacy->base.ptr = NULL;
285 bo_legacy->map_count = 0;
286 bo_legacy->next = NULL;
287 bo_legacy->prev = NULL;
288 bo_legacy->got_dri_texture_obj = 0;
289 bo_legacy->pnext = NULL;
290 bo_legacy->pprev = NULL;
291 bo_legacy->next = boml->bos.next;
292 bo_legacy->prev = &boml->bos;
293 boml->bos.next = bo_legacy;
294 if (bo_legacy->next) {
295 bo_legacy->next->prev = bo_legacy;
296 }
297 return bo_legacy;
298 }
299
/* Allocate a GART DMA region for the BO via the DRM_RADEON_ALLOC ioctl.
 * On success: bo->size is rounded up to 4KiB, ptr/offset are derived
 * from the screen's GART texture mapping, and the manager's DMA
 * accounting is updated.  On failure the ioctl's error code is returned
 * and bo_legacy->ptr is left NULL as the failure marker. */
static int bo_dma_alloc(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    drm_radeon_mem_alloc_t alloc;
    unsigned size;
    int base_offset;    /* filled in by the kernel through alloc.region_offset */
    int r;

    /* align size on 4Kb */
    size = (((4 * 1024) - 1) + bo->size) & ~((4 * 1024) - 1);
    alloc.region = RADEON_MEM_REGION_GART;
    alloc.alignment = bo_legacy->base.alignment;
    alloc.size = size;
    alloc.region_offset = &base_offset;
    r = drmCommandWriteRead(bo->bom->fd,
                            DRM_RADEON_ALLOC,
                            &alloc,
                            sizeof(alloc));
    if (r) {
        /* ptr is set to NULL if dma allocation failed */
        bo_legacy->ptr = NULL;
        return r;
    }
    bo_legacy->ptr = boml->screen->gartTextures.map + base_offset;
    bo_legacy->offset = boml->screen->gart_texture_offset + base_offset;
    bo->size = size;
    boml->dma_alloc_size += size;
    boml->dma_buf_count++;
    return 0;
}
331
/* Hand a BO's GART DMA region back to the kernel (DRM_RADEON_FREE).
 * A NULL ptr means the original allocation failed, so there is nothing
 * to free.  NOTE(review): only the current age is refreshed before
 * freeing — there is no explicit wait here; confirm callers guarantee
 * the GPU is done with the region.  Returns 0 or the ioctl error. */
static int bo_dma_free(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    drm_radeon_mem_free_t memfree;
    int r;

    if (bo_legacy->ptr == NULL) {
        /* ptr is set to NULL if dma allocation failed */
        return 0;
    }
    legacy_get_current_age(boml);
    memfree.region = RADEON_MEM_REGION_GART;
    /* Convert the card-relative offset back to a region offset. */
    memfree.region_offset = bo_legacy->offset;
    memfree.region_offset -= boml->screen->gart_texture_offset;
    r = drmCommandWrite(boml->base.fd,
                        DRM_RADEON_FREE,
                        &memfree,
                        sizeof(memfree));
    if (r) {
        fprintf(stderr, "Failed to free bo[%p] at %08x\n",
                &bo_legacy->base, memfree.region_offset);
        fprintf(stderr, "ret = %s\n", strerror(-r));
        return r;
    }
    boml->dma_alloc_size -= bo_legacy->base.size;
    boml->dma_buf_count--;
    return 0;
}
361
362 static void bo_free(struct bo_legacy *bo_legacy)
363 {
364 struct bo_manager_legacy *boml;
365
366 if (bo_legacy == NULL) {
367 return;
368 }
369 boml = (struct bo_manager_legacy *)bo_legacy->base.bom;
370 bo_legacy->prev->next = bo_legacy->next;
371 if (bo_legacy->next) {
372 bo_legacy->next->prev = bo_legacy->prev;
373 }
374 if (!bo_legacy->static_bo) {
375 legacy_free_handle(boml, bo_legacy->base.handle);
376 if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
377 /* dma buffers */
378 bo_dma_free(&bo_legacy->base);
379 } else {
380 if (bo_legacy->got_dri_texture_obj)
381 driCleanupTextureObject(&bo_legacy->dri_texture_obj);
382
383 /* free backing store */
384 free(bo_legacy->ptr);
385 }
386 }
387 free(bo_legacy);
388 }
389
390 static struct radeon_bo *bo_open(struct radeon_bo_manager *bom,
391 uint32_t handle,
392 uint32_t size,
393 uint32_t alignment,
394 uint32_t domains,
395 uint32_t flags)
396 {
397 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
398 struct bo_legacy *bo_legacy;
399 int r;
400
401 if (handle) {
402 bo_legacy = boml->bos.next;
403 while (bo_legacy) {
404 if (bo_legacy->base.handle == handle) {
405 radeon_bo_ref(&(bo_legacy->base));
406 return (struct radeon_bo*)bo_legacy;
407 }
408 bo_legacy = bo_legacy->next;
409 }
410 return NULL;
411 }
412
413 bo_legacy = bo_allocate(boml, size, alignment, domains, flags);
414 bo_legacy->static_bo = 0;
415 r = legacy_new_handle(boml, &bo_legacy->base.handle);
416 if (r) {
417 bo_free(bo_legacy);
418 return NULL;
419 }
420 if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
421 retry:
422 legacy_track_pending(boml, 0);
423 /* dma buffers */
424
425 r = bo_dma_alloc(&(bo_legacy->base));
426 if (r) {
427 if (legacy_wait_any_pending(boml) == -1) {
428 bo_free(bo_legacy);
429 return NULL;
430 }
431 goto retry;
432 return NULL;
433 }
434 } else {
435 bo_legacy->ptr = malloc(bo_legacy->base.size);
436 if (bo_legacy->ptr == NULL) {
437 bo_free(bo_legacy);
438 return NULL;
439 }
440 }
441 radeon_bo_ref(&(bo_legacy->base));
442 return (struct radeon_bo*)bo_legacy;
443 }
444
/* Backend ref hook: intentionally empty — presumably the generic
 * radeon_bo_ref() wrapper maintains bo->cref itself (confirm in
 * radeon_bocs_wrapper.h), so the legacy backend has nothing to add. */
static void bo_ref(struct radeon_bo *bo)
{
}
448
/* Backend unref hook: called with bo->cref presumably already
 * decremented by the generic wrapper (confirm in radeon_bocs_wrapper.h).
 * At refcount zero the BO is unlinked; the actual free is deferred
 * while the BO is still pending on the GPU (legacy_is_pending() frees
 * it when it retires).  Returns NULL once the caller's reference is
 * gone, otherwise the still-live BO.
 * NOTE(review): a pending BO is unlinked here and bo_free() unlinks
 * again later — harmless only if the list is untouched in between;
 * verify. */
static struct radeon_bo *bo_unref(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo->cref <= 0) {
        bo_legacy->prev->next = bo_legacy->next;
        if (bo_legacy->next) {
            bo_legacy->next->prev = bo_legacy->prev;
        }
        if (!bo_legacy->is_pending) {
            bo_free(bo_legacy);
        }
        return NULL;
    }
    return bo;
}
465
/* Map the BO for CPU access: wait out any pending GPU use, mark the BO
 * dirty and unvalidated (the next validate must re-upload it), and
 * expose the backing-store pointer through bo->ptr.  Always returns 0. */
static int bo_map(struct radeon_bo *bo, int write)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    legacy_wait_pending(bo);
    bo_legacy->validated = 0;
    bo_legacy->dirty = 1;
    bo_legacy->map_count++;
    bo->ptr = bo_legacy->ptr;
    /* Read the first pixel in the frame buffer.  This should
     * be a noop, right?  In fact without this conform fails as reading
     * from the framebuffer sometimes produces old results -- the
     * on-card read cache gets mixed up and doesn't notice that the
     * framebuffer has been updated.
     *
     * Note that we should probably be reading some otherwise unused
     * region of VRAM, otherwise we might get incorrect results when
     * reading pixels from the top left of the screen.
     *
     * I found this problem on an R420 with glean's texCube test.
     * Note that the R200 span code also *writes* the first pixel in the
     * framebuffer, but I've found this to be unnecessary.
     *  -- Nicolai Hähnle, June 2008
     */
    if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
        int p;
        /* volatile: the dummy read must not be optimized away. */
        volatile int *buf = (int*)boml->screen->driScreen->pFB;
        p = *buf;
    }
    return 0;
}
498
499 static int bo_unmap(struct radeon_bo *bo)
500 {
501 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
502
503 if (--bo_legacy->map_count > 0) {
504 return 0;
505 }
506 bo->ptr = NULL;
507 return 0;
508 }
509
/* Dispatch table wiring the legacy implementation into the generic
 * radeon_bo interface.  Positional initializers: the order must match
 * struct radeon_bo_funcs (see radeon_bocs_wrapper.h). */
static struct radeon_bo_funcs bo_legacy_funcs = {
    bo_open,
    bo_ref,
    bo_unref,
    bo_map,
    bo_unmap
};
517
518 static int bo_vram_validate(struct radeon_bo *bo,
519 uint32_t *soffset,
520 uint32_t *eoffset)
521 {
522 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
523 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
524 int r;
525
526 if (!bo_legacy->got_dri_texture_obj) {
527 make_empty_list(&bo_legacy->dri_texture_obj);
528 bo_legacy->dri_texture_obj.totalSize = bo->size;
529 r = driAllocateTexture(&boml->texture_heap, 1,
530 &bo_legacy->dri_texture_obj);
531 if (r) {
532 uint8_t *segfault=NULL;
533 fprintf(stderr, "Ouch! vram_validate failed %d\n", r);
534 *segfault=1;
535 return -1;
536 }
537 bo_legacy->offset = boml->texture_offset +
538 bo_legacy->dri_texture_obj.memBlock->ofs;
539 bo_legacy->got_dri_texture_obj = 1;
540 bo_legacy->dirty = 1;
541 }
542
543 if (bo_legacy->got_dri_texture_obj)
544 driUpdateTextureLRU(&bo_legacy->dri_texture_obj);
545 if (bo_legacy->dirty) {
546 /* Copy to VRAM using a blit.
547 * All memory is 4K aligned. We're using 1024 pixels wide blits.
548 */
549 drm_radeon_texture_t tex;
550 drm_radeon_tex_image_t tmp;
551 int ret;
552
553 tex.offset = bo_legacy->offset;
554 tex.image = &tmp;
555 assert(!(tex.offset & 1023));
556
557 tmp.x = 0;
558 tmp.y = 0;
559 if (bo->size < 4096) {
560 tmp.width = (bo->size + 3) / 4;
561 tmp.height = 1;
562 } else {
563 tmp.width = 1024;
564 tmp.height = (bo->size + 4095) / 4096;
565 }
566 tmp.data = bo_legacy->ptr;
567 tex.format = RADEON_TXFORMAT_ARGB8888;
568 tex.width = tmp.width;
569 tex.height = tmp.height;
570 tex.pitch = MAX2(tmp.width / 16, 1);
571 do {
572 ret = drmCommandWriteRead(bo->bom->fd,
573 DRM_RADEON_TEXTURE,
574 &tex,
575 sizeof(drm_radeon_texture_t));
576 if (ret) {
577 if (RADEON_DEBUG & DEBUG_IOCTL)
578 fprintf(stderr, "DRM_RADEON_TEXTURE: again!\n");
579 usleep(1);
580 }
581 } while (ret == -EAGAIN);
582 bo_legacy->dirty = 0;
583 }
584 return 0;
585 }
586
587 int radeon_bo_legacy_validate(struct radeon_bo *bo,
588 uint32_t *soffset,
589 uint32_t *eoffset)
590 {
591 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
592 int r;
593
594 if (bo_legacy->map_count) {
595 fprintf(stderr, "bo(%p, %d) is mapped (%d) can't valide it.\n",
596 bo, bo->size, bo_legacy->map_count);
597 return -EINVAL;
598 }
599 if (bo_legacy->static_bo || bo_legacy->validated) {
600 *soffset = bo_legacy->offset;
601 *eoffset = bo_legacy->offset + bo->size;
602 return 0;
603 }
604 if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
605 r = bo_vram_validate(bo, soffset, eoffset);
606 if (r) {
607 return r;
608 }
609 }
610 *soffset = bo_legacy->offset;
611 *eoffset = bo_legacy->offset + bo->size;
612 bo_legacy->validated = 1;
613 return 0;
614 }
615
/* Mark the BO as pending on fence age 'pending', taking one reference
 * per call (dropped again by legacy_is_pending() when the BO retires).
 * The BO is appended to the manager's pending list only on the first
 * call; subsequent calls just bump the count and refresh the age. */
void radeon_bo_legacy_pending(struct radeon_bo *bo, uint32_t pending)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    bo_legacy->pending = pending;
    bo_legacy->is_pending++;
    /* add to pending list */
    radeon_bo_ref(bo);
    if (bo_legacy->is_pending > 1) {
        return;
    }
    /* First pending marker: append at the tail via the sentinel. */
    bo_legacy->pprev = boml->pending_bos.pprev;
    bo_legacy->pnext = NULL;
    bo_legacy->pprev->pnext = bo_legacy;
    boml->pending_bos.pprev = bo_legacy;
    boml->cpendings++;
}
634
/* Tear down the legacy BO manager: frees every BO still on the list,
 * the free-handle stack, and the manager itself.  Safe on NULL.
 * NOTE(review): boml->texture_heap created in the ctor is not destroyed
 * here — confirm whether the screen owns it or this leaks the heap. */
void radeon_bo_manager_legacy_dtor(struct radeon_bo_manager *bom)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    struct bo_legacy *bo_legacy;

    if (bom == NULL) {
        return;
    }
    bo_legacy = boml->bos.next;
    while (bo_legacy) {
        struct bo_legacy *next;

        /* bo_free() unlinks the node, so grab next first. */
        next = bo_legacy->next;
        bo_free(bo_legacy);
        bo_legacy = next;
    }
    free(boml->free_handles);
    free(boml);
}
654
/* Create a BO wrapping a fixed region of the framebuffer (front/back/
 * depth buffer).  Static BOs never move and own no heap resources; the
 * card address itself doubles as the handle.  Returns NULL on failure. */
static struct bo_legacy *radeon_legacy_bo_alloc_static(struct bo_manager_legacy *bom,
                                                       int size, uint32_t offset)
{
    struct bo_legacy *bo;

    bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
    if (bo == NULL)
        return NULL;
    bo->static_bo = 1;
    bo->offset = offset + bom->fb_location;
    bo->base.handle = bo->offset;   /* handle == card address */
    bo->ptr = bom->screen->driScreen->pFB + offset;
    /* Keep fresh handle allocation above any static handle value. */
    if (bo->base.handle > bom->nhandle) {
        bom->nhandle = bo->base.handle + 1;
    }
    radeon_bo_ref(&(bo->base));
    return bo;
}
673
674 struct radeon_bo_manager *radeon_bo_manager_legacy_ctor(struct radeon_screen *scrn)
675 {
676 struct bo_manager_legacy *bom;
677 struct bo_legacy *bo;
678 unsigned size;
679
680 bom = (struct bo_manager_legacy*)
681 calloc(1, sizeof(struct bo_manager_legacy));
682 if (bom == NULL) {
683 return NULL;
684 }
685
686 bom->texture_heap = driCreateTextureHeap(0,
687 bom,
688 scrn->texSize[0],
689 12,
690 RADEON_NR_TEX_REGIONS,
691 (drmTextureRegionPtr)scrn->sarea->tex_list[0],
692 &scrn->sarea->tex_age[0],
693 &bom->texture_swapped,
694 sizeof(struct bo_legacy),
695 &bo_legacy_tobj_destroy);
696 bom->texture_offset = scrn->texOffset[0];
697
698 bom->base.funcs = &bo_legacy_funcs;
699 bom->base.fd = scrn->driScreen->fd;
700 bom->bos.next = NULL;
701 bom->bos.prev = NULL;
702 bom->pending_bos.pprev = &bom->pending_bos;
703 bom->pending_bos.pnext = NULL;
704 bom->screen = scrn;
705 bom->fb_location = scrn->fbLocation;
706 bom->nhandle = 1;
707 bom->cfree_handles = 0;
708 bom->nfree_handles = 0x400;
709 bom->free_handles = (uint32_t*)malloc(bom->nfree_handles * 4);
710 if (bom->free_handles == NULL) {
711 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
712 return NULL;
713 }
714
715 /* biggest framebuffer size */
716 size = 4096*4096*4;
717
718 /* allocate front */
719 bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->frontOffset);
720 if (!bo) {
721 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
722 return NULL;
723 }
724 if (scrn->sarea->tiling_enabled) {
725 bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
726 }
727
728 /* allocate back */
729 bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->backOffset);
730 if (!bo) {
731 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
732 return NULL;
733 }
734 if (scrn->sarea->tiling_enabled) {
735 bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
736 }
737
738 /* allocate depth */
739 bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->depthOffset);
740 if (!bo) {
741 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
742 return NULL;
743 }
744 bo->base.flags = 0;
745 if (scrn->sarea->tiling_enabled) {
746 bo->base.flags |= RADEON_BO_FLAGS_MACRO_TILE;
747 bo->base.flags |= RADEON_BO_FLAGS_MICRO_TILE;
748 }
749 return (struct radeon_bo_manager*)bom;
750 }
751
/* Per-frame hook: run the texmem DRI_AGE_TEXTURES macro on the heap —
 * presumably to sync its age/LRU state with the SAREA (see texmem.h). */
void radeon_bo_legacy_texture_age(struct radeon_bo_manager *bom)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    DRI_AGE_TEXTURES(boml->texture_heap);
}
757
758 unsigned radeon_bo_legacy_relocs_size(struct radeon_bo *bo)
759 {
760 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
761
762 if (bo_legacy->static_bo || (bo->domains & RADEON_GEM_DOMAIN_GTT)) {
763 return 0;
764 }
765 return bo->size;
766 }
767
768 int radeon_legacy_bo_is_static(struct radeon_bo *bo)
769 {
770 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
771 return bo_legacy->static_bo;
772 }
773