WIP commit
[mesa.git] / src / mesa / drivers / dri / radeon / radeon_bo_legacy.c
1 /*
2 * Copyright © 2008 Nicolai Haehnle
3 * Copyright © 2008 Dave Airlie
4 * Copyright © 2008 Jérôme Glisse
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
25 * of the Software.
26 */
27 /*
28 * Authors:
29 * Aapo Tahkola <aet@rasterburn.org>
30 * Nicolai Haehnle <prefect_@gmx.net>
31 * Dave Airlie
32 * Jérôme Glisse <glisse@freedesktop.org>
33 */
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "xf86drm.h"
#include "texmem.h"
#include "main/simple_list.h"

#include "drm.h"
#include "radeon_drm.h"
#include "radeon_bo.h"
#include "radeon_bo_legacy.h"
#include "common_context.h"
/* Per-buffer-object state for the legacy (non-GEM) memory path.
 *
 * Embeds the generic radeon_bo as its first member so struct bo_legacy*
 * and struct radeon_bo* can be cast back and forth.
 */
struct bo_legacy {
    struct radeon_bo base;            /* must stay first: cast target */
    driTextureObject tobj_base;       /* NOTE(review): not referenced by name in
                                       * this file; bo_legacy_tobj_destroy()
                                       * recovers the bo from a texture object
                                       * pointer by offset — confirm the field
                                       * the heap really hands back */
    int map_count;                    /* nested bo_map() count */
    uint32_t pending;                 /* GPU age at which this bo retires */
    int is_pending;                   /* pending refs (see radeon_bo_legacy_pending) */
    int validated;                    /* cached offset is current */
    int static_bo;                    /* wraps front/back/depth surface */
    int got_dri_texture_obj;          /* dri_texture_obj holds heap space */
    int dirty;                        /* CPU shadow newer than VRAM copy */
    uint32_t offset;                  /* card address of the bo */
    driTextureObject dri_texture_obj; /* placement in the DRI texture heap */
    void *ptr;                        /* CPU pointer: GART map, malloc shadow, or pFB */
    struct bo_legacy *next, *prev;    /* links in the manager's bo list */
    struct bo_legacy *pnext, *pprev;  /* links in the manager's pending list */
};
68
/* Legacy bo manager: owns every bo, the pending list, handle recycling
 * and the DRI texture heap used for VRAM placement.
 */
struct bo_manager_legacy {
    struct radeon_bo_manager base;  /* must stay first: cast target */
    unsigned nhandle;               /* next never-used handle value */
    unsigned nfree_handles;         /* capacity of free_handles[] */
    unsigned cfree_handles;         /* entries currently on the free stack */
    uint32_t current_age;           /* last GPU age read from the hardware */
    struct bo_legacy bos;           /* dummy head (next/prev) of the bo list */
    struct bo_legacy pending_bos;   /* dummy head (pnext/pprev); pprev = tail */
    uint32_t fb_location;           /* card address of the framebuffer */
    uint32_t texture_offset;        /* card address of the texture heap */
    unsigned dma_alloc_size;        /* stats: GART bytes currently allocated */
    uint32_t dma_buf_count;         /* stats: number of GART buffers */
    unsigned cpendings;             /* number of bos on the pending list */
    driTextureObject texture_swapped;
    driTexHeap *texture_heap;
    struct radeon_screen *screen;
    unsigned *free_handles;         /* stack of recycled handles */
};
87
88 static void bo_legacy_tobj_destroy(void *data, driTextureObject *t)
89 {
90 struct bo_legacy *bo_legacy;
91
92 bo_legacy = (struct bo_legacy*)((char*)t)-sizeof(struct radeon_bo);
93 bo_legacy->got_dri_texture_obj = 0;
94 bo_legacy->validated = 0;
95 }
96
97 static void inline clean_handles(struct bo_manager_legacy *bom)
98 {
99 while (bom->cfree_handles > 0 &&
100 !bom->free_handles[bom->cfree_handles - 1])
101 bom->cfree_handles--;
102
103 }
104 static int legacy_new_handle(struct bo_manager_legacy *bom, uint32_t *handle)
105 {
106 uint32_t tmp;
107
108 *handle = 0;
109 if (bom->nhandle == 0xFFFFFFFF) {
110 return -EINVAL;
111 }
112 if (bom->cfree_handles > 0) {
113 tmp = bom->free_handles[--bom->cfree_handles];
114 clean_handles(bom);
115 } else {
116 bom->cfree_handles = 0;
117 tmp = bom->nhandle++;
118 }
119 assert(tmp);
120 *handle = tmp;
121 return 0;
122 }
123
124 static int legacy_free_handle(struct bo_manager_legacy *bom, uint32_t handle)
125 {
126 uint32_t *handles;
127
128 if (!handle) {
129 return 0;
130 }
131 if (handle == (bom->nhandle - 1)) {
132 int i;
133
134 bom->nhandle--;
135 for (i = bom->cfree_handles - 1; i >= 0; i--) {
136 if (bom->free_handles[i] == (bom->nhandle - 1)) {
137 bom->nhandle--;
138 bom->free_handles[i] = 0;
139 }
140 }
141 clean_handles(bom);
142 return 0;
143 }
144 if (bom->cfree_handles < bom->nfree_handles) {
145 bom->free_handles[bom->cfree_handles++] = handle;
146 return 0;
147 }
148 bom->nfree_handles += 0x100;
149 handles = (uint32_t*)realloc(bom->free_handles, bom->nfree_handles * 4);
150 if (handles == NULL) {
151 bom->nfree_handles -= 0x100;
152 return -ENOMEM;
153 }
154 bom->free_handles = handles;
155 bom->free_handles[bom->cfree_handles++] = handle;
156 return 0;
157 }
158
/* Refresh boml->current_age with the GPU's latest completed age.
 *
 * R300-class hardware is queried through the RADEON_PARAM_LAST_CLEAR
 * getparam ioctl (a failing ioctl is fatal: exit(1)); older parts read
 * the age directly from the shared scratch register copy.
 */
static void legacy_get_current_age(struct bo_manager_legacy *boml)
{
    drm_radeon_getparam_t gp;
    int r;

    if (IS_R300_CLASS(boml->screen)) {
        gp.param = RADEON_PARAM_LAST_CLEAR;
        /* kernel writes the 32-bit age through this pointer */
        gp.value = (int *)&boml->current_age;
        r = drmCommandWriteRead(boml->base.fd, DRM_RADEON_GETPARAM,
                                &gp, sizeof(gp));
        if (r) {
            fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, r);
            exit(1);
        }
    } else
        boml->current_age = boml->screen->scratch[3];
}
176
/* Check whether a bo is still referenced by in-flight rendering.
 *
 * Returns 1 while the bo's pending age is ahead of boml->current_age
 * (callers must refresh the age first via legacy_get_current_age()).
 * Returns 0 once retired: the bo is unlinked from the pending list and
 * every reference taken by radeon_bo_legacy_pending() is dropped — which
 * may destroy the bo, so callers must not touch it after a 0 return.
 */
static int legacy_is_pending(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo_legacy->is_pending <= 0) {
        bo_legacy->is_pending = 0;
        return 0;
    }
    if (boml->current_age >= bo_legacy->pending) {
        /* unlink from the pending list, fixing up the tail pointer */
        if (boml->pending_bos.pprev == bo_legacy) {
            boml->pending_bos.pprev = bo_legacy->pprev;
        }
        bo_legacy->pprev->pnext = bo_legacy->pnext;
        if (bo_legacy->pnext) {
            bo_legacy->pnext->pprev = bo_legacy->pprev;
        }
        /* drop one reference per pending marker; stop early if the last
         * unref destroys the bo (radeon_bo_unref returns NULL then) */
        assert(bo_legacy->is_pending <= bo->cref);
        while (bo_legacy->is_pending--) {
            bo = radeon_bo_unref(bo);
            if (!bo)
                break;
        }
        if (bo)
            bo_legacy->is_pending = 0;
        boml->cpendings--;
        return 0;
    }
    return 1;
}
207
208 static int legacy_wait_pending(struct radeon_bo *bo)
209 {
210 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
211 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
212
213 if (!bo_legacy->is_pending) {
214 return 0;
215 }
216 /* FIXME: lockup and userspace busy looping that's all the folks */
217 legacy_get_current_age(boml);
218 while (legacy_is_pending(bo)) {
219 usleep(10);
220 legacy_get_current_age(boml);
221 }
222 return 0;
223 }
224
225 static void legacy_track_pending(struct bo_manager_legacy *boml, int debug)
226 {
227 struct bo_legacy *bo_legacy;
228 struct bo_legacy *next;
229
230 legacy_get_current_age(boml);
231 bo_legacy = boml->pending_bos.pnext;
232 while (bo_legacy) {
233 if (debug)
234 fprintf(stderr,"pending %p %d %d %d\n", bo_legacy, bo_legacy->base.size,
235 boml->current_age, bo_legacy->pending);
236 next = bo_legacy->pnext;
237 if (legacy_is_pending(&(bo_legacy->base))) {
238 }
239 bo_legacy = next;
240 }
241 }
242
243 static int legacy_wait_any_pending(struct bo_manager_legacy *boml)
244 {
245 struct bo_legacy *bo_legacy;
246 struct bo_legacy *next;
247
248 legacy_get_current_age(boml);
249 bo_legacy = boml->pending_bos.pnext;
250 if (!bo_legacy)
251 return -1;
252 legacy_wait_pending(&bo_legacy->base);
253 return 0;
254 }
255
256 static struct bo_legacy *bo_allocate(struct bo_manager_legacy *boml,
257 uint32_t size,
258 uint32_t alignment,
259 uint32_t domains,
260 uint32_t flags)
261 {
262 struct bo_legacy *bo_legacy;
263
264 bo_legacy = (struct bo_legacy*)calloc(1, sizeof(struct bo_legacy));
265 if (bo_legacy == NULL) {
266 return NULL;
267 }
268 bo_legacy->base.bom = (struct radeon_bo_manager*)boml;
269 bo_legacy->base.handle = 0;
270 bo_legacy->base.size = size;
271 bo_legacy->base.alignment = alignment;
272 bo_legacy->base.domains = domains;
273 bo_legacy->base.flags = flags;
274 bo_legacy->base.ptr = NULL;
275 bo_legacy->map_count = 0;
276 bo_legacy->next = NULL;
277 bo_legacy->prev = NULL;
278 bo_legacy->got_dri_texture_obj = 0;
279 bo_legacy->pnext = NULL;
280 bo_legacy->pprev = NULL;
281 bo_legacy->next = boml->bos.next;
282 bo_legacy->prev = &boml->bos;
283 boml->bos.next = bo_legacy;
284 if (bo_legacy->next) {
285 bo_legacy->next->prev = bo_legacy;
286 }
287 return bo_legacy;
288 }
289
/* Back a GTT-domain bo with a GART DMA region via DRM_RADEON_ALLOC.
 *
 * The request is rounded up to a 4KiB multiple and bo->size is updated
 * to the rounded value.  On success ptr/offset are derived from the
 * screen's GART texture mapping; on failure bo_legacy->ptr is left NULL
 * (bo_dma_free() relies on that) and the ioctl error is returned.
 */
static int bo_dma_alloc(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    drm_radeon_mem_alloc_t alloc;
    unsigned size;
    int base_offset;
    int r;

    /* align size on 4Kb */
    size = (((4 * 1024) - 1) + bo->size) & ~((4 * 1024) - 1);
    alloc.region = RADEON_MEM_REGION_GART;
    alloc.alignment = bo_legacy->base.alignment;
    alloc.size = size;
    alloc.region_offset = &base_offset;  /* kernel writes the offset here */
    r = drmCommandWriteRead(bo->bom->fd,
                            DRM_RADEON_ALLOC,
                            &alloc,
                            sizeof(alloc));
    if (r) {
        /* ptr is set to NULL if dma allocation failed */
        bo_legacy->ptr = NULL;
        return r;
    }
    bo_legacy->ptr = boml->screen->gartTextures.map + base_offset;
    bo_legacy->offset = boml->screen->gart_texture_offset + base_offset;
    bo->size = size;
    /* statistics used for debugging/accounting */
    boml->dma_alloc_size += size;
    boml->dma_buf_count++;
    return 0;
}
321
/* Release the GART DMA region backing a GTT-domain bo.
 *
 * A NULL ptr means bo_dma_alloc() failed, so there is nothing to free.
 * The offset handed to DRM_RADEON_FREE is made relative to the GART
 * texture aperture.  Returns the ioctl result (0 on success).
 *
 * NOTE(review): legacy_get_current_age() is called only for its side
 * effect of refreshing boml->current_age — presumably so subsequent
 * pending checks see a fresh age; confirm before removing.
 */
static int bo_dma_free(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    drm_radeon_mem_free_t memfree;
    int r;

    if (bo_legacy->ptr == NULL) {
        /* ptr is set to NULL if dma allocation failed */
        return 0;
    }
    legacy_get_current_age(boml);
    memfree.region = RADEON_MEM_REGION_GART;
    memfree.region_offset = bo_legacy->offset;
    memfree.region_offset -= boml->screen->gart_texture_offset;
    r = drmCommandWrite(boml->base.fd,
                        DRM_RADEON_FREE,
                        &memfree,
                        sizeof(memfree));
    if (r) {
        fprintf(stderr, "Failed to free bo[%p] at %08x\n",
                &bo_legacy->base, memfree.region_offset);
        fprintf(stderr, "ret = %s\n", strerror(-r));
        return r;
    }
    boml->dma_alloc_size -= bo_legacy->base.size;
    boml->dma_buf_count--;
    return 0;
}
351
352 static void bo_free(struct bo_legacy *bo_legacy)
353 {
354 struct bo_manager_legacy *boml;
355
356 if (bo_legacy == NULL) {
357 return;
358 }
359 boml = (struct bo_manager_legacy *)bo_legacy->base.bom;
360 bo_legacy->prev->next = bo_legacy->next;
361 if (bo_legacy->next) {
362 bo_legacy->next->prev = bo_legacy->prev;
363 }
364 if (!bo_legacy->static_bo) {
365 legacy_free_handle(boml, bo_legacy->base.handle);
366 if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
367 /* dma buffers */
368 bo_dma_free(&bo_legacy->base);
369 } else {
370 /* free backing store */
371 free(bo_legacy->ptr);
372 }
373 }
374 memset(bo_legacy, 0 , sizeof(struct bo_legacy));
375 free(bo_legacy);
376 }
377
378 static struct radeon_bo *bo_open(struct radeon_bo_manager *bom,
379 uint32_t handle,
380 uint32_t size,
381 uint32_t alignment,
382 uint32_t domains,
383 uint32_t flags)
384 {
385 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
386 struct bo_legacy *bo_legacy;
387 int r;
388
389 if (handle) {
390 bo_legacy = boml->bos.next;
391 while (bo_legacy) {
392 if (bo_legacy->base.handle == handle) {
393 radeon_bo_ref(&(bo_legacy->base));
394 return (struct radeon_bo*)bo_legacy;
395 }
396 bo_legacy = bo_legacy->next;
397 }
398 return NULL;
399 }
400
401 bo_legacy = bo_allocate(boml, size, alignment, domains, flags);
402 bo_legacy->static_bo = 0;
403 r = legacy_new_handle(boml, &bo_legacy->base.handle);
404 if (r) {
405 bo_free(bo_legacy);
406 return NULL;
407 }
408 if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
409 retry:
410 legacy_track_pending(boml, 0);
411 /* dma buffers */
412
413 r = bo_dma_alloc(&(bo_legacy->base));
414 if (r) {
415 if (legacy_wait_any_pending(boml) == -1) {
416 fprintf(stderr, "Ran out of GART memory (for %d)!\n", size);
417 fprintf(stderr, "Please consider adjusting GARTSize option.\n");
418 bo_free(bo_legacy);
419 exit(-1);
420 }
421 goto retry;
422 return NULL;
423 }
424 } else {
425 bo_legacy->ptr = malloc(bo_legacy->base.size);
426 if (bo_legacy->ptr == NULL) {
427 bo_free(bo_legacy);
428 return NULL;
429 }
430 }
431 radeon_bo_ref(&(bo_legacy->base));
432 return (struct radeon_bo*)bo_legacy;
433 }
434
/* Reference callback: intentionally a no-op.  NOTE(review): the bo->cref
 * bookkeeping is not visible in this file — presumably the generic
 * radeon_bo_ref() wrapper maintains it; confirm against radeon_bo.h. */
static void bo_ref(struct radeon_bo *bo)
{
}
438
/* Unreference callback.  NOTE(review): bo->cref appears to be
 * decremented by the generic wrapper before this runs — the decrement is
 * not visible in this file; confirm against radeon_bo.h.
 *
 * At zero references the bo is unlinked from the manager's list; if it
 * is still pending on the GPU it is kept alive (legacy_is_pending()
 * frees it later), otherwise it is freed here.  Returns NULL when the
 * last reference is gone, bo otherwise.
 */
static struct radeon_bo *bo_unref(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo->cref <= 0) {
        /* unlink here; bo_free() repeats the unlink, which is a
         * harmless no-op on the same neighbours */
        bo_legacy->prev->next = bo_legacy->next;
        if (bo_legacy->next) {
            bo_legacy->next->prev = bo_legacy->prev;
        }
        if (!bo_legacy->is_pending) {
            bo_free(bo_legacy);
        }
        return NULL;
    }
    return bo;
}
455
/* Map a bo for CPU access.
 *
 * Waits for any pending GPU use, then exposes the backing pointer via
 * bo->ptr.  Mapping always marks the bo dirty and un-validated — the
 * 'write' flag is ignored, so even read-only maps force a re-upload.
 */
static int bo_map(struct radeon_bo *bo, int write)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    legacy_wait_pending(bo);
    bo_legacy->validated = 0;
    bo_legacy->dirty = 1;
    bo_legacy->map_count++;
    bo->ptr = bo_legacy->ptr;
    /* Read the first pixel in the frame buffer. This should
     * be a noop, right? In fact without this conform fails as reading
     * from the framebuffer sometimes produces old results -- the
     * on-card read cache gets mixed up and doesn't notice that the
     * framebuffer has been updated.
     *
     * Note that we should probably be reading some otherwise unused
     * region of VRAM, otherwise we might get incorrect results when
     * reading pixels from the top left of the screen.
     *
     * I found this problem on an R420 with glean's texCube test.
     * Note that the R200 span code also *writes* the first pixel in the
     * framebuffer, but I've found this to be unnecessary.
     *  -- Nicolai Hähnle, June 2008
     */
    {
        int p;
        /* volatile so the dummy framebuffer read is not optimized away */
        volatile int *buf = (int*)boml->screen->driScreen->pFB;
        p = *buf;
    }
    return 0;
}
488
489 static int bo_unmap(struct radeon_bo *bo)
490 {
491 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
492
493 if (--bo_legacy->map_count > 0) {
494 return 0;
495 }
496 bo->ptr = NULL;
497 return 0;
498 }
499
/* Function table plugged into the generic bo manager.  Positional
 * initializer: open, ref, unref, map, unmap — the order must match
 * struct radeon_bo_funcs (declared in radeon_bo.h). */
static struct radeon_bo_funcs bo_legacy_funcs = {
    bo_open,
    bo_ref,
    bo_unref,
    bo_map,
    bo_unmap
};
507
508 static int bo_vram_validate(struct radeon_bo *bo,
509 uint32_t *soffset,
510 uint32_t *eoffset)
511 {
512 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
513 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
514 int r;
515
516 if (!bo_legacy->got_dri_texture_obj) {
517 make_empty_list(&bo_legacy->dri_texture_obj);
518 bo_legacy->dri_texture_obj.totalSize = bo->size;
519 r = driAllocateTexture(&boml->texture_heap, 1,
520 &bo_legacy->dri_texture_obj);
521 if (r) {
522 uint8_t *segfault=NULL;
523 fprintf(stderr, "Ouch! vram_validate failed %d\n", r);
524 *segfault=1;
525 return -1;
526 }
527 bo_legacy->offset = boml->texture_offset +
528 bo_legacy->dri_texture_obj.memBlock->ofs;
529 bo_legacy->got_dri_texture_obj = 1;
530 bo_legacy->dirty = 1;
531 }
532 if (bo_legacy->dirty) {
533 /* Copy to VRAM using a blit.
534 * All memory is 4K aligned. We're using 1024 pixels wide blits.
535 */
536 drm_radeon_texture_t tex;
537 drm_radeon_tex_image_t tmp;
538 int ret;
539
540 tex.offset = bo_legacy->offset;
541 tex.image = &tmp;
542 assert(!(tex.offset & 1023));
543
544 tmp.x = 0;
545 tmp.y = 0;
546 if (bo->size < 4096) {
547 tmp.width = (bo->size + 3) / 4;
548 tmp.height = 1;
549 } else {
550 tmp.width = 1024;
551 tmp.height = (bo->size + 4095) / 4096;
552 }
553 tmp.data = bo_legacy->ptr;
554 tex.format = RADEON_TXFORMAT_ARGB8888;
555 tex.width = tmp.width;
556 tex.height = tmp.height;
557 tex.pitch = MAX2(tmp.width / 16, 1);
558 do {
559 ret = drmCommandWriteRead(bo->bom->fd,
560 DRM_RADEON_TEXTURE,
561 &tex,
562 sizeof(drm_radeon_texture_t));
563 if (ret) {
564 if (RADEON_DEBUG & DEBUG_IOCTL)
565 fprintf(stderr, "DRM_RADEON_TEXTURE: again!\n");
566 usleep(1);
567 }
568 } while (ret == -EAGAIN);
569 bo_legacy->dirty = 0;
570 }
571 return 0;
572 }
573
574 int radeon_bo_legacy_validate(struct radeon_bo *bo,
575 uint32_t *soffset,
576 uint32_t *eoffset)
577 {
578 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
579 int r;
580
581 if (bo_legacy->map_count) {
582 fprintf(stderr, "bo(%p, %d) is mapped (%d) can't valide it.\n",
583 bo, bo->size, bo_legacy->map_count);
584 return -EINVAL;
585 }
586 if (bo_legacy->static_bo || bo_legacy->validated) {
587 *soffset = bo_legacy->offset;
588 *eoffset = bo_legacy->offset + bo->size;
589 return 0;
590 }
591 if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
592 r = bo_vram_validate(bo, soffset, eoffset);
593 if (r) {
594 return r;
595 }
596 }
597 *soffset = bo_legacy->offset;
598 *eoffset = bo_legacy->offset + bo->size;
599 bo_legacy->validated = 1;
600 return 0;
601 }
602
/* Mark a bo as referenced by a command stream that completes at GPU age
 * 'pending'.
 *
 * Each call takes one extra reference (dropped later by
 * legacy_is_pending() once the age passes) and updates the target age.
 * The bo is appended to the manager's pending list only on the first
 * marking; further calls just bump is_pending.
 */
void radeon_bo_legacy_pending(struct radeon_bo *bo, uint32_t pending)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    bo_legacy->pending = pending;
    bo_legacy->is_pending++;
    /* add to pending list */
    radeon_bo_ref(bo);
    if (bo_legacy->is_pending > 1) {
        /* already linked */
        return;
    }
    /* append at the tail; pending_bos.pprev tracks the tail */
    bo_legacy->pprev = boml->pending_bos.pprev;
    bo_legacy->pnext = NULL;
    bo_legacy->pprev->pnext = bo_legacy;
    boml->pending_bos.pprev = bo_legacy;
    boml->cpendings++;
}
621
622 void radeon_bo_manager_legacy_dtor(struct radeon_bo_manager *bom)
623 {
624 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
625 struct bo_legacy *bo_legacy;
626
627 if (bom == NULL) {
628 return;
629 }
630 bo_legacy = boml->bos.next;
631 while (bo_legacy) {
632 struct bo_legacy *next;
633
634 next = bo_legacy->next;
635 bo_free(bo_legacy);
636 bo_legacy = next;
637 }
638 free(boml->free_handles);
639 free(boml);
640 }
641
642 struct radeon_bo_manager *radeon_bo_manager_legacy_ctor(struct radeon_screen *scrn)
643 {
644 struct bo_manager_legacy *bom;
645 struct bo_legacy *bo;
646 unsigned size;
647
648 bom = (struct bo_manager_legacy*)
649 calloc(1, sizeof(struct bo_manager_legacy));
650 if (bom == NULL) {
651 return NULL;
652 }
653
654 bom->texture_heap = driCreateTextureHeap(0,
655 bom,
656 scrn->texSize[0],
657 12,
658 RADEON_NR_TEX_REGIONS,
659 (drmTextureRegionPtr)scrn->sarea->tex_list[0],
660 &scrn->sarea->tex_age[0],
661 &bom->texture_swapped,
662 sizeof(struct bo_legacy),
663 &bo_legacy_tobj_destroy);
664 bom->texture_offset = scrn->texOffset[0];
665
666 bom->base.funcs = &bo_legacy_funcs;
667 bom->base.fd = scrn->driScreen->fd;
668 bom->bos.next = NULL;
669 bom->bos.prev = NULL;
670 bom->pending_bos.pprev = &bom->pending_bos;
671 bom->pending_bos.pnext = NULL;
672 bom->screen = scrn;
673 bom->fb_location = scrn->fbLocation;
674 bom->nhandle = 1;
675 bom->cfree_handles = 0;
676 bom->nfree_handles = 0x400;
677 bom->free_handles = (uint32_t*)malloc(bom->nfree_handles * 4);
678 if (bom->free_handles == NULL) {
679 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
680 return NULL;
681 }
682
683 /* biggest framebuffer size */
684 size = 4096*4096*4;
685 /* allocate front */
686 bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
687 if (bo == NULL) {
688 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
689 return NULL;
690 }
691 if (scrn->sarea->tiling_enabled) {
692 bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
693 }
694 bo->static_bo = 1;
695 bo->offset = bom->screen->frontOffset + bom->fb_location;
696 bo->base.handle = bo->offset;
697 bo->ptr = scrn->driScreen->pFB + bom->screen->frontOffset;
698 if (bo->base.handle > bom->nhandle) {
699 bom->nhandle = bo->base.handle + 1;
700 }
701 /* allocate back */
702 bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
703 if (bo == NULL) {
704 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
705 return NULL;
706 }
707 if (scrn->sarea->tiling_enabled) {
708 bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
709 }
710 bo->static_bo = 1;
711 bo->offset = bom->screen->backOffset + bom->fb_location;
712 bo->base.handle = bo->offset;
713 bo->ptr = scrn->driScreen->pFB + bom->screen->backOffset;
714 if (bo->base.handle > bom->nhandle) {
715 bom->nhandle = bo->base.handle + 1;
716 }
717 /* allocate depth */
718 bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
719 if (bo == NULL) {
720 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
721 return NULL;
722 }
723 bo->base.flags = 0;
724 if (scrn->sarea->tiling_enabled) {
725 bo->base.flags |= RADEON_BO_FLAGS_MACRO_TILE;
726 bo->base.flags |= RADEON_BO_FLAGS_MICRO_TILE;
727 }
728 bo->static_bo = 1;
729 bo->offset = bom->screen->depthOffset + bom->fb_location;
730 bo->base.handle = bo->offset;
731 bo->ptr = scrn->driScreen->pFB + bom->screen->depthOffset;
732 if (bo->base.handle > bom->nhandle) {
733 bom->nhandle = bo->base.handle + 1;
734 }
735 return (struct radeon_bo_manager*)bom;
736 }
737
/* Age the DRI texture heap so its LRU/eviction state catches up with the
 * texture age counters shared through the sarea. */
void radeon_bo_legacy_texture_age(struct radeon_bo_manager *bom)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    DRI_AGE_TEXTURES(boml->texture_heap);
}
743
744 unsigned radeon_bo_legacy_relocs_size(struct radeon_bo *bo)
745 {
746 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
747
748 if (bo_legacy->static_bo || (bo->domains & RADEON_GEM_DOMAIN_GTT)) {
749 return 0;
750 }
751 return bo->size;
752 }