src/mesa/drivers/dri/radeon/radeon_bo_legacy.c
/*
 * Copyright © 2008 Nicolai Haehnle
 * Copyright © 2008 Dave Airlie
 * Copyright © 2008 Jérôme Glisse
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *   Aapo Tahkola <aet@rasterburn.org>
 *   Nicolai Haehnle <prefect_@gmx.net>
 *   Dave Airlie
 *   Jérôme Glisse <glisse@freedesktop.org>
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include "xf86drm.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_bo.h"
#include "radeon_bo_legacy.h"
#include "radeon_ioctl.h"
#include "texmem.h"

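/*
 * Notes on the legacy (non-GEM) buffer-object manager implemented below:
 *
 * - bo_manager_legacy keeps two intrusive lists rooted in dummy nodes:
 *   "bos" links every buffer object through next/prev, while "pending_bos"
 *   links objects still referenced by the hardware through pnext/pprev.
 * - Handles are small integers handed out by legacy_new_handle() and
 *   recycled through the free_handles array by legacy_free_handle().
 * - GTT (GART) buffers are backed by DRM_RADEON_ALLOC regions; VRAM buffers
 *   are backed by malloc()ed system memory that is blitted into the DRI
 *   texture heap at validation time.
 */
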
struct bo_legacy {
    struct radeon_bo    base;
    driTextureObject    tobj_base;
    int                 map_count;
    uint32_t            pending;
    int                 is_pending;
    int                 validated;
    int                 static_bo;
    int                 got_dri_texture_obj;
    int                 dirty;
    uint32_t            offset;
    driTextureObject    dri_texture_obj;
    void                *ptr;
    struct bo_legacy    *next, *prev;
    struct bo_legacy    *pnext, *pprev;
};

struct bo_manager_legacy {
    struct radeon_bo_manager    base;
    unsigned                    nhandle;
    unsigned                    nfree_handles;
    unsigned                    cfree_handles;
    uint32_t                    current_age;
    struct bo_legacy            bos;
    struct bo_legacy            pending_bos;
    uint32_t                    fb_location;
    uint32_t                    texture_offset;
    unsigned                    dma_alloc_size;
    unsigned                    cpendings;
    driTextureObject            texture_swapped;
    driTexHeap                  *texture_heap;
    struct radeon_screen        *screen;
    unsigned                    *free_handles;
};

static void bo_legacy_tobj_destroy(void *data, driTextureObject *t)
{
    struct bo_legacy *bo_legacy;

    /* tobj_base sits right after the embedded struct radeon_bo, so step
     * back sizeof(struct radeon_bo) bytes to recover the container. */
    bo_legacy = (struct bo_legacy*)(((char*)t) - sizeof(struct radeon_bo));
    bo_legacy->got_dri_texture_obj = 0;
    bo_legacy->validated = 0;
}

static int legacy_new_handle(struct bo_manager_legacy *bom, uint32_t *handle)
{
    uint32_t tmp;

    *handle = 0;
    if (bom->nhandle == 0xFFFFFFFF) {
        return -EINVAL;
    }
    if (bom->cfree_handles > 0) {
        tmp = bom->free_handles[--bom->cfree_handles];
        /* skip trailing entries that were zeroed out when the top of the
         * handle range was reclaimed in legacy_free_handle() */
        while (bom->cfree_handles > 0 &&
               !bom->free_handles[bom->cfree_handles - 1]) {
            bom->cfree_handles--;
        }
    } else {
        bom->cfree_handles = 0;
        tmp = bom->nhandle++;
    }
    assert(tmp);
    *handle = tmp;
    return 0;
}

static int legacy_free_handle(struct bo_manager_legacy *bom, uint32_t handle)
{
    uint32_t *handles;

    if (!handle) {
        return 0;
    }
    if (handle == (bom->nhandle - 1)) {
        int i;

        bom->nhandle--;
        for (i = bom->cfree_handles - 1; i >= 0; i--) {
            if (bom->free_handles[i] == (bom->nhandle - 1)) {
                bom->nhandle--;
                bom->free_handles[i] = 0;
            }
        }
        while (bom->cfree_handles > 0 &&
               !bom->free_handles[bom->cfree_handles - 1]) {
            bom->cfree_handles--;
        }
        return 0;
    }
    if (bom->cfree_handles < bom->nfree_handles) {
        bom->free_handles[bom->cfree_handles++] = handle;
        return 0;
    }
    bom->nfree_handles += 0x100;
    handles = (uint32_t*)realloc(bom->free_handles, bom->nfree_handles * 4);
    if (handles == NULL) {
        bom->nfree_handles -= 0x100;
        return -ENOMEM;
    }
    bom->free_handles = handles;
    bom->free_handles[bom->cfree_handles++] = handle;
    return 0;
}
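
/*
 * Handle bookkeeping summary: handles grow monotonically from 1 (nhandle).
 * Freed handles below the top of the range are pushed onto free_handles and
 * reused by legacy_new_handle(); freeing the top handle shrinks nhandle and
 * zeroes any free-list entries that now fall outside the shrunken range.
 */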

static void legacy_get_current_age(struct bo_manager_legacy *boml)
{
    drm_radeon_getparam_t gp;
    int r;

    gp.param = RADEON_PARAM_LAST_CLEAR;
    gp.value = (int *)&boml->current_age;
    r = drmCommandWriteRead(boml->base.fd, DRM_RADEON_GETPARAM,
                            &gp, sizeof(gp));
    if (r) {
        fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, r);
        exit(1);
    }
}

static int legacy_is_pending(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo_legacy->is_pending <= 0) {
        bo_legacy->is_pending = 0;
        return 0;
    }
    if (boml->current_age >= bo_legacy->pending) {
        int pending_refs = bo_legacy->is_pending;

        /* unlink from the pending list */
        if (boml->pending_bos.pprev == bo_legacy) {
            boml->pending_bos.pprev = bo_legacy->pprev;
        }
        bo_legacy->pprev->pnext = bo_legacy->pnext;
        if (bo_legacy->pnext) {
            bo_legacy->pnext->pprev = bo_legacy->pprev;
        }
        /* update the bookkeeping before dropping the pending references:
         * the last unref may free bo_legacy, so it must not be touched
         * afterwards. */
        bo_legacy->is_pending = 0;
        boml->cpendings--;
        while (pending_refs--) {
            radeon_bo_unref(bo);
        }
        return 0;
    }
    return 1;
}

static int legacy_wait_pending(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (!bo_legacy->is_pending) {
        return 0;
    }
    /* FIXME: this busy-waits in userspace; a GPU lockup turns it into an
     * infinite loop. */
    legacy_get_current_age(boml);
    while (legacy_is_pending(bo)) {
        usleep(10);
        legacy_get_current_age(boml);
    }
    return 0;
}

static void legacy_track_pending(struct bo_manager_legacy *boml)
{
    struct bo_legacy *bo_legacy;
    struct bo_legacy *next;

    legacy_get_current_age(boml);
    bo_legacy = boml->pending_bos.pnext;
    while (bo_legacy) {
        next = bo_legacy->pnext;
        /* prunes the buffer from the pending list if it has retired */
        legacy_is_pending(&(bo_legacy->base));
        bo_legacy = next;
    }
}
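
/*
 * The "pending" machinery is a poor man's fence: radeon_bo_legacy_pending()
 * stamps a buffer with an age value, and legacy_get_current_age() reads the
 * last completed age back from the kernel (the driver reuses the
 * RADEON_PARAM_LAST_CLEAR scratch value for this). A buffer is considered
 * idle once current_age has reached its stamp, at which point the extra
 * references taken per stamp are dropped again.
 */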

static struct bo_legacy *bo_allocate(struct bo_manager_legacy *boml,
                                     uint32_t size,
                                     uint32_t alignment,
                                     uint32_t domains,
                                     uint32_t flags)
{
    struct bo_legacy *bo_legacy;

    bo_legacy = (struct bo_legacy*)calloc(1, sizeof(struct bo_legacy));
    if (bo_legacy == NULL) {
        return NULL;
    }
    bo_legacy->base.bom = (struct radeon_bo_manager*)boml;
    bo_legacy->base.handle = 0;
    bo_legacy->base.size = size;
    bo_legacy->base.alignment = alignment;
    bo_legacy->base.domains = domains;
    bo_legacy->base.flags = flags;
    bo_legacy->base.ptr = NULL;
    bo_legacy->map_count = 0;
    bo_legacy->got_dri_texture_obj = 0;
    bo_legacy->pnext = NULL;
    bo_legacy->pprev = NULL;
    /* link into the manager's list of all buffer objects */
    bo_legacy->next = boml->bos.next;
    bo_legacy->prev = &boml->bos;
    boml->bos.next = bo_legacy;
    if (bo_legacy->next) {
        bo_legacy->next->prev = bo_legacy;
    }
    return bo_legacy;
}

static int bo_dma_alloc(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    drm_radeon_mem_alloc_t alloc;
    unsigned size;
    int base_offset;
    int r;

    /* align size to 4KiB */
    size = (((4 * 1024) - 1) + bo->size) & ~((4 * 1024) - 1);
    alloc.region = RADEON_MEM_REGION_GART;
    alloc.alignment = bo_legacy->base.alignment;
    alloc.size = size;
    alloc.region_offset = &base_offset;
    r = drmCommandWriteRead(bo->bom->fd,
                            DRM_RADEON_ALLOC,
                            &alloc,
                            sizeof(alloc));
    if (r) {
        /* ptr is set to NULL if dma allocation failed; let the caller
         * report the GART exhaustion and decide how to bail out */
        bo_legacy->ptr = NULL;
        return r;
    }
    bo_legacy->ptr = boml->screen->gartTextures.map + base_offset;
    bo_legacy->offset = boml->screen->gart_texture_offset + base_offset;
    bo->size = size;
    boml->dma_alloc_size += size;
    return 0;
}

static int bo_dma_free(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    drm_radeon_mem_free_t memfree;
    int r;

    if (bo_legacy->ptr == NULL) {
        /* ptr is set to NULL if dma allocation failed */
        return 0;
    }
    legacy_get_current_age(boml);
    memfree.region = RADEON_MEM_REGION_GART;
    memfree.region_offset = bo_legacy->offset;
    memfree.region_offset -= boml->screen->gart_texture_offset;
    r = drmCommandWrite(boml->base.fd,
                        DRM_RADEON_FREE,
                        &memfree,
                        sizeof(memfree));
    if (r) {
        fprintf(stderr, "Failed to free bo[%p] at %08x\n",
                &bo_legacy->base, memfree.region_offset);
        fprintf(stderr, "ret = %s\n", strerror(-r));
        return r;
    }
    boml->dma_alloc_size -= bo_legacy->base.size;
    return 0;
}
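
/*
 * GTT-domain buffers come from the shared GART texture region:
 * bo_dma_alloc() carves a block out of it with DRM_RADEON_ALLOC and maps it
 * through screen->gartTextures.map; bo_dma_free() returns the block with
 * DRM_RADEON_FREE using an offset relative to gart_texture_offset.
 */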

static void bo_free(struct bo_legacy *bo_legacy)
{
    struct bo_manager_legacy *boml;

    if (bo_legacy == NULL) {
        return;
    }
    boml = (struct bo_manager_legacy *)bo_legacy->base.bom;
    bo_legacy->prev->next = bo_legacy->next;
    if (bo_legacy->next) {
        bo_legacy->next->prev = bo_legacy->prev;
    }
    if (!bo_legacy->static_bo) {
        legacy_free_handle(boml, bo_legacy->base.handle);
        if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
            /* dma buffers */
            bo_dma_free(&bo_legacy->base);
        } else {
            /* free backing store */
            free(bo_legacy->ptr);
        }
    }
    memset(bo_legacy, 0, sizeof(struct bo_legacy));
    free(bo_legacy);
}

static struct radeon_bo *bo_open(struct radeon_bo_manager *bom,
                                 uint32_t handle,
                                 uint32_t size,
                                 uint32_t alignment,
                                 uint32_t domains,
                                 uint32_t flags)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    struct bo_legacy *bo_legacy;
    int r;

    if (handle) {
        bo_legacy = boml->bos.next;
        while (bo_legacy) {
            if (bo_legacy->base.handle == handle) {
                radeon_bo_ref(&(bo_legacy->base));
                return (struct radeon_bo*)bo_legacy;
            }
            bo_legacy = bo_legacy->next;
        }
        return NULL;
    }

    bo_legacy = bo_allocate(boml, size, alignment, domains, flags);
    if (bo_legacy == NULL) {
        return NULL;
    }
    bo_legacy->static_bo = 0;
    r = legacy_new_handle(boml, &bo_legacy->base.handle);
    if (r) {
        bo_free(bo_legacy);
        return NULL;
    }
    if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
        legacy_track_pending(boml);
        /* dma buffers */
        r = bo_dma_alloc(&(bo_legacy->base));
        if (r) {
            fprintf(stderr, "Ran out of GART memory (for %d)!\n", size);
            fprintf(stderr, "Please consider adjusting GARTSize option.\n");
            bo_free(bo_legacy);
            exit(-1);
            return NULL;
        }
    } else {
        bo_legacy->ptr = malloc(bo_legacy->base.size);
        if (bo_legacy->ptr == NULL) {
            bo_free(bo_legacy);
            return NULL;
        }
    }
    radeon_bo_ref(&(bo_legacy->base));
    return (struct radeon_bo*)bo_legacy;
}

static void bo_ref(struct radeon_bo *bo)
{
}

static void bo_unref(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo->cref <= 0) {
        bo_legacy->prev->next = bo_legacy->next;
        if (bo_legacy->next) {
            bo_legacy->next->prev = bo_legacy->prev;
        }
        if (!bo_legacy->is_pending) {
            bo_free(bo_legacy);
        }
    }
}
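
/*
 * Note that bo_unref() only unlinks a buffer whose reference count has hit
 * zero; if the buffer is still marked pending it stays allocated and is only
 * freed once the reference drops performed by legacy_is_pending() bring it
 * back here with is_pending cleared.
 */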

static int bo_map(struct radeon_bo *bo, int write)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    legacy_wait_pending(bo);
    bo_legacy->validated = 0;
    bo_legacy->dirty = 1;
    bo_legacy->map_count++;
    bo->ptr = bo_legacy->ptr;
    /* Read the first pixel in the frame buffer. This should
     * be a noop, right? In fact without this conform fails as reading
     * from the framebuffer sometimes produces old results -- the
     * on-card read cache gets mixed up and doesn't notice that the
     * framebuffer has been updated.
     *
     * Note that we should probably be reading some otherwise unused
     * region of VRAM, otherwise we might get incorrect results when
     * reading pixels from the top left of the screen.
     *
     * I found this problem on an R420 with glean's texCube test.
     * Note that the R200 span code also *writes* the first pixel in the
     * framebuffer, but I've found this to be unnecessary.
     *  -- Nicolai Hähnle, June 2008
     */
    {
        int p;
        volatile int *buf = (int*)boml->screen->driScreen->pFB;
        p = *buf;
    }

    return 0;
}

static int bo_unmap(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (--bo_legacy->map_count > 0) {
        return 0;
    }
    bo->ptr = NULL;
    return 0;
}

static struct radeon_bo_funcs bo_legacy_funcs = {
    bo_open,
    bo_ref,
    bo_unref,
    bo_map,
    bo_unmap
};
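
/*
 * The positional initializer above has to match the member order of
 * struct radeon_bo_funcs in radeon_bo.h (open, ref, unref, map, unmap);
 * the generic radeon_bo_*() helpers are expected to dispatch through this
 * table.
 */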

static int bo_vram_validate(struct radeon_bo *bo,
                            uint32_t *soffset,
                            uint32_t *eoffset)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    int r;

    if (!bo_legacy->got_dri_texture_obj) {
        make_empty_list(&bo_legacy->dri_texture_obj);
        bo_legacy->dri_texture_obj.totalSize = bo->size;
        r = driAllocateTexture(&boml->texture_heap, 1,
                               &bo_legacy->dri_texture_obj);
        if (r) {
            /* deliberate null dereference so the failure is caught
             * immediately in a debugger */
            uint8_t *segfault = NULL;
            fprintf(stderr, "Ouch! vram_validate failed %d\n", r);
            *segfault = 1;
            return -1;
        }
        bo_legacy->offset = boml->texture_offset +
                            bo_legacy->dri_texture_obj.memBlock->ofs;
        bo_legacy->got_dri_texture_obj = 1;
        bo_legacy->dirty = 1;
    }
    if (bo_legacy->dirty) {
        /* Copy to VRAM using a blit.
         * All memory is 4K aligned. We're using 1024 pixels wide blits.
         */
        drm_radeon_texture_t tex;
        drm_radeon_tex_image_t tmp;
        int ret;

        tex.offset = bo_legacy->offset;
        tex.image = &tmp;
        assert(!(tex.offset & 1023));

        tmp.x = 0;
        tmp.y = 0;
        if (bo->size < 4096) {
            tmp.width = (bo->size + 3) / 4;
            tmp.height = 1;
        } else {
            tmp.width = 1024;
            tmp.height = (bo->size + 4095) / 4096;
        }
        tmp.data = bo_legacy->ptr;
        tex.format = RADEON_TXFORMAT_ARGB8888;
        tex.width = tmp.width;
        tex.height = tmp.height;
        tex.pitch = MAX2(tmp.width / 16, 1);
        do {
            ret = drmCommandWriteRead(bo->bom->fd,
                                      DRM_RADEON_TEXTURE,
                                      &tex,
                                      sizeof(drm_radeon_texture_t));
            if (ret) {
                if (RADEON_DEBUG & DEBUG_IOCTL)
                    fprintf(stderr, "DRM_RADEON_TEXTURE: again!\n");
                usleep(1);
            }
        } while (ret == -EAGAIN);
        bo_legacy->dirty = 0;
    }
    return 0;
}
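
/*
 * The upload path above treats the buffer as raw bytes: it is sent to the
 * kernel as an ARGB8888 image 1024 pixels (4096 bytes) wide, with the height
 * rounded up from the buffer size; buffers smaller than 4096 bytes go up as
 * a single row. DRM_RADEON_TEXTURE then blits the data into the VRAM block
 * that driAllocateTexture() reserved.
 */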

int radeon_bo_legacy_validate(struct radeon_bo *bo,
                              uint32_t *soffset,
                              uint32_t *eoffset)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    int r;

    if (bo_legacy->map_count) {
        fprintf(stderr, "bo(%p, %d) is mapped (%d), can't validate it.\n",
                bo, bo->size, bo_legacy->map_count);
        return -EINVAL;
    }
    if (bo_legacy->static_bo || bo_legacy->validated) {
        *soffset = bo_legacy->offset;
        *eoffset = bo_legacy->offset + bo->size;
        return 0;
    }
    if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
        r = bo_vram_validate(bo, soffset, eoffset);
        if (r) {
            return r;
        }
    }
    *soffset = bo_legacy->offset;
    *eoffset = bo_legacy->offset + bo->size;
    bo_legacy->validated = 1;
    return 0;
}

void radeon_bo_legacy_pending(struct radeon_bo *bo, uint32_t pending)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    bo_legacy->pending = pending;
    bo_legacy->is_pending += 1;
    /* add to pending list */
    radeon_bo_ref(bo);
    if (bo_legacy->is_pending > 1) {
        return;
    }
    bo_legacy->pprev = boml->pending_bos.pprev;
    bo_legacy->pnext = NULL;
    bo_legacy->pprev->pnext = bo_legacy;
    boml->pending_bos.pprev = bo_legacy;
    boml->cpendings++;
}
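
/*
 * Every call to radeon_bo_legacy_pending() takes one extra reference on the
 * buffer; legacy_is_pending() drops exactly that many references once the
 * buffer's age has been reached, so a pending buffer can never be freed out
 * from under the hardware.
 */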

void radeon_bo_manager_legacy_shutdown(struct radeon_bo_manager *bom)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    struct bo_legacy *bo_legacy;

    if (bom == NULL) {
        return;
    }
    bo_legacy = boml->bos.next;
    while (bo_legacy) {
        struct bo_legacy *next;

        next = bo_legacy->next;
        bo_free(bo_legacy);
        bo_legacy = next;
    }
    free(boml->free_handles);
    free(boml);
}

struct radeon_bo_manager *radeon_bo_manager_legacy(struct radeon_screen *scrn)
{
    struct bo_manager_legacy *bom;
    struct bo_legacy *bo;
    unsigned size;

    bom = (struct bo_manager_legacy*)
          calloc(1, sizeof(struct bo_manager_legacy));
    if (bom == NULL) {
        return NULL;
    }

    bom->texture_heap = driCreateTextureHeap(0,
                                             bom,
                                             scrn->texSize[0],
                                             12,
                                             RADEON_NR_TEX_REGIONS,
                                             (drmTextureRegionPtr)scrn->sarea->tex_list[0],
                                             &scrn->sarea->tex_age[0],
                                             &bom->texture_swapped,
                                             sizeof(struct bo_legacy),
                                             &bo_legacy_tobj_destroy);
    bom->texture_offset = scrn->texOffset[0];

    bom->base.funcs = &bo_legacy_funcs;
    bom->base.fd = scrn->driScreen->fd;
    bom->bos.next = NULL;
    bom->bos.prev = NULL;
    bom->pending_bos.pprev = &bom->pending_bos;
    bom->pending_bos.pnext = NULL;
    bom->screen = scrn;
    bom->fb_location = scrn->fbLocation;
    bom->nhandle = 1;
    bom->cfree_handles = 0;
    bom->nfree_handles = 0x400;
    bom->free_handles = (uint32_t*)malloc(bom->nfree_handles * 4);
    if (bom->free_handles == NULL) {
        radeon_bo_manager_legacy_shutdown((struct radeon_bo_manager*)bom);
        return NULL;
    }

    /* biggest framebuffer size */
    size = 4096*4096*4;
    /* allocate front */
    bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
    if (bo == NULL) {
        radeon_bo_manager_legacy_shutdown((struct radeon_bo_manager*)bom);
        return NULL;
    }
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
    }
    bo->static_bo = 1;
    bo->offset = bom->screen->frontOffset + bom->fb_location;
    bo->base.handle = bo->offset;
    bo->ptr = scrn->driScreen->pFB + bom->screen->frontOffset;
    if (bo->base.handle > bom->nhandle) {
        bom->nhandle = bo->base.handle + 1;
    }
    /* allocate back */
    bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
    if (bo == NULL) {
        radeon_bo_manager_legacy_shutdown((struct radeon_bo_manager*)bom);
        return NULL;
    }
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
    }
    bo->static_bo = 1;
    bo->offset = bom->screen->backOffset + bom->fb_location;
    bo->base.handle = bo->offset;
    bo->ptr = scrn->driScreen->pFB + bom->screen->backOffset;
    if (bo->base.handle > bom->nhandle) {
        bom->nhandle = bo->base.handle + 1;
    }
    /* allocate depth */
    bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
    if (bo == NULL) {
        radeon_bo_manager_legacy_shutdown((struct radeon_bo_manager*)bom);
        return NULL;
    }
    bo->base.flags = 0;
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags |= RADEON_BO_FLAGS_MACRO_TILE;
        bo->base.flags |= RADEON_BO_FLAGS_MICRO_TILE;
    }
    bo->static_bo = 1;
    bo->offset = bom->screen->depthOffset + bom->fb_location;
    bo->base.handle = bo->offset;
    bo->ptr = scrn->driScreen->pFB + bom->screen->depthOffset;
    if (bo->base.handle > bom->nhandle) {
        bom->nhandle = bo->base.handle + 1;
    }
    return (struct radeon_bo_manager*)bom;
}
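
/*
 * The three buffers created above (front, back, depth) are the permanent,
 * driver-owned surfaces: they are marked static_bo, their ptr points
 * straight into the mapped framebuffer (pFB), and they reuse their VRAM
 * offset as the handle, with nhandle bumped so dynamically allocated
 * handles never collide with them.
 */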

void radeon_bo_legacy_texture_age(struct radeon_bo_manager *bom)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    DRI_AGE_TEXTURES(boml->texture_heap);
}

unsigned radeon_bo_legacy_relocs_size(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo_legacy->static_bo || (bo->domains & RADEON_GEM_DOMAIN_GTT)) {
        return 0;
    }
    return bo->size;
}
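
/*
 * Rough usage sketch (added for illustration, not part of the driver): this
 * is roughly how a legacy radeon/r300 context is expected to drive this
 * manager, assuming the radeon_bo_open/radeon_bo_map/radeon_bo_unmap/
 * radeon_bo_unref wrappers declared in radeon_bo.h dispatch through
 * bo_legacy_funcs as set up above; "age" stands in for whatever fence age
 * the command-submission path uses and is hypothetical here.
 *
 *     struct radeon_bo_manager *bom = radeon_bo_manager_legacy(screen);
 *     struct radeon_bo *bo;
 *     uint32_t soffset, eoffset;
 *
 *     // allocate a 64KiB buffer in VRAM
 *     bo = radeon_bo_open(bom, 0, 64 * 1024, 0, RADEON_GEM_DOMAIN_VRAM, 0);
 *
 *     // fill it through the CPU mapping
 *     radeon_bo_map(bo, 1);
 *     memset(bo->ptr, 0, 64 * 1024);
 *     radeon_bo_unmap(bo);
 *
 *     // before emitting a command stream: get the card offsets
 *     // (this uploads dirty VRAM buffers through bo_vram_validate)
 *     radeon_bo_legacy_validate(bo, &soffset, &eoffset);
 *
 *     // after submitting: stamp the buffer with the fence age used
 *     radeon_bo_legacy_pending(bo, age);
 *
 *     // drop our reference; the object stays alive until it retires
 *     radeon_bo_unref(bo);
 *
 *     radeon_bo_manager_legacy_shutdown(bom);
 */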