/*
 * Copyright © 2008 Nicolai Haehnle
 * Copyright © 2008 Dave Airlie
 * Copyright © 2008 Jérôme Glisse
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *   Aapo Tahkola <aet@rasterburn.org>
 *   Nicolai Haehnle <prefect_@gmx.net>
 *   Dave Airlie
 *   Jérôme Glisse <glisse@freedesktop.org>
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include "xf86drm.h"
#include "texmem.h"
#include "main/simple_list.h"

#include "drm.h"
#include "radeon_drm.h"
#include "radeon_common.h"
#include "radeon_bocs_wrapper.h"
#include "radeon_macros.h"

/* no seriously texmem.c is this screwed up */
struct bo_legacy_texture_object {
    driTextureObject base;
    struct bo_legacy *parent;
};

struct bo_legacy {
    struct radeon_bo base;
    int map_count;
    uint32_t pending;
    int is_pending;
    int static_bo;
    uint32_t offset;
    struct bo_legacy_texture_object *tobj;
    int validated;
    int dirty;
    void *ptr;
    struct bo_legacy *next, *prev;
    struct bo_legacy *pnext, *pprev;
#ifdef RADEON_DEBUG_BO
    char szBufUsage[16];
#endif /* RADEON_DEBUG_BO */
};

struct bo_manager_legacy {
    struct radeon_bo_manager base;
    unsigned nhandle;
    unsigned nfree_handles;
    unsigned cfree_handles;
    uint32_t current_age;
    struct bo_legacy bos;
    struct bo_legacy pending_bos;
    uint32_t fb_location;
    uint32_t texture_offset;
    unsigned dma_alloc_size;
    uint32_t dma_buf_count;
    unsigned cpendings;
    driTextureObject texture_swapped;
    driTexHeap *texture_heap;
    struct radeon_screen *screen;
    unsigned *free_handles;
};

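/* texmem destroy callback: when the texture heap evicts our object,
 * detach it from the backing bo and mark the bo as no longer validated. */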
static void bo_legacy_tobj_destroy(void *data, driTextureObject *t)
{
    struct bo_legacy_texture_object *tobj = (struct bo_legacy_texture_object *)t;

    if (tobj->parent) {
        tobj->parent->tobj = NULL;
        tobj->parent->validated = 0;
    }
}

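/* Buffer handles are small integers handed out by the manager itself
 * (there is no kernel object on this legacy path).  Freed handles are
 * kept in the free_handles array for reuse; clean_handles() trims zeroed
 * entries off the end of that array. */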
static inline void clean_handles(struct bo_manager_legacy *bom)
{
    while (bom->cfree_handles > 0 &&
           !bom->free_handles[bom->cfree_handles - 1])
        bom->cfree_handles--;
}

static int legacy_new_handle(struct bo_manager_legacy *bom, uint32_t *handle)
{
    uint32_t tmp;

    *handle = 0;
    if (bom->nhandle == 0xFFFFFFFF) {
        return -EINVAL;
    }
    if (bom->cfree_handles > 0) {
        tmp = bom->free_handles[--bom->cfree_handles];
        clean_handles(bom);
    } else {
        bom->cfree_handles = 0;
        tmp = bom->nhandle++;
    }
    assert(tmp);
    *handle = tmp;
    return 0;
}

static int legacy_free_handle(struct bo_manager_legacy *bom, uint32_t handle)
{
    uint32_t *handles;

    if (!handle) {
        return 0;
    }
    if (handle == (bom->nhandle - 1)) {
        int i;

        bom->nhandle--;
        for (i = bom->cfree_handles - 1; i >= 0; i--) {
            if (bom->free_handles[i] == (bom->nhandle - 1)) {
                bom->nhandle--;
                bom->free_handles[i] = 0;
            }
        }
        clean_handles(bom);
        return 0;
    }
    if (bom->cfree_handles < bom->nfree_handles) {
        bom->free_handles[bom->cfree_handles++] = handle;
        return 0;
    }
    bom->nfree_handles += 0x100;
    handles = (uint32_t*)realloc(bom->free_handles, bom->nfree_handles * 4);
    if (handles == NULL) {
        bom->nfree_handles -= 0x100;
        return -ENOMEM;
    }
    bom->free_handles = handles;
    bom->free_handles[bom->cfree_handles++] = handle;
    return 0;
}

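/* Read back the hardware's current "age" counter.  R300/R600 class chips
 * report it through the RADEON_PARAM_LAST_CLEAR getparam ioctl; older chips
 * expose it in scratch register 3, read through the mapped MMIO aperture. */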
static void legacy_get_current_age(struct bo_manager_legacy *boml)
{
    drm_radeon_getparam_t gp;
    unsigned char *RADEONMMIO = NULL;
    int r;

    if (IS_R300_CLASS(boml->screen)
        || IS_R600_CLASS(boml->screen)) {
        gp.param = RADEON_PARAM_LAST_CLEAR;
        gp.value = (int *)&boml->current_age;
        r = drmCommandWriteRead(boml->base.fd, DRM_RADEON_GETPARAM,
                                &gp, sizeof(gp));
        if (r) {
            fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, r);
            exit(1);
        }
    } else {
        RADEONMMIO = boml->screen->mmio.map;
        boml->current_age = boml->screen->scratch[3];
        boml->current_age = INREG(RADEON_GUI_SCRATCH_REG3);
    }
}

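/* Check whether a bo queued on the pending list has been retired by the
 * hardware.  Once its fence age has passed, unlink it from the pending list
 * and drop the references that were taken when it was queued.  Returns 1
 * while the bo is still pending, 0 once it has been retired. */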
static int legacy_is_pending(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo_legacy->is_pending <= 0) {
        bo_legacy->is_pending = 0;
        return 0;
    }
    if (boml->current_age >= bo_legacy->pending) {
        if (boml->pending_bos.pprev == bo_legacy) {
            boml->pending_bos.pprev = bo_legacy->pprev;
        }
        bo_legacy->pprev->pnext = bo_legacy->pnext;
        if (bo_legacy->pnext) {
            bo_legacy->pnext->pprev = bo_legacy->pprev;
        }
        assert(bo_legacy->is_pending <= bo->cref);
        while (bo_legacy->is_pending--) {
            bo = radeon_bo_unref(bo);
            if (!bo)
                break;
        }
        if (bo)
            bo_legacy->is_pending = 0;
        boml->cpendings--;
        return 0;
    }
    return 1;
}

static int legacy_wait_pending(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (!bo_legacy->is_pending) {
        return 0;
    }
    /* FIXME: if the GPU locks up, this degenerates into an endless
     * userspace busy loop. */
    legacy_get_current_age(boml);
    while (legacy_is_pending(bo)) {
        usleep(10);
        legacy_get_current_age(boml);
    }
    return 0;
}

static void legacy_track_pending(struct bo_manager_legacy *boml, int debug)
{
    struct bo_legacy *bo_legacy;
    struct bo_legacy *next;

    legacy_get_current_age(boml);
    bo_legacy = boml->pending_bos.pnext;
    while (bo_legacy) {
        if (debug)
            fprintf(stderr, "pending %p %d %d %d\n", bo_legacy, bo_legacy->base.size,
                    boml->current_age, bo_legacy->pending);
        next = bo_legacy->pnext;
        /* called for its side effect: retires the bo if its age has passed */
        (void)legacy_is_pending(&(bo_legacy->base));
        bo_legacy = next;
    }
}

static int legacy_wait_any_pending(struct bo_manager_legacy *boml)
{
    struct bo_legacy *bo_legacy;

    legacy_get_current_age(boml);
    bo_legacy = boml->pending_bos.pnext;
    if (!bo_legacy)
        return -1;
    legacy_wait_pending(&bo_legacy->base);
    return 0;
}

static void legacy_kick_all_buffers(struct bo_manager_legacy *boml)
{
    struct bo_legacy *legacy;

    legacy = boml->bos.next;
    /* the bos list is NULL-terminated (see bo_allocate), so walk until NULL */
    while (legacy) {
        if (legacy->tobj) {
            if (legacy->validated) {
                driDestroyTextureObject(&legacy->tobj->base);
                legacy->tobj = NULL;
                legacy->validated = 0;
            }
        }
        legacy = legacy->next;
    }
}

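/* Allocate and initialize the driver-side bo wrapper.  The requested size is
 * rounded up to a whole number of pages and the new bo is linked at the head
 * of the manager's bos list.  No storage is attached here; that is done by
 * the caller (GART DMA region, malloc'ed backing store or static
 * framebuffer region). */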
static struct bo_legacy *bo_allocate(struct bo_manager_legacy *boml,
                                     uint32_t size,
                                     uint32_t alignment,
                                     uint32_t domains,
#ifdef RADEON_DEBUG_BO
                                     uint32_t flags,
                                     char *szBufUsage)
#else
                                     uint32_t flags)
#endif /* RADEON_DEBUG_BO */
{
    struct bo_legacy *bo_legacy;
    static int pgsize;

    if (pgsize == 0)
        pgsize = getpagesize() - 1;

    size = (size + pgsize) & ~pgsize;

    bo_legacy = (struct bo_legacy*)calloc(1, sizeof(struct bo_legacy));
    if (bo_legacy == NULL) {
        return NULL;
    }
    bo_legacy->base.bom = (struct radeon_bo_manager*)boml;
    bo_legacy->base.handle = 0;
    bo_legacy->base.size = size;
    bo_legacy->base.alignment = alignment;
    bo_legacy->base.domains = domains;
    bo_legacy->base.flags = flags;
    bo_legacy->base.ptr = NULL;
    bo_legacy->map_count = 0;
    bo_legacy->next = NULL;
    bo_legacy->prev = NULL;
    bo_legacy->pnext = NULL;
    bo_legacy->pprev = NULL;
    bo_legacy->next = boml->bos.next;
    bo_legacy->prev = &boml->bos;
    boml->bos.next = bo_legacy;
    if (bo_legacy->next) {
        bo_legacy->next->prev = bo_legacy;
    }

#ifdef RADEON_DEBUG_BO
    snprintf(bo_legacy->szBufUsage, sizeof(bo_legacy->szBufUsage), "%s", szBufUsage);
#endif /* RADEON_DEBUG_BO */

    return bo_legacy;
}

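/* GTT (GART) bos are carved out of the DRM's GART memory region with the
 * DRM_RADEON_ALLOC / DRM_RADEON_FREE ioctls.  The returned region offset is
 * relative to the GART texture aperture, so both the CPU pointer and the
 * card offset are derived from it. */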
static int bo_dma_alloc(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    drm_radeon_mem_alloc_t alloc;
    unsigned size;
    int base_offset;
    int r;

    /* align size to 4KiB */
    size = (((4 * 1024) - 1) + bo->size) & ~((4 * 1024) - 1);
    alloc.region = RADEON_MEM_REGION_GART;
    alloc.alignment = bo_legacy->base.alignment;
    alloc.size = size;
    alloc.region_offset = &base_offset;
    r = drmCommandWriteRead(bo->bom->fd,
                            DRM_RADEON_ALLOC,
                            &alloc,
                            sizeof(alloc));
    if (r) {
        /* ptr is set to NULL if dma allocation failed */
        bo_legacy->ptr = NULL;
        return r;
    }
    bo_legacy->ptr = boml->screen->gartTextures.map + base_offset;
    bo_legacy->offset = boml->screen->gart_texture_offset + base_offset;
    bo->size = size;
    boml->dma_alloc_size += size;
    boml->dma_buf_count++;
    return 0;
}

static int bo_dma_free(struct radeon_bo *bo)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    drm_radeon_mem_free_t memfree;
    int r;

    if (bo_legacy->ptr == NULL) {
        /* ptr is set to NULL if dma allocation failed */
        return 0;
    }
    legacy_get_current_age(boml);
    memfree.region = RADEON_MEM_REGION_GART;
    memfree.region_offset = bo_legacy->offset;
    memfree.region_offset -= boml->screen->gart_texture_offset;
    r = drmCommandWrite(boml->base.fd,
                        DRM_RADEON_FREE,
                        &memfree,
                        sizeof(memfree));
    if (r) {
        fprintf(stderr, "Failed to free bo[%p] at %08x\n",
                &bo_legacy->base, memfree.region_offset);
        fprintf(stderr, "ret = %s\n", strerror(-r));
        return r;
    }
    boml->dma_alloc_size -= bo_legacy->base.size;
    boml->dma_buf_count--;
    return 0;
}

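/* Unlink a bo from the manager's list and release its storage: the handle,
 * the GART region for GTT bos, or the texture object and malloc'ed backing
 * store for VRAM bos.  Static bos (front/back/depth) own no storage of their
 * own and are only unlinked. */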
static void bo_free(struct bo_legacy *bo_legacy)
{
    struct bo_manager_legacy *boml;

    if (bo_legacy == NULL) {
        return;
    }
    boml = (struct bo_manager_legacy *)bo_legacy->base.bom;
    bo_legacy->prev->next = bo_legacy->next;
    if (bo_legacy->next) {
        bo_legacy->next->prev = bo_legacy->prev;
    }
    if (!bo_legacy->static_bo) {
        legacy_free_handle(boml, bo_legacy->base.handle);
        if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
            /* dma buffers */
            bo_dma_free(&bo_legacy->base);
        } else {
            if (bo_legacy->tobj)
                driDestroyTextureObject(&bo_legacy->tobj->base);
            bo_legacy->tobj = NULL;
            /* free backing store */
            free(bo_legacy->ptr);
        }
    }
    memset(bo_legacy, 0, sizeof(struct bo_legacy));
    free(bo_legacy);
}

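/* Look up an existing bo by handle, or create a new one.  GTT bos get a GART
 * DMA region (waiting for pending buffers to retire and retrying if the
 * region is full); all other bos get a malloc'ed backing store that is
 * uploaded to VRAM when the bo is validated. */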
static struct radeon_bo *bo_open(struct radeon_bo_manager *bom,
                                 uint32_t handle,
                                 uint32_t size,
                                 uint32_t alignment,
                                 uint32_t domains,
#ifdef RADEON_DEBUG_BO
                                 uint32_t flags,
                                 char *szBufUsage)
#else
                                 uint32_t flags)
#endif /* RADEON_DEBUG_BO */
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    struct bo_legacy *bo_legacy;
    int r;

    if (handle) {
        bo_legacy = boml->bos.next;
        while (bo_legacy) {
            if (bo_legacy->base.handle == handle) {
                radeon_bo_ref(&(bo_legacy->base));
                return (struct radeon_bo*)bo_legacy;
            }
            bo_legacy = bo_legacy->next;
        }
        return NULL;
    }
#ifdef RADEON_DEBUG_BO
    bo_legacy = bo_allocate(boml, size, alignment, domains, flags, szBufUsage);
#else
    bo_legacy = bo_allocate(boml, size, alignment, domains, flags);
#endif /* RADEON_DEBUG_BO */
    if (bo_legacy == NULL) {
        return NULL;
    }
    bo_legacy->static_bo = 0;
    r = legacy_new_handle(boml, &bo_legacy->base.handle);
    if (r) {
        bo_free(bo_legacy);
        return NULL;
    }
    if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
retry:
        legacy_track_pending(boml, 0);
        /* dma buffers */
        r = bo_dma_alloc(&(bo_legacy->base));
        if (r) {
            if (legacy_wait_any_pending(boml) == -1) {
                bo_free(bo_legacy);
                return NULL;
            }
            goto retry;
        }
    } else {
        bo_legacy->ptr = malloc(bo_legacy->base.size);
        if (bo_legacy->ptr == NULL) {
            bo_free(bo_legacy);
            return NULL;
        }
    }
    radeon_bo_ref(&(bo_legacy->base));

    return (struct radeon_bo*)bo_legacy;
}

static void bo_ref(struct radeon_bo *bo)
{
}

static struct radeon_bo *bo_unref(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo->cref <= 0) {
        bo_legacy->prev->next = bo_legacy->next;
        if (bo_legacy->next) {
            bo_legacy->next->prev = bo_legacy->prev;
        }
        if (!bo_legacy->is_pending) {
            bo_free(bo_legacy);
        }
        return NULL;
    }
    return bo;
}

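/* Map/unmap are trivial on this path: the backing store (GART mapping,
 * malloc'ed memory or framebuffer mapping) is always CPU-visible, so mapping
 * only has to wait for pending rendering and mark the bo dirty. */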
static int bo_map(struct radeon_bo *bo, int write)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    legacy_wait_pending(bo);
    bo_legacy->validated = 0;
    bo_legacy->dirty = 1;
    bo_legacy->map_count++;
    bo->ptr = bo_legacy->ptr;
    /* Read the first pixel in the frame buffer.  This should
     * be a noop, right?  In fact without this conform fails as reading
     * from the framebuffer sometimes produces old results -- the
     * on-card read cache gets mixed up and doesn't notice that the
     * framebuffer has been updated.
     *
     * Note that we should probably be reading some otherwise unused
     * region of VRAM, otherwise we might get incorrect results when
     * reading pixels from the top left of the screen.
     *
     * I found this problem on an R420 with glean's texCube test.
     * Note that the R200 span code also *writes* the first pixel in the
     * framebuffer, but I've found this to be unnecessary.
     *  -- Nicolai Hähnle, June 2008
     */
    if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
        int p;
        volatile int *buf = (int*)boml->screen->driScreen->pFB;
        p = *buf;
    }

    return 0;
}

static int bo_unmap(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (--bo_legacy->map_count > 0) {
        return 0;
    }

    bo->ptr = NULL;

    return 0;
}

static int bo_is_static(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    return bo_legacy->static_bo;
}

static struct radeon_bo_funcs bo_legacy_funcs = {
    bo_open,
    bo_ref,
    bo_unref,
    bo_map,
    bo_unmap,
    NULL,
    bo_is_static,
    NULL,
    NULL,
};

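/* Rough lifecycle of a non-static VRAM bo on this legacy path (a sketch of
 * how the rest of the driver is expected to drive the entries in
 * bo_legacy_funcs and the helpers below; illustrative only, not extra API):
 *
 *   bo = open(bom, 0, size, align, RADEON_GEM_DOMAIN_VRAM, 0); // bo_open
 *   map(bo, 1);                      // wait for pending use, expose bo->ptr
 *   ...CPU writes through bo->ptr...
 *   unmap(bo);
 *   radeon_bo_legacy_validate(bo, &soffset, &eoffset); // blit into VRAM heap
 *   ...emit command stream using soffset/eoffset...
 *   radeon_bo_legacy_pending(bo, age); // keep on the pending list until the
 *                                      // GPU age counter passes 'age'
 *   radeon_bo_unref(bo);
 */

/* Validate a VRAM bo: give it a block in the DRI texture heap if it does not
 * have one yet, then upload the malloc'ed backing store into that block with
 * the DRM_RADEON_TEXTURE blit ioctl whenever the bo is dirty. */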
static int bo_vram_validate(struct radeon_bo *bo,
                            uint32_t *soffset,
                            uint32_t *eoffset)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    int r;
    int retry_count = 0, pending_retry = 0;

    if (!bo_legacy->tobj) {
        bo_legacy->tobj = CALLOC(sizeof(struct bo_legacy_texture_object));
        bo_legacy->tobj->parent = bo_legacy;
        make_empty_list(&bo_legacy->tobj->base);
        bo_legacy->tobj->base.totalSize = bo->size;
retry:
        r = driAllocateTexture(&boml->texture_heap, 1,
                               &bo_legacy->tobj->base);
        if (r) {
            pending_retry = 0;
            while (boml->cpendings && pending_retry++ < 10000) {
                legacy_track_pending(boml, 0);
                retry_count++;
                if (retry_count > 2) {
                    free(bo_legacy->tobj);
                    bo_legacy->tobj = NULL;
                    fprintf(stderr, "Ouch! vram_validate failed %d\n", r);
                    return -1;
                }
                goto retry;
            }
        }
        bo_legacy->offset = boml->texture_offset +
                            bo_legacy->tobj->base.memBlock->ofs;
        bo_legacy->dirty = 1;
    }

    assert(bo_legacy->tobj->base.memBlock);

    if (bo_legacy->tobj)
        driUpdateTextureLRU(&bo_legacy->tobj->base);

    if (bo_legacy->dirty || bo_legacy->tobj->base.dirty_images[0]) {
        if (IS_R600_CLASS(boml->screen)) {
            drm_radeon_texture_t tex;
            drm_radeon_tex_image_t tmp;
            int ret;

            tex.offset = bo_legacy->offset;
            tex.image = &tmp;
            assert(!(tex.offset & 1023));

            tmp.x = 0;
            tmp.y = 0;
            tmp.width = bo->size;
            tmp.height = 1;
            tmp.data = bo_legacy->ptr;
            tex.format = RADEON_TXFORMAT_ARGB8888;
            tex.width = tmp.width;
            tex.height = tmp.height;
            tex.pitch = bo->size;
            do {
                ret = drmCommandWriteRead(bo->bom->fd,
                                          DRM_RADEON_TEXTURE,
                                          &tex,
                                          sizeof(drm_radeon_texture_t));
                if (ret) {
                    if (RADEON_DEBUG & DEBUG_IOCTL)
                        fprintf(stderr, "DRM_RADEON_TEXTURE: again!\n");
                    usleep(1);
                }
            } while (ret == -EAGAIN);
        } else {
            /* Copy to VRAM using a blit.
             * All memory is 4K aligned.  We're using 1024-pixel-wide blits.
             */
            drm_radeon_texture_t tex;
            drm_radeon_tex_image_t tmp;
            int ret;

            tex.offset = bo_legacy->offset;
            tex.image = &tmp;
            assert(!(tex.offset & 1023));

            tmp.x = 0;
            tmp.y = 0;
            if (bo->size < 4096) {
                tmp.width = (bo->size + 3) / 4;
                tmp.height = 1;
            } else {
                tmp.width = 1024;
                tmp.height = (bo->size + 4095) / 4096;
            }
            tmp.data = bo_legacy->ptr;
            tex.format = RADEON_TXFORMAT_ARGB8888;
            tex.width = tmp.width;
            tex.height = tmp.height;
            tex.pitch = MAX2(tmp.width / 16, 1);
            do {
                ret = drmCommandWriteRead(bo->bom->fd,
                                          DRM_RADEON_TEXTURE,
                                          &tex,
                                          sizeof(drm_radeon_texture_t));
                if (ret) {
                    if (RADEON_DEBUG & DEBUG_IOCTL)
                        fprintf(stderr, "DRM_RADEON_TEXTURE: again!\n");
                    usleep(1);
                }
            } while (ret == -EAGAIN);
        }
        bo_legacy->dirty = 0;
        bo_legacy->tobj->base.dirty_images[0] = 0;
    }
    return 0;
}

/*
 * radeon_bo_legacy_validate -
 * returns:
 *    0       - all good
 *    -EINVAL - a mapped buffer can't be validated
 *    -EAGAIN - restart validation, we've kicked all the buffers out
 */
int radeon_bo_legacy_validate(struct radeon_bo *bo,
                              uint32_t *soffset,
                              uint32_t *eoffset)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
    int r;
    int retries = 0;

    if (bo_legacy->map_count) {
#ifdef RADEON_DEBUG_BO
        fprintf(stderr, "bo(%p, %d, %s) is mapped (%d), can't validate it.\n",
                bo, bo->size, bo_legacy->szBufUsage, bo_legacy->map_count);
#else
        fprintf(stderr, "bo(%p, %d) is mapped (%d), can't validate it.\n",
                bo, bo->size, bo_legacy->map_count);
#endif /* RADEON_DEBUG_BO */
        return -EINVAL;
    }
    if (bo_legacy->static_bo || bo_legacy->validated) {
        *soffset = bo_legacy->offset;
        *eoffset = bo_legacy->offset + bo->size;
        return 0;
    }
    if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
        r = bo_vram_validate(bo, soffset, eoffset);
        if (r) {
            legacy_track_pending(boml, 0);
            legacy_kick_all_buffers(boml);
            retries++;
            if (retries == 2) {
                fprintf(stderr, "legacy bo: failed to get relocations into aperture\n");
                assert(0);
                exit(-1);
            }
            return -EAGAIN;
        }
    }
    *soffset = bo_legacy->offset;
    *eoffset = bo_legacy->offset + bo->size;
    bo_legacy->validated = 1;

    return 0;
}

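/* Queue a bo on the pending list with the fence age of the command stream
 * that uses it.  A reference is taken per pending use and dropped again by
 * legacy_is_pending() once the hardware age passes 'pending'. */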
void radeon_bo_legacy_pending(struct radeon_bo *bo, uint32_t pending)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    bo_legacy->pending = pending;
    bo_legacy->is_pending++;
    /* add to pending list */
    radeon_bo_ref(bo);
    if (bo_legacy->is_pending > 1) {
        return;
    }
    bo_legacy->pprev = boml->pending_bos.pprev;
    bo_legacy->pnext = NULL;
    bo_legacy->pprev->pnext = bo_legacy;
    boml->pending_bos.pprev = bo_legacy;
    boml->cpendings++;
}

void radeon_bo_manager_legacy_dtor(struct radeon_bo_manager *bom)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    struct bo_legacy *bo_legacy;

    if (bom == NULL) {
        return;
    }
    bo_legacy = boml->bos.next;
    while (bo_legacy) {
        struct bo_legacy *next;

        next = bo_legacy->next;
        bo_free(bo_legacy);
        bo_legacy = next;
    }
    driDestroyTextureHeap(boml->texture_heap);
    free(boml->free_handles);
    free(boml);
}

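/* Create a bo for one of the fixed framebuffer regions (front, back, depth).
 * Static bos point straight into the mapped framebuffer and use their card
 * offset as their handle; validation simply returns their fixed offset. */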
static struct bo_legacy *radeon_legacy_bo_alloc_static(struct bo_manager_legacy *bom,
                                                       int size,
#ifdef RADEON_DEBUG_BO
                                                       uint32_t offset,
                                                       char *szBufUsage)
#else
                                                       uint32_t offset)
#endif /* RADEON_DEBUG_BO */
{
    struct bo_legacy *bo;

#ifdef RADEON_DEBUG_BO
    bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0, szBufUsage);
#else
    bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
#endif /* RADEON_DEBUG_BO */
    if (bo == NULL)
        return NULL;
    bo->static_bo = 1;
    bo->offset = offset + bom->fb_location;
    bo->base.handle = bo->offset;
    bo->ptr = bom->screen->driScreen->pFB + offset;
    if (bo->base.handle > bom->nhandle) {
        bom->nhandle = bo->base.handle + 1;
    }
    radeon_bo_ref(&(bo->base));
    return bo;
}

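/* Build the legacy bo manager for a screen: set up the DRI texture heap used
 * for VRAM bos, the handle table, the pending list, and static bos covering
 * the front, back and depth buffers. */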
struct radeon_bo_manager *radeon_bo_manager_legacy_ctor(struct radeon_screen *scrn)
{
    struct bo_manager_legacy *bom;
    struct bo_legacy *bo;
    unsigned size;

    bom = (struct bo_manager_legacy*)
          calloc(1, sizeof(struct bo_manager_legacy));
    if (bom == NULL) {
        return NULL;
    }

    make_empty_list(&bom->texture_swapped);

    bom->texture_heap = driCreateTextureHeap(0,
                                             bom,
                                             scrn->texSize[0],
                                             12,
                                             RADEON_NR_TEX_REGIONS,
                                             (drmTextureRegionPtr)scrn->sarea->tex_list[0],
                                             &scrn->sarea->tex_age[0],
                                             &bom->texture_swapped,
                                             sizeof(struct bo_legacy_texture_object),
                                             &bo_legacy_tobj_destroy);
    bom->texture_offset = scrn->texOffset[0];

    bom->base.funcs = &bo_legacy_funcs;
    bom->base.fd = scrn->driScreen->fd;
    bom->bos.next = NULL;
    bom->bos.prev = NULL;
    bom->pending_bos.pprev = &bom->pending_bos;
    bom->pending_bos.pnext = NULL;
    bom->screen = scrn;
    bom->fb_location = scrn->fbLocation;
    bom->nhandle = 1;
    bom->cfree_handles = 0;
    bom->nfree_handles = 0x400;
    bom->free_handles = (uint32_t*)malloc(bom->nfree_handles * 4);
    if (bom->free_handles == NULL) {
        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
        return NULL;
    }

    /* worst-case framebuffer size: 4096x4096 pixels at 32bpp */
    size = 4096*4096*4;

    /* allocate front */
#ifdef RADEON_DEBUG_BO
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->frontOffset, "FRONT BUF");
#else
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->frontOffset);
#endif /* RADEON_DEBUG_BO */
    if (!bo) {
        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
        return NULL;
    }
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
    }

    /* allocate back */
#ifdef RADEON_DEBUG_BO
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->backOffset, "BACK BUF");
#else
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->backOffset);
#endif /* RADEON_DEBUG_BO */
    if (!bo) {
        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
        return NULL;
    }
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
    }

    /* allocate depth */
#ifdef RADEON_DEBUG_BO
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->depthOffset, "Z BUF");
#else
    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->depthOffset);
#endif /* RADEON_DEBUG_BO */
    if (!bo) {
        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
        return NULL;
    }
    bo->base.flags = 0;
    if (scrn->sarea->tiling_enabled) {
        bo->base.flags |= RADEON_BO_FLAGS_MACRO_TILE;
        bo->base.flags |= RADEON_BO_FLAGS_MICRO_TILE;
    }
    return (struct radeon_bo_manager*)bom;
}

void radeon_bo_legacy_texture_age(struct radeon_bo_manager *bom)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    DRI_AGE_TEXTURES(boml->texture_heap);
}

unsigned radeon_bo_legacy_relocs_size(struct radeon_bo *bo)
{
    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;

    if (bo_legacy->static_bo || (bo->domains & RADEON_GEM_DOMAIN_GTT)) {
        return 0;
    }
    return bo->size;
}

/*
 * Fake up a bo for things like texture image_override.
 * bo->offset already includes fb_location.
 */
struct radeon_bo *radeon_legacy_bo_alloc_fake(struct radeon_bo_manager *bom,
                                              int size,
                                              uint32_t offset)
{
    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
    struct bo_legacy *bo;

#ifdef RADEON_DEBUG_BO
    bo = bo_allocate(boml, size, 0, RADEON_GEM_DOMAIN_VRAM, 0, "fake bo");
#else
    bo = bo_allocate(boml, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
#endif /* RADEON_DEBUG_BO */
    if (bo == NULL)
        return NULL;
    bo->static_bo = 1;
    bo->offset = offset;
    bo->base.handle = bo->offset;
    bo->ptr = boml->screen->driScreen->pFB + (offset - boml->fb_location);
    if (bo->base.handle > boml->nhandle) {
        boml->nhandle = bo->base.handle + 1;
    }
    radeon_bo_ref(&(bo->base));
    return &(bo->base);
}