radeon/r600: use new libdrm_radeon api
[mesa.git] src/mesa/drivers/dri/radeon/radeon_bo_legacy.c
1 /*
2 * Copyright © 2008 Nicolai Haehnle
3 * Copyright © 2008 Dave Airlie
4 * Copyright © 2008 Jérôme Glisse
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
25 * of the Software.
26 */
27 /*
28 * Authors:
29 * Aapo Tahkola <aet@rasterburn.org>
30 * Nicolai Haehnle <prefect_@gmx.net>
31 * Dave Airlie
32 * Jérôme Glisse <glisse@freedesktop.org>
33 */
34 #include <stdio.h>
35 #include <stddef.h>
36 #include <stdint.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <errno.h>
40 #include <unistd.h>
41 #include <sys/mman.h>
42 #include <sys/ioctl.h>
43 #include "xf86drm.h"
44 #include "texmem.h"
45 #include "main/simple_list.h"
46
47 #include "drm.h"
48 #include "radeon_drm.h"
49 #include "radeon_common.h"
50 #include "radeon_bocs_wrapper.h"
51 #include "radeon_macros.h"
52
53 #ifdef HAVE_LIBDRM_RADEON
54 #include "radeon_bo_int.h"
55 #else
56 #include "radeon_bo_int_drm.h"
57 #endif
58
 59 /* yes, texmem.c really is this awkward: it needs a driTextureObject wrapper per BO */
60 struct bo_legacy_texture_object {
61 driTextureObject base;
62 struct bo_legacy *parent;
63 };
64
65 struct bo_legacy {
66 struct radeon_bo_int base;
67 int map_count;
68 uint32_t pending;
69 int is_pending;
70 int static_bo;
71 uint32_t offset;
72 struct bo_legacy_texture_object *tobj;
73 int validated;
74 int dirty;
75 void *ptr;
76 struct bo_legacy *next, *prev;
77 struct bo_legacy *pnext, *pprev;
78 };
79
80 struct bo_manager_legacy {
81 struct radeon_bo_manager base;
82 unsigned nhandle;
83 unsigned nfree_handles;
84 unsigned cfree_handles;
85 uint32_t current_age;
86 struct bo_legacy bos;
87 struct bo_legacy pending_bos;
88 uint32_t fb_location;
89 uint32_t texture_offset;
90 unsigned dma_alloc_size;
91 uint32_t dma_buf_count;
92 unsigned cpendings;
93 driTextureObject texture_swapped;
94 driTexHeap *texture_heap;
95 struct radeon_screen *screen;
96 unsigned *free_handles;
97 };
98
99 static void bo_legacy_tobj_destroy(void *data, driTextureObject *t)
100 {
101 struct bo_legacy_texture_object *tobj = (struct bo_legacy_texture_object *)t;
102
103 if (tobj->parent) {
104 tobj->parent->tobj = NULL;
105 tobj->parent->validated = 0;
106 }
107 }
108
109 static inline void clean_handles(struct bo_manager_legacy *bom)
110 {
111 while (bom->cfree_handles > 0 &&
112 !bom->free_handles[bom->cfree_handles - 1])
113 bom->cfree_handles--;
114 }
115 
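/* Hand out a small integer handle for a new BO, reusing a previously freed
 * handle before growing the counter; handle 0 is reserved as invalid.
 */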
116 static int legacy_new_handle(struct bo_manager_legacy *bom, uint32_t *handle)
117 {
118 uint32_t tmp;
119
120 *handle = 0;
121 if (bom->nhandle == 0xFFFFFFFF) {
122 return -EINVAL;
123 }
124 if (bom->cfree_handles > 0) {
125 tmp = bom->free_handles[--bom->cfree_handles];
126 clean_handles(bom);
127 } else {
128 bom->cfree_handles = 0;
129 tmp = bom->nhandle++;
130 }
131 assert(tmp);
132 *handle = tmp;
133 return 0;
134 }
135
136 static int legacy_free_handle(struct bo_manager_legacy *bom, uint32_t handle)
137 {
138 uint32_t *handles;
139
140 if (!handle) {
141 return 0;
142 }
143 if (handle == (bom->nhandle - 1)) {
144 int i;
145
146 bom->nhandle--;
147 for (i = bom->cfree_handles - 1; i >= 0; i--) {
148 if (bom->free_handles[i] == (bom->nhandle - 1)) {
149 bom->nhandle--;
150 bom->free_handles[i] = 0;
151 }
152 }
153 clean_handles(bom);
154 return 0;
155 }
156 if (bom->cfree_handles < bom->nfree_handles) {
157 bom->free_handles[bom->cfree_handles++] = handle;
158 return 0;
159 }
160 bom->nfree_handles += 0x100;
161     handles = (uint32_t*)realloc(bom->free_handles, bom->nfree_handles * sizeof(uint32_t));
162 if (handles == NULL) {
163 bom->nfree_handles -= 0x100;
164 return -ENOMEM;
165 }
166 bom->free_handles = handles;
167 bom->free_handles[bom->cfree_handles++] = handle;
168 return 0;
169 }
170
171 static void legacy_get_current_age(struct bo_manager_legacy *boml)
172 {
173 drm_radeon_getparam_t gp;
174 unsigned char *RADEONMMIO = NULL;
175 int r;
176
177 if ( IS_R300_CLASS(boml->screen)
178 || IS_R600_CLASS(boml->screen) )
179 {
180 gp.param = RADEON_PARAM_LAST_CLEAR;
181 gp.value = (int *)&boml->current_age;
182 r = drmCommandWriteRead(boml->base.fd, DRM_RADEON_GETPARAM,
183 &gp, sizeof(gp));
184 if (r) {
185             fprintf(stderr, "%s: DRM_RADEON_GETPARAM failed: %d\n", __FUNCTION__, r);
186 exit(1);
187 }
188 }
189 else {
190 RADEONMMIO = boml->screen->mmio.map;
191 boml->current_age = boml->screen->scratch[3];
192 boml->current_age = INREG(RADEON_GUI_SCRATCH_REG3);
193 }
194 }
195
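/* Returns 1 while the BO is still referenced by unfinished rendering.
 * Once the tracked scratch age has passed, the BO is unlinked from the
 * pending list and the references taken in radeon_bo_legacy_pending()
 * are dropped.
 */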
196 static int legacy_is_pending(struct radeon_bo_int *boi)
197 {
198 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)boi->bom;
199 struct bo_legacy *bo_legacy = (struct bo_legacy*)boi;
200
201 if (bo_legacy->is_pending <= 0) {
202 bo_legacy->is_pending = 0;
203 return 0;
204 }
205 if (boml->current_age >= bo_legacy->pending) {
206 if (boml->pending_bos.pprev == bo_legacy) {
207 boml->pending_bos.pprev = bo_legacy->pprev;
208 }
209 bo_legacy->pprev->pnext = bo_legacy->pnext;
210 if (bo_legacy->pnext) {
211 bo_legacy->pnext->pprev = bo_legacy->pprev;
212 }
213 assert(bo_legacy->is_pending <= boi->cref);
214 while (bo_legacy->is_pending--) {
215 boi = (struct radeon_bo_int *)radeon_bo_unref((struct radeon_bo *)boi);
216 if (!boi)
217 break;
218 }
219 if (boi)
220 bo_legacy->is_pending = 0;
221 boml->cpendings--;
222 return 0;
223 }
224 return 1;
225 }
226
227 static int legacy_wait_pending(struct radeon_bo_int *bo)
228 {
229 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
230 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
231
232 if (!bo_legacy->is_pending) {
233 return 0;
234 }
235     /* FIXME: if the GPU locks up this degenerates into an endless userspace busy loop */
236 legacy_get_current_age(boml);
237 while (legacy_is_pending(bo)) {
238 usleep(10);
239 legacy_get_current_age(boml);
240 }
241 return 0;
242 }
243
244 void legacy_track_pending(struct radeon_bo_manager *bom, int debug)
245 {
246 struct bo_manager_legacy *boml = (struct bo_manager_legacy*) bom;
247 struct bo_legacy *bo_legacy;
248 struct bo_legacy *next;
249
250 legacy_get_current_age(boml);
251 bo_legacy = boml->pending_bos.pnext;
252 while (bo_legacy) {
253 if (debug)
254             fprintf(stderr, "pending %p %u %u %u\n", bo_legacy, bo_legacy->base.size,
255 boml->current_age, bo_legacy->pending);
256 next = bo_legacy->pnext;
257         /* legacy_is_pending() retires the BO from the pending list as a side effect */
258         (void)legacy_is_pending(&(bo_legacy->base));
259 bo_legacy = next;
260 }
261 }
262
263 static int legacy_wait_any_pending(struct bo_manager_legacy *boml)
264 {
265 struct bo_legacy *bo_legacy;
266
267 legacy_get_current_age(boml);
268 bo_legacy = boml->pending_bos.pnext;
269 if (!bo_legacy)
270 return -1;
271 legacy_wait_pending(&bo_legacy->base);
272 return 0;
273 }
274
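/* Throw every validated texture BO out of the VRAM heap so that a failed
 * validation can be retried against an empty aperture.
 */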
275 static void legacy_kick_all_buffers(struct bo_manager_legacy *boml)
276 {
277 struct bo_legacy *legacy;
278
279 legacy = boml->bos.next;
280     while (legacy) { /* the bos list is NULL terminated, not circular */
281         if (legacy->tobj) {
282             if (legacy->validated) {
283                 driDestroyTextureObject(&legacy->tobj->base);
284                 legacy->tobj = NULL;
285                 legacy->validated = 0;
286 }
287 }
288 legacy = legacy->next;
289 }
290 }
291
292 static struct bo_legacy *bo_allocate(struct bo_manager_legacy *boml,
293 uint32_t size,
294 uint32_t alignment,
295 uint32_t domains,
296 uint32_t flags)
297 {
298 struct bo_legacy *bo_legacy;
299 static int pgsize;
300
301 if (pgsize == 0)
302         pgsize = getpagesize() - 1; /* cache the page-size mask */
303 
304     size = (size + pgsize) & ~pgsize; /* round size up to a whole page */
305
306 bo_legacy = (struct bo_legacy*)calloc(1, sizeof(struct bo_legacy));
307 if (bo_legacy == NULL) {
308 return NULL;
309 }
310 bo_legacy->base.bom = (struct radeon_bo_manager*)boml;
311 bo_legacy->base.handle = 0;
312 bo_legacy->base.size = size;
313 bo_legacy->base.alignment = alignment;
314 bo_legacy->base.domains = domains;
315 bo_legacy->base.flags = flags;
316 bo_legacy->base.ptr = NULL;
317 bo_legacy->map_count = 0;
318 bo_legacy->next = NULL;
319 bo_legacy->prev = NULL;
320 bo_legacy->pnext = NULL;
321 bo_legacy->pprev = NULL;
322 bo_legacy->next = boml->bos.next;
323 bo_legacy->prev = &boml->bos;
324 boml->bos.next = bo_legacy;
325 if (bo_legacy->next) {
326 bo_legacy->next->prev = bo_legacy;
327 }
328
329 return bo_legacy;
330 }
331
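/* Back a GTT BO with a chunk of the GART texture region obtained through
 * the DRM_RADEON_ALLOC ioctl; the size is rounded up to a 4KB multiple.
 */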
332 static int bo_dma_alloc(struct radeon_bo_int *bo)
333 {
334 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
335 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
336 drm_radeon_mem_alloc_t alloc;
337 unsigned size;
338 int base_offset;
339 int r;
340
341 /* align size on 4Kb */
342 size = (((4 * 1024) - 1) + bo_legacy->base.size) & ~((4 * 1024) - 1);
343 alloc.region = RADEON_MEM_REGION_GART;
344 alloc.alignment = bo_legacy->base.alignment;
345 alloc.size = size;
346 alloc.region_offset = &base_offset;
347 r = drmCommandWriteRead(bo->bom->fd,
348 DRM_RADEON_ALLOC,
349 &alloc,
350 sizeof(alloc));
351 if (r) {
352 /* ptr is set to NULL if dma allocation failed */
353 bo_legacy->ptr = NULL;
354 return r;
355 }
356 bo_legacy->ptr = boml->screen->gartTextures.map + base_offset;
357 bo_legacy->offset = boml->screen->gart_texture_offset + base_offset;
358 bo->size = size;
359 boml->dma_alloc_size += size;
360 boml->dma_buf_count++;
361 return 0;
362 }
363
364 static int bo_dma_free(struct radeon_bo_int *bo)
365 {
366 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
367 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
368 drm_radeon_mem_free_t memfree;
369 int r;
370
371 if (bo_legacy->ptr == NULL) {
372 /* ptr is set to NULL if dma allocation failed */
373 return 0;
374 }
375 legacy_get_current_age(boml);
376 memfree.region = RADEON_MEM_REGION_GART;
377 memfree.region_offset = bo_legacy->offset;
378 memfree.region_offset -= boml->screen->gart_texture_offset;
379 r = drmCommandWrite(boml->base.fd,
380 DRM_RADEON_FREE,
381 &memfree,
382 sizeof(memfree));
383 if (r) {
384 fprintf(stderr, "Failed to free bo[%p] at %08x\n",
385 &bo_legacy->base, memfree.region_offset);
386 fprintf(stderr, "ret = %s\n", strerror(-r));
387 return r;
388 }
389 boml->dma_alloc_size -= bo_legacy->base.size;
390 boml->dma_buf_count--;
391 return 0;
392 }
393
394 static void bo_free(struct bo_legacy *bo_legacy)
395 {
396 struct bo_manager_legacy *boml;
397
398 if (bo_legacy == NULL) {
399 return;
400 }
401 boml = (struct bo_manager_legacy *)bo_legacy->base.bom;
402 bo_legacy->prev->next = bo_legacy->next;
403 if (bo_legacy->next) {
404 bo_legacy->next->prev = bo_legacy->prev;
405 }
406 if (!bo_legacy->static_bo) {
407 legacy_free_handle(boml, bo_legacy->base.handle);
408 if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
409 /* dma buffers */
410 bo_dma_free(&bo_legacy->base);
411 } else {
412 driDestroyTextureObject(&bo_legacy->tobj->base);
413 bo_legacy->tobj = NULL;
414 /* free backing store */
415 free(bo_legacy->ptr);
416 }
417 }
418     memset(bo_legacy, 0, sizeof(struct bo_legacy));
419 free(bo_legacy);
420 }
421
422 static struct radeon_bo *bo_open(struct radeon_bo_manager *bom,
423 uint32_t handle,
424 uint32_t size,
425 uint32_t alignment,
426 uint32_t domains,
427 uint32_t flags)
428 {
429 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
430 struct bo_legacy *bo_legacy;
431 int r;
432
433 if (handle) {
434 bo_legacy = boml->bos.next;
435 while (bo_legacy) {
436 if (bo_legacy->base.handle == handle) {
437 radeon_bo_ref((struct radeon_bo *)&(bo_legacy->base));
438 return (struct radeon_bo*)bo_legacy;
439 }
440 bo_legacy = bo_legacy->next;
441 }
442 return NULL;
443 }
444     bo_legacy = bo_allocate(boml, size, alignment, domains, flags);
445     if (bo_legacy == NULL) return NULL; /* static_bo is already 0 from calloc */
446 r = legacy_new_handle(boml, &bo_legacy->base.handle);
447 if (r) {
448 bo_free(bo_legacy);
449 return NULL;
450 }
451 if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT)
452 {
453 retry:
454 legacy_track_pending(&boml->base, 0);
455 /* dma buffers */
456
457 r = bo_dma_alloc(&(bo_legacy->base));
458 if (r)
459 {
460 if (legacy_wait_any_pending(boml) == -1)
461 {
462 bo_free(bo_legacy);
463 return NULL;
464 }
465 goto retry;
466 
467 }
468 }
469 else
470 {
471 bo_legacy->ptr = malloc(bo_legacy->base.size);
472 if (bo_legacy->ptr == NULL) {
473 bo_free(bo_legacy);
474 return NULL;
475 }
476 }
477 radeon_bo_ref((struct radeon_bo *)&(bo_legacy->base));
478
479 return (struct radeon_bo*)bo_legacy;
480 }
481
482 static void bo_ref(struct radeon_bo_int *bo)
483 {
484 }
485
486 static struct radeon_bo *bo_unref(struct radeon_bo_int *boi)
487 {
488 struct bo_legacy *bo_legacy = (struct bo_legacy*)boi;
489
490 if (boi->cref <= 0) {
491 bo_legacy->prev->next = bo_legacy->next;
492 if (bo_legacy->next) {
493 bo_legacy->next->prev = bo_legacy->prev;
494 }
495 if (!bo_legacy->is_pending) {
496 bo_free(bo_legacy);
497 }
498 return NULL;
499 }
500 return (struct radeon_bo *)boi;
501 }
502
503 static int bo_map(struct radeon_bo_int *bo, int write)
504 {
505 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
506 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
507
508 legacy_wait_pending(bo);
509 bo_legacy->validated = 0;
510 bo_legacy->dirty = 1;
511 bo_legacy->map_count++;
512 bo->ptr = bo_legacy->ptr;
513 /* Read the first pixel in the frame buffer. This should
514 * be a noop, right? In fact without this conform fails as reading
515 * from the framebuffer sometimes produces old results -- the
516 * on-card read cache gets mixed up and doesn't notice that the
517 * framebuffer has been updated.
518 *
519 * Note that we should probably be reading some otherwise unused
520 * region of VRAM, otherwise we might get incorrect results when
521 * reading pixels from the top left of the screen.
522 *
523 * I found this problem on an R420 with glean's texCube test.
524 * Note that the R200 span code also *writes* the first pixel in the
525 * framebuffer, but I've found this to be unnecessary.
526 * -- Nicolai Hähnle, June 2008
527 */
528 if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
529 int p;
530 volatile int *buf = (int*)boml->screen->driScreen->pFB;
531 p = *buf;
532 }
533
534 return 0;
535 }
536
537 static int bo_unmap(struct radeon_bo_int *bo)
538 {
539 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
540
541 if (--bo_legacy->map_count > 0)
542 {
543 return 0;
544 }
545
546 bo->ptr = NULL;
547
548 return 0;
549 }
550
551 static int bo_is_busy(struct radeon_bo_int *bo, uint32_t *domain)
552 {
553 *domain = 0;
554 if (bo->domains & RADEON_GEM_DOMAIN_GTT)
555 *domain = RADEON_GEM_DOMAIN_GTT;
556 else
557 *domain = RADEON_GEM_DOMAIN_CPU;
558 if (legacy_is_pending(bo))
559 return -EBUSY;
560 else
561 return 0;
562 }
563
564 static int bo_is_static(struct radeon_bo_int *bo)
565 {
566 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
567 return bo_legacy->static_bo;
568 }
569
570 static struct radeon_bo_funcs bo_legacy_funcs = {
571 bo_open,
572 bo_ref,
573 bo_unref,
574 bo_map,
575 bo_unmap,
576 NULL,
577 bo_is_static,
578 NULL,
579 NULL,
580 bo_is_busy
581 };
582
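/* Make sure a system-memory BO has a copy in the VRAM texture heap:
 * allocate (or reuse) a texmem block for it and, when the contents are
 * dirty, upload them with the DRM_RADEON_TEXTURE blit ioctl.
 */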
583 static int bo_vram_validate(struct radeon_bo_int *bo,
584 uint32_t *soffset,
585 uint32_t *eoffset)
586 {
587 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
588 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
589 int r;
590 int retry_count = 0, pending_retry = 0;
591
592 if (!bo_legacy->tobj) {
593 bo_legacy->tobj = CALLOC(sizeof(struct bo_legacy_texture_object));
594 bo_legacy->tobj->parent = bo_legacy;
595 make_empty_list(&bo_legacy->tobj->base);
596 bo_legacy->tobj->base.totalSize = bo->size;
597 retry:
598 r = driAllocateTexture(&boml->texture_heap, 1,
599 &bo_legacy->tobj->base);
600 if (r) {
601 pending_retry = 0;
602 while(boml->cpendings && pending_retry++ < 10000) {
603 legacy_track_pending(&boml->base, 0);
604 retry_count++;
605 if (retry_count > 2) {
606 free(bo_legacy->tobj);
607 bo_legacy->tobj = NULL;
608 fprintf(stderr, "Ouch! vram_validate failed %d\n", r);
609 return -1;
610 }
611 goto retry;
612 }
613 }
614 bo_legacy->offset = boml->texture_offset +
615 bo_legacy->tobj->base.memBlock->ofs;
616 bo_legacy->dirty = 1;
617 }
618
619     assert(bo_legacy->tobj && bo_legacy->tobj->base.memBlock);
620
621 if (bo_legacy->tobj)
622 driUpdateTextureLRU(&bo_legacy->tobj->base);
623
624 if (bo_legacy->dirty || bo_legacy->tobj->base.dirty_images[0]) {
625 if (IS_R600_CLASS(boml->screen)) {
626 drm_radeon_texture_t tex;
627 drm_radeon_tex_image_t tmp;
628 int ret;
629
630 tex.offset = bo_legacy->offset;
631 tex.image = &tmp;
632 assert(!(tex.offset & 1023));
633
634 tmp.x = 0;
635 tmp.y = 0;
636 tmp.width = bo->size;
637 tmp.height = 1;
638 tmp.data = bo_legacy->ptr;
639 tex.format = RADEON_TXFORMAT_ARGB8888;
640 tex.width = tmp.width;
641 tex.height = tmp.height;
642 tex.pitch = bo->size;
643 do {
644 ret = drmCommandWriteRead(bo->bom->fd,
645 DRM_RADEON_TEXTURE,
646 &tex,
647 sizeof(drm_radeon_texture_t));
648 if (ret) {
649 if (RADEON_DEBUG & RADEON_IOCTL)
650 fprintf(stderr, "DRM_RADEON_TEXTURE: again!\n");
651 usleep(1);
652 }
653 } while (ret == -EAGAIN);
654 } else {
655 /* Copy to VRAM using a blit.
656 * All memory is 4K aligned. We're using 1024 pixels wide blits.
657 */
658 drm_radeon_texture_t tex;
659 drm_radeon_tex_image_t tmp;
660 int ret;
661
662 tex.offset = bo_legacy->offset;
663 tex.image = &tmp;
664 assert(!(tex.offset & 1023));
665
666 tmp.x = 0;
667 tmp.y = 0;
668 if (bo->size < 4096) {
669 tmp.width = (bo->size + 3) / 4;
670 tmp.height = 1;
671 } else {
672 tmp.width = 1024;
673 tmp.height = (bo->size + 4095) / 4096;
674 }
675 tmp.data = bo_legacy->ptr;
676 tex.format = RADEON_TXFORMAT_ARGB8888;
677 tex.width = tmp.width;
678 tex.height = tmp.height;
679 tex.pitch = MAX2(tmp.width / 16, 1);
680 do {
681 ret = drmCommandWriteRead(bo->bom->fd,
682 DRM_RADEON_TEXTURE,
683 &tex,
684 sizeof(drm_radeon_texture_t));
685 if (ret) {
686 if (RADEON_DEBUG & RADEON_IOCTL)
687 fprintf(stderr, "DRM_RADEON_TEXTURE: again!\n");
688 usleep(1);
689 }
690 } while (ret == -EAGAIN);
691 }
692 bo_legacy->dirty = 0;
693 bo_legacy->tobj->base.dirty_images[0] = 0;
694 }
695 return 0;
696 }
697
698 /*
699 * radeon_bo_legacy_validate -
700 * returns:
701 * 0 - all good
702 * -EINVAL - mapped buffer can't be validated
703  * -EAGAIN - restart validation; we've kicked all the buffers out
704 */
705 int radeon_bo_legacy_validate(struct radeon_bo *bo,
706 uint32_t *soffset,
707 uint32_t *eoffset)
708 {
709 struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
710 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)boi->bom;
711 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
712 int r;
713 int retries = 0;
714
715 if (bo_legacy->map_count) {
716         fprintf(stderr, "bo(%p, %d) is mapped (%d), can't validate it.\n",
717 bo, boi->size, bo_legacy->map_count);
718 return -EINVAL;
719 }
720     if (boi->size == 0) {
721 fprintf(stderr, "bo(%p) has size 0.\n", bo);
722 return -EINVAL;
723 }
724 if (bo_legacy->static_bo || bo_legacy->validated) {
725 *soffset = bo_legacy->offset;
726 *eoffset = bo_legacy->offset + boi->size;
727
728 return 0;
729 }
730 if (!(boi->domains & RADEON_GEM_DOMAIN_GTT)) {
731
732 r = bo_vram_validate(boi, soffset, eoffset);
733 if (r) {
734 legacy_track_pending(&boml->base, 0);
735 legacy_kick_all_buffers(boml);
736 retries++;
737 if (retries == 2) {
738 fprintf(stderr,"legacy bo: failed to get relocations into aperture\n");
739 assert(0);
740 exit(-1);
741 }
742 return -EAGAIN;
743 }
744 }
745 *soffset = bo_legacy->offset;
746 *eoffset = bo_legacy->offset + boi->size;
747 bo_legacy->validated = 1;
748
749 return 0;
750 }
751
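/* Mark a BO as referenced by hardware until the scratch age reaches
 * 'pending'; the BO joins the pending list and keeps an extra reference
 * per outstanding use until legacy_is_pending() retires it.
 */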
752 void radeon_bo_legacy_pending(struct radeon_bo *bo, uint32_t pending)
753 {
754 struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
755 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)boi->bom;
756 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
757
758 bo_legacy->pending = pending;
759 bo_legacy->is_pending++;
760 /* add to pending list */
761 radeon_bo_ref(bo);
762 if (bo_legacy->is_pending > 1) {
763 return;
764 }
765 bo_legacy->pprev = boml->pending_bos.pprev;
766 bo_legacy->pnext = NULL;
767 bo_legacy->pprev->pnext = bo_legacy;
768 boml->pending_bos.pprev = bo_legacy;
769 boml->cpendings++;
770 }
771
772 void radeon_bo_manager_legacy_dtor(struct radeon_bo_manager *bom)
773 {
774 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
775 struct bo_legacy *bo_legacy;
776
777 if (bom == NULL) {
778 return;
779 }
780 bo_legacy = boml->bos.next;
781 while (bo_legacy) {
782 struct bo_legacy *next;
783
784 next = bo_legacy->next;
785 bo_free(bo_legacy);
786 bo_legacy = next;
787 }
788 driDestroyTextureHeap(boml->texture_heap);
789 free(boml->free_handles);
790 free(boml);
791 }
792
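/* Wrap a fixed framebuffer region (front/back/depth) in a static BO.
 * Static BOs are never validated or moved and use their card offset as
 * the handle.
 */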
793 static struct bo_legacy *radeon_legacy_bo_alloc_static(struct bo_manager_legacy *bom,
794 int size,
795 uint32_t offset)
796 {
797 struct bo_legacy *bo;
798
799 bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
800
801 if (bo == NULL)
802 return NULL;
803 bo->static_bo = 1;
804 bo->offset = offset + bom->fb_location;
805 bo->base.handle = bo->offset;
806 bo->ptr = bom->screen->driScreen->pFB + offset;
807 if (bo->base.handle > bom->nhandle) {
808 bom->nhandle = bo->base.handle + 1;
809 }
810 radeon_bo_ref((struct radeon_bo *)&(bo->base));
811 return bo;
812 }
813
814 struct radeon_bo_manager *radeon_bo_manager_legacy_ctor(struct radeon_screen *scrn)
815 {
816 struct bo_manager_legacy *bom;
817 struct bo_legacy *bo;
818 unsigned size;
819
820 bom = (struct bo_manager_legacy*)
821 calloc(1, sizeof(struct bo_manager_legacy));
822 if (bom == NULL) {
823 return NULL;
824 }
825
826 make_empty_list(&bom->texture_swapped);
827
828 bom->texture_heap = driCreateTextureHeap(0,
829 bom,
830 scrn->texSize[0],
831 12,
832 RADEON_NR_TEX_REGIONS,
833 (drmTextureRegionPtr)scrn->sarea->tex_list[0],
834 &scrn->sarea->tex_age[0],
835 &bom->texture_swapped,
836 sizeof(struct bo_legacy_texture_object),
837 &bo_legacy_tobj_destroy);
838 bom->texture_offset = scrn->texOffset[0];
839
840 bom->base.funcs = &bo_legacy_funcs;
841 bom->base.fd = scrn->driScreen->fd;
842 bom->bos.next = NULL;
843 bom->bos.prev = NULL;
844 bom->pending_bos.pprev = &bom->pending_bos;
845 bom->pending_bos.pnext = NULL;
846 bom->screen = scrn;
847 bom->fb_location = scrn->fbLocation;
848 bom->nhandle = 1;
849 bom->cfree_handles = 0;
850 bom->nfree_handles = 0x400;
851     bom->free_handles = (uint32_t*)malloc(bom->nfree_handles * sizeof(uint32_t));
852 if (bom->free_handles == NULL) {
853 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
854 return NULL;
855 }
856
857     /* biggest framebuffer size: 4096x4096 pixels at 4 bytes per pixel */
858 size = 4096*4096*4;
859
860 /* allocate front */
861 bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->frontOffset);
862
863 if (!bo) {
864 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
865 return NULL;
866 }
867 if (scrn->sarea->tiling_enabled) {
868 bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
869 }
870
871 /* allocate back */
872 bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->backOffset);
873
874 if (!bo) {
875 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
876 return NULL;
877 }
878 if (scrn->sarea->tiling_enabled) {
879 bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
880 }
881
882 /* allocate depth */
883 bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->depthOffset);
884
885 if (!bo) {
886 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
887 return NULL;
888 }
889 bo->base.flags = 0;
890 if (scrn->sarea->tiling_enabled) {
891 bo->base.flags |= RADEON_BO_FLAGS_MACRO_TILE;
892 bo->base.flags |= RADEON_BO_FLAGS_MICRO_TILE;
893 }
894 return (struct radeon_bo_manager*)bom;
895 }
896
897 void radeon_bo_legacy_texture_age(struct radeon_bo_manager *bom)
898 {
899 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
900 DRI_AGE_TEXTURES(boml->texture_heap);
901 }
902
903 unsigned radeon_bo_legacy_relocs_size(struct radeon_bo *bo)
904 {
905 struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
906 struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
907
908 if (bo_legacy->static_bo || (boi->domains & RADEON_GEM_DOMAIN_GTT)) {
909 return 0;
910 }
911 return boi->size;
912 }
913
914 /*
915 * Fake up a bo for things like texture image_override.
916 * bo->offset already includes fb_location
917 */
918 struct radeon_bo *radeon_legacy_bo_alloc_fake(struct radeon_bo_manager *bom,
919 int size,
920 uint32_t offset)
921 {
922 struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
923 struct bo_legacy *bo;
924
925 bo = bo_allocate(boml, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
926
927 if (bo == NULL)
928 return NULL;
929 bo->static_bo = 1;
930 bo->offset = offset;
931 bo->base.handle = bo->offset;
932 bo->ptr = boml->screen->driScreen->pFB + (offset - boml->fb_location);
933 if (bo->base.handle > boml->nhandle) {
934 boml->nhandle = bo->base.handle + 1;
935 }
936 radeon_bo_ref((struct radeon_bo *)&(bo->base));
937 return (struct radeon_bo *)&(bo->base);
938 }
939