2 * Copyright © 2008 Nicolai Haehnle
3 * Copyright © 2008 Dave Airlie
4 * Copyright © 2008 Jérôme Glisse
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions of the Software.
29 * Aapo Tahkola <aet@rasterburn.org>
30 * Nicolai Haehnle <prefect_@gmx.net>
32 * Jérôme Glisse <glisse@freedesktop.org>
41 #include <sys/ioctl.h>
44 #include "main/simple_list.h"
47 #include "radeon_drm.h"
48 #include "radeon_bo.h"
49 #include "radeon_bo_legacy.h"
50 #include "common_context.h"
/*
 * Per-buffer-object bookkeeping for the legacy (non-kernel-managed) path.
 * NOTE(review): the struct's opening line and several members between the
 * visible ones are missing from this extraction.
 */
/* Base object; must stay the first member so that a struct radeon_bo* can
 * be cast to struct bo_legacy* (see the casts throughout this file). */
53 struct radeon_bo base
;
/* DRI texture object embedded right after base; bo_legacy_tobj_destroy()
 * relies on this layout to recover the containing bo_legacy from it. */
54 driTextureObject tobj_base
;
/* Non-zero once dri_texture_obj has been allocated from the texture heap
 * (see bo_vram_validate). */
60 int got_dri_texture_obj
;
63 driTextureObject dri_texture_obj
;
/* Links in the manager's global BO list (boml->bos). */
65 struct bo_legacy
*next
, *prev
;
/* Links in the manager's pending (GPU-busy) list (boml->pending_bos). */
66 struct bo_legacy
*pnext
, *pprev
;
/*
 * Legacy buffer-object manager state: handle allocator, DMA/GART
 * accounting, pending list and the DRI texture heap used for VRAM.
 * NOTE(review): several members (e.g. nhandle, current_age, bos,
 * fb_location — all referenced later in this file) are missing from this
 * extraction, as is the closing brace.
 */
69 struct bo_manager_legacy
{
70 struct radeon_bo_manager base
;
/* Capacity of the free_handles array, in entries. */
72 unsigned nfree_handles
;
/* Number of live entries on the free_handles stack. */
73 unsigned cfree_handles
;
/* List head for buffers with outstanding GPU work. */
76 struct bo_legacy pending_bos
;
78 uint32_t texture_offset
;
/* Running total of GART memory handed out by bo_dma_alloc(). */
79 unsigned dma_alloc_size
;
80 uint32_t dma_buf_count
;
82 driTextureObject texture_swapped
;
83 driTexHeap
*texture_heap
;
84 struct radeon_screen
*screen
;
/* Stack of recycled BO handles; see legacy_new_handle/legacy_free_handle. */
85 unsigned *free_handles
;
88 static void bo_legacy_tobj_destroy(void *data
, driTextureObject
*t
)
90 struct bo_legacy
*bo_legacy
;
92 bo_legacy
= (struct bo_legacy
*)((char*)t
)-sizeof(struct radeon_bo
);
93 bo_legacy
->got_dri_texture_obj
= 0;
94 bo_legacy
->validated
= 0;
97 static void inline clean_handles(struct bo_manager_legacy
*bom
)
99 while (bom
->cfree_handles
> 0 &&
100 !bom
->free_handles
[bom
->cfree_handles
- 1])
101 bom
->cfree_handles
--;
/*
 * Hand out an integer handle for a new buffer object: reuse the top of the
 * free_handles stack when available, otherwise take the next never-used
 * value from bom->nhandle.
 * NOTE(review): several original lines are missing from this extraction
 * (opening brace, the tmp declaration, the exhaustion error path and the
 * final store through *handle), so the body below is incomplete.
 */
104 static int legacy_new_handle(struct bo_manager_legacy
*bom
, uint32_t *handle
)
/* Handle space exhausted — presumably an error return follows in the
 * missing lines; confirm against the full source. */
109 if (bom
->nhandle
== 0xFFFFFFFF) {
/* Prefer recycling a previously freed handle. */
112 if (bom
->cfree_handles
> 0) {
113 tmp
= bom
->free_handles
[--bom
->cfree_handles
];
116 bom
->cfree_handles
= 0;
/* No recycled handle: allocate a brand-new one. */
117 tmp
= bom
->nhandle
++;
/*
 * Return a buffer-object handle to the allocator.  The newest handle is
 * given back to bom->nhandle directly; older ones are pushed onto the
 * free_handles stack, growing it in 0x100-entry steps when full.
 * NOTE(review): multiple original lines (braces, early returns, the
 * nhandle decrement and clean_handles calls) are missing from this
 * extraction, so the body below is incomplete.
 */
124 static int legacy_free_handle(struct bo_manager_legacy
*bom
, uint32_t handle
)
/* Freeing the most recently allocated handle: it can simply be returned
 * to the sequential counter instead of the stack. */
131 if (handle
== (bom
->nhandle
- 1)) {
/* Zero out any stacked copy of that same handle (0 acts as a tombstone,
 * later skipped by clean_handles()). */
135 for (i
= bom
->cfree_handles
- 1; i
>= 0; i
--) {
136 if (bom
->free_handles
[i
] == (bom
->nhandle
- 1)) {
138 bom
->free_handles
[i
] = 0;
/* Room left on the stack: push the freed handle for reuse. */
144 if (bom
->cfree_handles
< bom
->nfree_handles
) {
145 bom
->free_handles
[bom
->cfree_handles
++] = handle
;
/* Stack full: grow by 0x100 entries.  Note '* 4' assumes
 * sizeof(uint32_t) == 4; 'sizeof *handles' would be safer. */
148 bom
->nfree_handles
+= 0x100;
149 handles
= (uint32_t*)realloc(bom
->free_handles
, bom
->nfree_handles
* 4);
150 if (handles
== NULL
) {
/* realloc failed: restore the old capacity (original pointer is still
 * valid; an error return presumably follows in the missing lines). */
151 bom
->nfree_handles
-= 0x100;
154 bom
->free_handles
= handles
;
155 bom
->free_handles
[bom
->cfree_handles
++] = handle
;
/*
 * Refresh boml->current_age, the GPU's last-completed fence value, used to
 * decide whether pending buffers have finished.  R300-class chips query it
 * through the DRM GETPARAM ioctl; older chips read scratch register 3
 * directly from the screen's scratch area.
 * NOTE(review): several original lines (braces, the 'int r' declaration,
 * the ioctl argument tail and the else keyword) are missing from this
 * extraction.
 */
159 static void legacy_get_current_age(struct bo_manager_legacy
*boml
)
161 drm_radeon_getparam_t gp
;
164 if (IS_R300_CLASS(boml
->screen
)) {
165 gp
.param
= RADEON_PARAM_LAST_CLEAR
;
166 gp
.value
= (int *)&boml
->current_age
;
167 r
= drmCommandWriteRead(boml
->base
.fd
, DRM_RADEON_GETPARAM
,
/* Query failed: report and (in the missing lines) presumably exit. */
170 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
, r
);
/* Non-R300 path: age is mirrored in scratch register 3. */
174 boml
->current_age
= boml
->screen
->scratch
[3];
/*
 * Test whether @bo still has outstanding GPU work.  If the GPU age has
 * passed the buffer's pending fence, unlink it from the manager's pending
 * list and drop the references that were taken per pending submission.
 * Returns non-zero while still pending (return statements are among the
 * lines missing from this extraction).
 */
177 static int legacy_is_pending(struct radeon_bo
*bo
)
179 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
180 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
/* Clamp a negative count (shouldn't happen) and bail out early. */
182 if (bo_legacy
->is_pending
<= 0) {
183 bo_legacy
->is_pending
= 0;
/* GPU has caught up with this buffer's fence: retire it. */
186 if (boml
->current_age
>= bo_legacy
->pending
) {
/* Fix up the list tail pointer if this was the last pending BO. */
187 if (boml
->pending_bos
.pprev
== bo_legacy
) {
188 boml
->pending_bos
.pprev
= bo_legacy
->pprev
;
190 bo_legacy
->pprev
->pnext
= bo_legacy
->pnext
;
191 if (bo_legacy
->pnext
) {
192 bo_legacy
->pnext
->pprev
= bo_legacy
->pprev
;
/* One reference was taken per pending submission; release them all.
 * Note radeon_bo_unref() may free the object on the last iteration. */
194 assert(bo_legacy
->is_pending
<= bo
->cref
);
195 while (bo_legacy
->is_pending
--) {
196 bo
= radeon_bo_unref(bo
);
201 bo_legacy
->is_pending
= 0;
/*
 * Busy-wait until @bo has no outstanding GPU work, polling the GPU age
 * between checks.  Returns early if the buffer is not pending at all.
 * NOTE(review): braces, return statements and the loop's delay/body lines
 * are missing from this extraction.
 */
208 static int legacy_wait_pending(struct radeon_bo
*bo
)
210 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
211 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
213 if (!bo_legacy
->is_pending
) {
216 /* FIXME: lockup and userspace busy looping that's all the folks */
217 legacy_get_current_age(boml
);
218 while (legacy_is_pending(bo
)) {
220 legacy_get_current_age(boml
);
/*
 * Walk the pending list once, retiring every buffer whose GPU work has
 * completed (via legacy_is_pending).  When @debug is non-zero, print each
 * pending buffer's address, size, the current GPU age and its fence.
 * NOTE(review): the loop construct, braces and the list-advance lines are
 * missing from this extraction.
 */
225 static void legacy_track_pending(struct bo_manager_legacy
*boml
, int debug
)
227 struct bo_legacy
*bo_legacy
;
228 struct bo_legacy
*next
;
230 legacy_get_current_age(boml
);
231 bo_legacy
= boml
->pending_bos
.pnext
;
234 fprintf(stderr
,"pending %p %d %d %d\n", bo_legacy
, bo_legacy
->base
.size
,
235 boml
->current_age
, bo_legacy
->pending
);
/* Grab the successor before legacy_is_pending() may unlink/free this
 * node. */
236 next
= bo_legacy
->pnext
;
237 if (legacy_is_pending(&(bo_legacy
->base
))) {
/*
 * Wait for the oldest pending buffer (head of the pending list) to finish,
 * freeing up resources for a new allocation.  Presumably returns -1 when
 * nothing is pending — the early-return and return lines are missing from
 * this extraction (see the caller's '== -1' check in bo_open).
 */
243 static int legacy_wait_any_pending(struct bo_manager_legacy
*boml
)
245 struct bo_legacy
*bo_legacy
;
246 struct bo_legacy
*next
;
248 legacy_get_current_age(boml
);
249 bo_legacy
= boml
->pending_bos
.pnext
;
252 legacy_wait_pending(&bo_legacy
->base
);
/*
 * Allocate and zero-initialize a bo_legacy record and link it at the head
 * of the manager's global BO list (boml->bos).  No backing storage is
 * allocated here — that happens later in bo_open()/bo_dma_alloc().
 * NOTE(review): the remaining parameters (size, alignment, domains, flags
 * — see the assignments below), braces and the final return are missing
 * from this extraction.
 */
256 static struct bo_legacy
*bo_allocate(struct bo_manager_legacy
*boml
,
262 struct bo_legacy
*bo_legacy
;
264 bo_legacy
= (struct bo_legacy
*)calloc(1, sizeof(struct bo_legacy
));
265 if (bo_legacy
== NULL
) {
268 bo_legacy
->base
.bom
= (struct radeon_bo_manager
*)boml
;
269 bo_legacy
->base
.handle
= 0;
270 bo_legacy
->base
.size
= size
;
271 bo_legacy
->base
.alignment
= alignment
;
272 bo_legacy
->base
.domains
= domains
;
273 bo_legacy
->base
.flags
= flags
;
274 bo_legacy
->base
.ptr
= NULL
;
275 bo_legacy
->map_count
= 0;
276 bo_legacy
->next
= NULL
;
277 bo_legacy
->prev
= NULL
;
278 bo_legacy
->got_dri_texture_obj
= 0;
279 bo_legacy
->pnext
= NULL
;
280 bo_legacy
->pprev
= NULL
;
/* Push onto the doubly linked global list headed by boml->bos. */
281 bo_legacy
->next
= boml
->bos
.next
;
282 bo_legacy
->prev
= &boml
->bos
;
283 boml
->bos
.next
= bo_legacy
;
284 if (bo_legacy
->next
) {
285 bo_legacy
->next
->prev
= bo_legacy
;
/*
 * Back a GTT-domain buffer with GART memory via the DRM_RADEON_ALLOC
 * ioctl, then derive the CPU mapping (gartTextures.map + offset) and the
 * GPU offset (gart_texture_offset + offset).  Also updates the manager's
 * DMA accounting.
 * NOTE(review): local declarations (size, base_offset, r), braces, the
 * alloc.size assignment, the ioctl argument tail and the return paths are
 * missing from this extraction.
 */
290 static int bo_dma_alloc(struct radeon_bo
*bo
)
292 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
293 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
294 drm_radeon_mem_alloc_t alloc
;
299 /* align size on 4Kb */
300 size
= (((4 * 1024) - 1) + bo
->size
) & ~((4 * 1024) - 1);
301 alloc
.region
= RADEON_MEM_REGION_GART
;
302 alloc
.alignment
= bo_legacy
->base
.alignment
;
/* The kernel writes the allocated region offset back through this
 * pointer. */
304 alloc
.region_offset
= &base_offset
;
305 r
= drmCommandWriteRead(bo
->bom
->fd
,
310 /* ptr is set to NULL if dma allocation failed */
311 bo_legacy
->ptr
= NULL
;
314 bo_legacy
->ptr
= boml
->screen
->gartTextures
.map
+ base_offset
;
315 bo_legacy
->offset
= boml
->screen
->gart_texture_offset
+ base_offset
;
317 boml
->dma_alloc_size
+= size
;
318 boml
->dma_buf_count
++;
/*
 * Release a buffer's GART backing via the DRM_RADEON_FREE ioctl and update
 * the manager's DMA accounting.  A NULL ptr means the DMA allocation never
 * succeeded, so there is nothing to free.
 * NOTE(review): braces, return statements and the ioctl argument tail are
 * missing from this extraction.
 */
322 static int bo_dma_free(struct radeon_bo
*bo
)
324 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
325 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
326 drm_radeon_mem_free_t memfree
;
329 if (bo_legacy
->ptr
== NULL
) {
330 /* ptr is set to NULL if dma allocation failed */
/* Make sure retired fences are observed before handing memory back. */
333 legacy_get_current_age(boml
);
334 memfree
.region
= RADEON_MEM_REGION_GART
;
/* Convert the GPU offset back to a region-relative offset, undoing what
 * bo_dma_alloc() added. */
335 memfree
.region_offset
= bo_legacy
->offset
;
336 memfree
.region_offset
-= boml
->screen
->gart_texture_offset
;
337 r
= drmCommandWrite(boml
->base
.fd
,
342 fprintf(stderr
, "Failed to free bo[%p] at %08x\n",
343 &bo_legacy
->base
, memfree
.region_offset
);
344 fprintf(stderr
, "ret = %s\n", strerror(-r
));
347 boml
->dma_alloc_size
-= bo_legacy
->base
.size
;
348 boml
->dma_buf_count
--;
/*
 * Final destruction of a bo_legacy: unlink it from the manager's global
 * list, recycle its handle, release GART or malloc'ed backing store for
 * non-static buffers, then poison and (in the missing lines, presumably)
 * free the record itself.
 * NOTE(review): braces, an early return and the trailing free() are
 * missing from this extraction.
 */
352 static void bo_free(struct bo_legacy
*bo_legacy
)
354 struct bo_manager_legacy
*boml
;
356 if (bo_legacy
== NULL
) {
359 boml
= (struct bo_manager_legacy
*)bo_legacy
->base
.bom
;
/* Unlink from the global BO list. */
360 bo_legacy
->prev
->next
= bo_legacy
->next
;
361 if (bo_legacy
->next
) {
362 bo_legacy
->next
->prev
= bo_legacy
->prev
;
/* Static (front/back/depth) buffers own no handle or backing store. */
364 if (!bo_legacy
->static_bo
) {
365 legacy_free_handle(boml
, bo_legacy
->base
.handle
);
366 if (bo_legacy
->base
.domains
& RADEON_GEM_DOMAIN_GTT
) {
368 bo_dma_free(&bo_legacy
->base
);
370 /* free backing store */
371 free(bo_legacy
->ptr
);
/* Scrub the record to catch use-after-free bugs early. */
374 memset(bo_legacy
, 0 , sizeof(struct bo_legacy
));
/*
 * Manager open callback: either look up an existing BO by @handle (taking
 * a reference) or create a new one — allocating GART memory for GTT-domain
 * buffers (retrying after waiting on pending buffers, per the visible
 * legacy_wait_any_pending call) and a plain malloc'ed backing store for
 * VRAM-domain ones.  Returns the new/found BO with one reference held by
 * the caller.
 * NOTE(review): the remaining parameters (handle, size, alignment,
 * domains, flags), braces, loop constructs and error paths are missing
 * from this extraction.
 */
378 static struct radeon_bo
*bo_open(struct radeon_bo_manager
*bom
,
385 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bom
;
386 struct bo_legacy
*bo_legacy
;
/* Non-zero handle: search the global list for an existing object. */
390 bo_legacy
= boml
->bos
.next
;
392 if (bo_legacy
->base
.handle
== handle
) {
393 radeon_bo_ref(&(bo_legacy
->base
));
394 return (struct radeon_bo
*)bo_legacy
;
396 bo_legacy
= bo_legacy
->next
;
/* No match: create a fresh, non-static BO with a new handle. */
401 bo_legacy
= bo_allocate(boml
, size
, alignment
, domains
, flags
);
402 bo_legacy
->static_bo
= 0;
403 r
= legacy_new_handle(boml
, &bo_legacy
->base
.handle
);
408 if (bo_legacy
->base
.domains
& RADEON_GEM_DOMAIN_GTT
) {
/* Retire finished buffers first, then try the GART allocation; on
 * failure wait for any pending buffer to complete and retry. */
410 legacy_track_pending(boml
, 0);
413 r
= bo_dma_alloc(&(bo_legacy
->base
));
415 if (legacy_wait_any_pending(boml
) == -1) {
/* VRAM-domain path: plain system-memory staging buffer. */
423 bo_legacy
->ptr
= malloc(bo_legacy
->base
.size
);
424 if (bo_legacy
->ptr
== NULL
) {
429 radeon_bo_ref(&(bo_legacy
->base
));
430 return (struct radeon_bo
*)bo_legacy
;
/*
 * Per-BO reference callback for bo_legacy_funcs.
 * NOTE(review): the body (original lines 434-436) is missing from this
 * extraction — presumably empty, with generic refcounting handled by the
 * caller; confirm against the full source.
 */
433 static void bo_ref(struct radeon_bo
*bo
)
/*
 * Per-BO unreference callback: when the last reference is dropped, unlink
 * the object from the manager's global list and, if it has no pending GPU
 * work, destroy it (the bo_free call and the refcount checks are among the
 * lines missing from this extraction).
 */
437 static struct radeon_bo
*bo_unref(struct radeon_bo
*bo
)
439 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
/* Unlink from the global BO list. */
442 bo_legacy
->prev
->next
= bo_legacy
->next
;
443 if (bo_legacy
->next
) {
444 bo_legacy
->next
->prev
= bo_legacy
->prev
;
/* Only buffers with no outstanding GPU work can be freed immediately;
 * pending ones are retired later by legacy_is_pending(). */
446 if (!bo_legacy
->is_pending
) {
/*
 * Map a buffer for CPU access: wait for the GPU to finish with it, mark it
 * dirty/invalid so it gets re-uploaded on the next validate, bump the map
 * count and expose the backing pointer through bo->ptr.  Ends with a
 * deliberate framebuffer read to flush the on-card read cache (see the
 * original author's comment below).
 * NOTE(review): braces, return statements and the line that actually reads
 * *buf are missing from this extraction.
 */
454 static int bo_map(struct radeon_bo
*bo
, int write
)
456 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
457 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
459 legacy_wait_pending(bo
);
460 bo_legacy
->validated
= 0;
461 bo_legacy
->dirty
= 1;
462 bo_legacy
->map_count
++;
463 bo
->ptr
= bo_legacy
->ptr
;
464 /* Read the first pixel in the frame buffer. This should
465 * be a noop, right? In fact without this conform fails as reading
466 * from the framebuffer sometimes produces old results -- the
467 * on-card read cache gets mixed up and doesn't notice that the
468 * framebuffer has been updated.
470 * Note that we should probably be reading some otherwise unused
471 * region of VRAM, otherwise we might get incorrect results when
472 * reading pixels from the top left of the screen.
474 * I found this problem on an R420 with glean's texCube test.
475 * Note that the R200 span code also *writes* the first pixel in the
476 * framebuffer, but I've found this to be unnecessary.
477 * -- Nicolai Hähnle, June 2008
481 volatile int *buf
= (int*)boml
->screen
->driScreen
->pFB
;
/*
 * Unmap callback: decrement the map count; only when it reaches zero does
 * the mapping actually go away (the bo->ptr reset and returns are among
 * the lines missing from this extraction).
 */
487 static int bo_unmap(struct radeon_bo
*bo
)
489 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
/* Still mapped by someone else: nothing more to do yet. */
491 if (--bo_legacy
->map_count
> 0) {
/*
 * Function table wiring the bo_* callbacks above into the generic
 * radeon_bo_manager interface.
 * NOTE(review): the initializer entries (original lines 499-505) are
 * missing from this extraction.
 */
498 static struct radeon_bo_funcs bo_legacy_funcs
= {
/*
 * Place a VRAM-domain buffer: on first validation, allocate a block from
 * the DRI texture heap and compute the buffer's VRAM offset; if the buffer
 * is dirty, upload its system-memory contents to VRAM with a blit through
 * the DRM_RADEON_TEXTURE ioctl, retrying while the kernel returns -EAGAIN.
 * NOTE(review): the remaining parameters (presumably soffset/eoffset per
 * the caller), braces, return paths, the deliberate-segfault error branch
 * body and the width/pitch setup for the >=4096-byte case are missing from
 * this extraction.
 */
506 static int bo_vram_validate(struct radeon_bo
*bo
,
510 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
511 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
/* First validation: grab a texture-heap block big enough for the BO. */
514 if (!bo_legacy
->got_dri_texture_obj
) {
515 make_empty_list(&bo_legacy
->dri_texture_obj
);
516 bo_legacy
->dri_texture_obj
.totalSize
= bo
->size
;
517 r
= driAllocateTexture(&boml
->texture_heap
, 1,
518 &bo_legacy
->dri_texture_obj
);
/* NOTE(review): 'segfault' suggests the missing error branch crashes
 * deliberately after printing — confirm against the full source. */
520 uint8_t *segfault
=NULL
;
521 fprintf(stderr
, "Ouch! vram_validate failed %d\n", r
);
/* VRAM offset = heap base + this block's offset inside the heap. */
525 bo_legacy
->offset
= boml
->texture_offset
+
526 bo_legacy
->dri_texture_obj
.memBlock
->ofs
;
527 bo_legacy
->got_dri_texture_obj
= 1;
528 bo_legacy
->dirty
= 1;
530 if (bo_legacy
->dirty
) {
531 /* Copy to VRAM using a blit.
532 * All memory is 4K aligned. We're using 1024 pixels wide blits.
534 drm_radeon_texture_t tex
;
535 drm_radeon_tex_image_t tmp
;
538 tex
.offset
= bo_legacy
->offset
;
540 assert(!(tex
.offset
& 1023));
/* Small buffers: a single row of 32-bit pixels. */
544 if (bo
->size
< 4096) {
545 tmp
.width
= (bo
->size
+ 3) / 4;
/* Larger buffers: 4096-byte-wide rows, rounded up. */
549 tmp
.height
= (bo
->size
+ 4095) / 4096;
551 tmp
.data
= bo_legacy
->ptr
;
552 tex
.format
= RADEON_TXFORMAT_ARGB8888
;
553 tex
.width
= tmp
.width
;
554 tex
.height
= tmp
.height
;
555 tex
.pitch
= MAX2(tmp
.width
/ 16, 1);
557 ret
= drmCommandWriteRead(bo
->bom
->fd
,
560 sizeof(drm_radeon_texture_t
));
562 if (RADEON_DEBUG
& DEBUG_IOCTL
)
563 fprintf(stderr
, "DRM_RADEON_TEXTURE: again!\n");
/* Kernel asks us to resubmit while it makes room. */
566 } while (ret
== -EAGAIN
);
567 bo_legacy
->dirty
= 0;
/*
 * Public entry point: make @bo GPU-addressable and report its address
 * range through *soffset/*eoffset.  Mapped buffers cannot be validated;
 * static and already-validated buffers short-circuit.  VRAM-domain buffers
 * are placed/uploaded by bo_vram_validate().
 * NOTE(review): the soffset/eoffset parameter declarations, braces and
 * return statements are missing from this extraction.  The typo "valide"
 * in the error message is a runtime string and is left untouched.
 */
572 int radeon_bo_legacy_validate(struct radeon_bo
*bo
,
576 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
579 if (bo_legacy
->map_count
) {
580 fprintf(stderr
, "bo(%p, %d) is mapped (%d) can't valide it.\n",
581 bo
, bo
->size
, bo_legacy
->map_count
);
584 if (bo_legacy
->static_bo
|| bo_legacy
->validated
) {
585 *soffset
= bo_legacy
->offset
;
586 *eoffset
= bo_legacy
->offset
+ bo
->size
;
/* Non-GTT (i.e. VRAM) buffers need to be placed in the texture heap. */
589 if (!(bo
->domains
& RADEON_GEM_DOMAIN_GTT
)) {
590 r
= bo_vram_validate(bo
, soffset
, eoffset
);
595 *soffset
= bo_legacy
->offset
;
596 *eoffset
= bo_legacy
->offset
+ bo
->size
;
597 bo_legacy
->validated
= 1;
/*
 * Record that @bo was just submitted to the GPU with fence value @pending.
 * Each submission bumps is_pending (matched by one reference, released in
 * legacy_is_pending when the fence retires); the buffer is appended to the
 * manager's pending list only on the first outstanding submission.
 * NOTE(review): braces, the radeon_bo_ref call and an early return are
 * among the lines missing from this extraction.
 */
601 void radeon_bo_legacy_pending(struct radeon_bo
*bo
, uint32_t pending
)
603 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
604 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
606 bo_legacy
->pending
= pending
;
607 bo_legacy
->is_pending
++;
608 /* add to pending list */
/* Already on the list from an earlier submission: nothing to link. */
610 if (bo_legacy
->is_pending
> 1) {
/* Append at the tail (pending_bos.pprev tracks the tail). */
613 bo_legacy
->pprev
= boml
->pending_bos
.pprev
;
614 bo_legacy
->pnext
= NULL
;
615 bo_legacy
->pprev
->pnext
= bo_legacy
;
616 boml
->pending_bos
.pprev
= bo_legacy
;
/*
 * Tear down the legacy manager: walk the global BO list freeing every
 * remaining buffer, then release the handle array (the loop construct,
 * the bo_free call, the heap destruction and the final free(boml) are
 * among the lines missing from this extraction).
 */
620 void radeon_bo_manager_legacy_dtor(struct radeon_bo_manager
*bom
)
622 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bom
;
623 struct bo_legacy
*bo_legacy
;
628 bo_legacy
= boml
->bos
.next
;
630 struct bo_legacy
*next
;
/* Save the successor before the current node is freed. */
632 next
= bo_legacy
->next
;
636 free(boml
->free_handles
);
/*
 * Create a bo_legacy wrapping a fixed region of the framebuffer (front,
 * back or depth buffer) at @offset.  The handle is the absolute VRAM
 * offset (fb_location + offset), guaranteed unique per region, and
 * nhandle is bumped past it so dynamic handles never collide.
 * NOTE(review): braces, NULL checks, the static_bo=1 assignment and the
 * return are among the lines missing from this extraction.
 */
640 static struct bo_legacy
*radeon_legacy_bo_alloc_static(struct bo_manager_legacy
*bom
,
641 int size
, uint32_t offset
)
643 struct bo_legacy
*bo
;
645 bo
= bo_allocate(bom
, size
, 0, RADEON_GEM_DOMAIN_VRAM
, 0);
649 bo
->offset
= offset
+ bom
->fb_location
;
650 bo
->base
.handle
= bo
->offset
;
/* CPU view: the mapped framebuffer plus the region offset. */
651 bo
->ptr
= bom
->screen
->driScreen
->pFB
+ offset
;
/* Keep dynamic handle allocation above the static handles. */
652 if (bo
->base
.handle
> bom
->nhandle
) {
653 bom
->nhandle
= bo
->base
.handle
+ 1;
/*
 * Build a legacy buffer-object manager for @scrn: create the DRI texture
 * heap over the SAREA texture region, initialize the BO and pending lists,
 * the free-handle array, and pre-register static BOs for the front, back
 * and depth buffers (applying SAREA tiling flags to each).  Returns the
 * manager, or (per the visible error paths) destroys it and presumably
 * returns NULL on failure.
 * NOTE(review): many lines are missing from this extraction — NULL checks,
 * the screen/heap setup arguments, the 'size' computation for the static
 * buffers, braces and intermediate error returns.
 */
658 struct radeon_bo_manager
*radeon_bo_manager_legacy_ctor(struct radeon_screen
*scrn
)
660 struct bo_manager_legacy
*bom
;
661 struct bo_legacy
*bo
;
664 bom
= (struct bo_manager_legacy
*)
665 calloc(1, sizeof(struct bo_manager_legacy
));
/* Texture heap over the SAREA-described VRAM texture region; evictions
 * call back into bo_legacy_tobj_destroy(). */
670 bom
->texture_heap
= driCreateTextureHeap(0,
674 RADEON_NR_TEX_REGIONS
,
675 (drmTextureRegionPtr
)scrn
->sarea
->tex_list
[0],
676 &scrn
->sarea
->tex_age
[0],
677 &bom
->texture_swapped
,
678 sizeof(struct bo_legacy
),
679 &bo_legacy_tobj_destroy
);
680 bom
->texture_offset
= scrn
->texOffset
[0];
682 bom
->base
.funcs
= &bo_legacy_funcs
;
683 bom
->base
.fd
= scrn
->driScreen
->fd
;
684 bom
->bos
.next
= NULL
;
685 bom
->bos
.prev
= NULL
;
/* Empty pending list: tail pointer loops back to the head sentinel. */
686 bom
->pending_bos
.pprev
= &bom
->pending_bos
;
687 bom
->pending_bos
.pnext
= NULL
;
689 bom
->fb_location
= scrn
->fbLocation
;
691 bom
->cfree_handles
= 0;
692 bom
->nfree_handles
= 0x400;
/* '* 4' assumes 4-byte handles; 'sizeof *free_handles' would be safer. */
693 bom
->free_handles
= (uint32_t*)malloc(bom
->nfree_handles
* 4);
694 if (bom
->free_handles
== NULL
) {
695 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager
*)bom
);
699 /* biggest framebuffer size */
/* Static BO for the front buffer. */
703 bo
= radeon_legacy_bo_alloc_static(bom
, size
, bom
->screen
->frontOffset
);
705 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager
*)bom
);
708 if (scrn
->sarea
->tiling_enabled
) {
709 bo
->base
.flags
= RADEON_BO_FLAGS_MACRO_TILE
;
/* Static BO for the back buffer. */
713 bo
= radeon_legacy_bo_alloc_static(bom
, size
, bom
->screen
->backOffset
);
715 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager
*)bom
);
718 if (scrn
->sarea
->tiling_enabled
) {
719 bo
->base
.flags
= RADEON_BO_FLAGS_MACRO_TILE
;
/* Static BO for the depth buffer: both macro and micro tiling. */
723 bo
= radeon_legacy_bo_alloc_static(bom
, size
, bom
->screen
->depthOffset
);
725 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager
*)bom
);
729 if (scrn
->sarea
->tiling_enabled
) {
730 bo
->base
.flags
|= RADEON_BO_FLAGS_MACRO_TILE
;
731 bo
->base
.flags
|= RADEON_BO_FLAGS_MICRO_TILE
;
733 return (struct radeon_bo_manager
*)bom
;
736 void radeon_bo_legacy_texture_age(struct radeon_bo_manager
*bom
)
738 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bom
;
739 DRI_AGE_TEXTURES(boml
->texture_heap
);
/*
 * Report how much command-stream relocation space @bo needs.  Static and
 * GTT-domain buffers take one branch, everything else another — the actual
 * return values (original lines 747 onward) are missing from this
 * extraction, so the sizes cannot be stated here.
 */
742 unsigned radeon_bo_legacy_relocs_size(struct radeon_bo
*bo
)
744 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
746 if (bo_legacy
->static_bo
|| (bo
->domains
& RADEON_GEM_DOMAIN_GTT
)) {
/*
 * Report whether @bo is one of the pre-allocated static buffers
 * (front/back/depth), as recorded in its static_bo flag.
 * NOTE(review): the closing brace lies beyond the visible extraction.
 */
752 int radeon_legacy_bo_is_static(struct radeon_bo
*bo
)
754 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
755 return bo_legacy
->static_bo
;