2 * Copyright © 2008 Nicolai Haehnle
3 * Copyright © 2008 Dave Airlie
4 * Copyright © 2008 Jérôme Glisse
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
29 * Aapo Tahkola <aet@rasterburn.org>
30 * Nicolai Haehnle <prefect_@gmx.net>
32 * Jérôme Glisse <glisse@freedesktop.org>
41 #include <sys/ioctl.h>
44 #include "main/simple_list.h"
47 #include "radeon_drm.h"
48 #include "radeon_bo.h"
49 #include "radeon_bo_legacy.h"
50 #include "common_context.h"
/* Fields of struct bo_legacy — a legacy (non-GEM) radeon buffer object.
 * NOTE(review): the struct's opening line is not visible in this view and
 * the original line numbering shows gaps (fields missing); confirm the
 * complete definition against the full file. */
/* Must be the first member: code below casts struct radeon_bo* to
 * struct bo_legacy* and back (see legacy_is_pending, bo_unref). */
53 struct radeon_bo base
;
54 driTextureObject tobj_base
;
/* Set to 1 once a DRI texture-heap object has been allocated for this bo
 * (see bo_vram_validate); cleared by bo_legacy_tobj_destroy. */
60 int got_dri_texture_obj
;
63 driTextureObject dri_texture_obj
;
/* Links in the manager's global bo list (boml->bos). */
65 struct bo_legacy
*next
, *prev
;
/* Links in the manager's pending-fence list (boml->pending_bos). */
66 struct bo_legacy
*pnext
, *pprev
;
/* Per-screen manager for legacy radeon buffer objects.
 * NOTE(review): this definition is incomplete in this view (original line
 * numbering shows gaps and the closing brace is missing) — verify the full
 * member list against the complete file. */
69 struct bo_manager_legacy
{
70 struct radeon_bo_manager base
;
/* free_handles is a grow-on-demand array of recycled bo handles;
 * nfree_handles is its capacity, cfree_handles the used count
 * (see legacy_new_handle / legacy_free_handle). */
72 unsigned nfree_handles
;
73 unsigned cfree_handles
;
/* Sentinel head of the pending-fence list (pnext/pprev links). */
76 struct bo_legacy pending_bos
;
78 uint32_t texture_offset
;
/* GART DMA accounting, updated in bo_dma_alloc / bo_dma_free. */
79 unsigned dma_alloc_size
;
80 uint32_t dma_buf_count
;
82 driTextureObject texture_swapped
;
83 driTexHeap
*texture_heap
;
84 struct radeon_screen
*screen
;
85 unsigned *free_handles
;
/* DRI texture-heap destroy callback: when the heap evicts the texture
 * object embedded in a bo_legacy, mark the bo as no longer resident.
 * NOTE(review): interior lines (braces) are missing from this view. */
88 static void bo_legacy_tobj_destroy(void *data
, driTextureObject
*t
)
90 struct bo_legacy
*bo_legacy
;
/* Recover the owning bo_legacy from the embedded driTextureObject.
 * NOTE(review): this pointer arithmetic assumes a specific field layout
 * (tobj_base directly after the radeon_bo base) — confirm against the
 * full struct definition. */
92 bo_legacy
= (struct bo_legacy
*)((char*)t
)-sizeof(struct radeon_bo
);
/* The bo lost its VRAM backing: force re-validation before next use. */
93 bo_legacy
->got_dri_texture_obj
= 0;
94 bo_legacy
->validated
= 0;
/* Trim trailing zero entries (already-invalidated handles) from the top of
 * the free-handle stack so cfree_handles again points at a live entry.
 * NOTE(review): surrounding braces are missing from this view. */
97 static void inline clean_handles(struct bo_manager_legacy
*bom
)
99 while (bom
->cfree_handles
> 0 &&
100 !bom
->free_handles
[bom
->cfree_handles
- 1])
101 bom
->cfree_handles
--;
/* Allocate a new bo handle: reuse one from the free-handle stack if
 * available, otherwise hand out the next sequential handle (bom->nhandle).
 * NOTE(review): several interior lines are missing from this view
 * (error return for handle exhaustion, the *handle store, return value) —
 * confirm behavior against the full file. */
104 static int legacy_new_handle(struct bo_manager_legacy
*bom
, uint32_t *handle
)
/* Handle space exhausted — presumably an error path; body not visible. */
109 if (bom
->nhandle
== 0xFFFFFFFF) {
/* Pop a recycled handle off the free stack. */
112 if (bom
->cfree_handles
> 0) {
113 tmp
= bom
->free_handles
[--bom
->cfree_handles
];
/* No recycled handles left: reset the stack and mint a fresh handle. */
116 bom
->cfree_handles
= 0;
117 tmp
= bom
->nhandle
++;
/* Return a bo handle to the manager for reuse.
 * If the handle is the highest one ever issued, shrink nhandle instead of
 * stacking it; otherwise push it on the free-handle stack, growing the
 * stack (by 0x100 entries) via realloc when full.
 * NOTE(review): interior lines are missing from this view (nhandle
 * decrement, clean_handles call, returns) — confirm against the full file. */
124 static int legacy_free_handle(struct bo_manager_legacy
*bom
, uint32_t handle
)
/* Freeing the top-most handle: scrub any stale copies of it that are
 * still sitting in the free stack, then (presumably) drop nhandle. */
131 if (handle
== (bom
->nhandle
- 1)) {
135 for (i
= bom
->cfree_handles
- 1; i
>= 0; i
--) {
136 if (bom
->free_handles
[i
] == (bom
->nhandle
- 1)) {
/* Zero the entry; clean_handles() later trims trailing zeros. */
138 bom
->free_handles
[i
] = 0;
/* Room left in the stack: just push the handle. */
144 if (bom
->cfree_handles
< bom
->nfree_handles
) {
145 bom
->free_handles
[bom
->cfree_handles
++] = handle
;
/* Stack full: grow capacity by 0x100 entries.
 * NOTE(review): element size is hard-coded as 4 (sizeof(uint32_t)). */
148 bom
->nfree_handles
+= 0x100;
149 handles
= (uint32_t*)realloc(bom
->free_handles
, bom
->nfree_handles
* 4);
/* realloc failure: roll back the capacity bump; the original array is
 * still valid (realloc did not free it). */
150 if (handles
== NULL
) {
151 bom
->nfree_handles
-= 0x100;
154 bom
->free_handles
= handles
;
155 bom
->free_handles
[bom
->cfree_handles
++] = handle
;
/* Refresh boml->current_age, the GPU's last-completed fence counter.
 * R300-class chips query it via the DRM_RADEON_GETPARAM ioctl
 * (RADEON_PARAM_LAST_CLEAR); older chips read it from the shared
 * scratch register area instead.
 * NOTE(review): interior lines (ioctl argument tail, braces, else) are
 * missing from this view. */
159 static void legacy_get_current_age(struct bo_manager_legacy
*boml
)
161 drm_radeon_getparam_t gp
;
164 if (IS_R300_CLASS(boml
->screen
)) {
165 gp
.param
= RADEON_PARAM_LAST_CLEAR
;
/* The kernel writes the age directly into boml->current_age. */
166 gp
.value
= (int *)&boml
->current_age
;
167 r
= drmCommandWriteRead(boml
->base
.fd
, DRM_RADEON_GETPARAM
,
/* Error path: log the ioctl failure (r is the drm return code). */
170 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
, r
);
/* Non-R300 path: age lives in scratch register 3 of the sarea. */
174 boml
->current_age
= boml
->screen
->scratch
[3];
/* Check whether the GPU is still using this bo.
 * If the hardware age has caught up with the bo's pending fence, unlink
 * the bo from the manager's pending list and drop the references that
 * were taken when it was queued.
 * NOTE(review): several interior lines (returns, braces) are missing from
 * this view — the exact return values cannot be confirmed here. */
177 static int legacy_is_pending(struct radeon_bo
*bo
)
179 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
180 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
/* Defensive clamp: a non-positive pending count means "not pending". */
182 if (bo_legacy
->is_pending
<= 0) {
183 bo_legacy
->is_pending
= 0;
/* GPU has passed this bo's fence — it is no longer in flight. */
186 if (boml
->current_age
>= bo_legacy
->pending
) {
/* Unlink from the pending list; fix the list tail pointer if this bo
 * was the last element. */
187 if (boml
->pending_bos
.pprev
== bo_legacy
) {
188 boml
->pending_bos
.pprev
= bo_legacy
->pprev
;
190 bo_legacy
->pprev
->pnext
= bo_legacy
->pnext
;
191 if (bo_legacy
->pnext
) {
192 bo_legacy
->pnext
->pprev
= bo_legacy
->pprev
;
/* Each pending mark held a reference; release them all now. */
194 assert(bo_legacy
->is_pending
<= bo
->cref
);
195 while (bo_legacy
->is_pending
--) {
196 bo
= radeon_bo_unref(bo
);
201 bo_legacy
->is_pending
= 0;
/* Busy-wait until the GPU is finished with this bo.
 * Returns immediately if the bo is not pending; otherwise polls the
 * hardware age until legacy_is_pending() reports the bo retired.
 * NOTE(review): braces/returns are missing from this view. */
208 static int legacy_wait_pending(struct radeon_bo
*bo
)
210 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
211 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
/* Fast path: nothing queued against this bo. */
213 if (!bo_legacy
->is_pending
) {
216 /* FIXME: lockup and userspace busy looping that's all the folks */
217 legacy_get_current_age(boml
);
/* Spin, re-reading the hardware age each iteration, until retired. */
218 while (legacy_is_pending(bo
)) {
220 legacy_get_current_age(boml
);
/* Walk the pending list once, retiring any bos whose fence the GPU has
 * already passed (legacy_is_pending unlinks them as a side effect).
 * When 'debug' is non-zero, each pending bo is logged to stderr.
 * NOTE(review): the loop construct and braces are missing from this
 * view — confirm the iteration structure against the full file. */
225 static void legacy_track_pending(struct bo_manager_legacy
*boml
, int debug
)
227 struct bo_legacy
*bo_legacy
;
228 struct bo_legacy
*next
;
230 legacy_get_current_age(boml
);
/* Start at the head of the pending list. */
231 bo_legacy
= boml
->pending_bos
.pnext
;
234 fprintf(stderr
,"pending %p %d %d %d\n", bo_legacy
, bo_legacy
->base
.size
,
235 boml
->current_age
, bo_legacy
->pending
);
/* Grab the successor first: legacy_is_pending() may unlink this node. */
236 next
= bo_legacy
->pnext
;
237 if (legacy_is_pending(&(bo_legacy
->base
))) {
/* Block until at least one pending bo retires (used to free up GART
 * space before retrying a DMA allocation — see bo_open).
 * NOTE(review): the empty-list check and return statements are missing
 * from this view; presumably returns -1 when nothing is pending. */
243 static int legacy_wait_any_pending(struct bo_manager_legacy
*boml
)
245 struct bo_legacy
*bo_legacy
;
246 struct bo_legacy
*next
;
248 legacy_get_current_age(boml
);
/* Head of the pending list; wait on its fence. */
249 bo_legacy
= boml
->pending_bos
.pnext
;
252 legacy_wait_pending(&bo_legacy
->base
);
/* Allocate and zero-initialize a bo_legacy record (no backing storage
 * yet) and link it at the head of the manager's bo list.
 * NOTE(review): the parameter list tail (size, alignment, domains, flags),
 * several braces, and the return statements are missing from this view. */
256 static struct bo_legacy
*bo_allocate(struct bo_manager_legacy
*boml
,
262 struct bo_legacy
*bo_legacy
;
/* Round the requested size up to a whole number of pages. */
266 pgsize
= getpagesize() - 1;
268 size
= (size
+ pgsize
) & ~pgsize
;
270 bo_legacy
= (struct bo_legacy
*)calloc(1, sizeof(struct bo_legacy
));
/* Allocation failure path (body not visible; presumably returns NULL). */
271 if (bo_legacy
== NULL
) {
/* Fill in the embedded radeon_bo; handle is assigned later by the
 * caller (bo_open) via legacy_new_handle. */
274 bo_legacy
->base
.bom
= (struct radeon_bo_manager
*)boml
;
275 bo_legacy
->base
.handle
= 0;
276 bo_legacy
->base
.size
= size
;
277 bo_legacy
->base
.alignment
= alignment
;
278 bo_legacy
->base
.domains
= domains
;
279 bo_legacy
->base
.flags
= flags
;
280 bo_legacy
->base
.ptr
= NULL
;
281 bo_legacy
->map_count
= 0;
282 bo_legacy
->next
= NULL
;
283 bo_legacy
->prev
= NULL
;
284 bo_legacy
->got_dri_texture_obj
= 0;
285 bo_legacy
->pnext
= NULL
;
286 bo_legacy
->pprev
= NULL
;
/* Push onto the head of the manager's doubly-linked bo list. */
287 bo_legacy
->next
= boml
->bos
.next
;
288 bo_legacy
->prev
= &boml
->bos
;
289 boml
->bos
.next
= bo_legacy
;
290 if (bo_legacy
->next
) {
291 bo_legacy
->next
->prev
= bo_legacy
;
/* Back a GTT-domain bo with GART memory via the legacy
 * DRM_RADEON_ALLOC ioctl, then derive its CPU mapping and card offset
 * from the screen's GART texture aperture.
 * NOTE(review): the ioctl call's argument tail, the failure-return path,
 * and several braces are missing from this view. */
296 static int bo_dma_alloc(struct radeon_bo
*bo
)
298 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
299 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
300 drm_radeon_mem_alloc_t alloc
;
305 /* align size on 4Kb */
306 size
= (((4 * 1024) - 1) + bo
->size
) & ~((4 * 1024) - 1);
307 alloc
.region
= RADEON_MEM_REGION_GART
;
308 alloc
.alignment
= bo_legacy
->base
.alignment
;
/* The kernel returns the offset within the GART region here. */
310 alloc
.region_offset
= &base_offset
;
311 r
= drmCommandWriteRead(bo
->bom
->fd
,
316 /* ptr is set to NULL if dma allocation failed */
317 bo_legacy
->ptr
= NULL
;
/* Success: CPU pointer = GART aperture map + region offset;
 * GPU offset = GART base offset + region offset. */
320 bo_legacy
->ptr
= boml
->screen
->gartTextures
.map
+ base_offset
;
321 bo_legacy
->offset
= boml
->screen
->gart_texture_offset
+ base_offset
;
/* Bookkeeping for GART usage. */
323 boml
->dma_alloc_size
+= size
;
324 boml
->dma_buf_count
++;
/* Release a bo's GART backing via the legacy DRM_RADEON_FREE ioctl and
 * update the manager's DMA accounting.
 * NOTE(review): the ioctl argument tail, returns, and some braces are
 * missing from this view. */
328 static int bo_dma_free(struct radeon_bo
*bo
)
330 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
331 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
332 drm_radeon_mem_free_t memfree
;
/* Nothing to free if the original DMA allocation never succeeded. */
335 if (bo_legacy
->ptr
== NULL
) {
336 /* ptr is set to NULL if dma allocation failed */
339 legacy_get_current_age(boml
);
340 memfree
.region
= RADEON_MEM_REGION_GART
;
/* Convert the GPU offset back into an offset within the GART region
 * (inverse of the computation in bo_dma_alloc). */
341 memfree
.region_offset
= bo_legacy
->offset
;
342 memfree
.region_offset
-= boml
->screen
->gart_texture_offset
;
343 r
= drmCommandWrite(boml
->base
.fd
,
/* Error path: log the failure; r is the drm return code. */
348 fprintf(stderr
, "Failed to free bo[%p] at %08x\n",
349 &bo_legacy
->base
, memfree
.region_offset
);
350 fprintf(stderr
, "ret = %s\n", strerror(-r
));
353 boml
->dma_alloc_size
-= bo_legacy
->base
.size
;
354 boml
->dma_buf_count
--;
/* Destroy a bo_legacy: unlink it from the manager's bo list, recycle its
 * handle, release its backing store (GART DMA for GTT bos, malloc'd
 * memory otherwise), then scrub and (presumably) free the record.
 * NOTE(review): braces, the final free(), and returns are missing from
 * this view — the else-branch structure around line 374/377 cannot be
 * confirmed here. */
358 static void bo_free(struct bo_legacy
*bo_legacy
)
360 struct bo_manager_legacy
*boml
;
362 if (bo_legacy
== NULL
) {
365 boml
= (struct bo_manager_legacy
*)bo_legacy
->base
.bom
;
/* Unlink from the doubly-linked bo list. */
366 bo_legacy
->prev
->next
= bo_legacy
->next
;
367 if (bo_legacy
->next
) {
368 bo_legacy
->next
->prev
= bo_legacy
->prev
;
/* Static bos (front/back/depth buffers) own no handle or storage. */
370 if (!bo_legacy
->static_bo
) {
371 legacy_free_handle(boml
, bo_legacy
->base
.handle
);
372 if (bo_legacy
->base
.domains
& RADEON_GEM_DOMAIN_GTT
) {
374 bo_dma_free(&bo_legacy
->base
);
376 /* free backing store */
377 free(bo_legacy
->ptr
);
/* Poison the record to catch use-after-free. */
380 memset(bo_legacy
, 0 , sizeof(struct bo_legacy
));
/* radeon_bo_funcs.bo_open: look up an existing bo by handle, or create a
 * new one. New GTT bos get GART DMA backing (retrying after waiting for
 * pending bos to retire when the GART is full); other bos get a plain
 * malloc'd backing store.
 * NOTE(review): the parameter list tail (handle, size, alignment, domains,
 * flags), the lookup loop construct, retry loop, and several error paths
 * are missing from this view. */
384 static struct radeon_bo
*bo_open(struct radeon_bo_manager
*bom
,
391 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bom
;
392 struct bo_legacy
*bo_legacy
;
/* Lookup path: walk the bo list for a matching non-zero handle. */
396 bo_legacy
= boml
->bos
.next
;
398 if (bo_legacy
->base
.handle
== handle
) {
399 radeon_bo_ref(&(bo_legacy
->base
));
400 return (struct radeon_bo
*)bo_legacy
;
402 bo_legacy
= bo_legacy
->next
;
/* Creation path: allocate the record and assign a fresh handle. */
407 bo_legacy
= bo_allocate(boml
, size
, alignment
, domains
, flags
);
408 bo_legacy
->static_bo
= 0;
409 r
= legacy_new_handle(boml
, &bo_legacy
->base
.handle
);
414 if (bo_legacy
->base
.domains
& RADEON_GEM_DOMAIN_GTT
) {
/* Retire already-finished bos first to free GART space. */
416 legacy_track_pending(boml
, 0);
419 r
= bo_dma_alloc(&(bo_legacy
->base
));
/* On DMA failure, wait for something pending to retire and retry;
 * -1 presumably means nothing left to wait for (fatal). */
421 if (legacy_wait_any_pending(boml
) == -1) {
/* Non-GTT bo: plain system-memory backing store. */
429 bo_legacy
->ptr
= malloc(bo_legacy
->base
.size
);
430 if (bo_legacy
->ptr
== NULL
) {
435 radeon_bo_ref(&(bo_legacy
->base
));
436 return (struct radeon_bo
*)bo_legacy
;
/* radeon_bo_funcs.bo_ref hook. Body is not visible in this view —
 * presumably a no-op since the generic layer maintains the refcount;
 * confirm against the full file. */
439 static void bo_ref(struct radeon_bo
*bo
)
/* radeon_bo_funcs.bo_unref hook: called when the last reference drops.
 * Unlinks the bo from the manager's list and (per the visible tail)
 * frees it only when it is not still pending on the GPU.
 * NOTE(review): the refcount check, the bo_free call, and the return
 * statements are missing from this view. */
443 static struct radeon_bo
*bo_unref(struct radeon_bo
*bo
)
445 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
/* Unlink from the manager's bo list. */
448 bo_legacy
->prev
->next
= bo_legacy
->next
;
449 if (bo_legacy
->next
) {
450 bo_legacy
->next
->prev
= bo_legacy
->prev
;
/* Only safe to free when the GPU is no longer using the bo. */
452 if (!bo_legacy
->is_pending
) {
/* radeon_bo_funcs.bo_map: wait for the GPU to finish with the bo, mark it
 * dirty/un-validated, and expose its backing-store pointer to the caller.
 * For VRAM bos it also touches the framebuffer to flush the on-card read
 * cache (see the original author's comment below).
 * NOTE(review): the framebuffer-read statement and closing braces are
 * missing from this view. */
460 static int bo_map(struct radeon_bo
*bo
, int write
)
462 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
463 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
/* CPU access invalidates any previous validation and marks the contents
 * dirty so the next validate re-uploads them. */
465 legacy_wait_pending(bo
);
466 bo_legacy
->validated
= 0;
467 bo_legacy
->dirty
= 1;
468 bo_legacy
->map_count
++;
469 bo
->ptr
= bo_legacy
->ptr
;
470 /* Read the first pixel in the frame buffer. This should
471 * be a noop, right? In fact without this conform fails as reading
472 * from the framebuffer sometimes produces old results -- the
473 * on-card read cache gets mixed up and doesn't notice that the
474 * framebuffer has been updated.
476 * Note that we should probably be reading some otherwise unused
477 * region of VRAM, otherwise we might get incorrect results when
478 * reading pixels from the top left of the screen.
480 * I found this problem on an R420 with glean's texCube test.
481 * Note that the R200 span code also *writes* the first pixel in the
482 * framebuffer, but I've found this to be unnecessary.
483 * -- Nicolai Hähnle, June 2008
485 if (!(bo
->domains
& RADEON_GEM_DOMAIN_GTT
)) {
/* volatile: the read must not be optimized away. */
487 volatile int *buf
= (int*)boml
->screen
->driScreen
->pFB
;
/* radeon_bo_funcs.bo_unmap: drop one map reference; only the final unmap
 * (map_count reaching 0) does any real work.
 * NOTE(review): the body after the early-out and the returns are missing
 * from this view. */
493 static int bo_unmap(struct radeon_bo
*bo
)
495 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
/* Still mapped by someone else — nothing to do yet. */
497 if (--bo_legacy
->map_count
> 0) {
/* Vtable wiring the static bo_* hooks above into the generic
 * radeon_bo_manager interface (installed in radeon_bo_manager_legacy_ctor).
 * NOTE(review): the initializer entries are missing from this view. */
504 static struct radeon_bo_funcs bo_legacy_funcs
= {
/* Make a VRAM bo resident: allocate space in the DRI texture heap on
 * first use, then (if dirty) upload the system-memory copy to VRAM with
 * DRM_RADEON_TEXTURE blits, retrying on -EAGAIN.
 * NOTE(review): the parameter tail (soffset/eoffset), the do-loop opener,
 * ioctl argument tail, and several braces/returns are missing from this
 * view; the 'segfault' local suggests a deliberate crash-on-failure path
 * — confirm against the full file. */
512 static int bo_vram_validate(struct radeon_bo
*bo
,
516 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
517 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
/* First validation: carve out space in the DRI texture heap. */
520 if (!bo_legacy
->got_dri_texture_obj
) {
521 make_empty_list(&bo_legacy
->dri_texture_obj
);
522 bo_legacy
->dri_texture_obj
.totalSize
= bo
->size
;
523 r
= driAllocateTexture(&boml
->texture_heap
, 1,
524 &bo_legacy
->dri_texture_obj
);
/* Heap allocation failed (error path; body not fully visible). */
526 uint8_t *segfault
=NULL
;
527 fprintf(stderr
, "Ouch! vram_validate failed %d\n", r
);
/* VRAM offset = heap base + offset of the block we were given. */
531 bo_legacy
->offset
= boml
->texture_offset
+
532 bo_legacy
->dri_texture_obj
.memBlock
->ofs
;
533 bo_legacy
->got_dri_texture_obj
= 1;
/* Fresh VRAM block: contents must be uploaded. */
534 bo_legacy
->dirty
= 1;
536 if (bo_legacy
->dirty
) {
537 /* Copy to VRAM using a blit.
538 * All memory is 4K aligned. We're using 1024 pixels wide blits.
540 drm_radeon_texture_t tex
;
541 drm_radeon_tex_image_t tmp
;
544 tex
.offset
= bo_legacy
->offset
;
/* Blit destination must be 1024-byte aligned. */
546 assert(!(tex
.offset
& 1023));
/* Small bos: a single row of 32-bit pixels; larger bos: rows of
 * 1024 pixels (4096 bytes), height rounded up. */
550 if (bo
->size
< 4096) {
551 tmp
.width
= (bo
->size
+ 3) / 4;
555 tmp
.height
= (bo
->size
+ 4095) / 4096;
557 tmp
.data
= bo_legacy
->ptr
;
558 tex
.format
= RADEON_TXFORMAT_ARGB8888
;
559 tex
.width
= tmp
.width
;
560 tex
.height
= tmp
.height
;
561 tex
.pitch
= MAX2(tmp
.width
/ 16, 1);
563 ret
= drmCommandWriteRead(bo
->bom
->fd
,
566 sizeof(drm_radeon_texture_t
));
/* Kernel asked us to retry the upload. */
568 if (RADEON_DEBUG
& DEBUG_IOCTL
)
569 fprintf(stderr
, "DRM_RADEON_TEXTURE: again!\n");
572 } while (ret
== -EAGAIN
);
573 bo_legacy
->dirty
= 0;
/* Public entry: ensure a bo is GPU-resident and report its [start, end)
 * card-address range through soffset/eoffset. Mapped bos cannot be
 * validated; static and already-validated bos short-circuit. VRAM bos
 * are made resident via bo_vram_validate.
 * NOTE(review): the parameter tail (soffset/eoffset), several returns,
 * and braces are missing from this view. */
578 int radeon_bo_legacy_validate(struct radeon_bo
*bo
,
582 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
/* A CPU-mapped bo must be unmapped before validation. */
585 if (bo_legacy
->map_count
) {
586 fprintf(stderr
, "bo(%p, %d) is mapped (%d) can't valide it.\n",
587 bo
, bo
->size
, bo_legacy
->map_count
);
/* Fast path: offset already final (static or previously validated). */
590 if (bo_legacy
->static_bo
|| bo_legacy
->validated
) {
591 *soffset
= bo_legacy
->offset
;
592 *eoffset
= bo_legacy
->offset
+ bo
->size
;
/* Non-GTT (VRAM) bos need residency + upload. */
595 if (!(bo
->domains
& RADEON_GEM_DOMAIN_GTT
)) {
596 r
= bo_vram_validate(bo
, soffset
, eoffset
);
601 *soffset
= bo_legacy
->offset
;
602 *eoffset
= bo_legacy
->offset
+ bo
->size
;
603 bo_legacy
->validated
= 1;
/* Public entry: mark a bo as in-flight behind fence value 'pending'.
 * Each call bumps is_pending (one reference per submission); the bo is
 * appended to the manager's pending list only on the first call.
 * NOTE(review): the radeon_bo_ref call and the early-return body for the
 * already-listed case are missing from this view. */
607 void radeon_bo_legacy_pending(struct radeon_bo
*bo
, uint32_t pending
)
609 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
610 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
612 bo_legacy
->pending
= pending
;
613 bo_legacy
->is_pending
++;
614 /* add to pending list */
/* Already linked from a previous submission — nothing more to do. */
616 if (bo_legacy
->is_pending
> 1) {
/* Append at the tail (pending_bos.pprev tracks the tail). */
619 bo_legacy
->pprev
= boml
->pending_bos
.pprev
;
620 bo_legacy
->pnext
= NULL
;
621 bo_legacy
->pprev
->pnext
= bo_legacy
;
622 boml
->pending_bos
.pprev
= bo_legacy
;
/* Destroy the legacy bo manager: free every bo still on its list, then
 * release the free-handle array and (presumably) the manager itself.
 * NOTE(review): the NULL check, the loop construct, bo_free call, texture
 * heap teardown, and final free are missing from this view. */
626 void radeon_bo_manager_legacy_dtor(struct radeon_bo_manager
*bom
)
628 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bom
;
629 struct bo_legacy
*bo_legacy
;
634 bo_legacy
= boml
->bos
.next
;
636 struct bo_legacy
*next
;
/* Save the successor before the current bo is freed. */
638 next
= bo_legacy
->next
;
642 free(boml
->free_handles
);
/* Create a "static" bo that wraps a fixed region of the framebuffer
 * (front/back/depth buffer). Its handle is its card address, it points
 * straight into the mapped framebuffer, and it is never freed through
 * the normal handle/DMA paths (static_bo — set by a line not visible
 * here; confirm against the full file).
 * NOTE(review): NULL checks, the static_bo assignment, and the return
 * statement are missing from this view. */
646 static struct bo_legacy
*radeon_legacy_bo_alloc_static(struct bo_manager_legacy
*bom
,
647 int size
, uint32_t offset
)
649 struct bo_legacy
*bo
;
651 bo
= bo_allocate(bom
, size
, 0, RADEON_GEM_DOMAIN_VRAM
, 0);
/* Card address = framebuffer base + caller-supplied offset. */
655 bo
->offset
= offset
+ bom
->fb_location
;
/* Use the card address as the (unique) handle. */
656 bo
->base
.handle
= bo
->offset
;
657 bo
->ptr
= bom
->screen
->driScreen
->pFB
+ offset
;
/* Keep nhandle above every static handle so legacy_new_handle never
 * collides with one. */
658 if (bo
->base
.handle
> bom
->nhandle
) {
659 bom
->nhandle
= bo
->base
.handle
+ 1;
661 radeon_bo_ref(&(bo
->base
));
/* Construct the legacy bo manager for a screen: set up the DRI texture
 * heap, the bo and pending lists, the free-handle array, and static bos
 * wrapping the front, back and depth buffers (tiling flags copied from
 * the sarea). Returns the manager as the generic radeon_bo_manager.
 * NOTE(review): many interior lines are missing from this view (heap
 * parameters, screen/nhandle init, 'size' computation, NULL checks,
 * unref calls after the static allocs) — confirm against the full file. */
665 struct radeon_bo_manager
*radeon_bo_manager_legacy_ctor(struct radeon_screen
*scrn
)
667 struct bo_manager_legacy
*bom
;
668 struct bo_legacy
*bo
;
671 bom
= (struct bo_manager_legacy
*)
672 calloc(1, sizeof(struct bo_manager_legacy
));
/* DRI texture heap: evictions call back into bo_legacy_tobj_destroy. */
677 bom
->texture_heap
= driCreateTextureHeap(0,
681 RADEON_NR_TEX_REGIONS
,
682 (drmTextureRegionPtr
)scrn
->sarea
->tex_list
[0],
683 &scrn
->sarea
->tex_age
[0],
684 &bom
->texture_swapped
,
685 sizeof(struct bo_legacy
),
686 &bo_legacy_tobj_destroy
);
687 bom
->texture_offset
= scrn
->texOffset
[0];
/* Wire in the hook table and DRM fd. */
689 bom
->base
.funcs
= &bo_legacy_funcs
;
690 bom
->base
.fd
= scrn
->driScreen
->fd
;
/* Empty bo list; pending list starts self-referential (tail == head). */
691 bom
->bos
.next
= NULL
;
692 bom
->bos
.prev
= NULL
;
693 bom
->pending_bos
.pprev
= &bom
->pending_bos
;
694 bom
->pending_bos
.pnext
= NULL
;
696 bom
->fb_location
= scrn
->fbLocation
;
/* Initial free-handle stack: capacity 0x400, element size 4 bytes. */
698 bom
->cfree_handles
= 0;
699 bom
->nfree_handles
= 0x400;
700 bom
->free_handles
= (uint32_t*)malloc(bom
->nfree_handles
* 4);
701 if (bom
->free_handles
== NULL
) {
702 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager
*)bom
);
706 /* biggest framebuffer size */
/* Static bo for the front buffer; failure tears the manager down. */
710 bo
= radeon_legacy_bo_alloc_static(bom
, size
, bom
->screen
->frontOffset
);
712 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager
*)bom
);
715 if (scrn
->sarea
->tiling_enabled
) {
716 bo
->base
.flags
= RADEON_BO_FLAGS_MACRO_TILE
;
/* Static bo for the back buffer. */
720 bo
= radeon_legacy_bo_alloc_static(bom
, size
, bom
->screen
->backOffset
);
722 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager
*)bom
);
725 if (scrn
->sarea
->tiling_enabled
) {
726 bo
->base
.flags
= RADEON_BO_FLAGS_MACRO_TILE
;
/* Static bo for the depth buffer: macro AND micro tiled when enabled. */
730 bo
= radeon_legacy_bo_alloc_static(bom
, size
, bom
->screen
->depthOffset
);
732 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager
*)bom
);
736 if (scrn
->sarea
->tiling_enabled
) {
737 bo
->base
.flags
|= RADEON_BO_FLAGS_MACRO_TILE
;
738 bo
->base
.flags
|= RADEON_BO_FLAGS_MICRO_TILE
;
740 return (struct radeon_bo_manager
*)bom
;
/* Public entry: advance the DRI texture heap's age tracking (LRU
 * bookkeeping shared with other clients through the sarea).
 * NOTE(review): function braces are missing from this view. */
743 void radeon_bo_legacy_texture_age(struct radeon_bo_manager
*bom
)
745 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bom
;
746 DRI_AGE_TEXTURES(boml
->texture_heap
);
/* Public entry: report how much command-stream space relocations for
 * this bo will need. Static and GTT bos take the first branch; the
 * returned values are on lines not visible in this view — confirm
 * against the full file. */
749 unsigned radeon_bo_legacy_relocs_size(struct radeon_bo
*bo
)
751 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
753 if (bo_legacy
->static_bo
|| (bo
->domains
& RADEON_GEM_DOMAIN_GTT
)) {
/* Public entry: true when the bo wraps a fixed framebuffer region
 * (created by radeon_legacy_bo_alloc_static) rather than dynamically
 * managed memory.
 * NOTE(review): braces are missing from this view. */
759 int radeon_legacy_bo_is_static(struct radeon_bo
*bo
)
761 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
762 return bo_legacy
->static_bo
;